after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _surround_with_execution_directives(func: Callable, directives: list) -> Callable:
for directive in reversed(directives):
func = partial(
directive["callables"].on_field_execution, directive["args"], func
)
return func
|
def _surround_with_execution_directives(func: Callable, directives: list) -> Callable:
for directive in reversed(directives):
func = partial(directive["callables"].on_execution, directive["args"], func)
return func
|
https://github.com/tartiflette/tartiflette/issues/133
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/src/app/***/__main__.py", line 10, in <module>
sys.exit(run())
File "/usr/src/app/***/app.py", line 425, in run
"utils/sdl-generator/schema.sdl",
File "/usr/src/app/***/engines/tartiflette.py", line 70, in __init__
error_coercer=_error_coercer,
File "/usr/local/lib/python3.7/site-packages/tartiflette/engine.py", line 26, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 76, in transform_ast_to_schema
transformer.transform(raw_ast)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 93, in transform
tree = t.transform(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 107, in transform
subtree.children = list(self._transform_children(subtree.children))
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 44, in _transform_children
yield self._transform_tree(c) if isinstance(c, Tree) else c
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 103, in _transform_tree
return self._call_userfunc(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 37, in _call_userfunc
return f(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 232, in f
return _f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/transformers/schema_transformer.py", line 389, in input_value_definition
child, child.__class__.__name__
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode: Unexpected AST node `SchemaNode(type='directives', value={'maxLength': {'limit': 512}})`, type `SchemaNode`
|
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode
|
async def __call__(
self,
parent_result: Optional[Any],
args: Dict[str, Any],
ctx: Optional[Dict[str, Any]],
info: "Info",
) -> (Any, Any):
try:
result = await self._func(
parent_result,
await coerce_arguments(self._schema_field.arguments, args, ctx, info),
ctx,
info,
)
if info.execution_ctx.is_introspection:
result = await self._introspection(result, ctx, info)
return result, self._coercer(result, info)
except Exception as e: # pylint: disable=broad-except
return e, None
|
async def __call__(
self,
parent_result: Optional[Any],
args: Dict[str, Any],
ctx: Optional[Dict[str, Any]],
info: "Info",
) -> (Any, Any):
try:
default_args = self._schema_field.get_arguments_default_values()
default_args.update(
{argument.name: argument.value for argument in args.values()}
)
result = await self._func(parent_result, default_args, ctx, info)
if info.execution_ctx.is_introspection:
result = await self._introspection(result, ctx, info)
return result, self._coercer(result, info)
except Exception as e: # pylint: disable=broad-except
return e, None
|
https://github.com/tartiflette/tartiflette/issues/133
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/src/app/***/__main__.py", line 10, in <module>
sys.exit(run())
File "/usr/src/app/***/app.py", line 425, in run
"utils/sdl-generator/schema.sdl",
File "/usr/src/app/***/engines/tartiflette.py", line 70, in __init__
error_coercer=_error_coercer,
File "/usr/local/lib/python3.7/site-packages/tartiflette/engine.py", line 26, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 76, in transform_ast_to_schema
transformer.transform(raw_ast)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 93, in transform
tree = t.transform(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 107, in transform
subtree.children = list(self._transform_children(subtree.children))
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 44, in _transform_children
yield self._transform_tree(c) if isinstance(c, Tree) else c
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 103, in _transform_tree
return self._call_userfunc(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 37, in _call_userfunc
return f(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 232, in f
return _f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/transformers/schema_transformer.py", line 389, in input_value_definition
child, child.__class__.__name__
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode: Unexpected AST node `SchemaNode(type='directives', value={'maxLength': {'limit': 512}})`, type `SchemaNode`
|
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode
|
def input_value_definition(self, tree: Tree) -> GraphQLArgument:
# TODO: Add directives
description = None
name = None
gql_type = None
default_value = None
directives = None
for child in tree.children:
if child.type == "description":
description = child.value
elif child.type == "IDENT":
name = child.value
elif child.type == "type":
gql_type = child.value
elif child.type == "value":
default_value = child.value
elif child.type == "discard":
pass
elif child.type == "directives":
directives = child.value
else:
raise UnexpectedASTNode(
"Unexpected AST node `{}`, type `{}`".format(
child, child.__class__.__name__
)
)
return GraphQLArgument(
name=name,
gql_type=gql_type,
default_value=default_value,
description=description,
directives=directives,
)
|
def input_value_definition(self, tree: Tree) -> GraphQLArgument:
# TODO: Add directives
description = None
name = None
gql_type = None
default_value = None
for child in tree.children:
if child.type == "description":
description = child.value
elif child.type == "IDENT":
name = child.value
elif child.type == "type":
gql_type = child.value
elif child.type == "value":
default_value = child.value
elif child.type == "discard":
pass
else:
raise UnexpectedASTNode(
"Unexpected AST node `{}`, type `{}`".format(
child, child.__class__.__name__
)
)
return GraphQLArgument(
name=name,
gql_type=gql_type,
default_value=default_value,
description=description,
)
|
https://github.com/tartiflette/tartiflette/issues/133
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/src/app/***/__main__.py", line 10, in <module>
sys.exit(run())
File "/usr/src/app/***/app.py", line 425, in run
"utils/sdl-generator/schema.sdl",
File "/usr/src/app/***/engines/tartiflette.py", line 70, in __init__
error_coercer=_error_coercer,
File "/usr/local/lib/python3.7/site-packages/tartiflette/engine.py", line 26, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 76, in transform_ast_to_schema
transformer.transform(raw_ast)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 93, in transform
tree = t.transform(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 107, in transform
subtree.children = list(self._transform_children(subtree.children))
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 44, in _transform_children
yield self._transform_tree(c) if isinstance(c, Tree) else c
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 103, in _transform_tree
return self._call_userfunc(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 37, in _call_userfunc
return f(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 232, in f
return _f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/transformers/schema_transformer.py", line 389, in input_value_definition
child, child.__class__.__name__
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode: Unexpected AST node `SchemaNode(type='directives', value={'maxLength': {'limit': 512}})`, type `SchemaNode`
|
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode
|
def __init__(
self,
name: str,
gql_type: Union[str, GraphQLType],
default_value: Optional[Any] = None,
description: Optional[str] = None,
directives: Optional[Dict[str, Optional[dict]]] = None,
schema=None,
) -> None:
# TODO: Narrow the default_value type ?
self.name = name
self.gql_type = gql_type
self.default_value = default_value
self.description = description
self._type = {}
self._schema = schema
self._directives = directives
# Introspection Attribute
self._directives_implementations = None
|
def __init__(
self,
name: str,
gql_type: Union[str, GraphQLType],
default_value: Optional[Any] = None,
description: Optional[str] = None,
schema=None,
) -> None:
# TODO: Narrow the default_value type ?
self.name = name
self.gql_type = gql_type
self.default_value = default_value
self.description = description
self._type = {}
self._schema = schema
|
https://github.com/tartiflette/tartiflette/issues/133
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/src/app/***/__main__.py", line 10, in <module>
sys.exit(run())
File "/usr/src/app/***/app.py", line 425, in run
"utils/sdl-generator/schema.sdl",
File "/usr/src/app/***/engines/tartiflette.py", line 70, in __init__
error_coercer=_error_coercer,
File "/usr/local/lib/python3.7/site-packages/tartiflette/engine.py", line 26, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 76, in transform_ast_to_schema
transformer.transform(raw_ast)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 93, in transform
tree = t.transform(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 107, in transform
subtree.children = list(self._transform_children(subtree.children))
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 44, in _transform_children
yield self._transform_tree(c) if isinstance(c, Tree) else c
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 103, in _transform_tree
return self._call_userfunc(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 37, in _call_userfunc
return f(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 232, in f
return _f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/transformers/schema_transformer.py", line 389, in input_value_definition
child, child.__class__.__name__
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode: Unexpected AST node `SchemaNode(type='directives', value={'maxLength': {'limit': 512}})`, type `SchemaNode`
|
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode
|
def __repr__(self) -> str:
return (
"{}(name={!r}, gql_type={!r}, "
"default_value={!r}, description={!r}, directives={!r})".format(
self.__class__.__name__,
self.name,
self.gql_type,
self.default_value,
self.description,
self.directives,
)
)
|
def __repr__(self) -> str:
return "{}(name={!r}, gql_type={!r}, default_value={!r}, description={!r})".format(
self.__class__.__name__,
self.name,
self.gql_type,
self.default_value,
self.description,
)
|
https://github.com/tartiflette/tartiflette/issues/133
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/src/app/***/__main__.py", line 10, in <module>
sys.exit(run())
File "/usr/src/app/***/app.py", line 425, in run
"utils/sdl-generator/schema.sdl",
File "/usr/src/app/***/engines/tartiflette.py", line 70, in __init__
error_coercer=_error_coercer,
File "/usr/local/lib/python3.7/site-packages/tartiflette/engine.py", line 26, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 76, in transform_ast_to_schema
transformer.transform(raw_ast)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 93, in transform
tree = t.transform(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 107, in transform
subtree.children = list(self._transform_children(subtree.children))
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 44, in _transform_children
yield self._transform_tree(c) if isinstance(c, Tree) else c
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 103, in _transform_tree
return self._call_userfunc(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 37, in _call_userfunc
return f(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 232, in f
return _f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/transformers/schema_transformer.py", line 389, in input_value_definition
child, child.__class__.__name__
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode: Unexpected AST node `SchemaNode(type='directives', value={'maxLength': {'limit': 512}})`, type `SchemaNode`
|
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode
|
def __eq__(self, other: Any) -> bool:
return self is other or (
type(self) is type(other)
and self.name == other.name
and self.gql_type == other.gql_type
and self.default_value == other.default_value
and self.directives == other.directives
)
|
def __eq__(self, other: Any) -> bool:
return self is other or (
type(self) is type(other)
and self.name == other.name
and self.gql_type == other.gql_type
and self.default_value == other.default_value
)
|
https://github.com/tartiflette/tartiflette/issues/133
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/src/app/***/__main__.py", line 10, in <module>
sys.exit(run())
File "/usr/src/app/***/app.py", line 425, in run
"utils/sdl-generator/schema.sdl",
File "/usr/src/app/***/engines/tartiflette.py", line 70, in __init__
error_coercer=_error_coercer,
File "/usr/local/lib/python3.7/site-packages/tartiflette/engine.py", line 26, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 76, in transform_ast_to_schema
transformer.transform(raw_ast)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 93, in transform
tree = t.transform(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 107, in transform
subtree.children = list(self._transform_children(subtree.children))
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 44, in _transform_children
yield self._transform_tree(c) if isinstance(c, Tree) else c
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 103, in _transform_tree
return self._call_userfunc(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 37, in _call_userfunc
return f(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 232, in f
return _f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/transformers/schema_transformer.py", line 389, in input_value_definition
child, child.__class__.__name__
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode: Unexpected AST node `SchemaNode(type='directives', value={'maxLength': {'limit': 512}})`, type `SchemaNode`
|
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode
|
def bake(self, schema: "GraphQLSchema") -> None:
self._schema = schema
self._directives_implementations = get_directive_implem_list(
self._directives, self._schema
)
if isinstance(self.gql_type, GraphQLType):
self._type = self.gql_type
else:
self._type["name"] = self.gql_type
self._type["kind"] = self._schema.find_type(self.gql_type).kind
|
def bake(self, schema: "GraphQLSchema") -> None:
self._schema = schema
if isinstance(self.gql_type, GraphQLType):
self._type = self.gql_type
else:
self._type["name"] = self.gql_type
self._type["kind"] = self._schema.find_type(self.gql_type).kind
|
https://github.com/tartiflette/tartiflette/issues/133
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/src/app/***/__main__.py", line 10, in <module>
sys.exit(run())
File "/usr/src/app/***/app.py", line 425, in run
"utils/sdl-generator/schema.sdl",
File "/usr/src/app/***/engines/tartiflette.py", line 70, in __init__
error_coercer=_error_coercer,
File "/usr/local/lib/python3.7/site-packages/tartiflette/engine.py", line 26, in __init__
schema_name, custom_default_resolver, exclude_builtins_scalars
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 41, in bake
schema = SchemaBakery._preheat(schema_name, exclude_builtins_scalars)
File "/usr/local/lib/python3.7/site-packages/tartiflette/schema/bakery.py", line 21, in _preheat
build_graphql_schema_from_sdl(sdl, schema=schema)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 30, in build_graphql_schema_from_sdl
sdl, parse_graphql_sdl_to_ast(sdl), schema=schema
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/builder.py", line 76, in transform_ast_to_schema
transformer.transform(raw_ast)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 93, in transform
tree = t.transform(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 107, in transform
subtree.children = list(self._transform_children(subtree.children))
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 44, in _transform_children
yield self._transform_tree(c) if isinstance(c, Tree) else c
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 103, in _transform_tree
return self._call_userfunc(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 37, in _call_userfunc
return f(tree)
File "/usr/local/lib/python3.7/site-packages/lark/visitors.py", line 232, in f
return _f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/tartiflette/sdl/transformers/schema_transformer.py", line 389, in input_value_definition
child, child.__class__.__name__
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode: Unexpected AST node `SchemaNode(type='directives', value={'maxLength': {'limit': 512}})`, type `SchemaNode`
|
tartiflette.types.exceptions.tartiflette.UnexpectedASTNode
|
def is_forked(self, owner, repo): # /repos/:owner/:repo parent
logging.info("Querying parent info to verify if the repo is forked\n")
url = f"https://api.github.com/repos/{owner}/{repo}"
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if "fork" in data:
if "parent" in data:
return data["parent"]["full_name"]
return "Parent not available"
return False
|
def is_forked(self, owner, repo): # /repos/:owner/:repo parent
logging.info("Querying parent info to verify if the repo is forked\n")
url = f"https://api.github.com/repos/{owner}/{repo}"
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(self, url, r)
if "fork" in data:
if "parent" in data:
return data["parent"]["full_name"]
return "Parent not available"
return False
|
https://github.com/chaoss/augur/issues/737
|
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
model_method(message, repo_id)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
forked = self.is_forked(owner, repo)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
|
TypeError
|
def is_archived(self, owner, repo):
logging.info("Querying committers count\n")
url = f"https://api.github.com/repos/{owner}/{repo}"
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if "archived" in data:
if data["archived"]:
if "updated_at" in data:
return data["updated_at"]
return "Date not available"
return False
return False
|
def is_archived(self, owner, repo):
logging.info("Querying committers count\n")
url = f"https://api.github.com/repos/{owner}/{repo}"
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(self, url, r)
if "archived" in data:
if data["archived"]:
if "updated_at" in data:
return data["updated_at"]
return "Date not available"
return False
return False
|
https://github.com/chaoss/augur/issues/737
|
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
model_method(message, repo_id)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
forked = self.is_forked(owner, repo)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
|
TypeError
|
def __init__(self, config={}):
worker_type = "repo_info_worker"
# Define what this worker can be given and know how to interpret
given = [["github_url"]]
models = ["repo_info"]
# Define the tables needed to insert, update, or delete on
data_tables = ["repo_info", "repo"]
operations_tables = ["worker_history", "worker_job"]
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Define data collection info
self.tool_source = "Repo Info Worker"
self.tool_version = "0.0.1"
self.data_source = "GitHub API"
|
def __init__(self, config={}):
worker_type = "repo_info_worker"
# Define what this worker can be given and know how to interpret
given = [["github_url"]]
models = ["repo_info"]
# Define the tables needed to insert, update, or delete on
data_tables = ["repo_info"]
operations_tables = ["worker_history", "worker_job"]
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Define data collection info
self.tool_source = "Repo Info Worker"
self.tool_version = "0.0.1"
self.data_source = "GitHub API"
|
https://github.com/chaoss/augur/issues/737
|
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
model_method(message, repo_id)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
forked = self.is_forked(owner, repo)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
|
TypeError
|
def repo_info_model(self, task, repo_id):
github_url = task["given"]["github_url"]
self.logger.info(
"Beginning filling the repo_info model for repo: " + github_url + "\n"
)
owner, repo = self.get_owner_repo(github_url)
url = "https://api.github.com/graphql"
query = """
{
repository(owner:"%s", name:"%s"){
updatedAt
hasIssuesEnabled
issues(states:OPEN) {
totalCount
}
hasWikiEnabled
forkCount
defaultBranchRef {
name
}
watchers {
totalCount
}
id
licenseInfo {
name
url
}
stargazers {
totalCount
}
codeOfConduct {
name
url
}
issue_count: issues {
totalCount
}
issues_closed: issues(states:CLOSED) {
totalCount
}
pr_count: pullRequests {
totalCount
}
pr_open: pullRequests(states: OPEN) {
totalCount
}
pr_closed: pullRequests(states: CLOSED) {
totalCount
}
pr_merged: pullRequests(states: MERGED) {
totalCount
}
ref(qualifiedName: "master") {
target {
... on Commit {
history(first: 0){
totalCount
}
}
}
}
}
}
""" % (owner, repo)
# Hit the graphql endpoint and retry 3 times in case of failure
num_attempts = 0
success = False
while num_attempts < 3:
self.logger.info("Hitting endpoint: {} ...\n".format(url))
r = requests.post(url, json={"query": query}, headers=self.headers)
self.update_gh_rate_limit(r)
try:
data = r.json()
except:
data = json.loads(json.dumps(r.text))
if "errors" in data:
self.logger.info("Error!: {}".format(data["errors"]))
if data["errors"][0]["message"] == "API rate limit exceeded":
self.update_gh_rate_limit(r)
continue
if "data" in data:
success = True
data = data["data"]["repository"]
break
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data["message"] == "Not Found":
self.logger.info(
"Github repo was not found or does not exist for endpoint: {}\n".format(
url
)
)
break
if (
data["message"]
== "You have triggered an abuse detection mechanism. Please wait a few minutes before you try again."
):
self.update_gh_rate_limit(r, temporarily_disable=True)
continue
if data["message"] == "Bad credentials":
self.update_gh_rate_limit(r, bad_credentials=True)
continue
num_attempts += 1
if not success:
self.register_task_failure(
self.task, repo_id, "Failed to hit endpoint: {}".format(url)
)
return
# Get committers count info that requires seperate endpoint
committers_count = self.query_committers_count(owner, repo)
# Put all data together in format of the table
self.logger.info(
f"Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n"
)
rep_inf = {
"repo_id": repo_id,
"last_updated": data["updatedAt"] if "updatedAt" in data else None,
"issues_enabled": data["hasIssuesEnabled"]
if "hasIssuesEnabled" in data
else None,
"open_issues": data["issues"]["totalCount"] if data["issues"] else None,
"pull_requests_enabled": None,
"wiki_enabled": data["hasWikiEnabled"] if "hasWikiEnabled" in data else None,
"pages_enabled": None,
"fork_count": data["forkCount"] if "forkCount" in data else None,
"default_branch": data["defaultBranchRef"]["name"]
if data["defaultBranchRef"]
else None,
"watchers_count": data["watchers"]["totalCount"] if data["watchers"] else None,
"UUID": None,
"license": data["licenseInfo"]["name"] if data["licenseInfo"] else None,
"stars_count": data["stargazers"]["totalCount"] if data["stargazers"] else None,
"committers_count": committers_count,
"issue_contributors_count": None,
"changelog_file": None,
"contributing_file": None,
"license_file": data["licenseInfo"]["url"] if data["licenseInfo"] else None,
"code_of_conduct_file": data["codeOfConduct"]["url"]
if data["codeOfConduct"]
else None,
"security_issue_file": None,
"security_audit_file": None,
"status": None,
"keywords": None,
"commit_count": data["ref"]["target"]["history"]["totalCount"]
if data["ref"]
else None,
"issues_count": data["issue_count"]["totalCount"]
if data["issue_count"]
else None,
"issues_closed": data["issues_closed"]["totalCount"]
if data["issues_closed"]
else None,
"pull_request_count": data["pr_count"]["totalCount"]
if data["pr_count"]
else None,
"pull_requests_open": data["pr_open"]["totalCount"]
if data["pr_open"]
else None,
"pull_requests_closed": data["pr_closed"]["totalCount"]
if data["pr_closed"]
else None,
"pull_requests_merged": data["pr_merged"]["totalCount"]
if data["pr_merged"]
else None,
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source,
}
result = self.db.execute(self.repo_info_table.insert().values(rep_inf))
self.logger.info(
f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n"
)
self.results_counter += 1
# Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table.
forked = self.is_forked(owner, repo)
archived = self.is_archived(owner, repo)
archived_date_collected = None
if archived is not False:
archived_date_collected = archived
archived = 1
else:
archived = 0
rep_additional_data = {
"forked_from": forked,
"repo_archived": archived,
"repo_archived_date_collected": archived_date_collected,
}
result = self.db.execute(
self.repo_table.update()
.where(repo_table.c.repo_id == repo_id)
.values(rep_additional_data)
)
self.logger.info(
f"Primary Key inserted into repo table: {result.inserted_primary_key}\n"
)
self.logger.info(f"Inserted info for {owner}/{repo}\n")
# Register this task as completed
self.register_task_completion(self.task, repo_id, "repo_info")
|
def repo_info_model(self, task, repo_id):
    """Populate the `repo_info` table for one repository.

    Queries the GitHub GraphQL API (retrying up to 3 times around rate
    limiting / abuse detection), augments the result with a committer
    count from the REST API, inserts one row into `repo_info`, and
    registers the task as completed.

    :param task: task message; ``task["given"]["github_url"]`` is the repo URL
    :param repo_id: primary key of the repository in the `repo` table
    """
    github_url = task["given"]["github_url"]
    self.logger.info(
        "Beginning filling the repo_info model for repo: " + github_url + "\n"
    )
    owner, repo = self.get_owner_repo(github_url)
    url = "https://api.github.com/graphql"
    # GraphQL query: issue/PR counts by state, license, code of conduct,
    # watchers/stargazers, and the commit count of the "master" ref.
    query = """
    {
        repository(owner:"%s", name:"%s"){
            updatedAt
            hasIssuesEnabled
            issues(states:OPEN) {
                totalCount
            }
            hasWikiEnabled
            forkCount
            defaultBranchRef {
                name
            }
            watchers {
                totalCount
            }
            id
            licenseInfo {
                name
                url
            }
            stargazers {
                totalCount
            }
            codeOfConduct {
                name
                url
            }
            issue_count: issues {
                totalCount
            }
            issues_closed: issues(states:CLOSED) {
                totalCount
            }
            pr_count: pullRequests {
                totalCount
            }
            pr_open: pullRequests(states: OPEN) {
                totalCount
            }
            pr_closed: pullRequests(states: CLOSED) {
                totalCount
            }
            pr_merged: pullRequests(states: MERGED) {
                totalCount
            }
            ref(qualifiedName: "master") {
                target {
                    ... on Commit {
                        history(first: 0){
                            totalCount
                        }
                    }
                }
            }
        }
    }
    """ % (owner, repo)
    # Hit the graphql endpoint and retry 3 times in case of failure
    num_attempts = 0
    success = False
    while num_attempts < 3:
        self.logger.info("Hitting endpoint: {} ...\n".format(url))
        r = requests.post(url, json={"query": query}, headers=self.headers)
        self.update_gh_rate_limit(r)
        try:
            data = r.json()
        except:
            # Body was not JSON; keep the raw text (round-tripped through json).
            data = json.loads(json.dumps(r.text))
        if "errors" in data:
            self.logger.info("Error!: {}".format(data["errors"]))
            if data["errors"][0]["message"] == "API rate limit exceeded":
                self.update_gh_rate_limit(r)
                continue
        if "data" in data:
            success = True
            data = data["data"]["repository"]
            break
        else:
            # No "data" key: inspect the error message to decide whether to
            # give up (not found), back off (abuse detection), or rotate keys.
            self.logger.info("Request returned a non-data dict: {}\n".format(data))
            if data["message"] == "Not Found":
                self.logger.info(
                    "Github repo was not found or does not exist for endpoint: {}\n".format(
                        url
                    )
                )
                break
            if (
                data["message"]
                == "You have triggered an abuse detection mechanism. Please wait a few minutes before you try again."
            ):
                self.update_gh_rate_limit(r, temporarily_disable=True)
                continue
            if data["message"] == "Bad credentials":
                self.update_gh_rate_limit(r, bad_credentials=True)
                continue
        num_attempts += 1
    if not success:
        self.register_task_failure(
            self.task, repo_id, "Failed to hit endpoint: {}".format(url)
        )
        return
    # Get committers count info that requires seperate endpoint
    committers_count = self.query_committers_count(owner, repo)
    # Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table.
    forked = self.is_forked(owner, repo)
    archived = self.is_archived(owner, repo)
    if archived is not False:
        archived_date_collected = archived
        archived = True
    else:
        archived_date_collected = None
    # NOTE(review): forked / archived / archived_date_collected are computed
    # above but their columns are commented out of rep_inf below, so the
    # values are currently unused — confirm whether the `repo` table update
    # was intentionally dropped.
    # Put all data together in format of the table
    self.logger.info(
        f"Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n"
    )
    rep_inf = {
        "repo_id": repo_id,
        "last_updated": data["updatedAt"] if "updatedAt" in data else None,
        "issues_enabled": data["hasIssuesEnabled"]
        if "hasIssuesEnabled" in data
        else None,
        "open_issues": data["issues"]["totalCount"] if data["issues"] else None,
        "pull_requests_enabled": None,
        "wiki_enabled": data["hasWikiEnabled"] if "hasWikiEnabled" in data else None,
        "pages_enabled": None,
        "fork_count": data["forkCount"] if "forkCount" in data else None,
        "default_branch": data["defaultBranchRef"]["name"]
        if data["defaultBranchRef"]
        else None,
        "watchers_count": data["watchers"]["totalCount"] if data["watchers"] else None,
        "UUID": None,
        "license": data["licenseInfo"]["name"] if data["licenseInfo"] else None,
        "stars_count": data["stargazers"]["totalCount"] if data["stargazers"] else None,
        "committers_count": committers_count,
        "issue_contributors_count": None,
        "changelog_file": None,
        "contributing_file": None,
        "license_file": data["licenseInfo"]["url"] if data["licenseInfo"] else None,
        "code_of_conduct_file": data["codeOfConduct"]["url"]
        if data["codeOfConduct"]
        else None,
        "security_issue_file": None,
        "security_audit_file": None,
        "status": None,
        "keywords": None,
        "commit_count": data["ref"]["target"]["history"]["totalCount"]
        if data["ref"]
        else None,
        "issues_count": data["issue_count"]["totalCount"]
        if data["issue_count"]
        else None,
        "issues_closed": data["issues_closed"]["totalCount"]
        if data["issues_closed"]
        else None,
        "pull_request_count": data["pr_count"]["totalCount"]
        if data["pr_count"]
        else None,
        "pull_requests_open": data["pr_open"]["totalCount"]
        if data["pr_open"]
        else None,
        "pull_requests_closed": data["pr_closed"]["totalCount"]
        if data["pr_closed"]
        else None,
        "pull_requests_merged": data["pr_merged"]["totalCount"]
        if data["pr_merged"]
        else None,
        "tool_source": self.tool_source,
        "tool_version": self.tool_version,
        "data_source": self.data_source,
        # 'forked_from': forked,
        # 'repo_archived': archived,
        # 'repo_archived_date_collected': archived_date_collected
    }
    result = self.db.execute(self.repo_info_table.insert().values(rep_inf))
    self.logger.info(
        f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n"
    )
    self.results_counter += 1
    self.logger.info(f"Inserted info for {owner}/{repo}\n")
    # Register this task as completed
    self.register_task_completion(self.task, repo_id, "repo_info")
|
https://github.com/chaoss/augur/issues/737
|
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
model_method(message, repo_id)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
forked = self.is_forked(owner, repo)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
|
TypeError
|
def query_committers_count(self, owner, repo):
    """Return the number of contributors for *owner*/*repo*.

    Walks the paginated GitHub contributors endpoint (100 per page) and
    sums the page lengths; on any error the count gathered so far is
    returned rather than raising.
    """
    self.logger.info("Querying committers count\n")
    page_url = f"https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100"
    total = 0
    try:
        # Follow RFC 5988 "next" pagination links until exhausted.
        while page_url is not None:
            response = requests.get(page_url, headers=self.headers)
            self.update_gh_rate_limit(response)
            total += len(response.json())
            next_link = response.links.get("next")
            page_url = next_link["url"] if next_link else None
    except Exception:
        self.logger.exception("An error occured while querying contributor count\n")
    return total
|
def query_committers_count(self, owner, repo):
    """Return the number of contributors for *owner*/*repo*.

    Walks the paginated GitHub contributors endpoint (100 per page) and
    sums the page lengths; on any error the count gathered so far is
    returned rather than raising.
    """
    self.logger.info("Querying committers count\n")
    url = f"https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100"
    committers = 0
    try:
        while True:
            r = requests.get(url, headers=self.headers)
            self.update_gh_rate_limit(r)
            committers += len(r.json())
            # Follow RFC 5988 "next" pagination links until exhausted.
            if "next" not in r.links:
                break
            else:
                url = r.links["next"]["url"]
    except Exception:
        # FIX: use the worker's logger instead of the root `logging` module,
        # consistent with the info log above and the rest of this worker.
        self.logger.exception("An error occured while querying contributor count\n")
    return committers
|
https://github.com/chaoss/augur/issues/737
|
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
model_method(message, repo_id)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
forked = self.is_forked(owner, repo)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
|
TypeError
|
def is_forked(self, owner, repo):  # /repos/:owner/:repo parent
    """Return the parent repo's full name ("owner/name") when the payload
    carries a "parent" entry, "Parent not available" when it carries a
    "fork" key but no parent, and False otherwise.
    """
    self.logger.info("Querying parent info to verify if the repo is forked\n")
    endpoint = f"https://api.github.com/repos/{owner}/{repo}"
    response = requests.get(endpoint, headers=self.headers)
    self.update_gh_rate_limit(response)
    payload = self.get_repo_data(endpoint, response)
    # NOTE(review): this tests key *presence*, not truthiness — the GitHub
    # payload appears to always carry "fork"; confirm a truthiness check
    # was not intended.
    if "fork" not in payload:
        return False
    if "parent" in payload:
        return payload["parent"]["full_name"]
    return "Parent not available"
|
def is_forked(self, owner, repo):  # /repos/:owner/:repo parent
    """Return the parent repo's full name ("owner/name") when the payload
    carries a "parent" entry, "Parent not available" when it carries a
    "fork" key but no parent, and False otherwise.
    """
    # FIX: use the worker's logger instead of the root `logging` module,
    # consistent with the rest of this worker.
    self.logger.info("Querying parent info to verify if the repo is forked\n")
    url = f"https://api.github.com/repos/{owner}/{repo}"
    r = requests.get(url, headers=self.headers)
    self.update_gh_rate_limit(r)
    data = self.get_repo_data(url, r)
    # NOTE(review): this tests key *presence*, not truthiness — the GitHub
    # payload appears to always carry "fork"; confirm a truthiness check
    # was not intended.
    if "fork" in data:
        if "parent" in data:
            return data["parent"]["full_name"]
        return "Parent not available"
    return False
|
https://github.com/chaoss/augur/issues/737
|
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
model_method(message, repo_id)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
forked = self.is_forked(owner, repo)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
|
TypeError
|
def is_archived(self, owner, repo):
    """Return the repo's "updated_at" timestamp when it is archived on
    GitHub, "Date not available" when archived without a timestamp, and
    False when it is not archived (or the payload lacks the field).
    """
    # FIX: the log message was a copy-paste from query_committers_count
    # ("Querying committers count"); describe what this method actually does.
    self.logger.info("Querying repository archive status\n")
    url = f"https://api.github.com/repos/{owner}/{repo}"
    r = requests.get(url, headers=self.headers)
    self.update_gh_rate_limit(r)
    data = self.get_repo_data(url, r)
    if "archived" in data:
        if data["archived"]:
            if "updated_at" in data:
                return data["updated_at"]
            return "Date not available"
        return False
    return False
|
def is_archived(self, owner, repo):
    """Return the repo's "updated_at" timestamp when it is archived on
    GitHub, "Date not available" when archived without a timestamp, and
    False when it is not archived (or the payload lacks the field).
    """
    # FIX: use the worker's logger (not the root `logging` module) and fix
    # the message, which was a copy-paste from query_committers_count.
    self.logger.info("Querying repository archive status\n")
    url = f"https://api.github.com/repos/{owner}/{repo}"
    r = requests.get(url, headers=self.headers)
    self.update_gh_rate_limit(r)
    data = self.get_repo_data(url, r)
    if "archived" in data:
        if data["archived"]:
            if "updated_at" in data:
                return data["updated_at"]
            return "Date not available"
        return False
    return False
|
https://github.com/chaoss/augur/issues/737
|
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
model_method(message, repo_id)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
forked = self.is_forked(owner, repo)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
|
TypeError
|
def get_repo_data(self, url, response):
    """Decode a GitHub REST response, handling rate-limit / error payloads.

    Success is defined as the decoded body containing an "id" key; on
    failure the task is registered as failed. The decoded body is returned
    either way.

    :param url: endpoint the response came from (used in log messages)
    :param response: `requests` Response object
    :return: decoded JSON body (dict), or the raw text when not JSON
    """
    success = False
    try:
        data = response.json()
    except:
        # Body was not JSON; keep the raw text (round-tripped through json).
        data = json.loads(json.dumps(response.text))
    if "errors" in data:
        self.logger.info("Error!: {}".format(data["errors"]))
        if data["errors"][0]["message"] == "API rate limit exceeded":
            self.update_gh_rate_limit(response)
    if "id" in data:
        success = True
    else:
        self.logger.info("Request returned a non-data dict: {}\n".format(data))
        if data["message"] == "Not Found":
            self.logger.info(
                "Github repo was not found or does not exist for endpoint: {}\n".format(
                    url
                )
            )
        if (
            data["message"]
            == "You have triggered an abuse detection mechanism. Please wait a few minutes before you try again."
        ):
            # FIX: was `r`, an undefined name that raised NameError instead
            # of handling the abuse-detection response.
            self.update_gh_rate_limit(response, temporarily_disable=True)
        if data["message"] == "Bad credentials":
            # FIX: was `r` (undefined) here as well.
            self.update_gh_rate_limit(response, bad_credentials=True)
    if not success:
        # FIX: `repo_id` is not defined in this scope and raised NameError,
        # masking the real failure; this helper does not know the repo id,
        # so pass None. TODO: thread the repo id through from the caller.
        self.register_task_failure(
            self.task, None, "Failed to hit endpoint: {}".format(url)
        )
    return data
|
def get_repo_data(self, url, response):
    """Decode a GitHub REST response, handling rate-limit / error payloads.

    Success is defined as the decoded body containing an "id" key; on
    failure the task is registered as failed. The decoded body is returned
    either way.

    :param url: endpoint the response came from (used in log messages)
    :param response: `requests` Response object
    :return: decoded JSON body (dict), or the raw text when not JSON
    """
    success = False
    try:
        data = response.json()
    except:
        # Body was not JSON; keep the raw text (round-tripped through json).
        data = json.loads(json.dumps(response.text))
    if "errors" in data:
        # FIX: use the worker's logger instead of the root `logging` module.
        self.logger.info("Error!: {}".format(data["errors"]))
        if data["errors"][0]["message"] == "API rate limit exceeded":
            self.update_gh_rate_limit(response)
    if "id" in data:
        success = True
    else:
        self.logger.info("Request returned a non-data dict: {}\n".format(data))
        if data["message"] == "Not Found":
            self.logger.info(
                "Github repo was not found or does not exist for endpoint: {}\n".format(
                    url
                )
            )
        if (
            data["message"]
            == "You have triggered an abuse detection mechanism. Please wait a few minutes before you try again."
        ):
            # FIX: was `r`, an undefined name that raised NameError instead
            # of handling the abuse-detection response.
            self.update_gh_rate_limit(response, temporarily_disable=True)
        if data["message"] == "Bad credentials":
            # FIX: was `r` (undefined) here as well.
            self.update_gh_rate_limit(response, bad_credentials=True)
    if not success:
        # FIX: `repo_id` is not defined in this scope and raised NameError,
        # masking the real failure; this helper does not know the repo id,
        # so pass None. TODO: thread the repo id through from the caller.
        self.register_task_failure(
            self.task, None, "Failed to hit endpoint: {}".format(url)
        )
    return data
|
https://github.com/chaoss/augur/issues/737
|
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
model_method(message, repo_id)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
forked = self.is_forked(owner, repo)
File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
|
TypeError
|
def Run(self, args):
    """Dump the platform flash (SPI/BIOS) image with Chipsec and reply with
    the temp-file pathspec plus any Chipsec log output collected in memory.

    :param args: request proto; uses notify_syslog, log_level and chunk_size.
        Known-unsupported chipsets are reported as a log-only response
        instead of raising.
    """
    # Due to talking raw to hardware, this action has some inevitable risk of
    # crashing the machine, so we need to flush the transaction log to ensure
    # we know when this happens.
    self.SyncTransactionLog()
    # Temporary extra logging for Ubuntu
    # TODO(user): Add generic hunt flag to notify syslog before running each
    # client action.
    if args.notify_syslog:
        syslog = logging.getLogger("chipsec_grr")
        syslog.setLevel(logging.INFO)
        syslog.addHandler(handlers.SysLogHandler(address="/dev/log"))
        syslog.info("%s: Runnning DumpFlashImage", config_lib.CONFIG["Client.name"])
    self.logs = []
    # Capture Chipsec's own log output in memory so it can be attached to
    # the response (and by LogError on failure).
    self.chipsec_log = StringIO.StringIO()
    if args.log_level:
        logger.logger().UTIL_TRACE = True
        if args.log_level == 2:
            logger.logger().VERBOSE = True
        logger.logger().logfile = self.chipsec_log
        logger.logger().LOG_TO_FILE = True
    # Create a temporary file to store the flash image.
    dest_fd, dest_pathspec = tempfiles.CreateGRRTempFileVFS()
    # Wrap most of Chipsec code to gather its logs in case of failure.
    try:
        # Initialise Chipsec (die early if unknown chipset)
        c = chipset.cs()
        # Platform = None, Start Driver = False
        c.init(None, False)
        s = spi.SPI(c)
        # Use hal.spi from chipsec to write BIOS to that file.
        with dest_fd:
            # Based on Chipsec code, rely on the address of BIOS(=1) region to
            # determine the size of the flash.
            _, limit, _ = s.get_SPI_region(1)
            spi_size = limit + 1
            # Read args.chunk_size bytes at a time and heartbeat.
            bios = []
            for i in range(0, spi_size, args.chunk_size):
                bios.extend(s.read_spi(i, args.chunk_size))
                self.Progress()
            dest_fd.write("".join(bios))
    except chipset.UnknownChipsetError as err:
        # Unsupported chipset: clean up the temp file and reply with a
        # log-only response instead of failing the flow.
        if args.log_level:
            self.LogError(err)
        tempfiles.DeleteGRRTempFile(dest_pathspec.path)
        self.SendReply(
            chipsec_types.DumpFlashImageResponse(
                logs=["%s" % err],
            )
        )
        return
    except Exception as err:  # pylint: disable=broad-except
        # In case an exception is raised, if the verbose mode
        # is enabled, return the raw logs from Chipsec.
        if args.log_level:
            self.LogError(err)
        tempfiles.DeleteGRRTempFile(dest_pathspec.path)
        raise
    if args.log_level:
        self.logs.extend(self.chipsec_log.getvalue().splitlines())
    if args.notify_syslog:
        syslog.info(
            "%s: DumpFlashImage has completed successfully",
            config_lib.CONFIG["Client.name"],
        )
    self.SendReply(
        chipsec_types.DumpFlashImageResponse(path=dest_pathspec, logs=self.logs)
    )
|
def Run(self, args):
    """Dump the platform flash (SPI/BIOS) image with Chipsec and reply with
    the temp-file pathspec plus any Chipsec log output.

    :param args: request proto; uses notify_syslog, log_level and chunk_size.
        Chipsec logs are written to a temp file and read back via
        ReadAndDeleteChipsecLogs.
    """
    # Due to talking raw to hardware, this action has some inevitable risk of
    # crashing the machine, so we need to flush the transaction log to ensure
    # we know when this happens.
    self.SyncTransactionLog()
    # Temporary extra logging for Ubuntu
    # TODO(user): Add generic hunt flag to notify syslog before running each
    # client action.
    if args.notify_syslog:
        syslog = logging.getLogger("chipsec_grr")
        syslog.setLevel(logging.INFO)
        syslog.addHandler(handlers.SysLogHandler(address="/dev/log"))
        syslog.info("%s: Runnning DumpFlashImage", config_lib.CONFIG["Client.name"])
    logs = []
    if args.log_level:
        # Create a temporary file to store the log output as
        # Chipsec does not support in-memory logging.
        _, self.log_pathspec = tempfiles.CreateGRRTempFileVFS()
        logger().UTIL_TRACE = True
        if args.log_level == 2:
            logger().VERBOSE = True
        logger().set_log_file(self.log_pathspec.path)
    # Create a temporary file to store the flash image.
    dest_fd, dest_pathspec = tempfiles.CreateGRRTempFileVFS()
    # Wrap most of Chipsec code to gather its logs in case of failure.
    try:
        # Initialise Chipsec (die early if unknown chipset)
        c = chipset.cs()
        # Platform = None, Start Driver = False
        c.init(None, False)
        s = spi.SPI(c)
        # Use hal.spi from chipsec to write BIOS to that file.
        with dest_fd:
            # Based on Chipsec code, rely on the address of BIOS(=1) region to
            # determine the size of the flash.
            _, limit, _ = s.get_SPI_region(1)
            spi_size = limit + 1
            # Read args.chunk_size bytes at a time and heartbeat.
            bios = []
            for i in range(0, spi_size, args.chunk_size):
                bios.extend(s.read_spi(i, args.chunk_size))
                self.Progress()
            dest_fd.write("".join(bios))
    except Exception as err:  # pylint: disable=broad-except
        # In case an exception is raised, if the verbose mode
        # is enabled, return the raw logs from Chipsec.
        if args.log_level:
            logs = self.ReadAndDeleteChipsecLogs()
            logs.append("%r: %s" % (err, err))
            self.SendReply(chipsec_types.DumpFlashImageResponse(logs=logs))
        tempfiles.DeleteGRRTempFile(dest_pathspec.path)
        if isinstance(err, chipset.UnknownChipsetError):
            # If the chipset is unknown, simply returns an error message
            self.SendReply(
                chipsec_types.DumpFlashImageResponse(
                    logs=["%s" % err],
                )
            )
            return
        raise
    if args.log_level:
        logs = self.ReadAndDeleteChipsecLogs()
    if args.notify_syslog:
        syslog.info(
            "%s: DumpFlashImage has completed successfully",
            config_lib.CONFIG["Client.name"],
        )
    self.SendReply(chipsec_types.DumpFlashImageResponse(path=dest_pathspec, logs=logs))
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def LogError(self, err):
    """Record *err* plus the buffered Chipsec output in self.logs and send
    a log-only DumpACPITableResponse."""
    messages = ["Error dumping ACPI table.", "%r: %s" % (err, err)]
    messages.extend(self.chipsec_log.getvalue().splitlines())
    self.logs.extend(messages)
    self.SendReply(chipsec_types.DumpACPITableResponse(logs=self.logs))
|
def LogError(self, err):
    """Record *err* plus the buffered Chipsec output in self.logs and send
    a log-only DumpACPITableResponse."""
    self.logs.append("Error dumping ACPI table.")
    self.logs.append("%r: %s" % (err, err))
    # FIX: splitlines() instead of split("\n") — split leaves a trailing
    # empty string when the buffer ends with a newline, injecting blank
    # entries into the response logs.
    self.logs.extend(self.chipsec_log.getvalue().splitlines())
    self.SendReply(chipsec_types.DumpACPITableResponse(logs=self.logs))
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def Run(self, args):
    """Dump the ACPI table(s) matching args.table_signature via Chipsec and
    reply with the raw table blobs plus any collected Chipsec logs.

    Expected client-side failures (unknown chipset, /dev/mem access) are
    reported as a log-only response instead of raising.
    """
    self.logs = []
    # Capture Chipsec's own log output in memory.
    self.chipsec_log = StringIO.StringIO()
    if args.logging:
        self.logs.append("Dumping %s" % args.table_signature)
        logger.logger().logfile = self.chipsec_log
        logger.logger().LOG_TO_FILE = True
    # Wrap most of Chipsec code to gather its logs in case of failure.
    try:
        # Initialise Chipsec (die early if unknown chipset)
        c = chipset.cs()
        # Platform = None, Start Driver = False
        c.init(None, False)
        a = acpi.ACPI(c)
        acpi_tables_raw = a.get_ACPI_table(args.table_signature)
        acpi_tables = []
        # One signature can match several tables; pair each dumped table
        # (header + content) with its physical address.
        for i, table_address in enumerate(a.tableList[args.table_signature]):
            table_header, table_content = acpi_tables_raw[i]
            table_blob = table_header + table_content
            acpi_tables.append(
                chipsec_types.ACPITableData(
                    table_address=table_address, table_blob=table_blob
                )
            )
    except (chipset.UnknownChipsetError, OSError) as err:
        # Expected errors that might happen on the client
        # If the chipset is unknown or we encountered an error due to reading
        # an area we do not have access to using /dev/mem, simply return an
        # error message.
        if args.logging:
            self.LogError(err)
        self.SendReply(
            chipsec_types.DumpACPITableResponse(
                logs=["%s" % err],
            )
        )
        return
    except Exception as err:  # pylint: disable=broad-except
        # In case an exception is raised, if the verbose mode
        # is enabled, return the raw logs from Chipsec.
        if args.logging:
            self.LogError(err)
        raise
    if not acpi_tables:
        self.logs.append("No ACPI table with signature %s." % args.table_signature)
    else:
        self.logs.append(
            "ACPI table with signature %s has been successfully dumped."
            % args.table_signature
        )
    if args.logging:
        self.logs.extend(self.chipsec_log.getvalue().splitlines())
    self.SendReply(
        chipsec_types.DumpACPITableResponse(acpi_tables=acpi_tables, logs=self.logs)
    )
|
def Run(self, args):
    """Dump the ACPI table(s) matching args.table_signature via Chipsec and
    reply with the raw table blobs plus any collected Chipsec logs.

    Expected client-side failures (unknown chipset, /dev/mem access) are
    reported as a log-only response instead of raising.
    """
    self.logs = []
    # Capture Chipsec's own log output in memory.
    self.chipsec_log = StringIO.StringIO()
    if args.logging:
        self.logs.append("Dumping %s" % args.table_signature)
        logger().logfile = self.chipsec_log
        logger().LOG_TO_FILE = True
    # Wrap most of Chipsec code to gather its logs in case of failure.
    try:
        # Initialise Chipsec (die early if unknown chipset)
        c = chipset.cs()
        # Platform = None, Start Driver = False
        c.init(None, False)
        a = acpi.ACPI(c)
        acpi_tables_raw = a.get_ACPI_table(args.table_signature)
        acpi_tables = []
        # One signature can match several tables; pair each dumped table
        # (header + content) with its physical address.
        for i, table_address in enumerate(a.tableList[args.table_signature]):
            table_header, table_content = acpi_tables_raw[i]
            table_blob = table_header + table_content
            acpi_tables.append(
                chipsec_types.ACPITableData(
                    table_address=table_address, table_blob=table_blob
                )
            )
    except (chipset.UnknownChipsetError, OSError) as err:
        # Expected errors that might happen on the client
        # If the chipset is unknown or we encountered an error due to reading
        # an area we do not have access to using /dev/mem, simply return an
        # error message.
        if args.logging:
            self.LogError(err)
        self.SendReply(
            chipsec_types.DumpACPITableResponse(
                logs=["%s" % err],
            )
        )
        return
    except Exception as err:  # pylint: disable=broad-except
        # In case an exception is raised, if the verbose mode
        # is enabled, return the raw logs from Chipsec.
        if args.logging:
            self.LogError(err)
        raise
    if not acpi_tables:
        self.logs.append("No ACPI table with signature %s." % args.table_signature)
    else:
        self.logs.append(
            "ACPI table with signature %s has been successfully dumped."
            % args.table_signature
        )
    if args.logging:
        # FIX: splitlines() instead of split("\n") — split leaves a trailing
        # empty string when the buffer ends with a newline, injecting blank
        # entries into the response logs.
        self.logs.extend(self.chipsec_log.getvalue().splitlines())
    self.SendReply(
        chipsec_types.DumpACPITableResponse(acpi_tables=acpi_tables, logs=self.logs)
    )
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def main(_):
    """Run the main test harness."""
    config_lib.CONFIG.AddContext(
        "AdminUI Context", "Context applied when running the admin user interface GUI."
    )
    startup.Init()
    # Refuse to start if the compiled frontend assets are missing from the
    # document root — the UI cannot render without them.
    if not os.path.exists(
        os.path.join(
            config_lib.CONFIG["AdminUI.document_root"], "dist/grr-ui.bundle.js"
        )
    ) or not os.path.exists(
        os.path.join(
            config_lib.CONFIG["AdminUI.document_root"], "dist/grr-ui.bundle.css"
        )
    ):
        raise RuntimeError(
            "Can't find compiled JS/CSS bundles. "
            "Please reinstall the PIP package using "
            '"pip install -e ." to rebuild the bundles.'
        )
    # Start up a server in another thread
    bind_address = config_lib.CONFIG["AdminUI.bind"]
    ip = ipaddr.IPAddress(bind_address)
    if ip.version == 4:
        # Address looks like an IPv4 address.
        ThreadingDjango.address_family = socket.AF_INET
    max_port = config_lib.CONFIG.Get(
        "AdminUI.port_max", config_lib.CONFIG["AdminUI.port"]
    )
    # Walk the [port, port_max] range until a free port binds; re-raise the
    # bind error once the range is exhausted.
    for port in range(config_lib.CONFIG["AdminUI.port"], max_port + 1):
        # Make a simple reference implementation WSGI server
        try:
            server = simple_server.make_server(
                bind_address,
                port,
                django_lib.GetWSGIHandler(),
                server_class=ThreadingDjango,
            )
            break
        except socket.error as e:
            if e.errno == socket.errno.EADDRINUSE and port < max_port:
                logging.info("Port %s in use, trying %s", port, port + 1)
            else:
                raise
    proto = "HTTP"
    if config_lib.CONFIG["AdminUI.enable_ssl"]:
        cert_file = config_lib.CONFIG["AdminUI.ssl_cert_file"]
        if not cert_file:
            raise ValueError("Need a valid cert file to enable SSL.")
        key_file = config_lib.CONFIG["AdminUI.ssl_key_file"]
        server.socket = ssl.wrap_socket(
            server.socket, certfile=cert_file, keyfile=key_file, server_side=True
        )
        proto = "HTTPS"
    # SSL errors are swallowed by the WSGIServer so if your configuration does
    # not work, uncomment the line below, point your browser at the gui and look
    # at the log file to see why SSL complains:
    # server.socket.accept()
    sa = server.socket.getsockname()
    logging.info("Serving %s on %s port %d ...", proto, sa[0], sa[1])
    # Drop elevated privileges before entering the serve loop.
    startup.DropPrivileges()
    server.serve_forever()
|
def main(_):
    """Entry point: configure and run the AdminUI web server.

    Note: despite the original "test harness" wording, this starts the GRR
    admin user interface, optionally wrapped in SSL.
    """
    config_lib.CONFIG.AddContext(
        "AdminUI Context", "Context applied when running the admin user interface GUI."
    )
    startup.Init()
    # Start up a server in another thread
    bind_address = config_lib.CONFIG["AdminUI.bind"]
    ip = ipaddr.IPAddress(bind_address)
    if ip.version == 4:
        # Address looks like an IPv4 address.
        ThreadingDjango.address_family = socket.AF_INET
    # Scan the configured port range until a free port is successfully bound.
    max_port = config_lib.CONFIG.Get(
        "AdminUI.port_max", config_lib.CONFIG["AdminUI.port"]
    )
    for port in range(config_lib.CONFIG["AdminUI.port"], max_port + 1):
        # Make a simple reference implementation WSGI server
        try:
            server = simple_server.make_server(
                bind_address,
                port,
                django_lib.GetWSGIHandler(),
                server_class=ThreadingDjango,
            )
            break
        except socket.error as e:
            # Only retry on "address in use" while ports remain in the range;
            # any other socket error is fatal.
            if e.errno == socket.errno.EADDRINUSE and port < max_port:
                logging.info("Port %s in use, trying %s", port, port + 1)
            else:
                raise
    proto = "HTTP"
    if config_lib.CONFIG["AdminUI.enable_ssl"]:
        cert_file = config_lib.CONFIG["AdminUI.ssl_cert_file"]
        if not cert_file:
            raise ValueError("Need a valid cert file to enable SSL.")
        key_file = config_lib.CONFIG["AdminUI.ssl_key_file"]
        server.socket = ssl.wrap_socket(
            server.socket, certfile=cert_file, keyfile=key_file, server_side=True
        )
        proto = "HTTPS"
    # SSL errors are swallowed by the WSGIServer so if your configuration does
    # not work, uncomment the line below, point your browser at the gui and look
    # at the log file to see why SSL complains:
    # server.socket.accept()
    sa = server.socket.getsockname()
    logging.info("Serving %s on %s port %d ...", proto, sa[0], sa[1])
    # Privileges are dropped only after the (possibly privileged) port is bound.
    startup.DropPrivileges()
    server.serve_forever()
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def Handle(self, args, token=None):
    """Return the RUNNING/FINISHED state of an interrogate operation.

    Raises:
      InterrogateOperationNotFoundError: If no Interrogate flow exists under
        the given operation id.
    """
    try:
        interrogate_flow = aff4.FACTORY.Open(
            args.operation_id, aff4_type=discovery.Interrogate, token=token
        )
        still_running = interrogate_flow.GetRunner().IsRunning()
    except aff4.InstantiationError:
        raise InterrogateOperationNotFoundError(
            "Operation with id %s not found" % args.operation_id
        )
    result = ApiGetInterrogateOperationStateResult()
    state_enum = ApiGetInterrogateOperationStateResult.State
    result.state = state_enum.RUNNING if still_running else state_enum.FINISHED
    return result
|
def Handle(self, args, token=None):
    """Return the RUNNING/FINISHED state of an interrogate operation.

    Raises:
      InterrogateOperationNotFoundError: If no Interrogate flow exists under
        the given operation id.
    """
    try:
        interrogate_flow = aff4.FACTORY.Open(
            args.operation_id, aff4_type="Interrogate", token=token
        )
        still_running = interrogate_flow.GetRunner().IsRunning()
    except aff4.InstantiationError:
        raise InterrogateOperationNotFoundError(
            "Operation with id %s not found" % args.operation_id
        )
    result = ApiGetInterrogateOperationStateResult()
    state_enum = ApiGetInterrogateOperationStateResult.State
    result.state = state_enum.RUNNING if still_running else state_enum.FINISHED
    return result
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def Layout(self, request, response):
    """Render the toolbar.

    Starts an UpdateVFSFile flow to refresh the selected attribute of the
    currently viewed AFF4 path, then hands the resulting flow URN to the
    client-side JavaScript poller.

    Args:
      request: The incoming HTTP request (provides token and parameters).
      response: The response object being rendered into.

    Raises:
      IOError: If the refresh flow cannot be started for this path.
    """
    self.ParseRequest(request)
    try:
        # The client id is the first component of the AFF4 path.
        client_id = rdfvalue.RDFURN(self.aff4_path).Split(2)[0]
        update_flow_urn = flow.GRRFlow.StartFlow(
            client_id=client_id,
            flow_name="UpdateVFSFile",
            token=request.token,
            vfs_file_urn=rdfvalue.RDFURN(self.aff4_path),
            attribute=self.attribute_to_refresh,
        )
        update_flow = aff4.FACTORY.Open(
            update_flow_urn, aff4_type=aff4_grr.UpdateVFSFile, token=request.token
        )
        # The UpdateVFSFile flow exposes the URN of the flow doing the actual
        # file fetch; that is what the UI polls on.
        self.flow_urn = str(update_flow.state.get_file_flow_urn)
    except IOError as e:
        raise IOError("Sorry. This path cannot be refreshed due to %s" % e)
    if self.flow_urn:
        response = super(UpdateAttribute, self).Layout(request, response)
        return self.CallJavascript(
            response,
            "UpdateAttribute.Layout",
            aff4_path=self.aff4_path,
            flow_urn=self.flow_urn,
            attribute_to_refresh=self.attribute_to_refresh,
            poll_time=self.poll_time,
        )
|
def Layout(self, request, response):
    """Render the toolbar.

    Starts an UpdateVFSFile flow to refresh the selected attribute of the
    currently viewed AFF4 path, then hands the resulting flow URN to the
    client-side JavaScript poller.

    Args:
      request: The incoming HTTP request (provides token and parameters).
      response: The response object being rendered into.

    Raises:
      IOError: If the refresh flow cannot be started for this path.
    """
    self.ParseRequest(request)
    try:
        # The client id is the first component of the AFF4 path.
        client_id = rdfvalue.RDFURN(self.aff4_path).Split(2)[0]
        update_flow_urn = flow.GRRFlow.StartFlow(
            client_id=client_id,
            flow_name="UpdateVFSFile",
            token=request.token,
            vfs_file_urn=rdfvalue.RDFURN(self.aff4_path),
            attribute=self.attribute_to_refresh,
        )
        update_flow = aff4.FACTORY.Open(
            update_flow_urn, aff4_type="UpdateVFSFile", token=request.token
        )
        # The UpdateVFSFile flow exposes the URN of the flow doing the actual
        # file fetch; that is what the UI polls on.
        self.flow_urn = str(update_flow.state.get_file_flow_urn)
    except IOError as e:
        raise IOError("Sorry. This path cannot be refreshed due to %s" % e)
    if self.flow_urn:
        response = super(UpdateAttribute, self).Layout(request, response)
        return self.CallJavascript(
            response,
            "UpdateAttribute.Layout",
            aff4_path=self.aff4_path,
            flow_urn=self.flow_urn,
            attribute_to_refresh=self.attribute_to_refresh,
            poll_time=self.poll_time,
        )
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def BuildTable(self, start_row, end_row, request):
    """Renders the table.

    Builds one page of a client's flow tree: top-level flows/hunts in the
    [start_row, end_row) window, each annotated as branch or leaf depending
    on whether it has children.

    Args:
      start_row: Index of the first row to render.
      end_row: Index one past the last row to render.
      request: The HTTP request; provides "depth", "value" and "client_id".

    Returns:
      True if more top-level rows exist past end_row (only computed at
      depth 0); None when no client_id is available.
    """
    depth = request.REQ.get("depth", 0)
    flow_urn = self.state.get("value", request.REQ.get("value"))
    if flow_urn is None:
        # Rendering the root of the tree: list the flows under the client.
        client_id = request.REQ.get("client_id")
        if not client_id:
            return
        flow_urn = rdf_client.ClientURN(client_id).Add("flows")
    flow_root = aff4.FACTORY.Open(flow_urn, mode="r", token=request.token)
    # Newest children first (x.age is the URN's timestamp).
    root_children_paths = sorted(
        flow_root.ListChildren(), key=lambda x: x.age, reverse=True
    )
    additional_rows = depth == 0 and len(root_children_paths) > end_row
    if not depth:
        root_children_paths = root_children_paths[start_row:end_row]
    # TODO(user): should be able to specify aff4_type=flow.GRRFlow
    # here. Currently this doesn't work because symlinks get filtered out.
    # This is an aff4.FACTORY.MultiOpen's bug.
    root_children = aff4.FACTORY.MultiOpen(root_children_paths, token=request.token)
    root_children = sorted(root_children, key=self._GetCreationTime, reverse=True)
    # Pre-fetch grandchildren in one call so each row can be marked branch/leaf.
    level2_children = dict(
        aff4.FACTORY.MultiListChildren(
            [f.urn for f in root_children], token=request.token
        )
    )
    self.size = len(root_children)
    row_index = start_row
    for flow_obj in root_children:
        if level2_children.get(flow_obj.urn, None):
            row_type = "branch"
        else:
            row_type = "leaf"
        row = {}
        last = flow_obj.Get(flow_obj.Schema.LAST)
        if last:
            row["Last Active"] = last
        if isinstance(flow_obj, flow.GRRFlow):
            row_name = (flow_obj.symlink_urn or flow_obj.urn).Basename()
            try:
                if flow_obj.Get(flow_obj.Schema.CLIENT_CRASH):
                    row["State"] = "CLIENT_CRASHED"
                else:
                    row["State"] = flow_obj.state.context.state
                row["Flow Name"] = flow_obj.state.context.args.flow_name
                row["Creation Time"] = flow_obj.state.context.create_time
                row["Creator"] = flow_obj.state.context.creator
            except AttributeError:
                # Flow state may be missing or corrupt; degrade gracefully
                # instead of failing the whole table.
                row["Flow Name"] = "Failed to open flow."
        elif isinstance(flow_obj, hunts.GRRHunt):
            row_name = flow_obj.urn.Dirname()
            row["Flow Name"] = "Hunt"
        else:
            # A logs collection, skip, it will be rendered separately
            continue
        self.columns[1].AddElement(
            # If flow object is symlinked, we want to use symlink path in the
            # table. This way UI logic can make reasonable assumptions about
            # client's flows URNs.
            row_index,
            flow_obj.symlink_urn or flow_obj.urn,
            depth,
            row_type,
            row_name,
        )
        self.AddRow(row, row_index)
        row_index += 1
    return additional_rows
|
def BuildTable(self, start_row, end_row, request):
    """Renders the table.

    Builds one page of a client's flow tree: top-level flows/hunts in the
    [start_row, end_row) window, each annotated as branch or leaf depending
    on whether it has children.

    Args:
      start_row: Index of the first row to render.
      end_row: Index one past the last row to render.
      request: The HTTP request; provides "depth", "value" and "client_id".

    Returns:
      True if more top-level rows exist past end_row (only computed at
      depth 0); None when no client_id is available.
    """
    depth = request.REQ.get("depth", 0)
    flow_urn = self.state.get("value", request.REQ.get("value"))
    if flow_urn is None:
        # Rendering the root of the tree: list the flows under the client.
        client_id = request.REQ.get("client_id")
        if not client_id:
            return
        flow_urn = rdf_client.ClientURN(client_id).Add("flows")
    flow_root = aff4.FACTORY.Open(flow_urn, mode="r", token=request.token)
    # Newest children first (x.age is the URN's timestamp).
    root_children_paths = sorted(
        flow_root.ListChildren(), key=lambda x: x.age, reverse=True
    )
    additional_rows = depth == 0 and len(root_children_paths) > end_row
    if not depth:
        root_children_paths = root_children_paths[start_row:end_row]
    # TODO(user): should be able to specify aff4_type="GRRFlow" here.
    # Currently this doesn't work because symlinks get filtered out.
    # This is an aff4.FACTORY.MultiOpen's bug.
    root_children = aff4.FACTORY.MultiOpen(root_children_paths, token=request.token)
    root_children = sorted(root_children, key=self._GetCreationTime, reverse=True)
    # Pre-fetch grandchildren in one call so each row can be marked branch/leaf.
    level2_children = dict(
        aff4.FACTORY.MultiListChildren(
            [f.urn for f in root_children], token=request.token
        )
    )
    self.size = len(root_children)
    row_index = start_row
    for flow_obj in root_children:
        if level2_children.get(flow_obj.urn, None):
            row_type = "branch"
        else:
            row_type = "leaf"
        row = {}
        last = flow_obj.Get(flow_obj.Schema.LAST)
        if last:
            row["Last Active"] = last
        if isinstance(flow_obj, flow.GRRFlow):
            row_name = (flow_obj.symlink_urn or flow_obj.urn).Basename()
            try:
                if flow_obj.Get(flow_obj.Schema.CLIENT_CRASH):
                    row["State"] = "CLIENT_CRASHED"
                else:
                    row["State"] = flow_obj.state.context.state
                row["Flow Name"] = flow_obj.state.context.args.flow_name
                row["Creation Time"] = flow_obj.state.context.create_time
                row["Creator"] = flow_obj.state.context.creator
            except AttributeError:
                # Flow state may be missing or corrupt; degrade gracefully
                # instead of failing the whole table.
                row["Flow Name"] = "Failed to open flow."
        elif isinstance(flow_obj, hunts.GRRHunt):
            row_name = flow_obj.urn.Dirname()
            row["Flow Name"] = "Hunt"
        else:
            # A logs collection, skip, it will be rendered separately
            continue
        self.columns[1].AddElement(
            # If flow object is symlinked, we want to use symlink path in the
            # table. This way UI logic can make reasonable assumptions about
            # client's flows URNs.
            row_index,
            flow_obj.symlink_urn or flow_obj.urn,
            depth,
            row_type,
            row_name,
        )
        self.AddRow(row, row_index)
        row_index += 1
    return additional_rows
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def __getattr__(self, attr):
    """Handle unknown attributes.

    When an object is opened WITHOUT an explicit aff4_type, callers often
    probe for schema attributes that may not exist, e.g.:

      fd = aff4.FACTORY.Open(urn)
      fd.Get(fd.Schema.DOESNTEXIST, default_value)

    so we return None and let the caller's default take effect. When an
    explicit aff4_type WAS requested (e.g. Open(urn,
    aff4_type=module.SomeClass)), a missing attribute is a programming
    error, so we raise instead.

    Args:
      attr: Some ignored attribute.

    Raises:
      BadGetAttributeError: if the object was opened with a specific type
    """
    if not self.aff4_type:
        # No explicit type requested: missing attributes resolve to None so
        # Get()'s default value applies.
        return None
    raise BadGetAttributeError(
        "Attribute %s does not exist on object opened with aff4_type %s"
        % (utils.SmartStr(attr), self.aff4_type)
    )
|
def __getattr__(self, attr):
    """Handle unknown attributes.

    When an object is opened WITHOUT an explicit aff4_type, callers often
    probe for schema attributes that may not exist, e.g.:

      fd = aff4.FACTORY.Open(urn)
      fd.Get(fd.Schema.DOESNTEXIST, default_value)

    so we return None and let the caller's default take effect. When an
    explicit aff4_type WAS requested (e.g. Open(urn, aff4_type="something")),
    a missing attribute is a programming error, so we raise instead.

    Args:
      attr: Some ignored attribute.

    Raises:
      BadGetAttributeError: if the object was opened with a specific type
    """
    if not self.aff4_type:
        # No explicit type requested: missing attributes resolve to None so
        # Get()'s default value applies.
        return None
    raise BadGetAttributeError(
        "Attribute %s does not exist on object opened with aff4_type %s"
        % (utils.SmartStr(attr), self.aff4_type)
    )
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def _ReadExactly(self, n):
ret = ""
left = n
while left:
data = self.sock.recv(left)
if not data:
raise IOError("Expected %d bytes, got EOF after %d" % (n, len(ret)))
ret += data
left = n - len(ret)
return ret
|
def _ReadExactly(self, n):
ret = ""
left = n
while left:
data = self.sock.recv(left)
if data == "":
raise IOError("Expected %d bytes, got EOF after %d" % (n, len(ret)))
ret += data
left = n - len(ret)
return ret
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def Parse(self, stat, knowledge_base):
    """Yield the registry value with Windows environment variables expanded.

    Args:
      stat: Object whose registry_data holds the raw registry value.
      knowledge_base: Client knowledge base used for variable expansion.

    Yields:
      An RDFString with the expanded value (only when expansion is non-empty).

    Raises:
      parsers.ParseError: If the registry value is empty/missing.
    """
    raw_value = stat.registry_data.GetValue()
    if not raw_value:
        raise parsers.ParseError("Invalid value for key %s" % stat.pathspec.path)
    expanded = artifact_utils.ExpandWindowsEnvironmentVariables(
        raw_value, knowledge_base
    )
    if expanded:
        yield rdfvalue.RDFString(expanded)
|
def Parse(self, stat, knowledge_base):
    """Expand Windows environment variables in the registry value and yield it.

    (The previous docstring, "Parse the key currentcontrolset output", was a
    copy-paste from a different parser and did not match this code.)

    Args:
      stat: Object whose registry_data holds the raw registry value.
      knowledge_base: Client knowledge base used for variable expansion.

    Yields:
      An RDFString with the expanded value (only when expansion is non-empty).

    Raises:
      parsers.ParseError: If the registry value is empty/missing.
    """
    value = stat.registry_data.GetValue()
    if not value:
        raise parsers.ParseError("Invalid value for key %s" % stat.pathspec.path)
    value = artifact_utils.ExpandWindowsEnvironmentVariables(value, knowledge_base)
    if value:
        yield rdfvalue.RDFString(value)
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def Parse(self, stat, _):
    """Extract the system drive letter (e.g. "C:") from a registry value.

    Args:
      stat: Either a StatEntry (produced by SystemDriveEnvironmentVariable)
        or an RDFString (produced by WindowsEnvironmentVariableSystemDrive).
      _: Unused knowledge base argument.

    Yields:
      An RDFString holding the drive letter, e.g. "C:".

    Raises:
      parsers.ParseError: If the input is empty, of an unexpected type, or
        does not start with a drive letter.
    """
    # SystemDriveEnvironmentVariable produces a statentry,
    # WindowsEnvironmentVariableSystemDrive produces a string
    if isinstance(stat, rdf_client.StatEntry):
        value = stat.registry_data.GetValue()
    elif isinstance(stat, rdfvalue.RDFString):
        value = stat
    else:
        # BUG FIX: previously an unexpected type left `value` unbound, so the
        # next line raised a confusing NameError. Fail with an explicit
        # ParseError instead.
        raise parsers.ParseError("Invalid value type %s" % type(stat))
    if not value:
        raise parsers.ParseError("Invalid value for key %s" % stat.pathspec.path)
    systemdrive = value[0:2]
    if re.match(r"^[A-Za-z]:$", systemdrive):
        yield rdfvalue.RDFString(systemdrive)
    else:
        raise parsers.ParseError("Bad drive letter for key %s" % stat.pathspec.path)
|
def Parse(self, stat, _):
    """Extract the system drive letter (e.g. "C:") from a registry value.

    Args:
      stat: Object whose registry_data holds the raw registry value.
      _: Unused knowledge base argument.

    Yields:
      An RDFString holding the drive letter, e.g. "C:".

    Raises:
      parsers.ParseError: If the value is empty or does not start with a
        drive letter.
    """
    raw_value = stat.registry_data.GetValue()
    if not raw_value:
        raise parsers.ParseError("Invalid value for key %s" % stat.pathspec.path)
    # The drive letter is the first two characters, e.g. "C:".
    drive = raw_value[:2]
    if not re.match(r"^[A-Za-z]:$", drive):
        raise parsers.ParseError("Bad drive letter for key %s" % stat.pathspec.path)
    yield rdfvalue.RDFString(drive)
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def MakeCoreSdist(self):
    """Build the grr-response-core source distribution.

    Runs setup.py sdist in the source tree (skipping docs, UI files and
    artifact sync) and returns the path of the produced zip.
    """
    os.chdir(args.grr_src)
    sdist_command = [
        self.PYTHON_BIN64,
        "setup.py",
        "sdist",
        "--dist-dir=%s" % self.BUILDDIR,
        "--no-make-docs",
        "--no-make-ui-files",
        "--no-sync-artifacts",
    ]
    subprocess.check_call(sdist_command)
    zip_pattern = os.path.join(self.BUILDDIR, "grr-response-core-*.zip")
    return glob.glob(zip_pattern).pop()
|
def MakeCoreSdist(self):
    """Build the grr-response-core source distribution.

    Runs setup.py sdist in the source tree (skipping docs and artifact sync)
    and returns the path of the produced zip.
    """
    os.chdir(args.grr_src)
    sdist_command = [
        self.PYTHON_BIN64,
        "setup.py",
        "sdist",
        "--dist-dir=%s" % self.BUILDDIR,
        "--no-make-docs",
        "--no-sync-artifacts",
    ]
    subprocess.check_call(sdist_command)
    zip_pattern = os.path.join(self.BUILDDIR, "grr-response-core-*.zip")
    return glob.glob(zip_pattern).pop()
|
https://github.com/google/grr/issues/411
|
Traceback (most recent call last): File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 644, in RunStateMethod responses=responses) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flows/general/discovery.py", line 153, in Platform next_state="ProcessKnowledgeBase") File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 784, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow_runner.py", line 940, in CallFlow **kwargs) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 927, in StartFlow flow_obj.Start() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/flow.py", line 346, in Decorated res = f(*args[:f.func_code.co_argcount]) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 143, in Start first_flows = self.GetFirstFlowsForCollection() File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact.py", line 407, in GetFirstFlowsForCollection artifact_registry.REGISTRY.GetArtifact(artifact_name) File "/usr/share/grr-server/local/lib/python2.7/site-packages/grr/lib/artifact_registry.py", line 304, in GetArtifact "directory." % name) ArtifactNotRegisteredError: Artifact WMIProfileUsersHomeDir missing from registry. You may need to sync the artifact repo by running make in the artifact directory.
|
ArtifactNotRegisteredError
|
def TestInit():
    """Only used in tests and will rerun all the hooks to create a clean state."""
    # Tests use both the server template grr_server.yaml as a primary config file
    # (this file does not contain all required options, e.g. private keys), and
    # additional configuration in test_data/grr_test.yaml which contains typical
    # values for a complete installation.
    if stats.STATS is None:
        stats.STATS = stats.StatsCollector()
    flags.FLAGS.config = config_lib.Resource().Filter(
        "install_data/etc/grr-server.yaml"
    )
    # NOTE(review): the "@grr-response-test" qualifier appears to resolve the
    # resource from the grr-response-test package rather than the core package
    # -- confirm against config_lib.Resource().Filter semantics.
    flags.FLAGS.secondary_configs = [
        config_lib.Resource().Filter("test_data/grr_test.yaml@grr-response-test")
    ]
    # We are running a test so let the config system know that.
    config_lib.CONFIG.AddContext("Test Context", "Context applied when we run tests.")
    AddConfigContext()
    ConfigInit()
    # Tests additionally add a test configuration file.
    ServerLoggingStartupInit()
    registry.TestInit()
|
def TestInit():
    """Only used in tests and will rerun all the hooks to create a clean state."""
    # Tests use both the server template grr_server.yaml as a primary config file
    # (this file does not contain all required options, e.g. private keys), and
    # additional configuration in test_data/grr_test.yaml which contains typical
    # values for a complete installation.
    if stats.STATS is None:
        stats.STATS = stats.StatsCollector()
    flags.FLAGS.config = config_lib.Resource().Filter(
        "install_data/etc/grr-server.yaml"
    )
    # NOTE(review): this unqualified resource path has been reported to fail
    # with "IOError: Unable to find resource test_data" (GRR issue #358) --
    # verify that the test_data resource resolves from the installed package.
    flags.FLAGS.secondary_configs = [
        config_lib.Resource().Filter("test_data/grr_test.yaml")
    ]
    # We are running a test so let the config system know that.
    config_lib.CONFIG.AddContext("Test Context", "Context applied when we run tests.")
    AddConfigContext()
    ConfigInit()
    # Tests additionally add a test configuration file.
    ServerLoggingStartupInit()
    registry.TestInit()
|
https://github.com/google/grr/issues/358
|
Traceback (most recent call last):
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/http_api.py", line 415, in HandleRequest
rendered_data = self.CallApiHandler(handler, args, token=token)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/http_api.py", line 225, in CallApiHandler
return handler.Render(args, token=token)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/api_plugins/config.py", line 80, in Render
parameter_data = RenderConfigOption(parameter)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/api_plugins/config.py", line 25, in RenderConfigOption
option_value = config_lib.CONFIG.Get(name)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1162, in Get
type_info_obj=type_info_obj, context=calc_context)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1291, in InterpolateValue
parameter=type_info_obj.name, context=context).Parse()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 671, in Parse
self.Close()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 187, in Close
while self.NextToken():
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 127, in NextToken
possible_next_state = cb(string=m.group(0), match=m)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 657, in ExpandArg
final_value = self.config.Get(parameter_name, context=self.context)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1162, in Get
type_info_obj=type_info_obj, context=calc_context)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1291, in InterpolateValue
parameter=type_info_obj.name, context=context).Parse()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 671, in Parse
self.Close()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 187, in Close
while self.NextToken():
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 127, in NextToken
possible_next_state = cb(string=m.group(0), match=m)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 640, in Filter
arg = filter_object().Filter(arg)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 220, in Filter
raise IOError("Unable to find resource %s" % filename)
IOError: Unable to find resource test_data
|
IOError
|
def main(argv):
"""Sets up all the component in their own threads."""
config_lib.CONFIG.AddContext(
"Demo Context",
"The demo runs all functions in a single process using the "
"in memory data store.",
)
config_lib.CONFIG.AddContext("Test Context", "Context applied when we run tests.")
flags.FLAGS.config = config_lib.Resource().Filter(
"install_data/etc/grr-server.yaml"
)
flags.FLAGS.secondary_configs = [
config_lib.Resource().Filter("test_data/grr_test.yaml@grr-response-test")
]
startup.Init()
# pylint: disable=unused-import,unused-variable,g-import-not-at-top
from grr.gui import gui_plugins
# pylint: enable=unused-import,unused-variable,g-import-not-at-top
# This is the worker thread.
worker_thread = threading.Thread(target=worker.main, args=[argv], name="Worker")
worker_thread.daemon = True
worker_thread.start()
# This is the http server Frontend that clients communicate with.
http_thread = threading.Thread(
target=http_server.main, args=[argv], name="HTTP Server"
)
http_thread.daemon = True
http_thread.start()
client_thread = threading.Thread(target=client.main, args=[argv], name="Client")
client_thread.daemon = True
client_thread.start()
# The UI is running in the main thread.
runtests.main(argv)
|
def main(argv):
"""Sets up all the component in their own threads."""
config_lib.CONFIG.AddContext(
"Demo Context",
"The demo runs all functions in a single process using the "
"in memory data store.",
)
config_lib.CONFIG.AddContext("Test Context", "Context applied when we run tests.")
flags.FLAGS.config = config_lib.Resource().Filter(
"install_data/etc/grr-server.yaml"
)
flags.FLAGS.secondary_configs = [
config_lib.Resource().Filter("test_data/grr_test.yaml")
]
startup.Init()
# pylint: disable=unused-import,unused-variable,g-import-not-at-top
from grr.gui import gui_plugins
# pylint: enable=unused-import,unused-variable,g-import-not-at-top
# This is the worker thread.
worker_thread = threading.Thread(target=worker.main, args=[argv], name="Worker")
worker_thread.daemon = True
worker_thread.start()
# This is the http server Frontend that clients communicate with.
http_thread = threading.Thread(
target=http_server.main, args=[argv], name="HTTP Server"
)
http_thread.daemon = True
http_thread.start()
client_thread = threading.Thread(target=client.main, args=[argv], name="Client")
client_thread.daemon = True
client_thread.start()
# The UI is running in the main thread.
runtests.main(argv)
|
https://github.com/google/grr/issues/358
|
Traceback (most recent call last):
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/http_api.py", line 415, in HandleRequest
rendered_data = self.CallApiHandler(handler, args, token=token)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/http_api.py", line 225, in CallApiHandler
return handler.Render(args, token=token)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/api_plugins/config.py", line 80, in Render
parameter_data = RenderConfigOption(parameter)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/api_plugins/config.py", line 25, in RenderConfigOption
option_value = config_lib.CONFIG.Get(name)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1162, in Get
type_info_obj=type_info_obj, context=calc_context)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1291, in InterpolateValue
parameter=type_info_obj.name, context=context).Parse()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 671, in Parse
self.Close()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 187, in Close
while self.NextToken():
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 127, in NextToken
possible_next_state = cb(string=m.group(0), match=m)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 657, in ExpandArg
final_value = self.config.Get(parameter_name, context=self.context)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1162, in Get
type_info_obj=type_info_obj, context=calc_context)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1291, in InterpolateValue
parameter=type_info_obj.name, context=context).Parse()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 671, in Parse
self.Close()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 187, in Close
while self.NextToken():
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 127, in NextToken
possible_next_state = cb(string=m.group(0), match=m)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 640, in Filter
arg = filter_object().Filter(arg)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 220, in Filter
raise IOError("Unable to find resource %s" % filename)
IOError: Unable to find resource test_data
|
IOError
|
def run(self):
working_directory = os.path.abspath(os.getcwd() + "/../")
virtualenv_bin = os.path.dirname(sys.executable)
pip = "%s/pip" % virtualenv_bin
# Install the GRR server component to satisfy the dependency below.
subprocess.check_call([sys.executable, pip, "install", "."], cwd=working_directory)
# Install the grr-response-server metapackage to get the remaining
# dependencies and the entry points.
subprocess.check_call(
[sys.executable, pip, "install", "."],
cwd=working_directory + "/grr/config/grr-response-server/",
)
major_minor_version = ".".join(
pkg_resources.get_distribution("grr-response-core").version.split(".")[0:2]
)
subprocess.check_call(
[
sys.executable,
pip,
"install",
"-f",
"https://storage.googleapis.com/releases.grr-response.com/index.html",
"grr-response-templates==%s.*" % major_minor_version,
],
cwd=working_directory,
)
|
def run(self):
working_directory = os.path.abspath(os.getcwd() + "/../")
virtualenv_bin = os.path.dirname(sys.executable)
pip = "%s/pip" % virtualenv_bin
# Install the GRR server component to satisfy the dependency below.
subprocess.check_call(
[sys.executable, pip, "install", ".[Server]"], cwd=working_directory
)
# Install the grr-response-server metapackage to get all the entry points.
subprocess.check_call(
[sys.executable, pip, "install", "."],
cwd=working_directory + "/grr/config/grr-response-server/",
)
major_minor_version = ".".join(
pkg_resources.get_distribution("grr-response-core").version.split(".")[0:2]
)
subprocess.check_call(
[
sys.executable,
pip,
"install",
"-f",
"https://storage.googleapis.com/releases.grr-response.com/index.html",
"grr-response-templates==%s.*" % major_minor_version,
],
cwd=working_directory,
)
|
https://github.com/google/grr/issues/358
|
Traceback (most recent call last):
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/http_api.py", line 415, in HandleRequest
rendered_data = self.CallApiHandler(handler, args, token=token)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/http_api.py", line 225, in CallApiHandler
return handler.Render(args, token=token)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/api_plugins/config.py", line 80, in Render
parameter_data = RenderConfigOption(parameter)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/gui/api_plugins/config.py", line 25, in RenderConfigOption
option_value = config_lib.CONFIG.Get(name)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1162, in Get
type_info_obj=type_info_obj, context=calc_context)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1291, in InterpolateValue
parameter=type_info_obj.name, context=context).Parse()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 671, in Parse
self.Close()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 187, in Close
while self.NextToken():
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 127, in NextToken
possible_next_state = cb(string=m.group(0), match=m)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 657, in ExpandArg
final_value = self.config.Get(parameter_name, context=self.context)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1162, in Get
type_info_obj=type_info_obj, context=calc_context)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 1291, in InterpolateValue
parameter=type_info_obj.name, context=context).Parse()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 671, in Parse
self.Close()
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 187, in Close
while self.NextToken():
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/lexer.py", line 127, in NextToken
possible_next_state = cb(string=m.group(0), match=m)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 640, in Filter
arg = filter_object().Filter(arg)
File "/home/dev/GRR_ENV/local/lib/python2.7/site-packages/grr/lib/config_lib.py", line 220, in Filter
raise IOError("Unable to find resource %s" % filename)
IOError: Unable to find resource test_data
|
IOError
|
def execute_query(self, query, data_source_id, metadata):
signal.signal(signal.SIGINT, signal_handler)
start_time = time.time()
logger.info("task=execute_query state=load_ds ds_id=%d", data_source_id)
data_source = models.DataSource.get_by_id(data_source_id)
self.update_state(
state="STARTED", meta={"start_time": start_time, "custom_message": ""}
)
logger.debug("Executing query:\n%s", query)
query_hash = gen_query_hash(query)
query_runner = get_query_runner(data_source.type, data_source.options)
logger.info(
"task=execute_query state=before query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
query_hash,
data_source.type,
data_source.id,
self.request.id,
self.request.delivery_info["routing_key"],
metadata.get("Query ID", "unknown"),
metadata.get("Username", "unknown"),
)
if query_runner.annotate_query():
metadata["Task ID"] = self.request.id
metadata["Query Hash"] = query_hash
metadata["Queue"] = self.request.delivery_info["routing_key"]
annotation = ", ".join(["{}: {}".format(k, v) for k, v in metadata.iteritems()])
logging.debug("Annotation: %s", annotation)
annotated_query = "/* {} */ {}".format(annotation, query)
else:
annotated_query = query
with statsd_client.timer(
"query_runner.{}.{}.run_time".format(data_source.type, data_source.name)
):
data, error = query_runner.run_query(annotated_query)
logger.info(
"task=execute_query state=after query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
query_hash,
data_source.type,
data_source.id,
self.request.id,
self.request.delivery_info["routing_key"],
metadata.get("Query ID", "unknown"),
metadata.get("Username", "unknown"),
)
run_time = time.time() - start_time
logger.info("Query finished... data length=%s, error=%s", data and len(data), error)
self.update_state(
state="STARTED",
meta={"start_time": start_time, "error": error, "custom_message": ""},
)
# Delete query_hash
redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
if not error:
query_result, updated_query_ids = models.QueryResult.store_result(
data_source.org_id,
data_source.id,
query_hash,
query,
data,
run_time,
utils.utcnow(),
)
logger.info(
"task=execute_query state=after_store query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
query_hash,
data_source.type,
data_source.id,
self.request.id,
self.request.delivery_info["routing_key"],
metadata.get("Query ID", "unknown"),
metadata.get("Username", "unknown"),
)
for query_id in updated_query_ids:
check_alerts_for_query.delay(query_id)
logger.info(
"task=execute_query state=after_alerts query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
query_hash,
data_source.type,
data_source.id,
self.request.id,
self.request.delivery_info["routing_key"],
metadata.get("Query ID", "unknown"),
metadata.get("Username", "unknown"),
)
else:
raise QueryExecutionError(error)
return query_result.id
|
def execute_query(self, query, data_source_id, metadata):
signal.signal(signal.SIGINT, signal_handler)
start_time = time.time()
logger.info("Loading data source (%d)...", data_source_id)
# TODO: we should probably cache data sources in Redis
data_source = models.DataSource.get_by_id(data_source_id)
self.update_state(
state="STARTED", meta={"start_time": start_time, "custom_message": ""}
)
logger.info("Executing query:\n%s", query)
query_hash = gen_query_hash(query)
query_runner = get_query_runner(data_source.type, data_source.options)
if query_runner.annotate_query():
metadata["Task ID"] = self.request.id
metadata["Query Hash"] = query_hash
metadata["Queue"] = self.request.delivery_info["routing_key"]
annotation = ", ".join(["{}: {}".format(k, v) for k, v in metadata.iteritems()])
logging.debug("Annotation: %s", annotation)
annotated_query = "/* {} */ {}".format(annotation, query)
else:
annotated_query = query
with statsd_client.timer(
"query_runner.{}.{}.run_time".format(data_source.type, data_source.name)
):
data, error = query_runner.run_query(annotated_query)
run_time = time.time() - start_time
logger.info("Query finished... data length=%s, error=%s", data and len(data), error)
self.update_state(
state="STARTED",
meta={"start_time": start_time, "error": error, "custom_message": ""},
)
# Delete query_hash
redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
if not error:
query_result, updated_query_ids = models.QueryResult.store_result(
data_source.org_id,
data_source.id,
query_hash,
query,
data,
run_time,
utils.utcnow(),
)
for query_id in updated_query_ids:
check_alerts_for_query.delay(query_id)
else:
raise QueryExecutionError(error)
return query_result.id
|
https://github.com/getredash/redash/issues/785
|
[2016-01-21 17:58:21,655][PID:12][ERROR][redash.wsgi] Exception on /api/users/2 [POST]
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 477, in wrapper
resp = resource(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_login.py", line 792, in decorated_view
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "/opt/redash/redash/handlers/base.py", line 19, in dispatch_request
return super(BaseResource, self).dispatch_request(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 587, in dispatch_request
resp = meth(*args, **kwargs)
File "/opt/redash/redash/handlers/users.py", line 76, in post
user.update_instance(**params)
File "/opt/redash/redash/models.py", line 106, in update_instance
self.save(only=self.dirty_fields)
File "/opt/redash/redash/models.py", line 93, in save
super(BaseModel, self).save(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 4146, in save
rows = self.update(**field_dict).where(self._pk_expr()).execute()
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 2751, in execute
return self.database.rows_affected(self._execute())
File "/opt/redash/redash/metrics/database.py", line 50, in metered_execute
result = real_execute(self, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 2371, in _execute
return self.database.execute_sql(sql, params, self.require_commit)
File "/opt/redash/redash/metrics/database.py", line 22, in execute_sql
result = super(MeteredPostgresqlExtDatabase, self).execute_sql(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/playhouse/postgres_ext.py", line 369, in execute_sql
self.commit()
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 2922, in __exit__
reraise(new_type, new_type(*exc_value.args), traceback)
File "/usr/local/lib/python2.7/dist-packages/playhouse/postgres_ext.py", line 360, in execute_sql
cursor.execute(sql, params or ())
DataError: invalid input syntax for integer: "admin"
LINE 1: ..." = E'foo@bar.com', "groups" = ARRAY[2, E'admin'] ...
|
DataError
|
def convert(topology, backend, test_input, device, extra_config={}):
"""
This function is used to convert a `onnxconverter_common.topology.Topology` object into a *backend* model.
Args:
topology: The `onnxconverter_common.topology.Topology` object that will be converted into a backend model
backend: Which backend the model should be run on
test_input: Inputs for PyTorch model tracing
device: Which device the translated model will be run on
extra_config: Extra configurations to be used by individual operator converters
Returns:
A model implemented in the selected backend
"""
assert topology is not None, "Cannot convert a Topology object of type None."
assert backend is not None, "Cannot convert a Topology object into backend None."
assert device is not None, "Cannot convert a Topology object into device None."
tvm_backend = None
operator_map = {}
if tvm_installed():
import tvm
tvm_backend = tvm.__name__
for operator in topology.topological_operator_iterator():
converter = get_converter(operator.type)
if convert is None:
raise MissingConverter(
"Unable to find converter for {} type {} with extra config: {}.".format(
operator.type,
type(getattr(operator, "raw_model", None)),
extra_config,
)
)
if backend == onnx.__name__:
# vers = LooseVersion(torch.__version__)
# allowed_min = LooseVersion("1.6.0")
# Pytorch <= 1.6.0 has a bug with exporting GEMM into ONNX.
# For the moment only tree_trav is enabled for pytorch <= 1.6.0
# if vers < allowed_min:
extra_config[constants.TREE_IMPLEMENTATION] = "tree_trav"
operator_map[operator.full_name] = converter(operator, device, extra_config)
# Set the parameters for the model / container
n_threads = (
None
if constants.N_THREADS not in extra_config
else extra_config[constants.N_THREADS]
)
# We set the number of threads for torch here to avoid errors in case we JIT.
# We set intra op concurrency while we force operators to run sequentially.
# We can revise this later, but in general we don't have graphs requireing inter-op parallelism.
if n_threads is not None:
if torch.get_num_interop_threads() != 1:
torch.set_num_interop_threads(1)
torch.set_num_threads(n_threads)
operators = list(topology.topological_operator_iterator())
executor = Executor(
topology.raw_model.input_names,
topology.raw_model.output_names,
operator_map,
operators,
extra_config,
).eval()
# if constants.REMAINDER_SIZE is present in extra_config, we are in the convert_batch mode.
remainder_model = None
remainder_size = (
None
if constants.REMAINDER_SIZE not in extra_config
else extra_config[constants.REMAINDER_SIZE]
)
if backend == onnx.__name__:
onnx_model_name = output_model_name = None
target_opset = 11
# Set optional configuration options for ONNX if any.
if constants.ONNX_OUTPUT_MODEL_NAME in extra_config:
onnx_model_name = extra_config[constants.ONNX_OUTPUT_MODEL_NAME]
output_model_name = onnx_model_name + ".onnx"
if constants.ONNX_TARGET_OPSET in extra_config:
target_opset = extra_config[constants.ONNX_TARGET_OPSET]
if output_model_name is None:
output_model_name = str(uuid4().hex) + ".onnx"
# Put the tracing test input into the right format.
batch_trace_input, _ = _get_trace_input_from_test_input(
test_input, remainder_size, extra_config
)
# Generate the ONNX models
torch.onnx.export(
executor,
batch_trace_input,
output_model_name,
input_names=topology.raw_model.input_names,
output_names=topology.raw_model.output_names,
keep_initializers_as_inputs=False,
opset_version=target_opset,
do_constant_folding=True,
)
hb_model = onnx.load(output_model_name)
os.remove(output_model_name)
# Set the ONNX model name if any.
if onnx_model_name is not None:
hb_model.graph.name = onnx_model_name
# Fix the model to use arbitrary batch dimensions
def fix_dim(dim):
updated = False
if dim.HasField("dim_value"):
dim.Clear()
updated = True
dim.dim_param = "sym"
return updated
def fix_value_info(value):
num_fixed = 0
if value.type.HasField("tensor_type"):
shape = value.type.tensor_type.shape
if shape:
dim = shape.dim[0]
if fix_dim(dim):
num_fixed += 1
return num_fixed
def fix_graph(graph):
num_fixed = 0
for input in graph.input:
num_fixed += fix_value_info(input)
for output in graph.output:
num_fixed += fix_value_info(output)
for node in graph.node:
for attr in node.attribute:
if attr.HasField("g"):
num_fixed += fix_graph(attr.g)
return num_fixed
fix_graph(hb_model.graph)
elif backend == tvm_backend:
# Pick the proper target.
if device == "cuda":
target = tvm.target.cuda()
ctx = tvm.gpu()
elif device == "cpu":
target = "llvm"
ctx = tvm.cpu()
elif "llvm" in device:
target = device
ctx = tvm.cpu()
else:
raise RuntimeError("Device {} not recognized".format(device))
# Get configuration parameters.
# 50 is a good depth for operator fusion. More than that will probably hurt performance.
# https://github.com/microsoft/hummingbird/issues/232#issuecomment-697979508
config = {"relay.FuseOps.max_depth": 50}
if constants.TVM_MAX_FUSE_DEPTH in extra_config:
config["relay.FuseOps.max_depth"] = extra_config[
constants.TVM_MAX_FUSE_DEPTH
]
# First we need to generate the torchscript model.
batch_trace_input, remainder_trace_input = _get_trace_input_from_test_input(
test_input, remainder_size, extra_config
)
tvm_model = _compile_to_tvm(
topology, executor, batch_trace_input, target, ctx, config, extra_config
)
if remainder_trace_input is not None:
remainder_model = _compile_to_tvm(
topology,
executor,
remainder_trace_input,
target,
ctx,
config,
extra_config,
)
# In the container we will be using the context to properly configure the input tensors.
extra_config[constants.TVM_CONTEXT] = ctx
extra_config[constants.TVM_INPUT_NAMES] = topology.raw_model.input_names
hb_model = tvm_model
else:
# Set the device for the model.
if device != "cpu":
if backend == torch.__name__ or torch.jit.__name__:
executor = executor.to(device)
# If the backend is tochscript, jit the model.
if backend == torch.jit.__name__:
trace_input, _ = _get_trace_input_from_test_input(
test_input, remainder_size, extra_config
)
executor = _jit_trace(executor, trace_input, device, extra_config)
torch.jit.optimized_execution(executor)
hb_model = executor
# Return if the container is not needed.
if constants.CONTAINER in extra_config and not extra_config[constants.CONTAINER]:
return hb_model
# We scan the operators backwards until we find an operator with a defined type.
# This is necessary because ONNX models can have arbitrary operators doing casting, reshaping etc.
idx = len(operators) - 1
while (
idx >= 0
and not operator_map[operators[idx].full_name].regression
and not operator_map[operators[idx].full_name].classification
and not operator_map[operators[idx].full_name].anomaly_detection
and not operator_map[operators[idx].full_name].transformer
):
idx -= 1
assert idx >= 0, (
"Cannot detect container type. Please fill an issue at https://github.com/microsoft/hummingbird."
)
# If is a transformer, we need to check whether there is another operator type before.
# E.g., normalization after classification.
tmp_idx = idx
if operator_map[operators[idx].full_name].transformer:
while (
idx >= 0
and not operator_map[operators[idx].full_name].regression
and not operator_map[operators[idx].full_name].classification
and not operator_map[operators[idx].full_name].anomaly_detection
):
idx -= 1
if idx < 0:
idx = tmp_idx
# Get the proper container type.
if operator_map[operators[idx].full_name].regression:
# We are doing a regression task.
if backend == torch.jit.__name__:
container = TorchScriptSklearnContainerRegression
elif backend == onnx.__name__:
container = ONNXSklearnContainerRegression
elif backend == tvm_backend:
container = TVMSklearnContainerRegression
else:
container = PyTorchSklearnContainerRegression
elif operator_map[operators[idx].full_name].anomaly_detection:
# We are doing anomaly detection.
if backend == torch.jit.__name__:
container = TorchScriptSklearnContainerAnomalyDetection
elif backend == onnx.__name__:
container = ONNXSklearnContainerAnomalyDetection
elif backend == tvm_backend:
container = TVMSklearnContainerAnomalyDetection
else:
container = PyTorchSklearnContainerAnomalyDetection
elif operator_map[operators[idx].full_name].transformer:
# We are just transforming the input data.
if backend == torch.jit.__name__:
container = TorchScriptSklearnContainerTransformer
elif backend == onnx.__name__:
container = ONNXSklearnContainerTransformer
elif backend == tvm_backend:
container = TVMSklearnContainerTransformer
else:
container = PyTorchSklearnContainerTransformer
else:
# We are doing a classification task.
if backend == torch.jit.__name__:
container = TorchScriptSklearnContainerClassification
elif backend == onnx.__name__:
container = ONNXSklearnContainerClassification
elif backend == tvm_backend:
container = TVMSklearnContainerClassification
else:
container = PyTorchSklearnContainerClassification
n_threads = (
None
if constants.N_THREADS not in extra_config
else extra_config[constants.N_THREADS]
)
batch_size = (
None
if constants.TEST_INPUT not in extra_config
else _get_batch_size(test_input)
)
hb_container = container(hb_model, n_threads, batch_size, extra_config=extra_config)
if remainder_model:
aux_container = container(
remainder_model, n_threads, remainder_size, extra_config=extra_config
)
return BatchContainer(hb_container, aux_container)
elif remainder_size is not None and remainder_size > 0:
# remainder_size is non zero but remainder_model is not created
# -> torch backend case
aux_container = container(
hb_model, n_threads, remainder_size, extra_config=extra_config
)
return BatchContainer(hb_container, aux_container)
elif remainder_size is not None:
# remainder_size is not None but remainder_model is not created
# -> remainder_size must be zero (no need to create remainder_model)
assert remainder_size == 0, (
"remainder_size is non zero but no remainder_model has been created"
)
# remainder_size is not None only if called by convert_batch(...), so we return BatchContainer
# for this code path, even though there is no remainder_model created.
return BatchContainer(hb_container)
return hb_container
|
def convert(topology, backend, test_input, device, extra_config={}):
"""
This function is used to convert a `onnxconverter_common.topology.Topology` object into a *backend* model.
Args:
topology: The `onnxconverter_common.topology.Topology` object that will be converted into a backend model
backend: Which backend the model should be run on
test_input: Inputs for PyTorch model tracing
device: Which device the translated model will be run on
extra_config: Extra configurations to be used by individual operator converters
Returns:
A model implemented in the selected backend
"""
assert topology is not None, "Cannot convert a Topology object of type None."
assert backend is not None, "Cannot convert a Topology object into backend None."
assert device is not None, "Cannot convert a Topology object into device None."
tvm_backend = None
operator_map = {}
if tvm_installed():
import tvm
tvm_backend = tvm.__name__
for operator in topology.topological_operator_iterator():
try:
converter = get_converter(operator.type)
if backend == onnx.__name__:
# vers = LooseVersion(torch.__version__)
# allowed_min = LooseVersion("1.6.0")
# Pytorch <= 1.6.0 has a bug with exporting GEMM into ONNX.
# For the moment only tree_trav is enabled for pytorch <= 1.6.0
# if vers < allowed_min:
extra_config[constants.TREE_IMPLEMENTATION] = "tree_trav"
operator_map[operator.full_name] = converter(operator, device, extra_config)
except ValueError:
raise MissingConverter(
"Unable to find converter for {} type {} with extra config: {}.".format(
operator.type,
type(getattr(operator, "raw_model", None)),
extra_config,
)
)
except Exception as e:
raise e
# Set the parameters for the model / container
n_threads = (
None
if constants.N_THREADS not in extra_config
else extra_config[constants.N_THREADS]
)
# We set the number of threads for torch here to avoid errors in case we JIT.
# We set intra op concurrency while we force operators to run sequentially.
# We can revise this later, but in general we don't have graphs requireing inter-op parallelism.
if n_threads is not None:
if torch.get_num_interop_threads() != 1:
torch.set_num_interop_threads(1)
torch.set_num_threads(n_threads)
operators = list(topology.topological_operator_iterator())
executor = Executor(
topology.raw_model.input_names,
topology.raw_model.output_names,
operator_map,
operators,
extra_config,
).eval()
# if constants.REMAINDER_SIZE is present in extra_config, we are in the convert_batch mode.
remainder_model = None
remainder_size = (
None
if constants.REMAINDER_SIZE not in extra_config
else extra_config[constants.REMAINDER_SIZE]
)
if backend == onnx.__name__:
onnx_model_name = output_model_name = None
target_opset = 11
# Set optional configuration options for ONNX if any.
if constants.ONNX_OUTPUT_MODEL_NAME in extra_config:
onnx_model_name = extra_config[constants.ONNX_OUTPUT_MODEL_NAME]
output_model_name = onnx_model_name + ".onnx"
if constants.ONNX_TARGET_OPSET in extra_config:
target_opset = extra_config[constants.ONNX_TARGET_OPSET]
if output_model_name is None:
output_model_name = str(uuid4().hex) + ".onnx"
# Put the tracing test input into the right format.
batch_trace_input, _ = _get_trace_input_from_test_input(
test_input, remainder_size, extra_config
)
# Generate the ONNX models
torch.onnx.export(
executor,
batch_trace_input,
output_model_name,
input_names=topology.raw_model.input_names,
output_names=topology.raw_model.output_names,
keep_initializers_as_inputs=False,
opset_version=target_opset,
do_constant_folding=True,
)
hb_model = onnx.load(output_model_name)
os.remove(output_model_name)
# Set the ONNX model name if any.
if onnx_model_name is not None:
hb_model.graph.name = onnx_model_name
# Fix the model to use arbitrary batch dimensions
def fix_dim(dim):
updated = False
if dim.HasField("dim_value"):
dim.Clear()
updated = True
dim.dim_param = "sym"
return updated
def fix_value_info(value):
num_fixed = 0
if value.type.HasField("tensor_type"):
shape = value.type.tensor_type.shape
if shape:
dim = shape.dim[0]
if fix_dim(dim):
num_fixed += 1
return num_fixed
def fix_graph(graph):
    # Recursively rewrite the batch dimension of every graph input and output
    # to the symbolic "sym" parameter, descending into subgraphs held in node
    # attributes (e.g. If/Loop bodies via the "g" attribute field).
    # Returns the total number of dimensions fixed.
    # NOTE(review): the loop variable `input` shadows the builtin of the same
    # name; harmless here but worth renaming upstream.
    num_fixed = 0
    for input in graph.input:
        num_fixed += fix_value_info(input)
    for output in graph.output:
        num_fixed += fix_value_info(output)
    for node in graph.node:
        for attr in node.attribute:
            if attr.HasField("g"):
                num_fixed += fix_graph(attr.g)
    return num_fixed
fix_graph(hb_model.graph)
elif backend == tvm_backend:
# Pick the proper target.
if device == "cuda":
target = tvm.target.cuda()
ctx = tvm.gpu()
elif device == "cpu":
target = "llvm"
ctx = tvm.cpu()
elif "llvm" in device:
target = device
ctx = tvm.cpu()
else:
raise RuntimeError("Device {} not recognized".format(device))
# Get configuration parameters.
# 50 is a good depth for operator fusion. More than that will probably hurt performance.
# https://github.com/microsoft/hummingbird/issues/232#issuecomment-697979508
config = {"relay.FuseOps.max_depth": 50}
if constants.TVM_MAX_FUSE_DEPTH in extra_config:
config["relay.FuseOps.max_depth"] = extra_config[
constants.TVM_MAX_FUSE_DEPTH
]
# First we need to generate the torchscript model.
batch_trace_input, remainder_trace_input = _get_trace_input_from_test_input(
test_input, remainder_size, extra_config
)
tvm_model = _compile_to_tvm(
topology, executor, batch_trace_input, target, ctx, config, extra_config
)
if remainder_trace_input is not None:
remainder_model = _compile_to_tvm(
topology,
executor,
remainder_trace_input,
target,
ctx,
config,
extra_config,
)
# In the container we will be using the context to properly configure the input tensors.
extra_config[constants.TVM_CONTEXT] = ctx
extra_config[constants.TVM_INPUT_NAMES] = topology.raw_model.input_names
hb_model = tvm_model
else:
# Set the device for the model.
if device != "cpu":
if backend == torch.__name__ or torch.jit.__name__:
executor = executor.to(device)
# If the backend is tochscript, jit the model.
if backend == torch.jit.__name__:
trace_input, _ = _get_trace_input_from_test_input(
test_input, remainder_size, extra_config
)
executor = _jit_trace(executor, trace_input, device, extra_config)
torch.jit.optimized_execution(executor)
hb_model = executor
# Return if the container is not needed.
if constants.CONTAINER in extra_config and not extra_config[constants.CONTAINER]:
return hb_model
# We scan the operators backwards until we find an operator with a defined type.
# This is necessary because ONNX models can have arbitrary operators doing casting, reshaping etc.
idx = len(operators) - 1
while (
idx >= 0
and not operator_map[operators[idx].full_name].regression
and not operator_map[operators[idx].full_name].classification
and not operator_map[operators[idx].full_name].anomaly_detection
and not operator_map[operators[idx].full_name].transformer
):
idx -= 1
assert idx >= 0, (
"Cannot detect container type. Please fill an issue at https://github.com/microsoft/hummingbird."
)
# If is a transformer, we need to check whether there is another operator type before.
# E.g., normalization after classification.
tmp_idx = idx
if operator_map[operators[idx].full_name].transformer:
while (
idx >= 0
and not operator_map[operators[idx].full_name].regression
and not operator_map[operators[idx].full_name].classification
and not operator_map[operators[idx].full_name].anomaly_detection
):
idx -= 1
if idx < 0:
idx = tmp_idx
# Get the proper container type.
if operator_map[operators[idx].full_name].regression:
# We are doing a regression task.
if backend == torch.jit.__name__:
container = TorchScriptSklearnContainerRegression
elif backend == onnx.__name__:
container = ONNXSklearnContainerRegression
elif backend == tvm_backend:
container = TVMSklearnContainerRegression
else:
container = PyTorchSklearnContainerRegression
elif operator_map[operators[idx].full_name].anomaly_detection:
# We are doing anomaly detection.
if backend == torch.jit.__name__:
container = TorchScriptSklearnContainerAnomalyDetection
elif backend == onnx.__name__:
container = ONNXSklearnContainerAnomalyDetection
elif backend == tvm_backend:
container = TVMSklearnContainerAnomalyDetection
else:
container = PyTorchSklearnContainerAnomalyDetection
elif operator_map[operators[idx].full_name].transformer:
# We are just transforming the input data.
if backend == torch.jit.__name__:
container = TorchScriptSklearnContainerTransformer
elif backend == onnx.__name__:
container = ONNXSklearnContainerTransformer
elif backend == tvm_backend:
container = TVMSklearnContainerTransformer
else:
container = PyTorchSklearnContainerTransformer
else:
# We are doing a classification task.
if backend == torch.jit.__name__:
container = TorchScriptSklearnContainerClassification
elif backend == onnx.__name__:
container = ONNXSklearnContainerClassification
elif backend == tvm_backend:
container = TVMSklearnContainerClassification
else:
container = PyTorchSklearnContainerClassification
n_threads = (
None
if constants.N_THREADS not in extra_config
else extra_config[constants.N_THREADS]
)
batch_size = (
None
if constants.TEST_INPUT not in extra_config
else _get_batch_size(test_input)
)
hb_container = container(hb_model, n_threads, batch_size, extra_config=extra_config)
if remainder_model:
aux_container = container(
remainder_model, n_threads, remainder_size, extra_config=extra_config
)
return BatchContainer(hb_container, aux_container)
elif remainder_size is not None and remainder_size > 0:
# remainder_size is non zero but remainder_model is not created
# -> torch backend case
aux_container = container(
hb_model, n_threads, remainder_size, extra_config=extra_config
)
return BatchContainer(hb_container, aux_container)
elif remainder_size is not None:
# remainder_size is not None but remainder_model is not created
# -> remainder_size must be zero (no need to create remainder_model)
assert remainder_size == 0, (
"remainder_size is non zero but no remainder_model has been created"
)
# remainder_size is not None only if called by convert_batch(...), so we return BatchContainer
# for this code path, even though there is no remainder_model created.
return BatchContainer(hb_container)
return hb_container
|
https://github.com/microsoft/hummingbird/issues/424
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/_topology.py in convert(topology, backend, test_input, device, extra_config)
162
--> 163 operator_map[operator.full_name] = converter(operator, device, extra_config)
164 except ValueError:
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/sklearn/decision_tree.py in convert_sklearn_random_forest_classifier(operator, device, extra_config)
45 return convert_decision_ensemble_tree_common(
---> 46 tree_infos, get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn, n_features, classes, extra_config
47 )
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in convert_decision_ensemble_tree_common(tree_infos, get_parameters, get_parameters_for_tree_trav, n_features, classes, extra_config)
407 )
--> 408 for tree_param in tree_parameters
409 ]
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in <listcomp>(.0)
407 )
--> 408 for tree_param in tree_parameters
409 ]
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in get_parameters_for_tree_trav_sklearn(lefts, rights, features, thresholds, values, extra_config)
276
--> 277 return get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values)
278
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values, extra_config)
208 values = np.array([np.array([0.0]), values[0], values[0]])
--> 209 values.reshape(3, n_classes)
210
ValueError: cannot reshape array of size 3 into shape (3,2)
During handling of the above exception, another exception occurred:
MissingConverter Traceback (most recent call last)
<ipython-input-2-70faf2a01f51> in <module>
10 clf = RandomForestClassifier(random_state=seed)
11 clf.fit(X, y)
---> 12 model = hummingbird.ml.convert(clf, "torch")
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/convert.py in convert(model, backend, test_input, device, extra_config)
429 """
430 assert constants.REMAINDER_SIZE not in extra_config
--> 431 return _convert_common(model, backend, test_input, device, extra_config)
432
433
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/convert.py in _convert_common(model, backend, test_input, device, extra_config)
390 return _convert_sparkml(model, backend, test_input, device, extra_config)
391
--> 392 return _convert_sklearn(model, backend, test_input, device, extra_config)
393
394
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/convert.py in _convert_sklearn(model, backend, test_input, device, extra_config)
98
99 # Convert the Topology object into a PyTorch model.
--> 100 hb_model = topology_converter(topology, backend, test_input, device, extra_config=extra_config)
101 return hb_model
102
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/_topology.py in convert(topology, backend, test_input, device, extra_config)
165 raise MissingConverter(
166 "Unable to find converter for {} type {} with extra config: {}.".format(
--> 167 operator.type, type(getattr(operator, "raw_model", None)), extra_config
168 )
169 )
MissingConverter: Unable to find converter for SklearnRandomForestClassifier type <class 'NoneType'> with extra config: {'container': True, 'n_threads': 10, 'n_trees': 100}.
It usually means the pipeline being converted contains a
transformer or a predictor with no corresponding converter implemented.
Please fill an issue at https://github.com/microsoft/hummingbird.
|
ValueError
|
def get_tree_implementation_by_config_or_depth(extra_config, max_depth, low=3, high=10):
    """
    Pick the tree implementation strategy from the user configuration or,
    when none is configured, from the maximum tree depth.

    The heuristic is: GEMM <= low < PerfTreeTrav <= high < TreeTrav.

    Args:
        extra_config: Extra configuration options; may request a strategy
            explicitly under ``constants.TREE_IMPLEMENTATION``.
        max_depth: The maximum tree-depth found in the tree model.
        low: The maximum depth below which GEMM strategy is used.
        high: The maximum depth for which PerfTreeTrav strategy is used.

    Returns:
        A ``TreeImpl`` member.

    Raises:
        MissingConverter: If an explicitly requested implementation is unknown.
    """
    if constants.TREE_IMPLEMENTATION not in extra_config:
        # No explicit request: fall back to the depth heuristic.
        if max_depth is not None and max_depth <= low:
            return TreeImpl.gemm
        if max_depth is not None and max_depth <= high:
            return TreeImpl.perf_tree_trav
        return TreeImpl.tree_trav

    # Explicit request: resolve the strategy name through a dispatch table.
    implementations_by_name = {
        TreeImpl.gemm.name: TreeImpl.gemm,
        TreeImpl.tree_trav.name: TreeImpl.tree_trav,
        TreeImpl.perf_tree_trav.name: TreeImpl.perf_tree_trav,
    }
    requested = extra_config[constants.TREE_IMPLEMENTATION]
    if requested not in implementations_by_name:
        raise MissingConverter("Tree implementation {} not found".format(extra_config))
    return implementations_by_name[requested]
|
def get_tree_implementation_by_config_or_depth(extra_config, max_depth, low=3, high=10):
    """
    Utility function used to pick the tree implementation based on input parameters and heuristics.
    The current heuristic is such that GEMM <= low < PerfTreeTrav <= high < TreeTrav

    Args:
        extra_config: Extra configuration options; may request a strategy
            explicitly under ``constants.TREE_IMPLEMENTATION``.
        max_depth: The maximum tree-depth found in the tree model.
        low: The maximum depth below which GEMM strategy is used
        high: The maximum depth for which PerfTreeTrav strategy is used

    Returns: A tree implementation

    Raises:
        MissingConverter: If an explicitly requested implementation is unknown.
    """
    if constants.TREE_IMPLEMENTATION not in extra_config:
        if max_depth is not None and max_depth <= low:
            return TreeImpl.gemm
        elif max_depth is not None and max_depth <= high:
            return TreeImpl.perf_tree_trav
        else:
            return TreeImpl.tree_trav

    if extra_config[constants.TREE_IMPLEMENTATION] == TreeImpl.gemm.name:
        return TreeImpl.gemm
    elif extra_config[constants.TREE_IMPLEMENTATION] == TreeImpl.tree_trav.name:
        return TreeImpl.tree_trav
    elif extra_config[constants.TREE_IMPLEMENTATION] == TreeImpl.perf_tree_trav.name:
        return TreeImpl.perf_tree_trav
    else:
        # BUGFIX: raise MissingConverter instead of ValueError. The topology
        # dispatch loop wraps converter calls in a generic `except ValueError`
        # and re-raises a misleading MissingConverter, which masked this
        # configuration error from the user.
        raise MissingConverter("Tree implementation {} not found".format(extra_config))
|
https://github.com/microsoft/hummingbird/issues/424
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/_topology.py in convert(topology, backend, test_input, device, extra_config)
162
--> 163 operator_map[operator.full_name] = converter(operator, device, extra_config)
164 except ValueError:
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/sklearn/decision_tree.py in convert_sklearn_random_forest_classifier(operator, device, extra_config)
45 return convert_decision_ensemble_tree_common(
---> 46 tree_infos, get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn, n_features, classes, extra_config
47 )
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in convert_decision_ensemble_tree_common(tree_infos, get_parameters, get_parameters_for_tree_trav, n_features, classes, extra_config)
407 )
--> 408 for tree_param in tree_parameters
409 ]
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in <listcomp>(.0)
407 )
--> 408 for tree_param in tree_parameters
409 ]
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in get_parameters_for_tree_trav_sklearn(lefts, rights, features, thresholds, values, extra_config)
276
--> 277 return get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values)
278
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values, extra_config)
208 values = np.array([np.array([0.0]), values[0], values[0]])
--> 209 values.reshape(3, n_classes)
210
ValueError: cannot reshape array of size 3 into shape (3,2)
During handling of the above exception, another exception occurred:
MissingConverter Traceback (most recent call last)
<ipython-input-2-70faf2a01f51> in <module>
10 clf = RandomForestClassifier(random_state=seed)
11 clf.fit(X, y)
---> 12 model = hummingbird.ml.convert(clf, "torch")
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/convert.py in convert(model, backend, test_input, device, extra_config)
429 """
430 assert constants.REMAINDER_SIZE not in extra_config
--> 431 return _convert_common(model, backend, test_input, device, extra_config)
432
433
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/convert.py in _convert_common(model, backend, test_input, device, extra_config)
390 return _convert_sparkml(model, backend, test_input, device, extra_config)
391
--> 392 return _convert_sklearn(model, backend, test_input, device, extra_config)
393
394
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/convert.py in _convert_sklearn(model, backend, test_input, device, extra_config)
98
99 # Convert the Topology object into a PyTorch model.
--> 100 hb_model = topology_converter(topology, backend, test_input, device, extra_config=extra_config)
101 return hb_model
102
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/_topology.py in convert(topology, backend, test_input, device, extra_config)
165 raise MissingConverter(
166 "Unable to find converter for {} type {} with extra config: {}.".format(
--> 167 operator.type, type(getattr(operator, "raw_model", None)), extra_config
168 )
169 )
MissingConverter: Unable to find converter for SklearnRandomForestClassifier type <class 'NoneType'> with extra config: {'container': True, 'n_threads': 10, 'n_trees': 100}.
It usually means the pipeline being converted contains a
transformer or a predictor with no corresponding converter implemented.
Please fill an issue at https://github.com/microsoft/hummingbird.
|
ValueError
|
def get_parameters_for_tree_trav_common(
    lefts, rights, features, thresholds, values, extra_config={}
):
    """
    Common function used by all tree algorithms to generate the parameters
    according to the tree_trav strategies.

    Args:
        lefts: The left nodes
        rights: The right nodes
        features: The features used in the decision nodes
        thresholds: The thresholds used in the decision nodes
        values: The values stored in the leaf nodes
        extra_config: Extra configuration options (read-only here; the mutable
            default is safe because it is never modified)

    Returns:
        An array containing the extracted parameters
    """
    if len(lefts) == 1:
        # Model creating a tree with just a single leaf node. We transform it
        # to a model with one internal node and two (identical) leaves.
        lefts = [1, -1, -1]
        rights = [2, -1, -1]
        features = [0, 0, 0]
        thresholds = [0, 0, 0]
        n_classes = values.shape[1] if isinstance(values, np.ndarray) else 1
        # A zero row of width n_classes keeps the array rectangular.
        values = np.array([np.zeros(n_classes), values[0], values[0]])
        # BUGFIX: ndarray.reshape returns a new array; the result used to be
        # discarded, leaving `values` with its original shape.
        values = values.reshape(3, n_classes)

    ids = [i for i in range(len(lefts))]
    nodes = list(zip(ids, lefts, rights, features, thresholds, values))

    # Refactor the tree parameters in the proper format.
    nodes_map = {0: Node(0)}
    current_node = 0
    for i, node in enumerate(nodes):
        id, left, right, feature, threshold, value = node
        if left != -1:
            l_node = Node(left)
            nodes_map[left] = l_node
        else:
            # Leaf on the left: self-reference and mark as non-decision node.
            lefts[i] = id
            l_node = -1
            feature = -1
        if right != -1:
            r_node = Node(right)
            nodes_map[right] = r_node
        else:
            rights[i] = id
            r_node = -1
            feature = -1
        nodes_map[current_node].left = l_node
        nodes_map[current_node].right = r_node
        nodes_map[current_node].feature = feature
        nodes_map[current_node].threshold = threshold
        nodes_map[current_node].value = value
        current_node += 1

    lefts = np.array(lefts)
    rights = np.array(rights)
    features = np.array(features)
    thresholds = np.array(thresholds)
    values = np.array(values)

    return [nodes_map, ids, lefts, rights, features, thresholds, values]
|
def get_parameters_for_tree_trav_common(
    lefts, rights, features, thresholds, values, extra_config={}
):
    """
    Common function used by all tree algorithms to generate the parameters
    according to the tree_trav strategies.

    Args:
        lefts: The left nodes
        rights: The right nodes
        features: The features used in the decision nodes
        thresholds: The thresholds used in the decision nodes
        values: The values stored in the leaf nodes
        extra_config: Extra configuration options (read-only here; the mutable
            default is safe because it is never modified)

    Returns:
        An array containing the extracted parameters
    """
    if len(lefts) == 1:
        # Model creating a tree with just a single leaf node. We transform it
        # to a model with one internal node and two (identical) leaves.
        lefts = [1, -1, -1]
        rights = [2, -1, -1]
        features = [0, 0, 0]
        thresholds = [0, 0, 0]
        n_classes = values.shape[1] if isinstance(values, np.ndarray) else 1
        # BUGFIX: the placeholder row must have width n_classes. The previous
        # np.array([0.0]) produced a ragged object array for multiclass models,
        # making reshape raise "cannot reshape array of size 3 into shape (3, n)".
        values = np.array([np.zeros(n_classes), values[0], values[0]])
        # BUGFIX: ndarray.reshape returns a new array; assign the result.
        values = values.reshape(3, n_classes)

    ids = [i for i in range(len(lefts))]
    nodes = list(zip(ids, lefts, rights, features, thresholds, values))

    # Refactor the tree parameters in the proper format.
    nodes_map = {0: Node(0)}
    current_node = 0
    for i, node in enumerate(nodes):
        id, left, right, feature, threshold, value = node
        if left != -1:
            l_node = Node(left)
            nodes_map[left] = l_node
        else:
            # Leaf on the left: self-reference and mark as non-decision node.
            lefts[i] = id
            l_node = -1
            feature = -1
        if right != -1:
            r_node = Node(right)
            nodes_map[right] = r_node
        else:
            rights[i] = id
            r_node = -1
            feature = -1
        nodes_map[current_node].left = l_node
        nodes_map[current_node].right = r_node
        nodes_map[current_node].feature = feature
        nodes_map[current_node].threshold = threshold
        nodes_map[current_node].value = value
        current_node += 1

    lefts = np.array(lefts)
    rights = np.array(rights)
    features = np.array(features)
    thresholds = np.array(thresholds)
    values = np.array(values)

    return [nodes_map, ids, lefts, rights, features, thresholds, values]
|
https://github.com/microsoft/hummingbird/issues/424
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/_topology.py in convert(topology, backend, test_input, device, extra_config)
162
--> 163 operator_map[operator.full_name] = converter(operator, device, extra_config)
164 except ValueError:
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/sklearn/decision_tree.py in convert_sklearn_random_forest_classifier(operator, device, extra_config)
45 return convert_decision_ensemble_tree_common(
---> 46 tree_infos, get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn, n_features, classes, extra_config
47 )
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in convert_decision_ensemble_tree_common(tree_infos, get_parameters, get_parameters_for_tree_trav, n_features, classes, extra_config)
407 )
--> 408 for tree_param in tree_parameters
409 ]
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in <listcomp>(.0)
407 )
--> 408 for tree_param in tree_parameters
409 ]
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in get_parameters_for_tree_trav_sklearn(lefts, rights, features, thresholds, values, extra_config)
276
--> 277 return get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values)
278
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/operator_converters/_tree_commons.py in get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values, extra_config)
208 values = np.array([np.array([0.0]), values[0], values[0]])
--> 209 values.reshape(3, n_classes)
210
ValueError: cannot reshape array of size 3 into shape (3,2)
During handling of the above exception, another exception occurred:
MissingConverter Traceback (most recent call last)
<ipython-input-2-70faf2a01f51> in <module>
10 clf = RandomForestClassifier(random_state=seed)
11 clf.fit(X, y)
---> 12 model = hummingbird.ml.convert(clf, "torch")
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/convert.py in convert(model, backend, test_input, device, extra_config)
429 """
430 assert constants.REMAINDER_SIZE not in extra_config
--> 431 return _convert_common(model, backend, test_input, device, extra_config)
432
433
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/convert.py in _convert_common(model, backend, test_input, device, extra_config)
390 return _convert_sparkml(model, backend, test_input, device, extra_config)
391
--> 392 return _convert_sklearn(model, backend, test_input, device, extra_config)
393
394
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/convert.py in _convert_sklearn(model, backend, test_input, device, extra_config)
98
99 # Convert the Topology object into a PyTorch model.
--> 100 hb_model = topology_converter(topology, backend, test_input, device, extra_config=extra_config)
101 return hb_model
102
~/anaconda3/envs/p37/lib/python3.7/site-packages/hummingbird/ml/_topology.py in convert(topology, backend, test_input, device, extra_config)
165 raise MissingConverter(
166 "Unable to find converter for {} type {} with extra config: {}.".format(
--> 167 operator.type, type(getattr(operator, "raw_model", None)), extra_config
168 )
169 )
MissingConverter: Unable to find converter for SklearnRandomForestClassifier type <class 'NoneType'> with extra config: {'container': True, 'n_threads': 10, 'n_trees': 100}.
It usually means the pipeline being converted contains a
transformer or a predictor with no corresponding converter implemented.
Please fill an issue at https://github.com/microsoft/hummingbird.
|
ValueError
|
def _get_tree_infos_from_onnx_ml_operator(model):
    """
    Extract the per-tree parameters from an ONNXML TreeEnsemble model.

    Args:
        model: Wrapper exposing the ONNX TreeEnsembleClassifier /
            TreeEnsembleRegressor node as ``model.origin``.

    Returns:
        A tuple ``(tree_infos, classes, post_transform)``: the list of
        ``TreeParameters`` (one per tree), the class labels (``None`` for
        regressors) and the ONNX ``post_transform`` string.
    """
    tree_infos = []
    left = right = features = values = threshold = None
    tree_ids = target_node_ids = target_tree_ids = modes = None
    classes = post_transform = None

    # The list of attributes is a merge between the classifier and regression operators.
    # The operators descriptions can be found here
    # https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#aionnxmltreeensembleclassifier and
    # here https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#aionnxmltreeensembleregressor
    for attr in model.origin.attribute:
        if attr.name == "nodes_falsenodeids":
            right = attr.ints
        elif attr.name == "nodes_truenodeids":
            left = attr.ints
        elif attr.name == "nodes_featureids":
            features = attr.ints
        elif attr.name == "nodes_values":
            threshold = attr.floats
        elif attr.name == "class_weights" or attr.name == "target_weights":
            values = attr.floats
        elif attr.name == "class_nodeids" or attr.name == "target_nodeids":
            target_node_ids = attr.ints
        elif attr.name == "class_treeids" or attr.name == "target_treeids":
            target_tree_ids = attr.ints
        elif attr.name == "nodes_treeids":
            tree_ids = attr.ints
        elif attr.name == "classlabels_int64s":
            # Copy into a plain list so we do not keep a reference to the
            # protobuf repeated field (which e.g. cannot be pickled).
            classes = list(attr.ints)
        elif attr.name == "classlabels_strings":
            # BUGFIX: the comparison string used to carry a trailing space
            # ("classlabels_strings ") so it never matched and string class
            # labels silently slipped past this unsupported-feature check.
            if len(attr.strings) > 0:
                raise AssertionError("String class labels not supported yet.")
        elif attr.name == "post_transform":
            post_transform = attr.s.decode("utf-8")
            if post_transform not in ["NONE", "LOGISTIC", "SOFTMAX"]:
                raise AssertionError(
                    "Post transform {} not supported".format(post_transform)
                )
        elif attr.name == "nodes_modes":
            modes = attr.strings
            for mode in modes:
                if (not mode == b"BRANCH_LEQ") and (not mode == b"LEAF"):
                    raise AssertionError("Modality {} not supported".format(mode))

    is_decision_tree = post_transform == "NONE"

    # Order values based on target node and tree ids.
    new_values = []
    n_classes = 1 if classes is None or not is_decision_tree else len(classes)
    j = 0
    for i in range(max(target_tree_ids) + 1):
        k = j
        while k < len(target_tree_ids) and target_tree_ids[k] == i:
            k += 1
        target_ids = target_node_ids[j:k]
        target_ids_zipped = dict(zip(target_ids, range(len(target_ids))))
        for key in sorted(target_ids_zipped):
            if is_decision_tree and n_classes > 2:  # For multiclass we have 2d arrays.
                tmp_values = []
                for c in range(n_classes):
                    tmp_values.append(
                        values[j + c + (target_ids_zipped[key] - (n_classes - 1))]
                    )
                new_values.append(tmp_values)
            else:
                new_values.append(values[j + target_ids_zipped[key]])
        j = k
    values = new_values

    i = 0
    prev_id = 0
    count = 0
    l_count = 0
    for n, id in enumerate(tree_ids):
        if id == i:
            if modes[n] == b"LEAF":
                # Mark leaves with the -1 sentinel expected downstream.
                left[n] = -1
                right[n] = -1
                threshold[n] = -1
        else:
            # Crossed a tree boundary: flush the nodes collected so far.
            t_left = left[prev_id:count]
            t_right = right[prev_id:count]
            t_features = features[prev_id:count]
            t_threshold = threshold[prev_id:count]
            t_values = (
                np.zeros((len(t_left), n_classes))
                if is_decision_tree
                else np.zeros(len(t_left))
            )
            if len(t_left) == 1:
                # Model creating trees with just a single leaf node. We transform it
                # to a model with one internal node.
                t_left = [1, -1, -1]
                t_right = [2, -1, -1]
                t_features = [0, 0, 0]
                t_threshold = [0, -1, -1]
                if l_count < len(values):
                    t_values[0] = values[l_count]
                    l_count += 1
            else:
                for j in range(len(t_left)):
                    if t_threshold[j] == -1 and l_count < len(values):
                        t_values[j] = values[l_count]
                        l_count += 1
            if t_values.shape[0] == 1:
                # Model creating trees with just a single leaf node. We fix the values here.
                n_classes = t_values.shape[1]
                # BUGFIX: build a rectangular (3, n_classes) block. The old
                # np.array([0.0]) first row produced a ragged object array for
                # n_classes > 1, and the reshape result was discarded
                # (ndarray.reshape is not in-place).
                t_values = np.array(
                    [np.zeros(n_classes), t_values[0], t_values[0]]
                ).reshape(3, n_classes)
            if (
                is_decision_tree and n_classes == 2
            ):  # We need to fix the probabilities in this case.
                for k in range(len(t_left)):
                    prob = (1 / (max(tree_ids) + 1)) - t_values[k][1]
                    t_values[k][0] = prob
            tree_infos.append(
                TreeParameters(
                    t_left,
                    t_right,
                    t_features,
                    t_threshold,
                    np.array(t_values).reshape(-1, n_classes),
                )
            )
            prev_id = count
            i += 1
        count += 1

    # Flush the last tree (the loop above only flushes on a tree boundary).
    t_left = left[prev_id:count]
    t_right = right[prev_id:count]
    t_features = features[prev_id:count]
    t_threshold = threshold[prev_id:count]
    t_values = (
        np.zeros((len(t_left), n_classes))
        if is_decision_tree
        else np.zeros(len(t_left))
    )
    if len(t_left) == 1:
        # Model creating trees with just a single leaf node. We transform it
        # to a model with one internal node.
        t_left = [1, -1, -1]
        t_right = [2, -1, -1]
        t_features = [0, 0, 0]
        t_threshold = [0, -1, -1]
        if l_count < len(values):
            t_values[0] = values[l_count]
            l_count += 1
    else:
        for j in range(len(t_left)):
            if t_threshold[j] == -1 and l_count < len(values):
                t_values[j] = values[l_count]
                l_count += 1
    if t_values.shape[0] == 1:
        # Model creating trees with just a single leaf node. We fix the values here.
        # NOTE(review): for 1-d t_values (non-"NONE" post_transform) shape[1]
        # would raise IndexError in the original as well — unchanged here.
        n_classes = t_values.shape[1]
        # BUGFIX: same rectangular fix as in the loop above.
        t_values = np.array(
            [np.zeros(n_classes), t_values[0], t_values[0]]
        ).reshape(3, n_classes)
    if (
        is_decision_tree and n_classes == 2
    ):  # We need to fix the probabilities in this case.
        for k in range(len(t_left)):
            prob = (1 / (max(tree_ids) + 1)) - t_values[k][1]
            t_values[k][0] = prob
    tree_infos.append(
        TreeParameters(
            t_left,
            t_right,
            t_features,
            t_threshold,
            np.array(t_values).reshape(-1, n_classes),
        )
    )
    return tree_infos, classes, post_transform
|
def _get_tree_infos_from_onnx_ml_operator(model):
    """
    Extract the per-tree parameters from an ONNXML TreeEnsemble model.

    Args:
        model: Wrapper exposing the ONNX TreeEnsembleClassifier /
            TreeEnsembleRegressor node as ``model.origin``.

    Returns:
        A tuple ``(tree_infos, classes, post_transform)``: the list of
        ``TreeParameters`` (one per tree), the class labels (``None`` for
        regressors) and the ONNX ``post_transform`` string.
    """
    tree_infos = []
    left = right = features = values = threshold = None
    tree_ids = target_node_ids = target_tree_ids = modes = None
    classes = post_transform = None

    # The list of attributes is a merge between the classifier and regression operators.
    # The operators descriptions can be found here
    # https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#aionnxmltreeensembleclassifier and
    # here https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#aionnxmltreeensembleregressor
    for attr in model.origin.attribute:
        if attr.name == "nodes_falsenodeids":
            right = attr.ints
        elif attr.name == "nodes_truenodeids":
            left = attr.ints
        elif attr.name == "nodes_featureids":
            features = attr.ints
        elif attr.name == "nodes_values":
            threshold = attr.floats
        elif attr.name == "class_weights" or attr.name == "target_weights":
            values = attr.floats
        elif attr.name == "class_nodeids" or attr.name == "target_nodeids":
            target_node_ids = attr.ints
        elif attr.name == "class_treeids" or attr.name == "target_treeids":
            target_tree_ids = attr.ints
        elif attr.name == "nodes_treeids":
            tree_ids = attr.ints
        elif attr.name == "classlabels_int64s":
            # BUGFIX: copy into a plain list instead of keeping a reference to
            # the protobuf repeated field. The repeated-field container cannot
            # be pickled, which broke saving the converted model with dill.
            classes = list(attr.ints)
        elif attr.name == "classlabels_strings":
            # BUGFIX: the comparison string used to carry a trailing space
            # ("classlabels_strings ") so it never matched and string class
            # labels silently slipped past this unsupported-feature check.
            if len(attr.strings) > 0:
                raise AssertionError("String class labels not supported yet.")
        elif attr.name == "post_transform":
            post_transform = attr.s.decode("utf-8")
            if post_transform not in ["NONE", "LOGISTIC", "SOFTMAX"]:
                raise AssertionError(
                    "Post transform {} not supported".format(post_transform)
                )
        elif attr.name == "nodes_modes":
            modes = attr.strings
            for mode in modes:
                if (not mode == b"BRANCH_LEQ") and (not mode == b"LEAF"):
                    raise AssertionError("Modality {} not supported".format(mode))

    is_decision_tree = post_transform == "NONE"

    # Order values based on target node and tree ids.
    new_values = []
    n_classes = 1 if classes is None or not is_decision_tree else len(classes)
    j = 0
    for i in range(max(target_tree_ids) + 1):
        k = j
        while k < len(target_tree_ids) and target_tree_ids[k] == i:
            k += 1
        target_ids = target_node_ids[j:k]
        target_ids_zipped = dict(zip(target_ids, range(len(target_ids))))
        for key in sorted(target_ids_zipped):
            if is_decision_tree and n_classes > 2:  # For multiclass we have 2d arrays.
                tmp_values = []
                for c in range(n_classes):
                    tmp_values.append(
                        values[j + c + (target_ids_zipped[key] - (n_classes - 1))]
                    )
                new_values.append(tmp_values)
            else:
                new_values.append(values[j + target_ids_zipped[key]])
        j = k
    values = new_values

    i = 0
    prev_id = 0
    count = 0
    l_count = 0
    for n, id in enumerate(tree_ids):
        if id == i:
            if modes[n] == b"LEAF":
                # Mark leaves with the -1 sentinel expected downstream.
                left[n] = -1
                right[n] = -1
                threshold[n] = -1
        else:
            # Crossed a tree boundary: flush the nodes collected so far.
            t_left = left[prev_id:count]
            t_right = right[prev_id:count]
            t_features = features[prev_id:count]
            t_threshold = threshold[prev_id:count]
            t_values = (
                np.zeros((len(t_left), n_classes))
                if is_decision_tree
                else np.zeros(len(t_left))
            )
            if len(t_left) == 1:
                # Model creating trees with just a single leaf node. We transform it
                # to a model with one internal node.
                t_left = [1, -1, -1]
                t_right = [2, -1, -1]
                t_features = [0, 0, 0]
                t_threshold = [0, -1, -1]
                if l_count < len(values):
                    t_values[0] = values[l_count]
                    l_count += 1
            else:
                for j in range(len(t_left)):
                    if t_threshold[j] == -1 and l_count < len(values):
                        t_values[j] = values[l_count]
                        l_count += 1
            if t_values.shape[0] == 1:
                # Model creating trees with just a single leaf node. We fix the values here.
                n_classes = t_values.shape[1]
                # BUGFIX: build a rectangular (3, n_classes) block. The old
                # np.array([0.0]) first row produced a ragged object array for
                # n_classes > 1, and the reshape result was discarded
                # (ndarray.reshape is not in-place).
                t_values = np.array(
                    [np.zeros(n_classes), t_values[0], t_values[0]]
                ).reshape(3, n_classes)
            if (
                is_decision_tree and n_classes == 2
            ):  # We need to fix the probabilities in this case.
                for k in range(len(t_left)):
                    prob = (1 / (max(tree_ids) + 1)) - t_values[k][1]
                    t_values[k][0] = prob
            tree_infos.append(
                TreeParameters(
                    t_left,
                    t_right,
                    t_features,
                    t_threshold,
                    np.array(t_values).reshape(-1, n_classes),
                )
            )
            prev_id = count
            i += 1
        count += 1

    # Flush the last tree (the loop above only flushes on a tree boundary).
    t_left = left[prev_id:count]
    t_right = right[prev_id:count]
    t_features = features[prev_id:count]
    t_threshold = threshold[prev_id:count]
    t_values = (
        np.zeros((len(t_left), n_classes))
        if is_decision_tree
        else np.zeros(len(t_left))
    )
    if len(t_left) == 1:
        # Model creating trees with just a single leaf node. We transform it
        # to a model with one internal node.
        t_left = [1, -1, -1]
        t_right = [2, -1, -1]
        t_features = [0, 0, 0]
        t_threshold = [0, -1, -1]
        if l_count < len(values):
            t_values[0] = values[l_count]
            l_count += 1
    else:
        for j in range(len(t_left)):
            if t_threshold[j] == -1 and l_count < len(values):
                t_values[j] = values[l_count]
                l_count += 1
    if t_values.shape[0] == 1:
        # Model creating trees with just a single leaf node. We fix the values here.
        n_classes = t_values.shape[1]
        # BUGFIX: same rectangular fix as in the loop above.
        t_values = np.array(
            [np.zeros(n_classes), t_values[0], t_values[0]]
        ).reshape(3, n_classes)
    if (
        is_decision_tree and n_classes == 2
    ):  # We need to fix the probabilities in this case.
        for k in range(len(t_left)):
            prob = (1 / (max(tree_ids) + 1)) - t_values[k][1]
            t_values[k][0] = prob
    tree_infos.append(
        TreeParameters(
            t_left,
            t_right,
            t_features,
            t_threshold,
            np.array(t_values).reshape(-1, n_classes),
        )
    )
    return tree_infos, classes, post_transform
|
https://github.com/microsoft/hummingbird/issues/420
|
from hummingbird.ml import convert
import onnx
o = onnx.load('hospital_DecisionTree_BINARY_MaxDepth_20_batch_optimized.onnx', 'r')
type(o)
<class 'onnx.onnx_ONNX_REL_1_6_ml_pb2.ModelProto'>
t=convert(o,'torch')
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py:434: UserWarning: Setting attributes on ParameterList is not supported.
warnings.warn("Setting attributes on ParameterList is not supported.")
type(t)
<class 'hummingbird.ml.containers.sklearn.pytorch_containers.PyTorchSklearnContainerClassification'>
t.save('hospital_DecisionTree_BINARY_MaxDepth_20_batch_optimized.zip')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/sshuser/hummingbird/hummingbird/ml/containers/sklearn/pytorch_containers.py", line 81, in save
dill.dump(self, file)
File "/usr/local/lib/python3.6/dist-packages/dill/_dill.py", line 267, in dump
Pickler(file, protocol, **_kwds).dump(obj)
File "/usr/local/lib/python3.6/dist-packages/dill/_dill.py", line 454, in dump
StockPickler.dump(self, obj)
File "/usr/lib/python3.6/pickle.py", line 409, in dump
self.save(obj)
File "/usr/lib/python3.6/pickle.py", line 521, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python3.6/pickle.py", line 634, in save_reduce
save(state)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/local/lib/python3.6/dist-packages/dill/_dill.py", line 941, in save_module_dict
StockPickler.save_dict(pickler, obj)
File "/usr/lib/python3.6/pickle.py", line 821, in save_dict
self._batch_setitems(obj.items())
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 521, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python3.6/pickle.py", line 634, in save_reduce
save(state)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/local/lib/python3.6/dist-packages/dill/_dill.py", line 941, in save_module_dict
StockPickler.save_dict(pickler, obj)
File "/usr/lib/python3.6/pickle.py", line 821, in save_dict
self._batch_setitems(obj.items())
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 521, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python3.6/pickle.py", line 631, in save_reduce
self._batch_setitems(dictitems)
File "/usr/lib/python3.6/pickle.py", line 852, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 521, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python3.6/pickle.py", line 634, in save_reduce
save(state)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/local/lib/python3.6/dist-packages/dill/_dill.py", line 941, in save_module_dict
StockPickler.save_dict(pickler, obj)
File "/usr/lib/python3.6/pickle.py", line 821, in save_dict
self._batch_setitems(obj.items())
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 521, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python3.6/pickle.py", line 631, in save_reduce
self._batch_setitems(dictitems)
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 521, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python3.6/pickle.py", line 634, in save_reduce
save(state)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/local/lib/python3.6/dist-packages/dill/_dill.py", line 941, in save_module_dict
StockPickler.save_dict(pickler, obj)
File "/usr/lib/python3.6/pickle.py", line 821, in save_dict
self._batch_setitems(obj.items())
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 496, in save
rv = reduce(self.proto)
_pickle.PickleError: can't pickle repeated message fields, convert to list first
|
_pickle.PickleError
|
def pool(self):
    """Lazily build the worker thread pool on first use.

    Blocking clients never touch this, so the pool (and its threads)
    only ever exists when a caller actually needs it.  ``self.close``
    is registered with ``atexit`` so the pool is torn down cleanly at
    interpreter shutdown.
    """
    if self._pool is not None:
        return self._pool
    # First request: arrange cleanup, then create the pool.
    atexit.register(self.close)
    self._pool = ThreadPool(self.pool_threads)
    return self._pool
|
def pool(self):
    """Create thread pool on first request
    avoids instantiating unused threadpool for blocking clients.
    """
    if self._pool is None:
        # Register cleanup before creating the pool: without this the
        # non-daemon worker threads keep the interpreter alive at exit
        # (process hangs until Ctrl-C).
        atexit.register(self.close)
        self._pool = ThreadPool(self.pool_threads)
    return self._pool
|
https://github.com/kubernetes-client/python/issues/1037
|
done
^CException ignored in: <bound method ApiClient.__del__ of <kubernetes.client.api_client.ApiClient object at 0x7f8f9ba67c50>>
Traceback (most recent call last):
File "/home/fabian/Envs/test/lib/python3.6/site-packages/kubernetes/client/api_client.py", line 81, in __del__
self._pool.join()
File "/usr/lib64/python3.6/multiprocessing/pool.py", line 546, in join
self._worker_handler.join()
File "/usr/lib64/python3.6/threading.py", line 1056, in join
self._wait_for_tstate_lock()
File "/usr/lib64/python3.6/threading.py", line 1072, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
KeyboardInterrupt
^CError in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/usr/lib64/python3.6/multiprocessing/util.py", line 262, in _run_finalizers
finalizer()
File "/usr/lib64/python3.6/multiprocessing/util.py", line 186, in __call__
res = self._callback(*self._args, **self._kwargs)
File "/usr/lib64/python3.6/multiprocessing/pool.py", line 582, in _terminate_pool
worker_handler.join()
File "/usr/lib64/python3.6/threading.py", line 1056, in join
self._wait_for_tstate_lock()
File "/usr/lib64/python3.6/threading.py", line 1072, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
KeyboardInterrupt
|
KeyboardInterrupt
|
def spotify_dl():
    """Command-line entry point: resolve a Spotify playlist/URI and
    download its tracks via youtube-dl.

    Options come from the command line and, when present, from
    ``~/.spotify_dl_settings``.  Raises ``Exception`` when ``-l/--url``
    is given but does not look like a Spotify playlist link.
    """
    parser = argparse.ArgumentParser(prog="spotify_dl")
    parser.add_argument(
        "-d",
        "--download",
        action="store_true",
        help="Download using youtube-dl",
        default=True,
    )
    parser.add_argument(
        "-p",
        "--playlist",
        action="store",
        help="Download from playlist id instead of saved tracks",
    )
    parser.add_argument(
        "-V",
        "--verbose",
        action="store_true",
        help="Show more information on whats happening.",
    )
    parser.add_argument(
        "-v",
        "--version",
        action="store_true",
        help="Shows current version of the program",
    )
    parser.add_argument(
        "-o", "--output", type=str, action="store", help="Specify download directory."
    )
    parser.add_argument(
        "-u",
        "--user_id",
        action="store",
        help="Specify the playlist owner's userid when it"
        " is different than your spotify userid",
    )
    parser.add_argument(
        "-i",
        "--uri",
        type=str,
        action="store",
        nargs="*",
        help="Given a URI, download it.",
    )
    parser.add_argument(
        "-f",
        "--format_str",
        type=str,
        action="store",
        help="Specify youtube-dl format string.",
        default="bestaudio/best",
    )
    parser.add_argument(
        "-m",
        "--skip_mp3",
        action="store_true",
        help="Don't convert downloaded songs to mp3",
    )
    parser.add_argument("-l", "--url", action="store", help="Spotify Playlist link URL")
    args = parser.parse_args()
    # Dots escaped so e.g. "https://openxspotifyxcom/..." is rejected.
    playlist_url_pattern = re.compile(r"^https://open\.spotify\.com/(.+)$")
    if args.version:
        print("spotify_dl v{}".format(VERSION))
        exit(0)
    if os.path.isfile(os.path.expanduser("~/.spotify_dl_settings")):
        with open(os.path.expanduser("~/.spotify_dl_settings")) as file:
            config = json.loads(file.read())
            for key, value in config.items():
                if value and (value.lower() == "true" or value.lower() == "t"):
                    setattr(args, key, True)
                else:
                    setattr(args, key, value)
    if args.verbose:
        log.setLevel(DEBUG)
    log.info("Starting spotify_dl")
    log.debug("Setting debug mode on spotify_dl")
    if not check_for_tokens():
        exit(1)
    token = authenticate()
    sp = spotipy.Spotify(auth=token)
    log.debug("Arguments: {}".format(args))
    # Only parse -l/--url when one was actually supplied: matching
    # against None raises TypeError, and -i/-p users pass no URL at all.
    if args.url:
        url_match = playlist_url_pattern.match(args.url)
        if url_match and len(url_match.groups()) > 0:
            uri = "spotify:" + url_match.groups()[0].replace("/", ":")
            args.uri = [uri]
        else:
            raise Exception("Invalid playlist URL ")
    if args.uri:
        current_user_id, playlist_id = extract_user_and_playlist_from_uri(args.uri[0])
    else:
        if args.user_id is None:
            current_user_id = sp.current_user()["id"]
        else:
            current_user_id = args.user_id
    if args.output:
        if args.uri:
            uri = args.uri[0]
            playlist = playlist_name(uri, sp)
        else:
            playlist = get_playlist_name_from_id(args.playlist, current_user_id, sp)
        log.info("Saving songs to: {}".format(playlist))
        download_directory = os.path.join(args.output, playlist)
        if not os.path.exists(download_directory):
            os.makedirs(download_directory)
    else:
        download_directory = ""
    if args.uri:
        songs = fetch_tracks(sp, playlist_id, current_user_id)
    else:
        songs = fetch_tracks(sp, args.playlist, current_user_id)
    url = []
    for song, artist in songs.items():
        link = fetch_youtube_url(song + " - " + artist)
        if link:
            url.append((link, song, artist))
    save_songs_to_file(url, download_directory)
    if args.download is True:
        download_songs(url, download_directory, args.format_str, args.skip_mp3)
|
def spotify_dl():
    """Command-line entry point: resolve a Spotify playlist/URI and
    download its tracks via youtube-dl.

    Options come from the command line and, when present, from
    ``~/.spotify_dl_settings``.
    """
    parser = argparse.ArgumentParser(prog="spotify_dl")
    parser.add_argument(
        "-d",
        "--download",
        action="store_true",
        help="Download using youtube-dl",
        default=True,
    )
    parser.add_argument(
        "-p",
        "--playlist",
        action="store",
        help="Download from playlist id instead of saved tracks",
    )
    parser.add_argument(
        "-V",
        "--verbose",
        action="store_true",
        help="Show more information on whats happening.",
    )
    parser.add_argument(
        "-v",
        "--version",
        action="store_true",
        help="Shows current version of the program",
    )
    # Plain "store" (not nargs="*"): with nargs the parsed value is a
    # list and the later path concatenation raises
    # TypeError: can only concatenate list (not "str") to list.
    parser.add_argument(
        "-o",
        "--output",
        type=str,
        action="store",
        help="Specify download directory.",
    )
    parser.add_argument(
        "-u",
        "--user_id",
        action="store",
        help="Specify the playlist owner's userid when it"
        " is different than your spotify userid",
    )
    parser.add_argument(
        "-i",
        "--uri",
        type=str,
        action="store",
        nargs="*",
        help="Given a URI, download it.",
    )
    parser.add_argument(
        "-f",
        "--format_str",
        type=str,
        action="store",
        nargs="*",
        help="Specify youtube-dl format string.",
        default=["bestaudio/best"],
    )
    parser.add_argument(
        "-m",
        "--skip_mp3",
        action="store_true",
        help="Don't convert downloaded songs to mp3",
    )
    parser.add_argument("-l", "--url", action="store", help="Spotify Playlist link URL")
    args = parser.parse_args()
    if args.version:
        print("spotify_dl v{}".format(VERSION))
        exit(0)
    if os.path.isfile(os.path.expanduser("~/.spotify_dl_settings")):
        with open(os.path.expanduser("~/.spotify_dl_settings")) as file:
            config = json.loads(file.read())
            for key, value in config.items():
                if value and (value.lower() == "true" or value.lower() == "t"):
                    setattr(args, key, True)
                else:
                    setattr(args, key, value)
    if args.verbose:
        log.setLevel(DEBUG)
    log.info("Starting spotify_dl")
    log.debug("Setting debug mode on spotify_dl")
    if not check_for_tokens():
        exit(1)
    token = authenticate()
    sp = spotipy.Spotify(auth=token)
    log.debug("Arguments: {}".format(args))
    if args.url is not None:
        url = args.url.split("open.spotify.com/")[1].split("/")
        uri = ":".join(url)
        uri = "spotify:" + uri
        args.uri = []
        args.uri.append(uri)
    if args.uri:
        current_user_id, playlist_id = extract_user_and_playlist_from_uri(args.uri[0])
    else:
        if args.user_id is None:
            current_user_id = sp.current_user()["id"]
        else:
            current_user_id = args.user_id
    if args.output:
        if args.uri:
            uri = args.uri[0]
            playlist = playlist_name(uri, sp)
        else:
            playlist = get_playlist_name_from_id(args.playlist, current_user_id, sp)
        log.info("Saving songs to: {}".format(playlist))
        download_directory = args.output + "/" + playlist
        # Ensure a trailing slash so later path concatenation stays valid.
        if download_directory and download_directory[-1] != "/":
            download_directory += "/"
        if not os.path.exists(download_directory):
            os.makedirs(download_directory)
    else:
        download_directory = ""
    if args.uri:
        songs = fetch_tracks(sp, playlist_id, current_user_id)
    else:
        songs = fetch_tracks(sp, args.playlist, current_user_id)
    url = []
    for song, artist in songs.items():
        link = fetch_youtube_url(song + " - " + artist)
        if link:
            url.append((link, song, artist))
    save_songs_to_file(url, download_directory)
    if args.download is True:
        download_songs(url, download_directory, args.format_str[0], args.skip_mp3)
|
https://github.com/SathyaBhat/spotify-dl/issues/44
|
INFO: 2017-06-03 19:12:37,099 - extract_user_and_playlist_from_uri - List owner: anjunadeep
INFO: 2017-06-03 19:12:37,099 - extract_user_and_playlist_from_uri - List ID: 1GfH39JcID8aFZ0ZQQVkBk
INFO: 2017-06-03 19:12:37,100 - extract_user_and_playlist_from_uri - List owner: anjunadeep
INFO: 2017-06-03 19:12:37,100 - extract_user_and_playlist_from_uri - List ID: 1GfH39JcID8aFZ0ZQQVkBk
INFO: 2017-06-03 19:12:37,813 - spotify_dl - Saving songs to: Anjunadeep Recommends - Updated Daily
Traceback (most recent call last):
File "f:\python34\lib\runpy.py", line 170, in _run_module_as_main
"__main__", mod_spec)
File "f:\python34\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "F:\Python34\Scripts\spotify_dl.exe\__main__.py", line 9, in <module>
File "f:\python34\lib\site-packages\spotify_dl\spotify_dl.py", line 96, in spotify_dl
download_directory = args.output + '/' + playlist
TypeError: can only concatenate list (not "str") to list
|
TypeError
|
def copy(self):
    """Return a fresh ParseTree duplicating this one.

    Visiting a new builder guarantees the copy shares no element
    references with the original.
    """
    duplicate_builder = ParseTreeBuilder(_parsetree_roundtrip=True)
    self.visit(duplicate_builder)
    return duplicate_builder.get_parsetree()
|
def copy(self):
    """Return a deep copy of this tree via XML serialization.

    Serializing and re-parsing guarantees every element reference in
    the copy is a fresh object.
    """
    # By using serialization we are absolutely sure all refs are new
    xml = self.tostring()
    try:
        return ParseTree().fromstring(xml)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # pass through; dump the offending XML for debugging, re-raise.
        print(">>>", xml, "<<<")
        raise
|
https://github.com/zim-desktop-wiki/zim-desktop-wiki/issues/164
|
ERROR: Exception in signal handler for process on <zim.templates.Template object at 0x7f2a60595650>
Traceback (most recent call last):
File "/mnt/projects/zim-desktop-wiki/zim/signals.py", line 338, in emit
r = handler(self, *args)
File "/mnt/projects/zim-desktop-wiki/zim/templates/__init__.py", line 186, in do_process
processor.process(output, context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 81, in process
self.__call__(output, self.main, context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 130, in __call__
self._loop(output, element, context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 169, in _loop
self.__call__(output, element, context) # recurs
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 111, in __call__
value = expr(context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/expression.py", line 124, in __call__
value = getattr(value, p)
File "/mnt/projects/zim-desktop-wiki/zim/export/template.py", line 488, in title
return self.heading or self.basename
File "/mnt/projects/zim-desktop-wiki/zim/export/template.py", line 431, in heading
head, body = self._split_head()
File "/mnt/projects/zim-desktop-wiki/zim/export/template.py", line 462, in _split_head
tree = self._tree.copy()
File "/mnt/projects/zim-desktop-wiki/zim/formats/__init__.py", line 330, in copy
return ParseTree().fromstring(xml)
File "/mnt/projects/zim-desktop-wiki/zim/formats/__init__.py", line 307, in fromstring
parser.feed(string)
ParseError: not well-formed (invalid token): line 35, column 5
ERROR: Could not read: /path/to/Notebook/Page/SubPage/attachment.txt
|
ParseError
|
def start(self, tag, attrib=None):
    """Open element *tag* on the underlying builder.

    The attribute mapping is copied so later mutation by the caller
    cannot leak into the tree being built.
    """
    if attrib is None:
        attrib_copy = None
    else:
        attrib_copy = attrib.copy()
    self._b.start(tag, attrib_copy)
    self.stack.append(tag)
    if tag in BLOCK_LEVEL:
        self._last_char = None
|
def start(self, tag, attrib=None):
    """Open element *tag* on the underlying builder."""
    # Copy the attribute dict: the builder keeps a reference, so passing
    # the caller's dict through would let later caller-side mutation
    # corrupt the tree being built.
    attrib = attrib.copy() if attrib is not None else None
    self._b.start(tag, attrib)
    self.stack.append(tag)
    if tag in BLOCK_LEVEL:
        self._last_char = None
|
https://github.com/zim-desktop-wiki/zim-desktop-wiki/issues/164
|
ERROR: Exception in signal handler for process on <zim.templates.Template object at 0x7f2a60595650>
Traceback (most recent call last):
File "/mnt/projects/zim-desktop-wiki/zim/signals.py", line 338, in emit
r = handler(self, *args)
File "/mnt/projects/zim-desktop-wiki/zim/templates/__init__.py", line 186, in do_process
processor.process(output, context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 81, in process
self.__call__(output, self.main, context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 130, in __call__
self._loop(output, element, context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 169, in _loop
self.__call__(output, element, context) # recurs
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 111, in __call__
value = expr(context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/expression.py", line 124, in __call__
value = getattr(value, p)
File "/mnt/projects/zim-desktop-wiki/zim/export/template.py", line 488, in title
return self.heading or self.basename
File "/mnt/projects/zim-desktop-wiki/zim/export/template.py", line 431, in heading
head, body = self._split_head()
File "/mnt/projects/zim-desktop-wiki/zim/export/template.py", line 462, in _split_head
tree = self._tree.copy()
File "/mnt/projects/zim-desktop-wiki/zim/formats/__init__.py", line 330, in copy
return ParseTree().fromstring(xml)
File "/mnt/projects/zim-desktop-wiki/zim/formats/__init__.py", line 307, in fromstring
parser.feed(string)
ParseError: not well-formed (invalid token): line 35, column 5
ERROR: Could not read: /path/to/Notebook/Page/SubPage/attachment.txt
|
ParseError
|
def append(self, tag, attrib=None, text=None):
    """Append a complete element (start / data / end) in one call.

    The attribute dict is copied to avoid aliasing the caller's dict.
    Block-level elements are newline-terminated; headings and list
    items have surrounding newlines stripped for backward
    compatibility.
    """
    copied_attrib = attrib.copy() if attrib is not None else None
    if tag in BLOCK_LEVEL and text and not text.endswith("\n"):
        text += "\n"
    # FIXME hack for backward compat
    if text and tag in (HEADING, LISTITEM):
        text = text.strip("\n")
    self._b.start(tag, copied_attrib)
    if text:
        self._b.data(text)
    self._b.end(tag)
    # FIXME hack for backward compat
    if tag == HEADING and not self._parsetree_roundtrip:
        self._b.data("\n")
        self._last_char = None
|
def append(self, tag, attrib=None, text=None):
    """Append a complete element (start / data / end) in one call."""
    # Copy the attribute dict: the builder keeps a reference, so passing
    # the caller's dict through would let later caller-side mutation
    # corrupt the tree being built.
    attrib = attrib.copy() if attrib is not None else None
    if tag in BLOCK_LEVEL:
        if text and not text.endswith("\n"):
            text += "\n"
    # FIXME hack for backward compat
    if text and tag in (HEADING, LISTITEM):
        text = text.strip("\n")
    self._b.start(tag, attrib)
    if text:
        self._b.data(text)
    self._b.end(tag)
    # FIXME hack for backward compat
    if tag == HEADING:
        self._b.data("\n")
        self._last_char = None
|
https://github.com/zim-desktop-wiki/zim-desktop-wiki/issues/164
|
ERROR: Exception in signal handler for process on <zim.templates.Template object at 0x7f2a60595650>
Traceback (most recent call last):
File "/mnt/projects/zim-desktop-wiki/zim/signals.py", line 338, in emit
r = handler(self, *args)
File "/mnt/projects/zim-desktop-wiki/zim/templates/__init__.py", line 186, in do_process
processor.process(output, context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 81, in process
self.__call__(output, self.main, context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 130, in __call__
self._loop(output, element, context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 169, in _loop
self.__call__(output, element, context) # recurs
File "/mnt/projects/zim-desktop-wiki/zim/templates/processor.py", line 111, in __call__
value = expr(context)
File "/mnt/projects/zim-desktop-wiki/zim/templates/expression.py", line 124, in __call__
value = getattr(value, p)
File "/mnt/projects/zim-desktop-wiki/zim/export/template.py", line 488, in title
return self.heading or self.basename
File "/mnt/projects/zim-desktop-wiki/zim/export/template.py", line 431, in heading
head, body = self._split_head()
File "/mnt/projects/zim-desktop-wiki/zim/export/template.py", line 462, in _split_head
tree = self._tree.copy()
File "/mnt/projects/zim-desktop-wiki/zim/formats/__init__.py", line 330, in copy
return ParseTree().fromstring(xml)
File "/mnt/projects/zim-desktop-wiki/zim/formats/__init__.py", line 307, in fromstring
parser.feed(string)
ParseError: not well-formed (invalid token): line 35, column 5
ERROR: Could not read: /path/to/Notebook/Page/SubPage/attachment.txt
|
ParseError
|
def get_candidates_and_features_page_num(self, page_num):
    """Return candidate bounding boxes and their feature matrix for one page.

    Returns ``([], [])`` when the page yields no alignment candidates
    (logged at INFO level).
    """
    page_elems = self.elems[page_num]
    candidate_boxes, align_features = self.get_candidates_alignments(
        page_num, page_elems
    )
    if len(candidate_boxes) == 0:
        log.info("No boxes were found on page {}.".format(page_num))
        return [], []
    line_features = get_lines_features(candidate_boxes, page_elems)
    # Concatenate the two feature groups column-wise: one row per box.
    feature_matrix = np.concatenate(
        (np.array(align_features), np.array(line_features)), axis=1
    )
    return candidate_boxes, feature_matrix
|
def get_candidates_and_features_page_num(self, page_num):
    """Return candidate bounding boxes and their feature matrix for one page.

    Returns ``([], [])`` when the page yields no alignment candidates.
    """
    elems = self.elems[page_num]
    # font_stat = self.font_stats[page_num]
    # lines_bboxes = self.get_candidates_lines(page_num, elems)
    alignments_bboxes, alignment_features = self.get_candidates_alignments(
        page_num, elems
    )
    # print "Page Num: ", page_num, "Line bboxes: ", len(lines_bboxes), ", Alignment bboxes: ", len(alignments_bboxes)
    # alignment_features += get_alignment_features(lines_bboxes, elems, font_stat)
    boxes = alignments_bboxes  # + lines_bboxes
    if len(boxes) == 0:
        # Log instead of returning silently so empty pages are traceable.
        log.info("No boxes were found on page {}.".format(page_num))
        return [], []
    lines_features = get_lines_features(boxes, elems)
    features = np.concatenate(
        (np.array(alignment_features), np.array(lines_features)), axis=1
    )
    return boxes, features
|
https://github.com/HazyResearch/pdftotree/issues/25
|
$ pdftotree -vv dtc114w.pdf
[INFO] pdftotree.core - Digitized PDF detected, building tree structure...
[ERROR] pdftotree.TreeExtract - list index out of range
Traceback (most recent call last):
File "/home/lwhsiao/repos/pdftotree/pdftotree/TreeExtract.py", line 148, in get_candidates_alignments
nodes, features = parse_layout(elems, font_stat)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 36, in parse_layout
avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 1159, in get_most_common_font_pts
most_common_font_size = font_stat.most_common(1)[0][0]
IndexError: list index out of range
Traceback (most recent call last):
File "/home/lwhsiao/repos/pdftotree/.venv/bin/pdftotree", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/home/lwhsiao/repos/pdftotree/bin/pdftotree", line 82, in <module>
args.favor_figures, args.visualize)
File "/home/lwhsiao/repos/pdftotree/pdftotree/core.py", line 58, in parse
pdf_tree = extractor.get_tree_structure(model, favor_figures)
File "/home/lwhsiao/repos/pdftotree/pdftotree/TreeExtract.py", line 188, in get_tree_structure
favor_figures)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 666, in parse_tree_structure
avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 1159, in get_most_common_font_pts
most_common_font_size = font_stat.most_common(1)[0][0]
IndexError: list index out of range
|
IndexError
|
def cluster_vertically_aligned_boxes(
boxes,
page_bbox,
avg_font_pts,
width,
char_width,
boxes_segments,
boxes_curves,
boxes_figures,
page_width,
combine,
):
# Too many "." in the Table of Content pages
if len(boxes) == 0:
log.warning("No boxes were found to cluster.")
return [], []
elif len(boxes) > 3500:
log.warning("Too many '.' in the Table of Content pages?")
return [], []
plane = Plane(page_bbox)
plane.extend(boxes)
# initialize clusters
cid2obj = [set([i]) for i in range(len(boxes))]
# default object map to cluster with its own index
obj2cid = list(range(len(boxes)))
prev_clusters = obj2cid
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
if (
box2[1] < box1[3]
or (box2[1] - box1[1] < 1.5 * avg_font_pts)
or (box2[3] - box1[3] < 1.5 * avg_font_pts)
):
# can probably do better if we find the average space
# between words
if (
abs(box1[0] - box2[0]) < 3
or abs(box1[2] - box2[2]) < 3
or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
or ((box1[0] < box2[0]) and (box1[2] > box2[0]))
or ((box1[0] > box2[0]) and (box2[2] > box1[0]))
):
min_i = min(i1, i2)
max_i = max(i1, i2)
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
# move all objects from cluster cid2 to cid1
# reassign cluster ids for all such objects as well
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
if prev_clusters == obj2cid:
break
prev_clusters = obj2cid
clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
rid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
# default object map to cluster with its own index
obj2rid = list(range(len(boxes)))
prev_clusters = obj2rid
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
continue
box1 = b1.bbox
box2 = b2.bbox
if (
(abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
or (abs(box1[3] - box2[3]) < 0.11 * avg_font_pts)
or (
round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
)
):
min_i = min(i1, i2)
max_i = max(i1, i2)
rid1 = obj2rid[min_i]
rid2 = obj2rid[max_i]
for obj_iter in rid2obj[rid2]:
rid2obj[rid1].add(obj_iter)
obj2rid[obj_iter] = rid1
rid2obj[rid2] = set()
if prev_clusters == obj2rid:
break
prev_clusters = obj2rid
not_merge = set()
for i1, b1 in enumerate(boxes):
for i2 in cid2obj[obj2cid[i1]]:
if i1 == i2:
continue
row1 = obj2rid[i1]
row2 = obj2rid[i2]
if row1 == row2:
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
text_1 = 0.0
for obj in rid2obj[row1]:
text_1 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
text_2 = 0.0
for obj in rid2obj[row2]:
text_2 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
if abs(text_1 - text_2) / width > 0.1:
min_i = min(i1, i2)
max_i = max(i1, i2)
not_merge.add((min_i, max_i))
# Alignment Features
# If text boxes are very close in a row
if_row_connected = defaultdict(int)
num_row_connected = defaultdict(lambda: 1)
# If text is merged using span code in adjacent rows, this feature tells the number of times the cluster went through span based clustering
if_connected_by_span = defaultdict(int)
num_connected_by_span = defaultdict(lambda: 1)
# If columns were merged using cluster alignment
if_connected_by_align = defaultdict(int)
num_connected_by_align = defaultdict(lambda: 1)
# If vertical columns were merged
if_vertical_columns_merged = defaultdict(int)
num_vertical_columns_merged = defaultdict(lambda: 1)
# Number of Line Segments, Curves and Figures
num_segments = defaultdict(int)
num_curves = defaultdict(int)
num_figures = defaultdict(int)
# Average Word Space
total_word_space = defaultdict(float)
avg_word_space = defaultdict(float)
avg_word_space_norm = defaultdict(float)
node_space = defaultdict(float)
avg_node_space = defaultdict(float)
avg_node_space_norm = defaultdict(float)
cid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
obj2cid = list(
range(len(boxes))
) # default object map to cluster with its own index
prev_clusters = obj2cid
# add the code for merging close text boxes in particular row
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
box1 = b1.bbox
box2 = b2.bbox
if obj2rid[i1] == obj2rid[i2]:
if (
(b1.bbox[0] < b2.bbox[0])
and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
) or (
(b2.bbox[0] < b1.bbox[0])
and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
):
min_i = min(i1, i2)
max_i = max(i1, i2)
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
# Features
if_row_connected[cid1] = 1
if_row_connected[cid2] = 0
num_row_connected[cid1] += num_row_connected[cid2]
num_row_connected[cid2] = 0
if prev_clusters == obj2cid:
break
prev_clusters = obj2cid
# vertical alignment code
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
if (
box2[1] < box1[3]
or (box2[1] - box1[1] < 1.5 * avg_font_pts)
or (box2[3] - box1[3] < 1.5 * avg_font_pts)
): # can probably do better if we find the average space between words
if (
abs(box1[0] - box2[0]) < 3
or abs(box1[2] - box2[2]) < 3
or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
): # or ((box1[0]<box2[0]) and (box1[2]>box2[0])) or ((box1[0]>box2[0]) and (box2[2]>box1[0]))): #added center alignemnt
min_i = min(i1, i2)
max_i = max(i1, i2)
if (min_i, max_i) not in not_merge:
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
# move all objects from cluster cid2 to cid1
# reassign cluster ids for all such objects as well
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
# Features
if_connected_by_span[cid1] = 1
if_connected_by_span[cid2] = 0
if (
if_row_connected[cid1] == 1
or if_row_connected[cid2] == 1
):
if_row_connected[cid1] = 1
num_row_connected[cid1] += num_row_connected[cid2]
num_row_connected[cid2] = 0
if_row_connected[cid2] = 0
num_connected_by_span[cid1] = (
num_connected_by_span[cid1]
+ num_connected_by_span[cid2]
)
num_connected_by_span[cid2] = 0
if prev_clusters == obj2cid:
break
prev_clusters = obj2cid
# blacklist nearly half-page wide clusters before horizontal merging
cid2obj2 = cid2obj[:]
obj2cid2 = obj2cid[:]
blacklist = set()
blacklist_obj = set()
for cid_iter in range(len(cid2obj2)):
cid = cid2obj2[cid_iter]
xmin = float("Inf")
xmax = float("-Inf")
for obj in cid:
xmin = min(xmin, boxes[obj].bbox[0])
xmax = max(xmax, boxes[obj].bbox[2])
if ((xmax - xmin) > width / 2.75 and (xmax - xmin) < width / 2) or (
(xmax - xmin) > 0.9 * width
):
blacklist.add(cid_iter)
for obj in cid:
blacklist_obj.add(obj)
for obj_iter in rid2obj[obj2rid[obj]]:
if (
boxes[obj_iter].bbox[0] >= xmin
and boxes[obj_iter].bbox[2] <= xmax
):
blacklist_obj.add(obj_iter)
# create a cluster span
cid2span = {}
for cid in range(len(cid2obj)):
cid2span[cid] = {}
cid2span[cid]["min_x"] = float("Inf")
cid2span[cid]["min_y"] = float("Inf")
cid2span[cid]["max_x"] = float("-Inf")
cid2span[cid]["max_y"] = float("-Inf")
for obj in cid2obj[cid]:
cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
cid2cid = {}
cid_pair_compared = set()
cid2cid2 = [cid for cid in range(len(cid2obj))]
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if i1 == i2:
continue
if i1 in blacklist_obj or i2 in blacklist_obj:
continue
cid1 = obj2cid[i1]
cid2 = obj2cid[i2]
if (min(cid1, cid2), max(cid1, cid2)) in cid_pair_compared:
continue
if cid1 == cid2:
continue
if obj2rid[i1] == obj2rid[i2]:
continue
if cid1 not in cid2cid:
cid2cid[cid1] = set()
if cid2 not in cid2cid:
cid2cid[cid2] = set()
if cid2span[cid1]["min_y"] < cid2span[cid2]["min_y"]:
box1 = [
cid2span[cid1]["min_x"],
cid2span[cid1]["min_y"],
cid2span[cid1]["max_x"],
cid2span[cid1]["max_y"],
]
box2 = [
cid2span[cid2]["min_x"],
cid2span[cid2]["min_y"],
cid2span[cid2]["max_x"],
cid2span[cid2]["max_y"],
]
else:
box1 = [
cid2span[cid2]["min_x"],
cid2span[cid2]["min_y"],
cid2span[cid2]["max_x"],
cid2span[cid2]["max_y"],
]
box2 = [
cid2span[cid1]["min_x"],
cid2span[cid1]["min_y"],
cid2span[cid1]["max_x"],
cid2span[cid1]["max_y"],
]
if ((box1[1] < box2[1]) and (box1[3] > box2[1])) or (
(box1[1] > box2[1]) and (box1[1] < box2[3])
):
continue
cid_pair_compared.add((min(cid1, cid2), max(cid1, cid2)))
query_rect = (
min(box1[0], box2[0]),
min(box1[1], box2[1]),
max(box1[2], box2[2]),
max(box1[3], box2[3]),
)
connected = True
for i3, b3 in enumerate(boxes):
if (i3 == i1) or (i3 == i2):
continue
if obj2cid[i1] == obj2cid[i3] or obj2cid[i2] == obj2cid[i3]:
continue
box3 = b3.bbox
if intersect(query_rect, box3):
connected = False
break
if (
(
(
round(box1[0]) == round(box2[0])
or round(box1[2]) == round(box2[2])
)
and connected
)
or (
round((box1[0] + box1[2]) / 2) == round((box2[0] + box2[2]) / 2)
and connected
)
): # or (abs((box1[0]+box1[2])/2-(box2[0]+box2[2])/2)<0.1*char_width and connected)):# or ((box1[0]<box2[0]) and (box1[2]>box2[0])) or ((box1[0]>box2[0]) and (box2[2]>box1[0]))): #added center alignemnt
cid2cid[min(cid1, cid2)].add(max(cid1, cid2))
min_cid = min(cid1, cid2)
max_cid = max(cid1, cid2)
for cid_iter in cid2cid2:
if cid2cid2[cid_iter] == cid2cid2[max_cid]:
cid2cid2[cid_iter] = cid2cid2[min_cid]
# post-process cid2cid
cid2obj2 = cid2obj[:]
obj2cid2 = obj2cid[:]
for cid in range(len(cid2cid2)):
cid_merge = cid2cid2[cid]
if cid != cid_merge:
for obj_iter in cid2obj2[cid]:
cid2obj2[cid_merge].add(obj_iter)
obj2cid2[obj_iter] = cid_merge
cid2obj2[cid] = set()
# Features
if_connected_by_align[cid_merge] = 1
if_connected_by_align[cid] = 0
if if_row_connected[cid_merge] == 1 or if_row_connected[cid] == 1:
if_row_connected[cid_merge] = 1
num_row_connected[cid_merge] += num_row_connected[cid]
num_row_connected[cid] = 0
if_row_connected[cid2] = 0
if if_connected_by_span[cid_merge] == 1 or if_connected_by_span[cid] == 1:
if_connected_by_span[cid_merge] = 1
num_connected_by_span[cid_merge] += num_connected_by_span[cid]
num_connected_by_span[cid] = 0
if_connected_by_span[cid] = 0
num_connected_by_align[cid_merge] += num_connected_by_align[cid]
num_connected_by_align[cid] = 0
# code to merge columns for table
prev_clusters = obj2cid2
while True:
for obj1, b1 in enumerate(boxes):
cid1 = obj2cid2[obj1]
rid1 = obj2rid[obj1]
if cid1 in blacklist:
continue
if obj1 in blacklist_obj:
continue
for obj2, b2 in enumerate(boxes):
if obj1 == obj2:
continue
if obj2cid2[obj2] == cid1:
rid2 = obj2rid[obj2]
if rid1 == rid2:
continue
for obj3 in rid2obj[rid2]:
cid3 = obj2cid2[obj3]
if obj3 in blacklist_obj:
continue
if cid1 != cid3:
for obj4 in cid2obj2[cid3]:
if obj4 == obj3:
continue
if obj2rid[obj4] == rid1:
min_cid = min(cid1, cid3)
max_cid = max(cid1, cid3)
for obj_iter in cid2obj2[max_cid]:
cid2obj2[min_cid].add(obj_iter)
obj2cid2[obj_iter] = min_cid
cid2obj2[max_cid] = set()
# Features
if_vertical_columns_merged[min_cid] = 1
if_vertical_columns_merged[max_cid] = 0
num_vertical_columns_merged[min_cid] += (
num_vertical_columns_merged[max_cid]
)
num_vertical_columns_merged[max_cid] = 0
if (
if_row_connected[min_cid] == 1
or if_row_connected[max_cid] == 1
):
if_row_connected[min_cid] = 1
num_row_connected[min_cid] += num_row_connected[
max_cid
]
num_row_connected[max_cid] = 0
if_row_connected[max_cid] = 0
if (
if_connected_by_span[min_cid] == 1
or if_connected_by_span[max_cid] == 1
):
if_connected_by_span[min_cid] = 1
num_connected_by_span[min_cid] += (
num_connected_by_span[max_cid]
)
num_connected_by_span[max_cid] = 0
if_connected_by_span[max_cid] = 0
if (
if_connected_by_align[min_cid] == 1
or if_connected_by_align[max_cid] == 1
):
if_connected_by_align[min_cid] = 1
num_connected_by_align[min_cid] += (
num_connected_by_align[max_cid]
)
num_connected_by_align[max_cid] = 0
if_connected_by_align[max_cid] = 0
break
if prev_clusters == obj2cid2:
break
prev_clusters = obj2cid2
clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj2)]
nodes = [Node(elems) for elems in clusters]
node_indices = [i for i, x in enumerate(cid2obj2) if x]
merge_indices = [i for i in range(len(node_indices))]
page_stat = Node(boxes)
nodes, merge_indices = merge_nodes(nodes, plane, page_stat, merge_indices)
# Features
for idx in range(len(merge_indices)):
if merge_indices[idx] != idx:
cid1 = node_indices[merge_indices[idx]]
cid2 = node_indices[idx]
if if_row_connected[cid1] == 1 or if_row_connected[cid2] == 1:
if_row_connected[cid1] = 1
num_row_connected[cid1] += num_row_connected[cid2]
num_row_connected[cid2] = 0
if_row_connected[cid2] = 0
if if_connected_by_span[cid1] == 1 or if_connected_by_span[cid2] == 1:
if_connected_by_span[cid1] = 1
num_connected_by_span[cid1] += num_connected_by_span[cid2]
num_connected_by_span[cid2] = 0
if_connected_by_span[cid2] = 0
if if_connected_by_align[cid1] == 1 or if_connected_by_align[cid2] == 1:
if_connected_by_align[cid1] = 1
num_connected_by_align[cid1] += num_connected_by_align[cid2]
num_connected_by_align[cid2] = 0
if_connected_by_align[cid2] = 0
if (
if_vertical_columns_merged[cid1] == 1
or if_vertical_columns_merged[cid2] == 1
):
if_vertical_columns_merged[cid1] = 1
num_vertical_columns_merged[cid1] += num_vertical_columns_merged[cid2]
num_vertical_columns_merged[cid2] = 0
if_vertical_columns_merged[cid2] = 0
# Get Word Spacing Features
rid2space = defaultdict(float)
rid2space_norm = defaultdict(float)
row_indices = [i for i, x in enumerate(rid2obj) if x]
for rid in row_indices:
obj_list = list(rid2obj[rid])
if len(obj_list) == 1:
rid2space[rid] = 0
continue
obj_boxes = [boxes[obj].bbox[0] for obj in obj_list]
sorted_obj_idx = [
i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
]
for obj_idx in range(len(sorted_obj_idx) - 1):
rid2space[rid] += (
boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
- boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
)
rid2space_norm[rid] = rid2space[rid] / (len(obj_list) - 1)
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
if merge_indices[idx] == idx:
obj_list = []
for idx_iter in range(len(merge_indices)):
if merge_indices[idx_iter] == idx:
obj_list += list(cid2obj2[node_indices[idx_iter]])
obj_list = list(set(obj_list))
rid_list = list(set([obj2rid[obj] for obj in obj_list]))
for rid in rid_list:
total_word_space[node_idx] += rid2space[rid]
avg_word_space_norm[node_idx] += rid2space_norm[rid]
obj_boxes = [
boxes[obj].bbox[0] for obj in rid2obj if obj in cid2obj2[node_idx]
]
sorted_obj_idx = [
i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
]
for obj_idx in range(len(sorted_obj_idx) - 1):
node_space[node_idx] += (
boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
- boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
)
avg_node_space_norm[node_idx] += node_space[node_idx] / (
len(obj_boxes) - 1
)
avg_word_space[node_idx] = total_word_space[node_idx] / len(rid_list)
avg_word_space_norm[node_idx] /= len(rid_list)
avg_node_space[node_idx] = node_space[node_idx] / len(rid_list)
avg_node_space_norm[node_idx] /= len(rid_list)
new_nodes = []
new_node_indices = []
for idx in range(len(merge_indices)):
if merge_indices[idx] == idx:
new_nodes.append(nodes[idx])
new_node_indices.append(node_indices[idx])
nodes = new_nodes
node_indices = new_node_indices
# Features
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
node_bbox = (node.x0, node.y0, node.x1, node.y1)
for i1, b1 in enumerate(boxes_segments):
if intersect(node_bbox, b1.bbox):
num_segments[node_idx] += 1
for i1, b1 in enumerate(boxes_figures):
if intersect(node_bbox, b1.bbox):
num_figures[node_idx] += 1
for i1, b1 in enumerate(boxes_curves):
if intersect(node_bbox, b1.bbox):
num_curves[node_idx] += 1
tables = []
table_indices = []
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
isTable = True
if node.is_table():
for elem in node.elems:
if "table" in elem.get_text().lower():
continue
if (node.width - elem.bbox[2] + elem.bbox[0]) < 2 * char_width:
isTable = False
if isTable:
tables.append(node)
table_indices.append(node_idx)
if combine == True:
node_features = [0] * 17
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
node_features = [
sum(x)
for x in zip(
node_features,
[
if_row_connected[node_idx],
num_row_connected[node_idx],
if_connected_by_span[node_idx],
num_connected_by_span[node_idx],
if_connected_by_align[node_idx],
num_connected_by_align[node_idx],
if_vertical_columns_merged[node_idx],
num_vertical_columns_merged[node_idx],
num_segments[node_idx],
num_curves[node_idx],
num_figures[node_idx],
total_word_space[node_idx],
avg_word_space[node_idx],
avg_word_space_norm[node_idx],
node_space[node_idx],
avg_node_space[node_idx],
avg_node_space_norm[node_idx],
],
)
]
return [], node_features
else:
table_features = []
for idx, table in enumerate(tables):
table_idx = table_indices[idx]
table_features += [
[
if_row_connected[table_idx],
num_row_connected[table_idx],
if_connected_by_span[table_idx],
num_connected_by_span[table_idx],
if_connected_by_align[table_idx],
num_connected_by_align[table_idx],
if_vertical_columns_merged[table_idx],
num_vertical_columns_merged[table_idx],
num_segments[table_idx],
num_curves[table_idx],
num_figures[table_idx],
total_word_space[table_idx],
avg_word_space[table_idx],
avg_word_space_norm[table_idx],
node_space[table_idx],
avg_node_space[table_idx],
avg_node_space_norm[table_idx],
]
]
return tables, table_features
|
def cluster_vertically_aligned_boxes(
    boxes,
    page_bbox,
    avg_font_pts,
    width,
    char_width,
    boxes_segments,
    boxes_curves,
    boxes_figures,
    page_width,
    combine,
):
    """Cluster text boxes by vertical alignment and extract table candidates.

    Runs several clustering passes over ``boxes`` — vertical span merging,
    row grouping, intra-row merging, alignment-based merging, table-column
    merging and a final ``merge_nodes`` pass — while accumulating per-cluster
    layout features (row-connection, span/alignment merges, segment/curve/
    figure overlap counts, word-spacing statistics).

    Returns:
        ``([], node_features)`` when ``combine`` is truthy (page-level sums),
        ``(tables, table_features)`` otherwise, or a bare ``[]`` on the early
        exit for empty/overfull pages.

    NOTE(review): the early exit returns a single list while the normal paths
    return a 2-tuple — callers must handle both shapes.
    """
    # Too many "." in the Table of Content pages
    if len(boxes) == 0 or len(boxes) > 3500:
        log.error("Too many '.' in the Table of Content pages.")
        return []
    plane = Plane(page_bbox)
    plane.extend(boxes)
    # initialize clusters
    cid2obj = [set([i]) for i in range(len(boxes))]
    # default object map to cluster with its own index
    obj2cid = list(range(len(boxes)))
    prev_clusters = obj2cid
    # NOTE(review): ``prev_clusters`` aliases ``obj2cid`` (no copy is taken),
    # so the equality check below is always true and each of the ``while
    # True`` fixed-point loops in this function runs its body exactly once —
    # confirm whether a copy (``obj2cid[:]``) was intended.
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                if b1.bbox[1] < b2.bbox[1]:
                    box1 = b1.bbox
                    box2 = b2.bbox
                elif b2.bbox[1] < b1.bbox[1]:
                    box1 = b2.bbox
                    box2 = b1.bbox
                else:
                    # horizontally aligned
                    continue
                if (
                    box2[1] < box1[3]
                    or (box2[1] - box1[1] < 1.5 * avg_font_pts)
                    or (box2[3] - box1[3] < 1.5 * avg_font_pts)
                ):
                    # can probably do better if we find the average space
                    # between words
                    if (
                        abs(box1[0] - box2[0]) < 3
                        or abs(box1[2] - box2[2]) < 3
                        or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
                        or ((box1[0] < box2[0]) and (box1[2] > box2[0]))
                        or ((box1[0] > box2[0]) and (box2[2] > box1[0]))
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        # move all objects from cluster cid2 to cid1
                        # reassign cluster ids for all such objects as well
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
    # Row clustering: group boxes sharing (approximately) the same baseline.
    rid2obj = [set([i]) for i in range(len(boxes))]  # initialize clusters
    # default object map to cluster with its own index
    obj2rid = list(range(len(boxes)))
    prev_clusters = obj2rid
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if (
                    (abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
                    or (abs(box1[3] - box2[3]) < 0.11 * avg_font_pts)
                    or (
                        round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
                    )
                ):
                    min_i = min(i1, i2)
                    max_i = max(i1, i2)
                    rid1 = obj2rid[min_i]
                    rid2 = obj2rid[max_i]
                    for obj_iter in rid2obj[rid2]:
                        rid2obj[rid1].add(obj_iter)
                        obj2rid[obj_iter] = rid1
                    rid2obj[rid2] = set()
        if prev_clusters == obj2rid:
            break
        prev_clusters = obj2rid
    # Mark vertically-clustered pairs whose rows carry very different amounts
    # of text; such pairs are excluded from span-based merging later.
    not_merge = set()
    for i1, b1 in enumerate(boxes):
        for i2 in cid2obj[obj2cid[i1]]:
            if i1 == i2:
                continue
            row1 = obj2rid[i1]
            row2 = obj2rid[i2]
            if row1 == row2:
                continue
            # NOTE(review): ``b2`` is not bound in this loop — it is the
            # stale value left over from the row-clustering loop above.
            # ``boxes[i2]`` was probably intended; confirm before relying
            # on this branch's behavior.
            if b1.bbox[1] < b2.bbox[1]:
                box1 = b1.bbox
                box2 = b2.bbox
            elif b2.bbox[1] < b1.bbox[1]:
                box1 = b2.bbox
                box2 = b1.bbox
            else:
                # horizontally aligned
                continue
            text_1 = 0.0
            for obj in rid2obj[row1]:
                text_1 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
            text_2 = 0.0
            for obj in rid2obj[row2]:
                text_2 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
            if abs(text_1 - text_2) / width > 0.1:
                min_i = min(i1, i2)
                max_i = max(i1, i2)
                not_merge.add((min_i, max_i))
    # Alignment Features
    # If text boxes are very close in a row
    if_row_connected = defaultdict(int)
    num_row_connected = defaultdict(lambda: 1)
    # If text is merged using span code in adjacent rows, this feature tells the number of times the cluster went through span based clustering
    if_connected_by_span = defaultdict(int)
    num_connected_by_span = defaultdict(lambda: 1)
    # If columns were merged using cluster alignment
    if_connected_by_align = defaultdict(int)
    num_connected_by_align = defaultdict(lambda: 1)
    # If vertical columns were merged
    if_vertical_columns_merged = defaultdict(int)
    num_vertical_columns_merged = defaultdict(lambda: 1)
    # Number of Line Segments, Curves and Figures
    num_segments = defaultdict(int)
    num_curves = defaultdict(int)
    num_figures = defaultdict(int)
    # Average Word Space
    total_word_space = defaultdict(float)
    avg_word_space = defaultdict(float)
    avg_word_space_norm = defaultdict(float)
    node_space = defaultdict(float)
    avg_node_space = defaultdict(float)
    avg_node_space_norm = defaultdict(float)
    cid2obj = [set([i]) for i in range(len(boxes))]  # initialize clusters
    obj2cid = list(
        range(len(boxes))
    )  # default object map to cluster with its own index
    prev_clusters = obj2cid
    # add the code for merging close text boxes in particular row
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if obj2rid[i1] == obj2rid[i2]:
                    if (
                        (b1.bbox[0] < b2.bbox[0])
                        and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
                    ) or (
                        (b2.bbox[0] < b1.bbox[0])
                        and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
                        # Features
                        if_row_connected[cid1] = 1
                        if_row_connected[cid2] = 0
                        num_row_connected[cid1] += num_row_connected[cid2]
                        num_row_connected[cid2] = 0
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # vertical alignment code
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                if b1.bbox[1] < b2.bbox[1]:
                    box1 = b1.bbox
                    box2 = b2.bbox
                elif b2.bbox[1] < b1.bbox[1]:
                    box1 = b2.bbox
                    box2 = b1.bbox
                else:
                    # horizontally aligned
                    continue
                if (
                    box2[1] < box1[3]
                    or (box2[1] - box1[1] < 1.5 * avg_font_pts)
                    or (box2[3] - box1[3] < 1.5 * avg_font_pts)
                ):  # can probably do better if we find the average space between words
                    if (
                        abs(box1[0] - box2[0]) < 3
                        or abs(box1[2] - box2[2]) < 3
                        or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
                    ):  # or ((box1[0]<box2[0]) and (box1[2]>box2[0])) or ((box1[0]>box2[0]) and (box2[2]>box1[0]))): #added center alignemnt
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        if (min_i, max_i) not in not_merge:
                            cid1 = obj2cid[min_i]
                            cid2 = obj2cid[max_i]
                            # move all objects from cluster cid2 to cid1
                            # reassign cluster ids for all such objects as well
                            for obj_iter in cid2obj[cid2]:
                                cid2obj[cid1].add(obj_iter)
                                obj2cid[obj_iter] = cid1
                            cid2obj[cid2] = set()
                            # Features
                            if_connected_by_span[cid1] = 1
                            if_connected_by_span[cid2] = 0
                            if (
                                if_row_connected[cid1] == 1
                                or if_row_connected[cid2] == 1
                            ):
                                if_row_connected[cid1] = 1
                                num_row_connected[cid1] += num_row_connected[cid2]
                                num_row_connected[cid2] = 0
                                if_row_connected[cid2] = 0
                            num_connected_by_span[cid1] = (
                                num_connected_by_span[cid1]
                                + num_connected_by_span[cid2]
                            )
                            num_connected_by_span[cid2] = 0
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # blacklist nearly half-page wide clusters before horizontal merging
    cid2obj2 = cid2obj[:]
    obj2cid2 = obj2cid[:]
    blacklist = set()
    blacklist_obj = set()
    for cid_iter in range(len(cid2obj2)):
        cid = cid2obj2[cid_iter]
        xmin = float("Inf")
        xmax = float("-Inf")
        for obj in cid:
            xmin = min(xmin, boxes[obj].bbox[0])
            xmax = max(xmax, boxes[obj].bbox[2])
        if ((xmax - xmin) > width / 2.75 and (xmax - xmin) < width / 2) or (
            (xmax - xmin) > 0.9 * width
        ):
            blacklist.add(cid_iter)
            for obj in cid:
                blacklist_obj.add(obj)
                for obj_iter in rid2obj[obj2rid[obj]]:
                    if (
                        boxes[obj_iter].bbox[0] >= xmin
                        and boxes[obj_iter].bbox[2] <= xmax
                    ):
                        blacklist_obj.add(obj_iter)
    # create a cluster span
    cid2span = {}
    for cid in range(len(cid2obj)):
        cid2span[cid] = {}
        cid2span[cid]["min_x"] = float("Inf")
        cid2span[cid]["min_y"] = float("Inf")
        cid2span[cid]["max_x"] = float("-Inf")
        cid2span[cid]["max_y"] = float("-Inf")
        for obj in cid2obj[cid]:
            cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
            cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
            cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
            cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
    # Pairwise merge of left/right/center-aligned clusters whose joint
    # bounding box does not intersect any box from another cluster.
    # ``cid2cid2`` is a union-find-style map from cluster id to its root.
    cid2cid = {}
    cid_pair_compared = set()
    cid2cid2 = [cid for cid in range(len(cid2obj))]
    for i1, b1 in enumerate(boxes):
        for i2, b2 in enumerate(boxes):
            if i1 == i2:
                continue
            if i1 in blacklist_obj or i2 in blacklist_obj:
                continue
            cid1 = obj2cid[i1]
            cid2 = obj2cid[i2]
            if (min(cid1, cid2), max(cid1, cid2)) in cid_pair_compared:
                continue
            if cid1 == cid2:
                continue
            if obj2rid[i1] == obj2rid[i2]:
                continue
            if cid1 not in cid2cid:
                cid2cid[cid1] = set()
            if cid2 not in cid2cid:
                cid2cid[cid2] = set()
            if cid2span[cid1]["min_y"] < cid2span[cid2]["min_y"]:
                box1 = [
                    cid2span[cid1]["min_x"],
                    cid2span[cid1]["min_y"],
                    cid2span[cid1]["max_x"],
                    cid2span[cid1]["max_y"],
                ]
                box2 = [
                    cid2span[cid2]["min_x"],
                    cid2span[cid2]["min_y"],
                    cid2span[cid2]["max_x"],
                    cid2span[cid2]["max_y"],
                ]
            else:
                box1 = [
                    cid2span[cid2]["min_x"],
                    cid2span[cid2]["min_y"],
                    cid2span[cid2]["max_x"],
                    cid2span[cid2]["max_y"],
                ]
                box2 = [
                    cid2span[cid1]["min_x"],
                    cid2span[cid1]["min_y"],
                    cid2span[cid1]["max_x"],
                    cid2span[cid1]["max_y"],
                ]
            if ((box1[1] < box2[1]) and (box1[3] > box2[1])) or (
                (box1[1] > box2[1]) and (box1[1] < box2[3])
            ):
                continue
            cid_pair_compared.add((min(cid1, cid2), max(cid1, cid2)))
            query_rect = (
                min(box1[0], box2[0]),
                min(box1[1], box2[1]),
                max(box1[2], box2[2]),
                max(box1[3], box2[3]),
            )
            connected = True
            for i3, b3 in enumerate(boxes):
                if (i3 == i1) or (i3 == i2):
                    continue
                if obj2cid[i1] == obj2cid[i3] or obj2cid[i2] == obj2cid[i3]:
                    continue
                box3 = b3.bbox
                if intersect(query_rect, box3):
                    connected = False
                    break
            if (
                (
                    (
                        round(box1[0]) == round(box2[0])
                        or round(box1[2]) == round(box2[2])
                    )
                    and connected
                )
                or (
                    round((box1[0] + box1[2]) / 2) == round((box2[0] + box2[2]) / 2)
                    and connected
                )
            ):  # or (abs((box1[0]+box1[2])/2-(box2[0]+box2[2])/2)<0.1*char_width and connected)):# or ((box1[0]<box2[0]) and (box1[2]>box2[0])) or ((box1[0]>box2[0]) and (box2[2]>box1[0]))): #added center alignemnt
                cid2cid[min(cid1, cid2)].add(max(cid1, cid2))
                min_cid = min(cid1, cid2)
                max_cid = max(cid1, cid2)
                for cid_iter in cid2cid2:
                    if cid2cid2[cid_iter] == cid2cid2[max_cid]:
                        cid2cid2[cid_iter] = cid2cid2[min_cid]
    # post-process cid2cid
    cid2obj2 = cid2obj[:]
    obj2cid2 = obj2cid[:]
    for cid in range(len(cid2cid2)):
        cid_merge = cid2cid2[cid]
        if cid != cid_merge:
            for obj_iter in cid2obj2[cid]:
                cid2obj2[cid_merge].add(obj_iter)
                obj2cid2[obj_iter] = cid_merge
            cid2obj2[cid] = set()
            # Features
            if_connected_by_align[cid_merge] = 1
            if_connected_by_align[cid] = 0
            if if_row_connected[cid_merge] == 1 or if_row_connected[cid] == 1:
                if_row_connected[cid_merge] = 1
                num_row_connected[cid_merge] += num_row_connected[cid]
                num_row_connected[cid] = 0
                # NOTE(review): ``cid2`` here is stale from the pairwise loop
                # above; resetting ``if_row_connected[cid]`` looks intended.
                if_row_connected[cid2] = 0
            if if_connected_by_span[cid_merge] == 1 or if_connected_by_span[cid] == 1:
                if_connected_by_span[cid_merge] = 1
                num_connected_by_span[cid_merge] += num_connected_by_span[cid]
                num_connected_by_span[cid] = 0
                if_connected_by_span[cid] = 0
            num_connected_by_align[cid_merge] += num_connected_by_align[cid]
            num_connected_by_align[cid] = 0
    # code to merge columns for table
    prev_clusters = obj2cid2
    while True:
        for obj1, b1 in enumerate(boxes):
            cid1 = obj2cid2[obj1]
            rid1 = obj2rid[obj1]
            if cid1 in blacklist:
                continue
            if obj1 in blacklist_obj:
                continue
            for obj2, b2 in enumerate(boxes):
                if obj1 == obj2:
                    continue
                if obj2cid2[obj2] == cid1:
                    rid2 = obj2rid[obj2]
                    if rid1 == rid2:
                        continue
                    for obj3 in rid2obj[rid2]:
                        cid3 = obj2cid2[obj3]
                        if obj3 in blacklist_obj:
                            continue
                        if cid1 != cid3:
                            for obj4 in cid2obj2[cid3]:
                                if obj4 == obj3:
                                    continue
                                if obj2rid[obj4] == rid1:
                                    min_cid = min(cid1, cid3)
                                    max_cid = max(cid1, cid3)
                                    for obj_iter in cid2obj2[max_cid]:
                                        cid2obj2[min_cid].add(obj_iter)
                                        obj2cid2[obj_iter] = min_cid
                                    cid2obj2[max_cid] = set()
                                    # Features
                                    if_vertical_columns_merged[min_cid] = 1
                                    if_vertical_columns_merged[max_cid] = 0
                                    num_vertical_columns_merged[min_cid] += (
                                        num_vertical_columns_merged[max_cid]
                                    )
                                    num_vertical_columns_merged[max_cid] = 0
                                    if (
                                        if_row_connected[min_cid] == 1
                                        or if_row_connected[max_cid] == 1
                                    ):
                                        if_row_connected[min_cid] = 1
                                        num_row_connected[min_cid] += num_row_connected[
                                            max_cid
                                        ]
                                        num_row_connected[max_cid] = 0
                                        if_row_connected[max_cid] = 0
                                    if (
                                        if_connected_by_span[min_cid] == 1
                                        or if_connected_by_span[max_cid] == 1
                                    ):
                                        if_connected_by_span[min_cid] = 1
                                        num_connected_by_span[min_cid] += (
                                            num_connected_by_span[max_cid]
                                        )
                                        num_connected_by_span[max_cid] = 0
                                        if_connected_by_span[max_cid] = 0
                                    if (
                                        if_connected_by_align[min_cid] == 1
                                        or if_connected_by_align[max_cid] == 1
                                    ):
                                        if_connected_by_align[min_cid] = 1
                                        num_connected_by_align[min_cid] += (
                                            num_connected_by_align[max_cid]
                                        )
                                        num_connected_by_align[max_cid] = 0
                                        if_connected_by_align[max_cid] = 0
                                    break
        if prev_clusters == obj2cid2:
            break
        prev_clusters = obj2cid2
    clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj2)]
    nodes = [Node(elems) for elems in clusters]
    node_indices = [i for i, x in enumerate(cid2obj2) if x]
    merge_indices = [i for i in range(len(node_indices))]
    page_stat = Node(boxes)
    nodes, merge_indices = merge_nodes(nodes, plane, page_stat, merge_indices)
    # Features
    for idx in range(len(merge_indices)):
        if merge_indices[idx] != idx:
            cid1 = node_indices[merge_indices[idx]]
            cid2 = node_indices[idx]
            if if_row_connected[cid1] == 1 or if_row_connected[cid2] == 1:
                if_row_connected[cid1] = 1
                num_row_connected[cid1] += num_row_connected[cid2]
                num_row_connected[cid2] = 0
                if_row_connected[cid2] = 0
            if if_connected_by_span[cid1] == 1 or if_connected_by_span[cid2] == 1:
                if_connected_by_span[cid1] = 1
                num_connected_by_span[cid1] += num_connected_by_span[cid2]
                num_connected_by_span[cid2] = 0
                if_connected_by_span[cid2] = 0
            if if_connected_by_align[cid1] == 1 or if_connected_by_align[cid2] == 1:
                if_connected_by_align[cid1] = 1
                num_connected_by_align[cid1] += num_connected_by_align[cid2]
                num_connected_by_align[cid2] = 0
                if_connected_by_align[cid2] = 0
            if (
                if_vertical_columns_merged[cid1] == 1
                or if_vertical_columns_merged[cid2] == 1
            ):
                if_vertical_columns_merged[cid1] = 1
                num_vertical_columns_merged[cid1] += num_vertical_columns_merged[cid2]
                num_vertical_columns_merged[cid2] = 0
                if_vertical_columns_merged[cid2] = 0
    # Get Word Spacing Features
    rid2space = defaultdict(float)
    rid2space_norm = defaultdict(float)
    row_indices = [i for i, x in enumerate(rid2obj) if x]
    for rid in row_indices:
        obj_list = list(rid2obj[rid])
        if len(obj_list) == 1:
            rid2space[rid] = 0
            continue
        obj_boxes = [boxes[obj].bbox[0] for obj in obj_list]
        sorted_obj_idx = [
            i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
        ]
        for obj_idx in range(len(sorted_obj_idx) - 1):
            rid2space[rid] += (
                boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
                - boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
            )
        rid2space_norm[rid] = rid2space[rid] / (len(obj_list) - 1)
    for idx, node in enumerate(nodes):
        node_idx = node_indices[idx]
        if merge_indices[idx] == idx:
            obj_list = []
            for idx_iter in range(len(merge_indices)):
                if merge_indices[idx_iter] == idx:
                    obj_list += list(cid2obj2[node_indices[idx_iter]])
            obj_list = list(set(obj_list))
            rid_list = list(set([obj2rid[obj] for obj in obj_list]))
            for rid in rid_list:
                total_word_space[node_idx] += rid2space[rid]
                avg_word_space_norm[node_idx] += rid2space_norm[rid]
            # NOTE(review): iterating ``rid2obj`` yields *sets* of object ids,
            # not ids — the ``obj in cid2obj2[node_idx]`` membership test is
            # applied to a set and looks wrong (``obj_list`` was probably
            # intended). Confirm before relying on ``obj_boxes`` here.
            obj_boxes = [
                boxes[obj].bbox[0] for obj in rid2obj if obj in cid2obj2[node_idx]
            ]
            sorted_obj_idx = [
                i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
            ]
            for obj_idx in range(len(sorted_obj_idx) - 1):
                node_space[node_idx] += (
                    boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
                    - boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
                )
                avg_node_space_norm[node_idx] += node_space[node_idx] / (
                    len(obj_boxes) - 1
                )
            # NOTE(review): divides by ``len(rid_list)`` with no guard —
            # a ZeroDivisionError is possible when a node has no rows,
            # matching the "float division by zero" log seen in practice.
            avg_word_space[node_idx] = total_word_space[node_idx] / len(rid_list)
            avg_word_space_norm[node_idx] /= len(rid_list)
            avg_node_space[node_idx] = node_space[node_idx] / len(rid_list)
            avg_node_space_norm[node_idx] /= len(rid_list)
    new_nodes = []
    new_node_indices = []
    for idx in range(len(merge_indices)):
        if merge_indices[idx] == idx:
            new_nodes.append(nodes[idx])
            new_node_indices.append(node_indices[idx])
    nodes = new_nodes
    node_indices = new_node_indices
    # Features
    for idx, node in enumerate(nodes):
        node_idx = node_indices[idx]
        node_bbox = (node.x0, node.y0, node.x1, node.y1)
        for i1, b1 in enumerate(boxes_segments):
            if intersect(node_bbox, b1.bbox):
                num_segments[node_idx] += 1
        for i1, b1 in enumerate(boxes_figures):
            if intersect(node_bbox, b1.bbox):
                num_figures[node_idx] += 1
        for i1, b1 in enumerate(boxes_curves):
            if intersect(node_bbox, b1.bbox):
                num_curves[node_idx] += 1
    # Keep only nodes whose layout looks table-like; a node containing an
    # element nearly as wide as the node itself is rejected unless the
    # element's text mentions "table".
    tables = []
    table_indices = []
    for idx, node in enumerate(nodes):
        node_idx = node_indices[idx]
        isTable = True
        if node.is_table():
            for elem in node.elems:
                if "table" in elem.get_text().lower():
                    continue
                if (node.width - elem.bbox[2] + elem.bbox[0]) < 2 * char_width:
                    isTable = False
            if isTable:
                tables.append(node)
                table_indices.append(node_idx)
    if combine == True:
        node_features = [0] * 17
        for idx, node in enumerate(nodes):
            node_idx = node_indices[idx]
            node_features = [
                sum(x)
                for x in zip(
                    node_features,
                    [
                        if_row_connected[node_idx],
                        num_row_connected[node_idx],
                        if_connected_by_span[node_idx],
                        num_connected_by_span[node_idx],
                        if_connected_by_align[node_idx],
                        num_connected_by_align[node_idx],
                        if_vertical_columns_merged[node_idx],
                        num_vertical_columns_merged[node_idx],
                        num_segments[node_idx],
                        num_curves[node_idx],
                        num_figures[node_idx],
                        total_word_space[node_idx],
                        avg_word_space[node_idx],
                        avg_word_space_norm[node_idx],
                        node_space[node_idx],
                        avg_node_space[node_idx],
                        avg_node_space_norm[node_idx],
                    ],
                )
            ]
        return [], node_features
    else:
        table_features = []
        for idx, table in enumerate(tables):
            table_idx = table_indices[idx]
            table_features += [
                [
                    if_row_connected[table_idx],
                    num_row_connected[table_idx],
                    if_connected_by_span[table_idx],
                    num_connected_by_span[table_idx],
                    if_connected_by_align[table_idx],
                    num_connected_by_align[table_idx],
                    if_vertical_columns_merged[table_idx],
                    num_vertical_columns_merged[table_idx],
                    num_segments[table_idx],
                    num_curves[table_idx],
                    num_figures[table_idx],
                    total_word_space[table_idx],
                    avg_word_space[table_idx],
                    avg_word_space_norm[table_idx],
                    node_space[table_idx],
                    avg_node_space[table_idx],
                    avg_node_space_norm[table_idx],
                ]
            ]
        return tables, table_features
|
https://github.com/HazyResearch/pdftotree/issues/25
|
$ pdftotree -vv dtc114w.pdf
[INFO] pdftotree.core - Digitized PDF detected, building tree structure...
[ERROR] pdftotree.TreeExtract - list index out of range
Traceback (most recent call last):
File "/home/lwhsiao/repos/pdftotree/pdftotree/TreeExtract.py", line 148, in get_candidates_alignments
nodes, features = parse_layout(elems, font_stat)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 36, in parse_layout
avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 1159, in get_most_common_font_pts
most_common_font_size = font_stat.most_common(1)[0][0]
IndexError: list index out of range
Traceback (most recent call last):
File "/home/lwhsiao/repos/pdftotree/.venv/bin/pdftotree", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/home/lwhsiao/repos/pdftotree/bin/pdftotree", line 82, in <module>
args.favor_figures, args.visualize)
File "/home/lwhsiao/repos/pdftotree/pdftotree/core.py", line 58, in parse
pdf_tree = extractor.get_tree_structure(model, favor_figures)
File "/home/lwhsiao/repos/pdftotree/pdftotree/TreeExtract.py", line 188, in get_tree_structure
favor_figures)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 666, in parse_tree_structure
avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 1159, in get_most_common_font_pts
most_common_font_size = font_stat.most_common(1)[0][0]
IndexError: list index out of range
|
IndexError
|
def get_figures(boxes, page_bbox, page_num, boxes_figures, page_width, page_height):
    """Merge figure boxes on a page into figure regions.

    Wraps each entry of ``boxes_figures`` in a Node, merges overlapping
    nodes via ``merge_nodes``, and returns one tuple per surviving root:
    (page_num, page_width, page_height, top, left, bottom, right).
    Returns [] when the page has no text boxes at all.
    """
    if not boxes:
        log.warning("No boxes to get figures from on page {}.".format(page_num))
        return []
    plane = Plane(page_bbox)
    plane.extend(boxes)
    figure_nodes = [Node(fig_box) for fig_box in boxes_figures]
    merge_map = list(range(len(figure_nodes)))
    page_stat = Node(boxes)
    merged, merge_map = merge_nodes(figure_nodes, plane, page_stat, merge_map)
    # A node is a merge root when it maps to its own index.
    roots = [merged[i] for i, target in enumerate(merge_map) if target == i]
    return [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in roots
    ]
|
def get_figures(boxes, page_bbox, page_num, boxes_figures, page_width, page_height):
    """Merge figure boxes on a page into figure regions.

    Wraps each entry of ``boxes_figures`` in a Node, merges overlapping
    nodes via ``merge_nodes``, and returns one tuple per surviving root:
    (page_num, page_width, page_height, top, left, bottom, right).

    Returns [] when ``boxes`` is empty — without this guard, downstream
    page statistics (e.g. font counters) are built from nothing and later
    fail with ZeroDivisionError / IndexError on empty pages.
    """
    if len(boxes) == 0:
        log.warning("No boxes to get figures from on page {}.".format(page_num))
        return []
    plane = Plane(page_bbox)
    plane.extend(boxes)
    nodes_figures = []
    for fig_box in boxes_figures:
        node_fig = Node(fig_box)
        nodes_figures.append(node_fig)
    merge_indices = [i for i in range(len(nodes_figures))]
    page_stat = Node(boxes)
    nodes, merge_indices = merge_nodes(nodes_figures, plane, page_stat, merge_indices)
    ##Merging Nodes
    new_nodes = []
    for idx in range(len(merge_indices)):
        # Keep only merge roots (a node that maps to its own index).
        if merge_indices[idx] == idx:
            new_nodes.append(nodes[idx])
    figures = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in new_nodes
    ]
    return figures
|
https://github.com/HazyResearch/pdftotree/issues/25
|
$ pdftotree -vv dtc114w.pdf
[INFO] pdftotree.core - Digitized PDF detected, building tree structure...
[ERROR] pdftotree.TreeExtract - list index out of range
Traceback (most recent call last):
File "/home/lwhsiao/repos/pdftotree/pdftotree/TreeExtract.py", line 148, in get_candidates_alignments
nodes, features = parse_layout(elems, font_stat)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 36, in parse_layout
avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 1159, in get_most_common_font_pts
most_common_font_size = font_stat.most_common(1)[0][0]
IndexError: list index out of range
Traceback (most recent call last):
File "/home/lwhsiao/repos/pdftotree/.venv/bin/pdftotree", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/home/lwhsiao/repos/pdftotree/bin/pdftotree", line 82, in <module>
args.favor_figures, args.visualize)
File "/home/lwhsiao/repos/pdftotree/pdftotree/core.py", line 58, in parse
pdf_tree = extractor.get_tree_structure(model, favor_figures)
File "/home/lwhsiao/repos/pdftotree/pdftotree/TreeExtract.py", line 188, in get_tree_structure
favor_figures)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 666, in parse_tree_structure
avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 1159, in get_most_common_font_pts
most_common_font_size = font_stat.most_common(1)[0][0]
IndexError: list index out of range
|
IndexError
|
def get_candidates_and_features_page_num(self, page_num):
    """Collect candidate bounding boxes and their feature matrix for one page.

    Returns a (boxes, features) pair; both are plain empty lists when the
    page yields no alignment-based candidates.
    """
    page_elems = self.elems[page_num]
    boxes, align_feats = self.get_candidates_alignments(page_num, page_elems)
    # Bail out early: no candidates means there are no features to compute.
    if not boxes:
        log.info("No boxes were found on page {}.".format(page_num))
        return [], []
    # Horizontally stack alignment features with the line-based features,
    # one row per candidate box.
    line_feats = get_lines_features(boxes, page_elems)
    features = np.concatenate(
        (np.array(align_feats), np.array(line_feats)), axis=1
    )
    return boxes, features
|
def get_candidates_and_features_page_num(self, page_num):
    """Build candidate boxes plus their combined feature matrix for a page.

    Returns ([], []) when no alignment candidates exist on the page.
    """
    elems = self.elems[page_num]
    bboxes, alignment_features = self.get_candidates_alignments(page_num, elems)
    if len(bboxes) == 0:
        # Nothing to featurize on this page.
        log.info("No boxes were found on page {}.".format(page_num))
        return [], []
    # Concatenate the two feature families column-wise, one row per box.
    combined = np.concatenate(
        (np.array(alignment_features), np.array(get_lines_features(bboxes, elems))),
        axis=1,
    )
    return bboxes, combined
|
https://github.com/HazyResearch/pdftotree/issues/20
|
Digitized PDF detected, building tree structure
float division by zero
float division by zero
float division by zero
(89.33853599999992, 89.33853599999992, 50)
Traceback (most recent call last):
File "chh_test.py", line 10, in <module>
pdftotree.parse(filename, htmlpath, model_path=None, favor_figures=False, visualize=True)
File "/home/chhenning/repos/pdftotree/pdftotree/core.py", line 57, in parse
pdf_tree = extractor.get_tree_structure(model, favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/TreeExtract.py", line 195, in get_tree_structure
favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 615, in parse_tree_structure
figures_page = get_figures(mentions, elems.layout.bbox, page_num, boxes_figures, page_width, page_height)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 945, in get_figures
page_stat = Node(boxes)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/node.py", line 44, in __init__
self.set_bbox(bound_elems(elems))
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/vector_utils.py", line 101, in bound_elems
group_x0 = min(map(lambda l:l.x0,elems))
ValueError: min() arg is an empty sequence
|
ValueError
|
def get_html_tree(self):
    """Serialize the per-page tree structure into a single HTML string.

    Each page becomes a ``<div id=N>`` whose children are emitted in
    column_order: tables via get_html_table, figures as ``<figure bbox=...>``
    elements, and everything else as a generic tag carrying its text and
    bounding-box coordinates as attributes.

    Returns:
        The full HTML document string (also stored on ``self.html``).
    """
    self.html = "<html>"
    for page_num in self.elems.keys():
        page_html = "<div id=" + str(page_num) + ">"
        boxes = []
        # Flatten the tree into one [type, top, left, bottom, right] entry
        # per bounding box so all element kinds can be sorted together.
        for clust in self.tree[page_num]:
            for pnum, pwidth, pheight, top, left, bottom, right in self.tree[page_num][
                clust
            ]:
                boxes += [[clust.lower().replace(" ", "_"), top, left, bottom, right]]
        # TODO: We need to detect columns and sort acccordingly.
        boxes.sort(key=cmp_to_key(column_order))
        for box in boxes:
            if box[0] == "table":
                table = box[1:]
                table_html = self.get_html_table(table, page_num)
                # On Python 2 get_html_table yields bytes (hence the decode);
                # on Python 3 it is already text.
                if six.PY2:
                    page_html += table_html.decode("utf-8")
                elif six.PY3:
                    page_html += table_html
            elif box[0] == "figure":
                # Figures carry no text; emit only their bbox coordinates.
                fig_str = [str(i) for i in box[1:]]
                fig_html = "<figure bbox=" + ",".join(fig_str) + "></figure>"
                if six.PY2:
                    page_html += fig_html.decode("utf-8")
                elif six.PY3:
                    page_html += fig_html
            else:
                # Generic element: emit its characters and bbox as attributes.
                (box_html, char_html, top_html, left_html, bottom_html, right_html) = (
                    self.get_html_others(box[1:], page_num)
                )
                page_html += (
                    "<"
                    + box[0]
                    + " char='"
                    + char_html
                    + "', top='"
                    + top_html
                    + "', left='"
                    + left_html
                    + "', bottom='"
                    + bottom_html
                    + "', right='"
                    + right_html
                    + "'>"
                    + box_html
                    + "</"
                    + box[0]
                    + ">"
                )
        page_html += "</div>"
        self.html += page_html
    self.html += "</html>"
    return self.html
|
def get_html_tree(self):
    """Serialize the per-page tree structure into a single HTML string.

    Pages become ``<div id=N>`` containers; children are emitted in
    column_order as table HTML, ``<figure bbox=...>`` elements, or generic
    tags whose attributes hold the element's text and bbox coordinates.

    Returns:
        The full HTML document string (also stored on ``self.html``).
    """
    self.html = "<html>"
    for page_num in self.elems.keys():
        page_html = "<div id=" + str(page_num) + ">"
        boxes = []
        # Flatten the tree into one [type, top, left, bottom, right] entry
        # per bounding box so all element kinds can be sorted together.
        for clust in self.tree[page_num]:
            for pnum, pwidth, pheight, top, left, bottom, right in self.tree[page_num][
                clust
            ]:
                boxes += [[clust.lower().replace(" ", "_"), top, left, bottom, right]]
        # TODO: We need to detect columns and sort acccordingly.
        boxes.sort(key=cmp_to_key(column_order))
        for box in boxes:
            if box[0] == "table":
                table = box[1:]
                table_html = self.get_html_table(table, page_num)
                # On Python 2 get_html_table yields bytes (hence the decode);
                # on Python 3 it is already text.
                if six.PY2:
                    page_html += table_html.decode("utf-8")
                elif six.PY3:
                    page_html += table_html
            elif box[0] == "figure":
                # Figures carry no text; emit only their bbox coordinates.
                fig_str = [str(i) for i in box[1:]]
                fig_html = "<figure bbox=" + ",".join(fig_str) + "></figure>"
                if six.PY2:
                    page_html += fig_html.decode("utf-8")
                elif six.PY3:
                    page_html += fig_html
            else:
                # Generic element: emit its characters and bbox as attributes.
                (box_html, char_html, top_html, left_html, bottom_html, right_html) = (
                    self.get_html_others(box[1:], page_num)
                )
                page_html += (
                    "<"
                    + box[0]
                    + " char='"
                    + char_html
                    + "', top='"
                    + top_html
                    + "', left='"
                    + left_html
                    + "', bottom='"
                    + bottom_html
                    + "', right='"
                    + right_html
                    + "'>"
                    + box_html
                    + "</"
                    + box[0]
                    + ">"
                )
        page_html += "</div>"
        self.html += page_html
    self.html += "</html>"
    return self.html
|
https://github.com/HazyResearch/pdftotree/issues/20
|
Digitized PDF detected, building tree structure
float division by zero
float division by zero
float division by zero
(89.33853599999992, 89.33853599999992, 50)
Traceback (most recent call last):
File "chh_test.py", line 10, in <module>
pdftotree.parse(filename, htmlpath, model_path=None, favor_figures=False, visualize=True)
File "/home/chhenning/repos/pdftotree/pdftotree/core.py", line 57, in parse
pdf_tree = extractor.get_tree_structure(model, favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/TreeExtract.py", line 195, in get_tree_structure
favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 615, in parse_tree_structure
figures_page = get_figures(mentions, elems.layout.bbox, page_num, boxes_figures, page_width, page_height)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 945, in get_figures
page_stat = Node(boxes)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/node.py", line 44, in __init__
self.set_bbox(bound_elems(elems))
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/vector_utils.py", line 101, in bound_elems
group_x0 = min(map(lambda l:l.x0,elems))
ValueError: min() arg is an empty sequence
|
ValueError
|
def cluster_vertically_aligned_boxes(
    boxes,
    page_bbox,
    avg_font_pts,
    width,
    char_width,
    boxes_segments,
    boxes_curves,
    boxes_figures,
    page_width,
    combine,
):
    """Cluster text boxes into vertically aligned groups and score them.

    Runs several fixed-point clustering passes (vertical alignment, row
    grouping, row merging, span-based vertical merging, horizontal/column
    merging) over the page's text boxes, then extracts table candidates and
    a set of alignment features for each candidate.

    Args:
        boxes: text-box elements exposing a ``.bbox`` (x0, y0, x1, y1)
            tuple (pdfminer-style layout objects — assumed, confirm at caller).
        page_bbox: bounding box of the page, used to seed the Plane index.
        avg_font_pts: average font size, used as a vertical-proximity scale.
        width: usable page width, used for blacklist/width heuristics.
        char_width: average character width, used as a horizontal-gap scale.
        boxes_segments: line-segment elements counted per candidate node.
        boxes_curves: curve elements counted per candidate node.
        boxes_figures: figure elements counted per candidate node.
        page_width: unused here; kept for interface compatibility.
        combine: when True, return ([], summed_feature_vector) over all
            nodes; otherwise return (tables, per-table feature rows).

    Returns:
        ``([], node_features)`` when ``combine`` is True, else
        ``(tables, table_features)`` where ``tables`` is a list of Node
        objects and ``table_features`` a list of 17-element feature rows.
    """
    # Filter out boxes with zero width or height
    filtered_boxes = []
    for bbox in boxes:
        if bbox.x1 - bbox.x0 > 0 and bbox.y1 - bbox.y0 > 0:
            filtered_boxes.append(bbox)
    boxes = filtered_boxes
    # Too many "." in the Table of Content pages
    if len(boxes) == 0:
        log.warning("No boxes were found to cluster.")
        return [], []
    elif len(boxes) > 3500:
        log.warning("Too many '.' in the Table of Content pages?")
        return [], []
    plane = Plane(page_bbox)
    plane.extend(boxes)
    # First pass: union-find style vertical clustering. Each box starts in
    # its own cluster; pairs that are vertically close AND left/right/center
    # aligned (or horizontally overlapping) are merged until a fixed point.
    # initialize clusters
    cid2obj = [set([i]) for i in range(len(boxes))]
    # default object map to cluster with its own index
    obj2cid = list(range(len(boxes)))
    prev_clusters = obj2cid
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                if b1.bbox[1] < b2.bbox[1]:
                    box1 = b1.bbox
                    box2 = b2.bbox
                elif b2.bbox[1] < b1.bbox[1]:
                    box1 = b2.bbox
                    box2 = b1.bbox
                else:
                    # horizontally aligned
                    continue
                if (
                    box2[1] < box1[3]
                    or (box2[1] - box1[1] < 1.5 * avg_font_pts)
                    or (box2[3] - box1[3] < 1.5 * avg_font_pts)
                ):
                    # can probably do better if we find the average space
                    # between words
                    if (
                        abs(box1[0] - box2[0]) < 3
                        or abs(box1[2] - box2[2]) < 3
                        or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
                        or ((box1[0] < box2[0]) and (box1[2] > box2[0]))
                        or ((box1[0] > box2[0]) and (box2[2] > box1[0]))
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        # move all objects from cluster cid2 to cid1
                        # reassign cluster ids for all such objects as well
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # Second pass: group boxes into rows (same baseline / top / vertical
    # center within a small fraction of the average font size).
    rid2obj = [set([i]) for i in range(len(boxes))]  # initialize clusters
    # default object map to cluster with its own index
    obj2rid = list(range(len(boxes)))
    prev_clusters = obj2rid
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if (
                    (abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
                    or (abs(box1[3] - box2[3]) < 0.11 * avg_font_pts)
                    or (
                        round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
                    )
                ):
                    min_i = min(i1, i2)
                    max_i = max(i1, i2)
                    rid1 = obj2rid[min_i]
                    rid2 = obj2rid[max_i]
                    for obj_iter in rid2obj[rid2]:
                        rid2obj[rid1].add(obj_iter)
                        obj2rid[obj_iter] = rid1
                    rid2obj[rid2] = set()
        if prev_clusters == obj2rid:
            break
        prev_clusters = obj2rid
    # Record pairs that must NOT be merged later: boxes in the same vertical
    # cluster whose rows have very different total text widths.
    not_merge = set()
    for i1, b1 in enumerate(boxes):
        for i2 in cid2obj[obj2cid[i1]]:
            if i1 == i2:
                continue
            row1 = obj2rid[i1]
            row2 = obj2rid[i2]
            if row1 == row2:
                continue
            # BUG FIX: b2 was never bound in this loop and leaked a stale
            # value from the clustering passes above; bind it to the box
            # actually being compared so the ordering test below is valid.
            b2 = boxes[i2]
            if b1.bbox[1] < b2.bbox[1]:
                box1 = b1.bbox
                box2 = b2.bbox
            elif b2.bbox[1] < b1.bbox[1]:
                box1 = b2.bbox
                box2 = b1.bbox
            else:
                # horizontally aligned
                continue
            text_1 = 0.0
            for obj in rid2obj[row1]:
                text_1 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
            text_2 = 0.0
            for obj in rid2obj[row2]:
                text_2 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
            if abs(text_1 - text_2) / width > 0.1:
                min_i = min(i1, i2)
                max_i = max(i1, i2)
                not_merge.add((min_i, max_i))
    # Alignment Features
    # If text boxes are very close in a row
    if_row_connected = defaultdict(int)
    num_row_connected = defaultdict(lambda: 1)
    # If text is merged using span code in adjacent rows, this feature tells the number of times the cluster went through span based clustering
    if_connected_by_span = defaultdict(int)
    num_connected_by_span = defaultdict(lambda: 1)
    # If columns were merged using cluster alignment
    if_connected_by_align = defaultdict(int)
    num_connected_by_align = defaultdict(lambda: 1)
    # If vertical columns were merged
    if_vertical_columns_merged = defaultdict(int)
    num_vertical_columns_merged = defaultdict(lambda: 1)
    # Number of Line Segments, Curves and Figures
    num_segments = defaultdict(int)
    num_curves = defaultdict(int)
    num_figures = defaultdict(int)
    # Average Word Space
    total_word_space = defaultdict(float)
    avg_word_space = defaultdict(float)
    avg_word_space_norm = defaultdict(float)
    node_space = defaultdict(float)
    avg_node_space = defaultdict(float)
    avg_node_space_norm = defaultdict(float)
    # Restart clustering from scratch, this time tracking features.
    cid2obj = [set([i]) for i in range(len(boxes))]  # initialize clusters
    obj2cid = list(
        range(len(boxes))
    )  # default object map to cluster with its own index
    prev_clusters = obj2cid
    # add the code for merging close text boxes in particular row
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if obj2rid[i1] == obj2rid[i2]:
                    # Same row and horizontal gap within two char widths.
                    if (
                        (b1.bbox[0] < b2.bbox[0])
                        and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
                    ) or (
                        (b2.bbox[0] < b1.bbox[0])
                        and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
                        # Features
                        if_row_connected[cid1] = 1
                        if_row_connected[cid2] = 0
                        num_row_connected[cid1] += num_row_connected[cid2]
                        num_row_connected[cid2] = 0
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # vertical alignment code
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                if b1.bbox[1] < b2.bbox[1]:
                    box1 = b1.bbox
                    box2 = b2.bbox
                elif b2.bbox[1] < b1.bbox[1]:
                    box1 = b2.bbox
                    box2 = b1.bbox
                else:
                    # horizontally aligned
                    continue
                if (
                    box2[1] < box1[3]
                    or (box2[1] - box1[1] < 1.5 * avg_font_pts)
                    or (box2[3] - box1[3] < 1.5 * avg_font_pts)
                ):  # can probably do better if we find the average space between words
                    if (
                        abs(box1[0] - box2[0]) < 3
                        or abs(box1[2] - box2[2]) < 3
                        or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
                    ):  # or ((box1[0]<box2[0]) and (box1[2]>box2[0])) or ((box1[0]>box2[0]) and (box2[2]>box1[0]))): #added center alignemnt
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        if (min_i, max_i) not in not_merge:
                            cid1 = obj2cid[min_i]
                            cid2 = obj2cid[max_i]
                            # move all objects from cluster cid2 to cid1
                            # reassign cluster ids for all such objects as well
                            for obj_iter in cid2obj[cid2]:
                                cid2obj[cid1].add(obj_iter)
                                obj2cid[obj_iter] = cid1
                            cid2obj[cid2] = set()
                            # Features
                            if_connected_by_span[cid1] = 1
                            if_connected_by_span[cid2] = 0
                            if (
                                if_row_connected[cid1] == 1
                                or if_row_connected[cid2] == 1
                            ):
                                if_row_connected[cid1] = 1
                                num_row_connected[cid1] += num_row_connected[cid2]
                                num_row_connected[cid2] = 0
                                if_row_connected[cid2] = 0
                            num_connected_by_span[cid1] = (
                                num_connected_by_span[cid1]
                                + num_connected_by_span[cid2]
                            )
                            num_connected_by_span[cid2] = 0
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # blacklist nearly half-page wide clusters before horizontal merging
    cid2obj2 = cid2obj[:]
    obj2cid2 = obj2cid[:]
    blacklist = set()
    blacklist_obj = set()
    for cid_iter in range(len(cid2obj2)):
        cid = cid2obj2[cid_iter]
        xmin = float("Inf")
        xmax = float("-Inf")
        for obj in cid:
            xmin = min(xmin, boxes[obj].bbox[0])
            xmax = max(xmax, boxes[obj].bbox[2])
        if ((xmax - xmin) > width / 2.75 and (xmax - xmin) < width / 2) or (
            (xmax - xmin) > 0.9 * width
        ):
            blacklist.add(cid_iter)
            for obj in cid:
                blacklist_obj.add(obj)
                for obj_iter in rid2obj[obj2rid[obj]]:
                    if (
                        boxes[obj_iter].bbox[0] >= xmin
                        and boxes[obj_iter].bbox[2] <= xmax
                    ):
                        blacklist_obj.add(obj_iter)
    # create a cluster span
    cid2span = {}
    for cid in range(len(cid2obj)):
        cid2span[cid] = {}
        cid2span[cid]["min_x"] = float("Inf")
        cid2span[cid]["min_y"] = float("Inf")
        cid2span[cid]["max_x"] = float("-Inf")
        cid2span[cid]["max_y"] = float("-Inf")
        for obj in cid2obj[cid]:
            cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
            cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
            cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
            cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
    # Pairwise cluster comparison: merge clusters whose spans are left/right/
    # center aligned and have no other box between them (cid2cid2 acts as a
    # value-indexed union-find over cluster ids).
    cid2cid = {}
    cid_pair_compared = set()
    cid2cid2 = [cid for cid in range(len(cid2obj))]
    for i1, b1 in enumerate(boxes):
        for i2, b2 in enumerate(boxes):
            if i1 == i2:
                continue
            if i1 in blacklist_obj or i2 in blacklist_obj:
                continue
            cid1 = obj2cid[i1]
            cid2 = obj2cid[i2]
            if (min(cid1, cid2), max(cid1, cid2)) in cid_pair_compared:
                continue
            if cid1 == cid2:
                continue
            if obj2rid[i1] == obj2rid[i2]:
                continue
            if cid1 not in cid2cid:
                cid2cid[cid1] = set()
            if cid2 not in cid2cid:
                cid2cid[cid2] = set()
            if cid2span[cid1]["min_y"] < cid2span[cid2]["min_y"]:
                box1 = [
                    cid2span[cid1]["min_x"],
                    cid2span[cid1]["min_y"],
                    cid2span[cid1]["max_x"],
                    cid2span[cid1]["max_y"],
                ]
                box2 = [
                    cid2span[cid2]["min_x"],
                    cid2span[cid2]["min_y"],
                    cid2span[cid2]["max_x"],
                    cid2span[cid2]["max_y"],
                ]
            else:
                box1 = [
                    cid2span[cid2]["min_x"],
                    cid2span[cid2]["min_y"],
                    cid2span[cid2]["max_x"],
                    cid2span[cid2]["max_y"],
                ]
                box2 = [
                    cid2span[cid1]["min_x"],
                    cid2span[cid1]["min_y"],
                    cid2span[cid1]["max_x"],
                    cid2span[cid1]["max_y"],
                ]
            # Skip vertically overlapping spans.
            if ((box1[1] < box2[1]) and (box1[3] > box2[1])) or (
                (box1[1] > box2[1]) and (box1[1] < box2[3])
            ):
                continue
            cid_pair_compared.add((min(cid1, cid2), max(cid1, cid2)))
            query_rect = (
                min(box1[0], box2[0]),
                min(box1[1], box2[1]),
                max(box1[2], box2[2]),
                max(box1[3], box2[3]),
            )
            # "connected" means no third-party box intrudes between the spans.
            connected = True
            for i3, b3 in enumerate(boxes):
                if (i3 == i1) or (i3 == i2):
                    continue
                if obj2cid[i1] == obj2cid[i3] or obj2cid[i2] == obj2cid[i3]:
                    continue
                box3 = b3.bbox
                if intersect(query_rect, box3):
                    connected = False
                    break
            if (
                (
                    (
                        round(box1[0]) == round(box2[0])
                        or round(box1[2]) == round(box2[2])
                    )
                    and connected
                )
                or (
                    round((box1[0] + box1[2]) / 2) == round((box2[0] + box2[2]) / 2)
                    and connected
                )
            ):  # or (abs((box1[0]+box1[2])/2-(box2[0]+box2[2])/2)<0.1*char_width and connected)):# or ((box1[0]<box2[0]) and (box1[2]>box2[0])) or ((box1[0]>box2[0]) and (box2[2]>box1[0]))): #added center alignemnt
                cid2cid[min(cid1, cid2)].add(max(cid1, cid2))
                min_cid = min(cid1, cid2)
                max_cid = max(cid1, cid2)
                for cid_iter in cid2cid2:
                    if cid2cid2[cid_iter] == cid2cid2[max_cid]:
                        cid2cid2[cid_iter] = cid2cid2[min_cid]
    # post-process cid2cid
    cid2obj2 = cid2obj[:]
    obj2cid2 = obj2cid[:]
    for cid in range(len(cid2cid2)):
        cid_merge = cid2cid2[cid]
        if cid != cid_merge:
            for obj_iter in cid2obj2[cid]:
                cid2obj2[cid_merge].add(obj_iter)
                obj2cid2[obj_iter] = cid_merge
            cid2obj2[cid] = set()
            # Features
            if_connected_by_align[cid_merge] = 1
            if_connected_by_align[cid] = 0
            if if_row_connected[cid_merge] == 1 or if_row_connected[cid] == 1:
                if_row_connected[cid_merge] = 1
                num_row_connected[cid_merge] += num_row_connected[cid]
                num_row_connected[cid] = 0
                # BUG FIX: previously cleared if_row_connected[cid2], a stale
                # variable left over from the pairwise loop above; the
                # merged-away cluster in this scope is `cid` (mirrors the
                # handling of if_connected_by_span just below).
                if_row_connected[cid] = 0
            if if_connected_by_span[cid_merge] == 1 or if_connected_by_span[cid] == 1:
                if_connected_by_span[cid_merge] = 1
                num_connected_by_span[cid_merge] += num_connected_by_span[cid]
                num_connected_by_span[cid] = 0
                if_connected_by_span[cid] = 0
            num_connected_by_align[cid_merge] += num_connected_by_align[cid]
            num_connected_by_align[cid] = 0
    # code to merge columns for table
    prev_clusters = obj2cid2
    while True:
        for obj1, b1 in enumerate(boxes):
            cid1 = obj2cid2[obj1]
            rid1 = obj2rid[obj1]
            if cid1 in blacklist:
                continue
            if obj1 in blacklist_obj:
                continue
            for obj2, b2 in enumerate(boxes):
                if obj1 == obj2:
                    continue
                if obj2cid2[obj2] == cid1:
                    rid2 = obj2rid[obj2]
                    if rid1 == rid2:
                        continue
                    for obj3 in rid2obj[rid2]:
                        cid3 = obj2cid2[obj3]
                        if obj3 in blacklist_obj:
                            continue
                        if cid1 != cid3:
                            for obj4 in cid2obj2[cid3]:
                                if obj4 == obj3:
                                    continue
                                if obj2rid[obj4] == rid1:
                                    min_cid = min(cid1, cid3)
                                    max_cid = max(cid1, cid3)
                                    for obj_iter in cid2obj2[max_cid]:
                                        cid2obj2[min_cid].add(obj_iter)
                                        obj2cid2[obj_iter] = min_cid
                                    cid2obj2[max_cid] = set()
                                    # Features
                                    if_vertical_columns_merged[min_cid] = 1
                                    if_vertical_columns_merged[max_cid] = 0
                                    num_vertical_columns_merged[min_cid] += (
                                        num_vertical_columns_merged[max_cid]
                                    )
                                    num_vertical_columns_merged[max_cid] = 0
                                    if (
                                        if_row_connected[min_cid] == 1
                                        or if_row_connected[max_cid] == 1
                                    ):
                                        if_row_connected[min_cid] = 1
                                        num_row_connected[min_cid] += num_row_connected[
                                            max_cid
                                        ]
                                        num_row_connected[max_cid] = 0
                                        if_row_connected[max_cid] = 0
                                    if (
                                        if_connected_by_span[min_cid] == 1
                                        or if_connected_by_span[max_cid] == 1
                                    ):
                                        if_connected_by_span[min_cid] = 1
                                        num_connected_by_span[min_cid] += (
                                            num_connected_by_span[max_cid]
                                        )
                                        num_connected_by_span[max_cid] = 0
                                        if_connected_by_span[max_cid] = 0
                                    if (
                                        if_connected_by_align[min_cid] == 1
                                        or if_connected_by_align[max_cid] == 1
                                    ):
                                        if_connected_by_align[min_cid] = 1
                                        num_connected_by_align[min_cid] += (
                                            num_connected_by_align[max_cid]
                                        )
                                        num_connected_by_align[max_cid] = 0
                                        if_connected_by_align[max_cid] = 0
                                    break
        if prev_clusters == obj2cid2:
            break
        prev_clusters = obj2cid2
    # Materialize surviving clusters as Nodes and merge overlapping ones.
    clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj2)]
    nodes = [Node(elems) for elems in clusters]
    node_indices = [i for i, x in enumerate(cid2obj2) if x]
    merge_indices = [i for i in range(len(node_indices))]
    page_stat = Node(boxes)
    nodes, merge_indices = merge_nodes(nodes, plane, page_stat, merge_indices)
    # Features: propagate feature counters from merged-away nodes to the
    # node they were merged into.
    for idx in range(len(merge_indices)):
        if merge_indices[idx] != idx:
            cid1 = node_indices[merge_indices[idx]]
            cid2 = node_indices[idx]
            if if_row_connected[cid1] == 1 or if_row_connected[cid2] == 1:
                if_row_connected[cid1] = 1
                num_row_connected[cid1] += num_row_connected[cid2]
                num_row_connected[cid2] = 0
                if_row_connected[cid2] = 0
            if if_connected_by_span[cid1] == 1 or if_connected_by_span[cid2] == 1:
                if_connected_by_span[cid1] = 1
                num_connected_by_span[cid1] += num_connected_by_span[cid2]
                num_connected_by_span[cid2] = 0
                if_connected_by_span[cid2] = 0
            if if_connected_by_align[cid1] == 1 or if_connected_by_align[cid2] == 1:
                if_connected_by_align[cid1] = 1
                num_connected_by_align[cid1] += num_connected_by_align[cid2]
                num_connected_by_align[cid2] = 0
                if_connected_by_align[cid2] = 0
            if (
                if_vertical_columns_merged[cid1] == 1
                or if_vertical_columns_merged[cid2] == 1
            ):
                if_vertical_columns_merged[cid1] = 1
                num_vertical_columns_merged[cid1] += num_vertical_columns_merged[cid2]
                num_vertical_columns_merged[cid2] = 0
                if_vertical_columns_merged[cid2] = 0
    # Get Word Spacing Features
    rid2space = defaultdict(float)
    rid2space_norm = defaultdict(float)
    row_indices = [i for i, x in enumerate(rid2obj) if x]
    for rid in row_indices:
        obj_list = list(rid2obj[rid])
        if len(obj_list) == 1:
            rid2space[rid] = 0
            continue
        obj_boxes = [boxes[obj].bbox[0] for obj in obj_list]
        sorted_obj_idx = [
            i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
        ]
        for obj_idx in range(len(sorted_obj_idx) - 1):
            rid2space[rid] += (
                boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
                - boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
            )
        rid2space_norm[rid] = rid2space[rid] / (len(obj_list) - 1)
    for idx, node in enumerate(nodes):
        node_idx = node_indices[idx]
        if merge_indices[idx] == idx:
            obj_list = []
            for idx_iter in range(len(merge_indices)):
                if merge_indices[idx_iter] == idx:
                    obj_list += list(cid2obj2[node_indices[idx_iter]])
            obj_list = list(set(obj_list))
            rid_list = list(set([obj2rid[obj] for obj in obj_list]))
            for rid in rid_list:
                total_word_space[node_idx] += rid2space[rid]
                avg_word_space_norm[node_idx] += rid2space_norm[rid]
            # NOTE(review): iterating rid2obj here yields *sets*, and a set
            # is never a member of the int-set cid2obj2[node_idx], so this
            # comprehension always produces [] and node_space stays 0 —
            # looks like obj_list was intended; confirm before changing, as
            # the node-space features downstream depend on this behavior.
            obj_boxes = [
                boxes[obj].bbox[0] for obj in rid2obj if obj in cid2obj2[node_idx]
            ]
            sorted_obj_idx = [
                i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
            ]
            for obj_idx in range(len(sorted_obj_idx) - 1):
                node_space[node_idx] += (
                    boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
                    - boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
                )
            avg_node_space_norm[node_idx] += node_space[node_idx] / (
                len(obj_boxes) - 1
            )
            avg_word_space[node_idx] = total_word_space[node_idx] / len(rid_list)
            avg_word_space_norm[node_idx] /= len(rid_list)
            avg_node_space[node_idx] = node_space[node_idx] / len(rid_list)
            avg_node_space_norm[node_idx] /= len(rid_list)
    # Keep only the surviving (non-merged-away) nodes.
    new_nodes = []
    new_node_indices = []
    for idx in range(len(merge_indices)):
        if merge_indices[idx] == idx:
            new_nodes.append(nodes[idx])
            new_node_indices.append(node_indices[idx])
    nodes = new_nodes
    node_indices = new_node_indices
    # Features: count segments/figures/curves intersecting each node.
    for idx, node in enumerate(nodes):
        node_idx = node_indices[idx]
        node_bbox = (node.x0, node.y0, node.x1, node.y1)
        for i1, b1 in enumerate(boxes_segments):
            if intersect(node_bbox, b1.bbox):
                num_segments[node_idx] += 1
        for i1, b1 in enumerate(boxes_figures):
            if intersect(node_bbox, b1.bbox):
                num_figures[node_idx] += 1
        for i1, b1 in enumerate(boxes_curves):
            if intersect(node_bbox, b1.bbox):
                num_curves[node_idx] += 1
    # Select table candidates: nodes classified as tables whose elements
    # leave enough horizontal slack (except "table" caption text).
    tables = []
    table_indices = []
    for idx, node in enumerate(nodes):
        node_idx = node_indices[idx]
        isTable = True
        if node.is_table():
            for elem in node.elems:
                if "table" in elem.get_text().lower():
                    continue
                if (node.width - elem.bbox[2] + elem.bbox[0]) < 2 * char_width:
                    isTable = False
            if isTable:
                tables.append(node)
                table_indices.append(node_idx)
    if combine == True:
        # Sum the 17 feature values over all nodes into one vector.
        node_features = [0] * 17
        for idx, node in enumerate(nodes):
            node_idx = node_indices[idx]
            node_features = [
                sum(x)
                for x in zip(
                    node_features,
                    [
                        if_row_connected[node_idx],
                        num_row_connected[node_idx],
                        if_connected_by_span[node_idx],
                        num_connected_by_span[node_idx],
                        if_connected_by_align[node_idx],
                        num_connected_by_align[node_idx],
                        if_vertical_columns_merged[node_idx],
                        num_vertical_columns_merged[node_idx],
                        num_segments[node_idx],
                        num_curves[node_idx],
                        num_figures[node_idx],
                        total_word_space[node_idx],
                        avg_word_space[node_idx],
                        avg_word_space_norm[node_idx],
                        node_space[node_idx],
                        avg_node_space[node_idx],
                        avg_node_space_norm[node_idx],
                    ],
                )
            ]
        return [], node_features
    else:
        # One 17-element feature row per table candidate.
        table_features = []
        for idx, table in enumerate(tables):
            table_idx = table_indices[idx]
            table_features += [
                [
                    if_row_connected[table_idx],
                    num_row_connected[table_idx],
                    if_connected_by_span[table_idx],
                    num_connected_by_span[table_idx],
                    if_connected_by_align[table_idx],
                    num_connected_by_align[table_idx],
                    if_vertical_columns_merged[table_idx],
                    num_vertical_columns_merged[table_idx],
                    num_segments[table_idx],
                    num_curves[table_idx],
                    num_figures[table_idx],
                    total_word_space[table_idx],
                    avg_word_space[table_idx],
                    avg_word_space_norm[table_idx],
                    node_space[table_idx],
                    avg_node_space[table_idx],
                    avg_node_space_norm[table_idx],
                ]
            ]
        return tables, table_features
|
def cluster_vertically_aligned_boxes(
boxes,
page_bbox,
avg_font_pts,
width,
char_width,
boxes_segments,
boxes_curves,
boxes_figures,
page_width,
combine,
):
# Too many "." in the Table of Content pages
if len(boxes) == 0:
log.warning("No boxes were found to cluster.")
return [], []
elif len(boxes) > 3500:
log.warning("Too many '.' in the Table of Content pages?")
return [], []
plane = Plane(page_bbox)
plane.extend(boxes)
# initialize clusters
cid2obj = [set([i]) for i in range(len(boxes))]
# default object map to cluster with its own index
obj2cid = list(range(len(boxes)))
prev_clusters = obj2cid
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
if (
box2[1] < box1[3]
or (box2[1] - box1[1] < 1.5 * avg_font_pts)
or (box2[3] - box1[3] < 1.5 * avg_font_pts)
):
# can probably do better if we find the average space
# between words
if (
abs(box1[0] - box2[0]) < 3
or abs(box1[2] - box2[2]) < 3
or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
or ((box1[0] < box2[0]) and (box1[2] > box2[0]))
or ((box1[0] > box2[0]) and (box2[2] > box1[0]))
):
min_i = min(i1, i2)
max_i = max(i1, i2)
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
# move all objects from cluster cid2 to cid1
# reassign cluster ids for all such objects as well
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
if prev_clusters == obj2cid:
break
prev_clusters = obj2cid
clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
rid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
# default object map to cluster with its own index
obj2rid = list(range(len(boxes)))
prev_clusters = obj2rid
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
continue
box1 = b1.bbox
box2 = b2.bbox
if (
(abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
or (abs(box1[3] - box2[3]) < 0.11 * avg_font_pts)
or (
round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
)
):
min_i = min(i1, i2)
max_i = max(i1, i2)
rid1 = obj2rid[min_i]
rid2 = obj2rid[max_i]
for obj_iter in rid2obj[rid2]:
rid2obj[rid1].add(obj_iter)
obj2rid[obj_iter] = rid1
rid2obj[rid2] = set()
if prev_clusters == obj2rid:
break
prev_clusters = obj2rid
not_merge = set()
for i1, b1 in enumerate(boxes):
for i2 in cid2obj[obj2cid[i1]]:
if i1 == i2:
continue
row1 = obj2rid[i1]
row2 = obj2rid[i2]
if row1 == row2:
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
text_1 = 0.0
for obj in rid2obj[row1]:
text_1 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
text_2 = 0.0
for obj in rid2obj[row2]:
text_2 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
if abs(text_1 - text_2) / width > 0.1:
min_i = min(i1, i2)
max_i = max(i1, i2)
not_merge.add((min_i, max_i))
# Alignment Features
# If text boxes are very close in a row
if_row_connected = defaultdict(int)
num_row_connected = defaultdict(lambda: 1)
# If text is merged using span code in adjacent rows, this feature tells the number of times the cluster went through span based clustering
if_connected_by_span = defaultdict(int)
num_connected_by_span = defaultdict(lambda: 1)
# If columns were merged using cluster alignment
if_connected_by_align = defaultdict(int)
num_connected_by_align = defaultdict(lambda: 1)
# If vertical columns were merged
if_vertical_columns_merged = defaultdict(int)
num_vertical_columns_merged = defaultdict(lambda: 1)
# Number of Line Segments, Curves and Figures
num_segments = defaultdict(int)
num_curves = defaultdict(int)
num_figures = defaultdict(int)
# Average Word Space
total_word_space = defaultdict(float)
avg_word_space = defaultdict(float)
avg_word_space_norm = defaultdict(float)
node_space = defaultdict(float)
avg_node_space = defaultdict(float)
avg_node_space_norm = defaultdict(float)
cid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
obj2cid = list(
range(len(boxes))
) # default object map to cluster with its own index
prev_clusters = obj2cid
# add the code for merging close text boxes in particular row
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
box1 = b1.bbox
box2 = b2.bbox
if obj2rid[i1] == obj2rid[i2]:
if (
(b1.bbox[0] < b2.bbox[0])
and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
) or (
(b2.bbox[0] < b1.bbox[0])
and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
):
min_i = min(i1, i2)
max_i = max(i1, i2)
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
# Features
if_row_connected[cid1] = 1
if_row_connected[cid2] = 0
num_row_connected[cid1] += num_row_connected[cid2]
num_row_connected[cid2] = 0
if prev_clusters == obj2cid:
break
prev_clusters = obj2cid
# vertical alignment code
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
if (
box2[1] < box1[3]
or (box2[1] - box1[1] < 1.5 * avg_font_pts)
or (box2[3] - box1[3] < 1.5 * avg_font_pts)
): # can probably do better if we find the average space between words
if (
abs(box1[0] - box2[0]) < 3
or abs(box1[2] - box2[2]) < 3
or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
): # or ((box1[0]<box2[0]) and (box1[2]>box2[0])) or ((box1[0]>box2[0]) and (box2[2]>box1[0]))): #added center alignemnt
min_i = min(i1, i2)
max_i = max(i1, i2)
if (min_i, max_i) not in not_merge:
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
# move all objects from cluster cid2 to cid1
# reassign cluster ids for all such objects as well
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
# Features
if_connected_by_span[cid1] = 1
if_connected_by_span[cid2] = 0
if (
if_row_connected[cid1] == 1
or if_row_connected[cid2] == 1
):
if_row_connected[cid1] = 1
num_row_connected[cid1] += num_row_connected[cid2]
num_row_connected[cid2] = 0
if_row_connected[cid2] = 0
num_connected_by_span[cid1] = (
num_connected_by_span[cid1]
+ num_connected_by_span[cid2]
)
num_connected_by_span[cid2] = 0
if prev_clusters == obj2cid:
break
prev_clusters = obj2cid
# blacklist nearly half-page wide clusters before horizontal merging
cid2obj2 = cid2obj[:]
obj2cid2 = obj2cid[:]
blacklist = set()
blacklist_obj = set()
for cid_iter in range(len(cid2obj2)):
cid = cid2obj2[cid_iter]
xmin = float("Inf")
xmax = float("-Inf")
for obj in cid:
xmin = min(xmin, boxes[obj].bbox[0])
xmax = max(xmax, boxes[obj].bbox[2])
if ((xmax - xmin) > width / 2.75 and (xmax - xmin) < width / 2) or (
(xmax - xmin) > 0.9 * width
):
blacklist.add(cid_iter)
for obj in cid:
blacklist_obj.add(obj)
for obj_iter in rid2obj[obj2rid[obj]]:
if (
boxes[obj_iter].bbox[0] >= xmin
and boxes[obj_iter].bbox[2] <= xmax
):
blacklist_obj.add(obj_iter)
# create a cluster span
cid2span = {}
for cid in range(len(cid2obj)):
cid2span[cid] = {}
cid2span[cid]["min_x"] = float("Inf")
cid2span[cid]["min_y"] = float("Inf")
cid2span[cid]["max_x"] = float("-Inf")
cid2span[cid]["max_y"] = float("-Inf")
for obj in cid2obj[cid]:
cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
cid2cid = {}
cid_pair_compared = set()
cid2cid2 = [cid for cid in range(len(cid2obj))]
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if i1 == i2:
continue
if i1 in blacklist_obj or i2 in blacklist_obj:
continue
cid1 = obj2cid[i1]
cid2 = obj2cid[i2]
if (min(cid1, cid2), max(cid1, cid2)) in cid_pair_compared:
continue
if cid1 == cid2:
continue
if obj2rid[i1] == obj2rid[i2]:
continue
if cid1 not in cid2cid:
cid2cid[cid1] = set()
if cid2 not in cid2cid:
cid2cid[cid2] = set()
if cid2span[cid1]["min_y"] < cid2span[cid2]["min_y"]:
box1 = [
cid2span[cid1]["min_x"],
cid2span[cid1]["min_y"],
cid2span[cid1]["max_x"],
cid2span[cid1]["max_y"],
]
box2 = [
cid2span[cid2]["min_x"],
cid2span[cid2]["min_y"],
cid2span[cid2]["max_x"],
cid2span[cid2]["max_y"],
]
else:
box1 = [
cid2span[cid2]["min_x"],
cid2span[cid2]["min_y"],
cid2span[cid2]["max_x"],
cid2span[cid2]["max_y"],
]
box2 = [
cid2span[cid1]["min_x"],
cid2span[cid1]["min_y"],
cid2span[cid1]["max_x"],
cid2span[cid1]["max_y"],
]
if ((box1[1] < box2[1]) and (box1[3] > box2[1])) or (
(box1[1] > box2[1]) and (box1[1] < box2[3])
):
continue
cid_pair_compared.add((min(cid1, cid2), max(cid1, cid2)))
query_rect = (
min(box1[0], box2[0]),
min(box1[1], box2[1]),
max(box1[2], box2[2]),
max(box1[3], box2[3]),
)
connected = True
for i3, b3 in enumerate(boxes):
if (i3 == i1) or (i3 == i2):
continue
if obj2cid[i1] == obj2cid[i3] or obj2cid[i2] == obj2cid[i3]:
continue
box3 = b3.bbox
if intersect(query_rect, box3):
connected = False
break
if (
(
(
round(box1[0]) == round(box2[0])
or round(box1[2]) == round(box2[2])
)
and connected
)
or (
round((box1[0] + box1[2]) / 2) == round((box2[0] + box2[2]) / 2)
and connected
)
): # or (abs((box1[0]+box1[2])/2-(box2[0]+box2[2])/2)<0.1*char_width and connected)):# or ((box1[0]<box2[0]) and (box1[2]>box2[0])) or ((box1[0]>box2[0]) and (box2[2]>box1[0]))): #added center alignemnt
cid2cid[min(cid1, cid2)].add(max(cid1, cid2))
min_cid = min(cid1, cid2)
max_cid = max(cid1, cid2)
for cid_iter in cid2cid2:
if cid2cid2[cid_iter] == cid2cid2[max_cid]:
cid2cid2[cid_iter] = cid2cid2[min_cid]
# post-process cid2cid
cid2obj2 = cid2obj[:]
obj2cid2 = obj2cid[:]
for cid in range(len(cid2cid2)):
cid_merge = cid2cid2[cid]
if cid != cid_merge:
for obj_iter in cid2obj2[cid]:
cid2obj2[cid_merge].add(obj_iter)
obj2cid2[obj_iter] = cid_merge
cid2obj2[cid] = set()
# Features
if_connected_by_align[cid_merge] = 1
if_connected_by_align[cid] = 0
if if_row_connected[cid_merge] == 1 or if_row_connected[cid] == 1:
if_row_connected[cid_merge] = 1
num_row_connected[cid_merge] += num_row_connected[cid]
num_row_connected[cid] = 0
if_row_connected[cid2] = 0
if if_connected_by_span[cid_merge] == 1 or if_connected_by_span[cid] == 1:
if_connected_by_span[cid_merge] = 1
num_connected_by_span[cid_merge] += num_connected_by_span[cid]
num_connected_by_span[cid] = 0
if_connected_by_span[cid] = 0
num_connected_by_align[cid_merge] += num_connected_by_align[cid]
num_connected_by_align[cid] = 0
# code to merge columns for table
prev_clusters = obj2cid2
while True:
for obj1, b1 in enumerate(boxes):
cid1 = obj2cid2[obj1]
rid1 = obj2rid[obj1]
if cid1 in blacklist:
continue
if obj1 in blacklist_obj:
continue
for obj2, b2 in enumerate(boxes):
if obj1 == obj2:
continue
if obj2cid2[obj2] == cid1:
rid2 = obj2rid[obj2]
if rid1 == rid2:
continue
for obj3 in rid2obj[rid2]:
cid3 = obj2cid2[obj3]
if obj3 in blacklist_obj:
continue
if cid1 != cid3:
for obj4 in cid2obj2[cid3]:
if obj4 == obj3:
continue
if obj2rid[obj4] == rid1:
min_cid = min(cid1, cid3)
max_cid = max(cid1, cid3)
for obj_iter in cid2obj2[max_cid]:
cid2obj2[min_cid].add(obj_iter)
obj2cid2[obj_iter] = min_cid
cid2obj2[max_cid] = set()
# Features
if_vertical_columns_merged[min_cid] = 1
if_vertical_columns_merged[max_cid] = 0
num_vertical_columns_merged[min_cid] += (
num_vertical_columns_merged[max_cid]
)
num_vertical_columns_merged[max_cid] = 0
if (
if_row_connected[min_cid] == 1
or if_row_connected[max_cid] == 1
):
if_row_connected[min_cid] = 1
num_row_connected[min_cid] += num_row_connected[
max_cid
]
num_row_connected[max_cid] = 0
if_row_connected[max_cid] = 0
if (
if_connected_by_span[min_cid] == 1
or if_connected_by_span[max_cid] == 1
):
if_connected_by_span[min_cid] = 1
num_connected_by_span[min_cid] += (
num_connected_by_span[max_cid]
)
num_connected_by_span[max_cid] = 0
if_connected_by_span[max_cid] = 0
if (
if_connected_by_align[min_cid] == 1
or if_connected_by_align[max_cid] == 1
):
if_connected_by_align[min_cid] = 1
num_connected_by_align[min_cid] += (
num_connected_by_align[max_cid]
)
num_connected_by_align[max_cid] = 0
if_connected_by_align[max_cid] = 0
break
if prev_clusters == obj2cid2:
break
prev_clusters = obj2cid2
clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj2)]
nodes = [Node(elems) for elems in clusters]
node_indices = [i for i, x in enumerate(cid2obj2) if x]
merge_indices = [i for i in range(len(node_indices))]
page_stat = Node(boxes)
nodes, merge_indices = merge_nodes(nodes, plane, page_stat, merge_indices)
# Features
for idx in range(len(merge_indices)):
if merge_indices[idx] != idx:
cid1 = node_indices[merge_indices[idx]]
cid2 = node_indices[idx]
if if_row_connected[cid1] == 1 or if_row_connected[cid2] == 1:
if_row_connected[cid1] = 1
num_row_connected[cid1] += num_row_connected[cid2]
num_row_connected[cid2] = 0
if_row_connected[cid2] = 0
if if_connected_by_span[cid1] == 1 or if_connected_by_span[cid2] == 1:
if_connected_by_span[cid1] = 1
num_connected_by_span[cid1] += num_connected_by_span[cid2]
num_connected_by_span[cid2] = 0
if_connected_by_span[cid2] = 0
if if_connected_by_align[cid1] == 1 or if_connected_by_align[cid2] == 1:
if_connected_by_align[cid1] = 1
num_connected_by_align[cid1] += num_connected_by_align[cid2]
num_connected_by_align[cid2] = 0
if_connected_by_align[cid2] = 0
if (
if_vertical_columns_merged[cid1] == 1
or if_vertical_columns_merged[cid2] == 1
):
if_vertical_columns_merged[cid1] = 1
num_vertical_columns_merged[cid1] += num_vertical_columns_merged[cid2]
num_vertical_columns_merged[cid2] = 0
if_vertical_columns_merged[cid2] = 0
# Get Word Spacing Features
rid2space = defaultdict(float)
rid2space_norm = defaultdict(float)
row_indices = [i for i, x in enumerate(rid2obj) if x]
for rid in row_indices:
obj_list = list(rid2obj[rid])
if len(obj_list) == 1:
rid2space[rid] = 0
continue
obj_boxes = [boxes[obj].bbox[0] for obj in obj_list]
sorted_obj_idx = [
i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
]
for obj_idx in range(len(sorted_obj_idx) - 1):
rid2space[rid] += (
boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
- boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
)
rid2space_norm[rid] = rid2space[rid] / (len(obj_list) - 1)
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
if merge_indices[idx] == idx:
obj_list = []
for idx_iter in range(len(merge_indices)):
if merge_indices[idx_iter] == idx:
obj_list += list(cid2obj2[node_indices[idx_iter]])
obj_list = list(set(obj_list))
rid_list = list(set([obj2rid[obj] for obj in obj_list]))
for rid in rid_list:
total_word_space[node_idx] += rid2space[rid]
avg_word_space_norm[node_idx] += rid2space_norm[rid]
obj_boxes = [
boxes[obj].bbox[0] for obj in rid2obj if obj in cid2obj2[node_idx]
]
sorted_obj_idx = [
i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
]
for obj_idx in range(len(sorted_obj_idx) - 1):
node_space[node_idx] += (
boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
- boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
)
avg_node_space_norm[node_idx] += node_space[node_idx] / (
len(obj_boxes) - 1
)
avg_word_space[node_idx] = total_word_space[node_idx] / len(rid_list)
avg_word_space_norm[node_idx] /= len(rid_list)
avg_node_space[node_idx] = node_space[node_idx] / len(rid_list)
avg_node_space_norm[node_idx] /= len(rid_list)
new_nodes = []
new_node_indices = []
for idx in range(len(merge_indices)):
if merge_indices[idx] == idx:
new_nodes.append(nodes[idx])
new_node_indices.append(node_indices[idx])
nodes = new_nodes
node_indices = new_node_indices
# Features
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
node_bbox = (node.x0, node.y0, node.x1, node.y1)
for i1, b1 in enumerate(boxes_segments):
if intersect(node_bbox, b1.bbox):
num_segments[node_idx] += 1
for i1, b1 in enumerate(boxes_figures):
if intersect(node_bbox, b1.bbox):
num_figures[node_idx] += 1
for i1, b1 in enumerate(boxes_curves):
if intersect(node_bbox, b1.bbox):
num_curves[node_idx] += 1
tables = []
table_indices = []
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
isTable = True
if node.is_table():
for elem in node.elems:
if "table" in elem.get_text().lower():
continue
if (node.width - elem.bbox[2] + elem.bbox[0]) < 2 * char_width:
isTable = False
if isTable:
tables.append(node)
table_indices.append(node_idx)
if combine == True:
node_features = [0] * 17
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
node_features = [
sum(x)
for x in zip(
node_features,
[
if_row_connected[node_idx],
num_row_connected[node_idx],
if_connected_by_span[node_idx],
num_connected_by_span[node_idx],
if_connected_by_align[node_idx],
num_connected_by_align[node_idx],
if_vertical_columns_merged[node_idx],
num_vertical_columns_merged[node_idx],
num_segments[node_idx],
num_curves[node_idx],
num_figures[node_idx],
total_word_space[node_idx],
avg_word_space[node_idx],
avg_word_space_norm[node_idx],
node_space[node_idx],
avg_node_space[node_idx],
avg_node_space_norm[node_idx],
],
)
]
return [], node_features
else:
table_features = []
for idx, table in enumerate(tables):
table_idx = table_indices[idx]
table_features += [
[
if_row_connected[table_idx],
num_row_connected[table_idx],
if_connected_by_span[table_idx],
num_connected_by_span[table_idx],
if_connected_by_align[table_idx],
num_connected_by_align[table_idx],
if_vertical_columns_merged[table_idx],
num_vertical_columns_merged[table_idx],
num_segments[table_idx],
num_curves[table_idx],
num_figures[table_idx],
total_word_space[table_idx],
avg_word_space[table_idx],
avg_word_space_norm[table_idx],
node_space[table_idx],
avg_node_space[table_idx],
avg_node_space_norm[table_idx],
]
]
return tables, table_features
|
https://github.com/HazyResearch/pdftotree/issues/20
|
Digitized PDF detected, building tree structure
float division by zero
float division by zero
float division by zero
(89.33853599999992, 89.33853599999992, 50)
Traceback (most recent call last):
File "chh_test.py", line 10, in <module>
pdftotree.parse(filename, htmlpath, model_path=None, favor_figures=False, visualize=True)
File "/home/chhenning/repos/pdftotree/pdftotree/core.py", line 57, in parse
pdf_tree = extractor.get_tree_structure(model, favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/TreeExtract.py", line 195, in get_tree_structure
favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 615, in parse_tree_structure
figures_page = get_figures(mentions, elems.layout.bbox, page_num, boxes_figures, page_width, page_height)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 945, in get_figures
page_stat = Node(boxes)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/node.py", line 44, in __init__
self.set_bbox(bound_elems(elems))
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/vector_utils.py", line 101, in bound_elems
group_x0 = min(map(lambda l:l.x0,elems))
ValueError: min() arg is an empty sequence
|
ValueError
|
def extract_text_candidates(
    boxes,
    page_bbox,
    avg_font_pts,
    width,
    char_width,
    page_num,
    ref_page_seen,
    boxes_figures,
    page_width,
    page_height,
):
    """Cluster the text boxes of one page and classify them into typed nodes.

    Pipeline: (1) drop degenerate (zero-width/height) boxes, (2) group boxes
    into rows by vertical alignment, (3) merge horizontally-adjacent boxes
    within a row, (4) merge vertically-aligned clusters into paragraphs,
    (5) split clusters at whitespace gaps, (6) build ``Node`` objects and run
    ``merge_nodes``, (7) label each node (Header / Section Header /
    Paragraph / Table Caption / Figure Caption / List) with heuristics based
    on position, size, and leading text.

    Args:
        boxes: layout elements exposing ``bbox`` = (x0, y0, x1, y1),
            ``x0``/``y0``/``x1``/``y1`` attributes and ``get_text()``
            (presumably pdfminer-style mentions — TODO confirm at caller).
        page_bbox: page bounding box used to initialize the ``Plane``.
        avg_font_pts: average font size; drives all vertical thresholds.
        width: page content width (not used in this function body).
        char_width: average character width; horizontal merge threshold.
        page_num: page number; ``-1`` marks the title page (not handled yet).
        ref_page_seen: True if a "References" heading appeared on a prior
            page; then clusters are not split and nodes default to "List".
        boxes_figures: figure elements appended as Figure-typed nodes.
        page_width: page width copied into every output tuple.
        page_height: page height copied into every output tuple.

    Returns:
        tuple: ``(tree, new_ref_page_seen)`` where ``tree`` maps the keys
        "section_header", "header", "paragraph", "figure_caption",
        "table_caption", "list" to lists of
        ``(page_num, page_width, page_height, y0, x0, y1, x1)`` tuples, and
        ``new_ref_page_seen`` is True once references have been detected.
        Returns ``({}, False)`` for empty or oversized pages.
    """
    # Filter out boxes with zero width or height
    filtered_boxes = []
    for bbox in boxes:
        if bbox.x1 - bbox.x0 > 0 and bbox.y1 - bbox.y0 > 0:
            filtered_boxes.append(bbox)
    boxes = filtered_boxes
    # Too many "." in the Table of Content pages - ignore because it takes a lot of time
    if len(boxes) == 0 or len(boxes) > 3500:
        return {}, False
    plane = Plane(page_bbox)
    plane.extend(boxes)
    # Row level clustering - identify objects that have same horizontal alignment
    rid2obj = [set([i]) for i in range(len(boxes))]  # initialize clusters
    obj2rid = list(
        range(len(boxes))
    )  # default object map to cluster with its own index
    prev_clusters = obj2rid
    # NOTE(review): prev_clusters aliases obj2rid (no copy), so the
    # convergence check below is True after a single pass — confirm intended.
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                # Same row when tops, bottoms, or vertical centers align.
                if (
                    (abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
                    or (abs(box1[3] - box2[3]) < 0.11 * avg_font_pts)
                    or (
                        round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
                    )
                ):
                    min_i = min(i1, i2)
                    max_i = max(i1, i2)
                    rid1 = obj2rid[min_i]
                    rid2 = obj2rid[max_i]
                    for obj_iter in rid2obj[rid2]:
                        rid2obj[rid1].add(obj_iter)
                        obj2rid[obj_iter] = rid1
                    rid2obj[rid2] = set()
        if prev_clusters == obj2rid:
            break
        prev_clusters = obj2rid
    cid2obj = [set([i]) for i in range(len(boxes))]  # initialize clusters
    obj2cid = list(
        range(len(boxes))
    )  # default object map to cluster with its own index
    prev_clusters = obj2cid
    # add the code for merging close text boxes in particular row
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if obj2rid[i1] == obj2rid[i2]:
                    # Merge boxes on the same row separated by <= 2 chars.
                    if (
                        (b1.bbox[0] < b2.bbox[0])
                        and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
                    ) or (
                        (b2.bbox[0] < b1.bbox[0])
                        and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # vertical alignment code
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                # Order the pair so box1 is the upper box.
                if b1.bbox[1] < b2.bbox[1]:
                    box1 = b1.bbox
                    box2 = b2.bbox
                elif b2.bbox[1] < b1.bbox[1]:
                    box1 = b2.bbox
                    box2 = b1.bbox
                else:
                    # horizontally aligned
                    continue
                # Skip pairs whose heights (font sizes) differ too much.
                if abs((box2[3] - box2[1]) - (box1[3] - box1[1])) > 0.5 * avg_font_pts:
                    continue
                if (
                    box2[1] < box1[3]
                    or (box2[1] - box1[1] < 1.5 * avg_font_pts)
                    or (box2[3] - box1[3] < 1.5 * avg_font_pts)
                ):  # can probably do better if we find the average space between words
                    # Merge if left edges, right edges, or centers align.
                    if (
                        abs(box1[0] - box2[0]) < 3 * char_width
                        or abs(box1[2] - box2[2]) < 3 * char_width
                        or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        # move all objects from cluster cid2 to cid1
                        # reassign cluster ids for all such objects as well
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # get cluster spans
    cid2span = {}
    for cid in range(len(cid2obj)):
        cid2span[cid] = {}
        cid2span[cid]["min_x"] = float("Inf")
        cid2span[cid]["min_y"] = float("Inf")
        cid2span[cid]["max_x"] = float("-Inf")
        cid2span[cid]["max_y"] = float("-Inf")
        for obj in cid2obj[cid]:
            cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
            cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
            cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
            cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
    # Don't split up references
    references_bbox = []
    references_cid = set()
    for cid in range(len(cid2obj)):
        if len(cid2obj[cid]) == 1:
            if boxes[list(cid2obj[cid])[0]].get_text().lower() == "references":
                references_bbox = [
                    cid2span[cid]["min_x"],
                    cid2span[cid]["min_y"],
                    cid2span[cid]["max_x"],
                    cid2span[cid]["max_y"],
                ]
                # Every left-aligned cluster below the heading is references.
                for cid2 in range(len(cid2obj)):
                    if (
                        round(cid2span[cid]["min_x"]) == round(cid2span[cid2]["min_x"])
                        and cid2span[cid]["max_y"] < cid2span[cid2]["min_y"]
                    ):
                        references_cid.add(cid2)
                        cid2span[cid2]["min_x"] = cid2span[cid]["min_x"]
                        cid2span[cid2]["max_x"] = cid2span[cid]["max_x"]
    # get a list of empty cids
    empty_cids = [cid for cid in range(len(cid2obj)) if len(cid2obj[cid]) == 0]
    empty_idx = 0
    # Split paras based on whitespaces - seems to work
    if ref_page_seen == False:
        for cid in range(len(cid2obj)):
            if (
                len(cid2obj[cid]) > 0
                and cid not in empty_cids
                and cid not in references_cid
            ):
                cid_maxx = max([boxes[obj].bbox[2] for obj in cid2obj[cid]])
                cid_minx = min([boxes[obj].bbox[0] for obj in cid2obj[cid]])
                rid_list = set([obj2rid[obj] for obj in cid2obj[cid]])
                # Get min_y for each row
                rid_miny = {}
                for rid in rid_list:
                    # 10000 is a sentinel for "not in this cluster".
                    rid_miny[rid] = min(
                        [
                            boxes[obj].bbox[1] if obj in cid2obj[cid] else 10000
                            for obj in rid2obj[rid]
                        ]
                    )
                sorted_rid_miny = sorted(
                    list(rid_miny.items()), key=operator.itemgetter(1)
                )
                last_rid = 0
                for i in range(len(sorted_rid_miny) - 1):
                    row1 = sorted_rid_miny[i][0]
                    row2 = sorted_rid_miny[i + 1][0]
                    row1_maxx = max(
                        [
                            boxes[obj].bbox[2] if obj in cid2obj[cid] else -1
                            for obj in rid2obj[row1]
                        ]
                    )
                    row2_minx = min(
                        [
                            boxes[obj].bbox[0] if obj in cid2obj[cid] else 10000
                            for obj in rid2obj[row2]
                        ]
                    )
                    # Split at a short row followed by an indented row.
                    if row1_maxx <= cid_maxx and (row2_minx - char_width) > cid_minx:
                        # split cluster cid
                        new_cid_idx = empty_cids[empty_idx]
                        empty_idx += 1
                        for i_iter in range(last_rid, i + 1):
                            obj_list = [
                                obj
                                for obj in rid2obj[sorted_rid_miny[i_iter][0]]
                                if obj2cid[obj] == cid
                            ]
                            for obj in obj_list:
                                cid2obj[cid].remove(obj)
                                cid2obj[new_cid_idx].add(obj)
                                obj2cid[obj] = new_cid_idx
                        last_rid = i + 1
    clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
    nodes = [Node(elems) for elems in clusters]
    node_indices = [i for i, x in enumerate(cid2obj) if x]
    merge_indices = [i for i in range(len(node_indices))]
    page_stat = Node(boxes)
    nodes, merge_indices = merge_nodes(nodes, plane, page_stat, merge_indices)
    ##Merging Nodes
    # Keep only representative nodes (those merged into themselves).
    new_nodes = []
    new_node_indices = []
    for idx in range(len(merge_indices)):
        if merge_indices[idx] == idx:
            new_nodes.append(nodes[idx])
            new_node_indices.append(node_indices[idx])
    # Heuristics for Node type
    ref_nodes = []
    new_ref_page_seen = False
    if len(references_cid) > 0 or ref_page_seen or references_bbox != []:
        new_ref_page_seen = True
    ref_seen_in_node = False or ref_page_seen
    all_boxes = boxes + boxes_figures
    min_y_page = float("Inf")
    for idx, box in enumerate(all_boxes):
        min_y_page = min(min_y_page, box.bbox[1])
    if page_num == -1:
        # handle title, authors and abstract here
        log.error("TODO: no way to handle title authors abstract yet.")
    else:
        # eliminate header, footer, page number
        # sort other text and classify as header/paragraph
        new_nodes.sort(key=cmp_to_key(xy_reading_order))
        for idx, node in enumerate(new_nodes):
            if idx < len(new_nodes) - 1:
                # A short node at the top of the page may be a running header.
                if (
                    round(node.y0) == round(min_y_page)
                    or math.floor(node.y0) == math.floor(min_y_page)
                ) and node.y1 - node.y0 < 2 * avg_font_pts:  # can be header
                    idx_new = idx + 1
                    if idx_new < len(new_nodes) - 1:
                        while idx_new < len(new_nodes) - 1 and (
                            (round(node.y0) == round(new_nodes[idx_new].y0))
                            or (
                                math.floor(node.y0) == math.floor(new_nodes[idx_new].y0)
                            )
                        ):
                            idx_new += 1
                        if idx_new < len(new_nodes) - 1:
                            if new_nodes[idx_new].y0 - node.y0 > 1.5 * avg_font_pts:
                                node.type = "Header"
                                continue
            # get captions - first word is fig/figure/table
            first_elem = None
            for elem in node.elems:
                if round(elem.bbox[0]) == round(node.x0) and round(
                    elem.bbox[1]
                ) == round(node.y0):
                    first_elem = elem
                    break
            if first_elem != None:
                text = first_elem.get_text()
                if len(text) > 10:
                    text = first_elem.get_text()[0:10]
                if "Table" in text:
                    node.type = "Table Caption"
                    continue
                if "Fig" in text or "Figure" in text:
                    node.type = "Figure Caption"
                    continue
                if first_elem.get_text().lower() == "references":
                    node.type = "Section Header"
                    ref_seen_in_node = True
                    continue
            if ref_seen_in_node:
                node.type = "List"
                continue
            # Nodes below and spanning the "References" heading are list items.
            if references_bbox != [] or ref_seen_in_node:
                if (
                    node.y0 > references_bbox[3]
                    and node.x0 <= references_bbox[0]
                    and node.x1 > references_bbox[2]
                ):
                    node.type = "List"
                    continue
            if node.y1 - node.y0 <= 2.0 * avg_font_pts:  # one lines - section
                node.type = "Section Header"
            else:  # multiple lines - para
                node.type = "Paragraph"
    # handle references
    # Merge horizontally-overlapping "List" nodes into single reference blocks.
    newer_nodes = []
    ref_indices = [False for idx in range(len(new_nodes))]
    for idx1, node1 in enumerate(new_nodes):
        if ref_indices[idx1] == True:
            continue
        if node1.type != "List":
            newer_nodes.append(node1)
            continue
        x0, y0, x1, y1 = node1.x0, node1.y0, node1.x1, node1.y1
        newer_node = node1
        ref_indices[idx1] = True
        for idx2, node2 in enumerate(new_nodes):
            if idx1 != idx2:
                if node2.type == "List" and ref_indices[idx2] == False:
                    if (node2.x0 <= x0 and node2.x1 >= x0) or (
                        x0 <= node2.x0 and x1 >= node2.x0
                    ):
                        newer_node.merge(node2)
                        ref_indices[idx2] = True
        newer_nodes.append(newer_node)
    # handle figures
    for fig_box in boxes_figures:
        node_fig = Node(fig_box)
        node_fig.type = "Figure"
        newer_nodes.append(node_fig)
    tree = {}
    tree["section_header"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Section Header"
    ]
    tree["header"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Header"
    ]
    tree["paragraph"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Paragraph"
    ]
    # tree["figure"] = [(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1) for node in newer_nodes if node.type=="Figure"]
    tree["figure_caption"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Figure Caption"
    ]
    tree["table_caption"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Table Caption"
    ]
    tree["list"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "List"
    ]
    return tree, new_ref_page_seen
|
def extract_text_candidates(
    boxes,
    page_bbox,
    avg_font_pts,
    width,
    char_width,
    page_num,
    ref_page_seen,
    boxes_figures,
    page_width,
    page_height,
):
    """Cluster the text boxes of one page and classify them into typed nodes.

    Pipeline: (1) drop degenerate (zero-width/height) boxes, (2) group boxes
    into rows by vertical alignment, (3) merge horizontally-adjacent boxes
    within a row, (4) merge vertically-aligned clusters into paragraphs,
    (5) split clusters at whitespace gaps, (6) build ``Node`` objects and run
    ``merge_nodes``, (7) label each node (Header / Section Header /
    Paragraph / Table Caption / Figure Caption / List) with heuristics based
    on position, size, and leading text.

    Args:
        boxes: layout elements exposing ``bbox`` = (x0, y0, x1, y1),
            ``x0``/``y0``/``x1``/``y1`` attributes and ``get_text()``
            (presumably pdfminer-style mentions — TODO confirm at caller).
        page_bbox: page bounding box used to initialize the ``Plane``.
        avg_font_pts: average font size; drives all vertical thresholds.
        width: page content width (not used in this function body).
        char_width: average character width; horizontal merge threshold.
        page_num: page number; ``-1`` marks the title page (not handled yet).
        ref_page_seen: True if a "References" heading appeared on a prior
            page; then clusters are not split and nodes default to "List".
        boxes_figures: figure elements appended as Figure-typed nodes.
        page_width: page width copied into every output tuple.
        page_height: page height copied into every output tuple.

    Returns:
        tuple: ``(tree, new_ref_page_seen)`` where ``tree`` maps the keys
        "section_header", "header", "paragraph", "figure_caption",
        "table_caption", "list" to lists of
        ``(page_num, page_width, page_height, y0, x0, y1, x1)`` tuples, and
        ``new_ref_page_seen`` is True once references have been detected.
        Returns ``({}, False)`` for empty or oversized pages.
    """
    # BUG FIX: drop boxes with zero width or height up front. Degenerate
    # boxes corrupt the row/cluster formation below and can leave empty
    # element sets for downstream Node construction, which raises
    # "ValueError: min() arg is an empty sequence" when bounding them.
    filtered_boxes = []
    for bbox in boxes:
        if bbox.x1 - bbox.x0 > 0 and bbox.y1 - bbox.y0 > 0:
            filtered_boxes.append(bbox)
    boxes = filtered_boxes
    # Too many "." in the Table of Content pages - ignore because it takes a lot of time
    if len(boxes) == 0 or len(boxes) > 3500:
        return {}, False
    plane = Plane(page_bbox)
    plane.extend(boxes)
    # Row level clustering - identify objects that have same horizontal alignment
    rid2obj = [set([i]) for i in range(len(boxes))]  # initialize clusters
    obj2rid = list(
        range(len(boxes))
    )  # default object map to cluster with its own index
    prev_clusters = obj2rid
    # NOTE(review): prev_clusters aliases obj2rid (no copy), so the
    # convergence check below is True after a single pass — confirm intended.
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                # Same row when tops, bottoms, or vertical centers align.
                if (
                    (abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
                    or (abs(box1[3] - box2[3]) < 0.11 * avg_font_pts)
                    or (
                        round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
                    )
                ):
                    min_i = min(i1, i2)
                    max_i = max(i1, i2)
                    rid1 = obj2rid[min_i]
                    rid2 = obj2rid[max_i]
                    for obj_iter in rid2obj[rid2]:
                        rid2obj[rid1].add(obj_iter)
                        obj2rid[obj_iter] = rid1
                    rid2obj[rid2] = set()
        if prev_clusters == obj2rid:
            break
        prev_clusters = obj2rid
    cid2obj = [set([i]) for i in range(len(boxes))]  # initialize clusters
    obj2cid = list(
        range(len(boxes))
    )  # default object map to cluster with its own index
    prev_clusters = obj2cid
    # add the code for merging close text boxes in particular row
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if obj2rid[i1] == obj2rid[i2]:
                    # Merge boxes on the same row separated by <= 2 chars.
                    if (
                        (b1.bbox[0] < b2.bbox[0])
                        and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
                    ) or (
                        (b2.bbox[0] < b1.bbox[0])
                        and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # vertical alignment code
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                # Order the pair so box1 is the upper box.
                if b1.bbox[1] < b2.bbox[1]:
                    box1 = b1.bbox
                    box2 = b2.bbox
                elif b2.bbox[1] < b1.bbox[1]:
                    box1 = b2.bbox
                    box2 = b1.bbox
                else:
                    # horizontally aligned
                    continue
                # Skip pairs whose heights (font sizes) differ too much.
                if abs((box2[3] - box2[1]) - (box1[3] - box1[1])) > 0.5 * avg_font_pts:
                    continue
                if (
                    box2[1] < box1[3]
                    or (box2[1] - box1[1] < 1.5 * avg_font_pts)
                    or (box2[3] - box1[3] < 1.5 * avg_font_pts)
                ):  # can probably do better if we find the average space between words
                    # Merge if left edges, right edges, or centers align.
                    if (
                        abs(box1[0] - box2[0]) < 3 * char_width
                        or abs(box1[2] - box2[2]) < 3 * char_width
                        or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        # move all objects from cluster cid2 to cid1
                        # reassign cluster ids for all such objects as well
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # get cluster spans
    cid2span = {}
    for cid in range(len(cid2obj)):
        cid2span[cid] = {}
        cid2span[cid]["min_x"] = float("Inf")
        cid2span[cid]["min_y"] = float("Inf")
        cid2span[cid]["max_x"] = float("-Inf")
        cid2span[cid]["max_y"] = float("-Inf")
        for obj in cid2obj[cid]:
            cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
            cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
            cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
            cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
    # Don't split up references
    references_bbox = []
    references_cid = set()
    for cid in range(len(cid2obj)):
        if len(cid2obj[cid]) == 1:
            if boxes[list(cid2obj[cid])[0]].get_text().lower() == "references":
                references_bbox = [
                    cid2span[cid]["min_x"],
                    cid2span[cid]["min_y"],
                    cid2span[cid]["max_x"],
                    cid2span[cid]["max_y"],
                ]
                # Every left-aligned cluster below the heading is references.
                for cid2 in range(len(cid2obj)):
                    if (
                        round(cid2span[cid]["min_x"]) == round(cid2span[cid2]["min_x"])
                        and cid2span[cid]["max_y"] < cid2span[cid2]["min_y"]
                    ):
                        references_cid.add(cid2)
                        cid2span[cid2]["min_x"] = cid2span[cid]["min_x"]
                        cid2span[cid2]["max_x"] = cid2span[cid]["max_x"]
    # get a list of empty cids
    empty_cids = [cid for cid in range(len(cid2obj)) if len(cid2obj[cid]) == 0]
    empty_idx = 0
    # Split paras based on whitespaces - seems to work
    if ref_page_seen == False:
        for cid in range(len(cid2obj)):
            if (
                len(cid2obj[cid]) > 0
                and cid not in empty_cids
                and cid not in references_cid
            ):
                cid_maxx = max([boxes[obj].bbox[2] for obj in cid2obj[cid]])
                cid_minx = min([boxes[obj].bbox[0] for obj in cid2obj[cid]])
                rid_list = set([obj2rid[obj] for obj in cid2obj[cid]])
                # Get min_y for each row
                rid_miny = {}
                for rid in rid_list:
                    # 10000 is a sentinel for "not in this cluster".
                    rid_miny[rid] = min(
                        [
                            boxes[obj].bbox[1] if obj in cid2obj[cid] else 10000
                            for obj in rid2obj[rid]
                        ]
                    )
                sorted_rid_miny = sorted(
                    list(rid_miny.items()), key=operator.itemgetter(1)
                )
                last_rid = 0
                for i in range(len(sorted_rid_miny) - 1):
                    row1 = sorted_rid_miny[i][0]
                    row2 = sorted_rid_miny[i + 1][0]
                    row1_maxx = max(
                        [
                            boxes[obj].bbox[2] if obj in cid2obj[cid] else -1
                            for obj in rid2obj[row1]
                        ]
                    )
                    row2_minx = min(
                        [
                            boxes[obj].bbox[0] if obj in cid2obj[cid] else 10000
                            for obj in rid2obj[row2]
                        ]
                    )
                    # Split at a short row followed by an indented row.
                    if row1_maxx <= cid_maxx and (row2_minx - char_width) > cid_minx:
                        # split cluster cid
                        new_cid_idx = empty_cids[empty_idx]
                        empty_idx += 1
                        for i_iter in range(last_rid, i + 1):
                            obj_list = [
                                obj
                                for obj in rid2obj[sorted_rid_miny[i_iter][0]]
                                if obj2cid[obj] == cid
                            ]
                            for obj in obj_list:
                                cid2obj[cid].remove(obj)
                                cid2obj[new_cid_idx].add(obj)
                                obj2cid[obj] = new_cid_idx
                        last_rid = i + 1
    clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
    nodes = [Node(elems) for elems in clusters]
    node_indices = [i for i, x in enumerate(cid2obj) if x]
    merge_indices = [i for i in range(len(node_indices))]
    page_stat = Node(boxes)
    nodes, merge_indices = merge_nodes(nodes, plane, page_stat, merge_indices)
    ##Merging Nodes
    # Keep only representative nodes (those merged into themselves).
    new_nodes = []
    new_node_indices = []
    for idx in range(len(merge_indices)):
        if merge_indices[idx] == idx:
            new_nodes.append(nodes[idx])
            new_node_indices.append(node_indices[idx])
    # Heuristics for Node type
    ref_nodes = []
    new_ref_page_seen = False
    if len(references_cid) > 0 or ref_page_seen or references_bbox != []:
        new_ref_page_seen = True
    ref_seen_in_node = False or ref_page_seen
    all_boxes = boxes + boxes_figures
    min_y_page = float("Inf")
    for idx, box in enumerate(all_boxes):
        min_y_page = min(min_y_page, box.bbox[1])
    if page_num == -1:
        # handle title, authors and abstract here
        log.error("TODO: no way to handle title authors abstract yet.")
    else:
        # eliminate header, footer, page number
        # sort other text and classify as header/paragraph
        new_nodes.sort(key=cmp_to_key(xy_reading_order))
        for idx, node in enumerate(new_nodes):
            if idx < len(new_nodes) - 1:
                # A short node at the top of the page may be a running header.
                if (
                    round(node.y0) == round(min_y_page)
                    or math.floor(node.y0) == math.floor(min_y_page)
                ) and node.y1 - node.y0 < 2 * avg_font_pts:  # can be header
                    idx_new = idx + 1
                    if idx_new < len(new_nodes) - 1:
                        while idx_new < len(new_nodes) - 1 and (
                            (round(node.y0) == round(new_nodes[idx_new].y0))
                            or (
                                math.floor(node.y0) == math.floor(new_nodes[idx_new].y0)
                            )
                        ):
                            idx_new += 1
                        if idx_new < len(new_nodes) - 1:
                            if new_nodes[idx_new].y0 - node.y0 > 1.5 * avg_font_pts:
                                node.type = "Header"
                                continue
            # get captions - first word is fig/figure/table
            first_elem = None
            for elem in node.elems:
                if round(elem.bbox[0]) == round(node.x0) and round(
                    elem.bbox[1]
                ) == round(node.y0):
                    first_elem = elem
                    break
            if first_elem != None:
                text = first_elem.get_text()
                if len(text) > 10:
                    text = first_elem.get_text()[0:10]
                if "Table" in text:
                    node.type = "Table Caption"
                    continue
                if "Fig" in text or "Figure" in text:
                    node.type = "Figure Caption"
                    continue
                if first_elem.get_text().lower() == "references":
                    node.type = "Section Header"
                    ref_seen_in_node = True
                    continue
            if ref_seen_in_node:
                node.type = "List"
                continue
            # Nodes below and spanning the "References" heading are list items.
            if references_bbox != [] or ref_seen_in_node:
                if (
                    node.y0 > references_bbox[3]
                    and node.x0 <= references_bbox[0]
                    and node.x1 > references_bbox[2]
                ):
                    node.type = "List"
                    continue
            if node.y1 - node.y0 <= 2.0 * avg_font_pts:  # one lines - section
                node.type = "Section Header"
            else:  # multiple lines - para
                node.type = "Paragraph"
    # handle references
    # Merge horizontally-overlapping "List" nodes into single reference blocks.
    newer_nodes = []
    ref_indices = [False for idx in range(len(new_nodes))]
    for idx1, node1 in enumerate(new_nodes):
        if ref_indices[idx1] == True:
            continue
        if node1.type != "List":
            newer_nodes.append(node1)
            continue
        x0, y0, x1, y1 = node1.x0, node1.y0, node1.x1, node1.y1
        newer_node = node1
        ref_indices[idx1] = True
        for idx2, node2 in enumerate(new_nodes):
            if idx1 != idx2:
                if node2.type == "List" and ref_indices[idx2] == False:
                    if (node2.x0 <= x0 and node2.x1 >= x0) or (
                        x0 <= node2.x0 and x1 >= node2.x0
                    ):
                        newer_node.merge(node2)
                        ref_indices[idx2] = True
        newer_nodes.append(newer_node)
    # handle figures
    for fig_box in boxes_figures:
        node_fig = Node(fig_box)
        node_fig.type = "Figure"
        newer_nodes.append(node_fig)
    tree = {}
    tree["section_header"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Section Header"
    ]
    tree["header"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Header"
    ]
    tree["paragraph"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Paragraph"
    ]
    # tree["figure"] = [(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1) for node in newer_nodes if node.type=="Figure"]
    tree["figure_caption"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Figure Caption"
    ]
    tree["table_caption"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Table Caption"
    ]
    tree["list"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "List"
    ]
    return tree, new_ref_page_seen
|
https://github.com/HazyResearch/pdftotree/issues/20
|
Digitized PDF detected, building tree structure
float division by zero
float division by zero
float division by zero
(89.33853599999992, 89.33853599999992, 50)
Traceback (most recent call last):
File "chh_test.py", line 10, in <module>
pdftotree.parse(filename, htmlpath, model_path=None, favor_figures=False, visualize=True)
File "/home/chhenning/repos/pdftotree/pdftotree/core.py", line 57, in parse
pdf_tree = extractor.get_tree_structure(model, favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/TreeExtract.py", line 195, in get_tree_structure
favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 615, in parse_tree_structure
figures_page = get_figures(mentions, elems.layout.bbox, page_num, boxes_figures, page_width, page_height)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 945, in get_figures
page_stat = Node(boxes)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/node.py", line 44, in __init__
self.set_bbox(bound_elems(elems))
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/vector_utils.py", line 101, in bound_elems
group_x0 = min(map(lambda l:l.x0,elems))
ValueError: min() arg is an empty sequence
|
ValueError
|
def get_figures(boxes, page_bbox, page_num, boxes_figures, page_width, page_height):
    """Merge the figure boxes of one page into figure regions.

    boxes -- all content boxes on the page (used for page-level statistics)
    page_bbox -- bounding box of the page layout
    page_num -- 1-based page number recorded in each output tuple
    boxes_figures -- the boxes detected as figures, one Node is built per box
    page_width, page_height -- page dimensions recorded in each output tuple

    Returns a list of (page_num, page_width, page_height, y0, x0, y1, x1)
    tuples, one per merged figure node, or [] when there is nothing to do.
    """
    # Drop degenerate (zero-width or zero-height) boxes: they contribute no
    # geometry, and an all-degenerate page would otherwise make Node /
    # bound_elems fail on empty extents (min() of an empty sequence).
    boxes = [b for b in boxes if b.x1 - b.x0 > 0 and b.y1 - b.y0 > 0]
    if not boxes:
        log.warning("No boxes to get figures from on page {}.".format(page_num))
        return []
    plane = Plane(page_bbox)
    plane.extend(boxes)
    nodes_figures = [Node(fig_box) for fig_box in boxes_figures]
    merge_indices = list(range(len(nodes_figures)))
    # page_stat summarizes the whole page's boxes for the merge heuristics.
    page_stat = Node(boxes)
    nodes, merge_indices = merge_nodes(nodes_figures, plane, page_stat, merge_indices)
    # Keep only cluster representatives: nodes that merged into themselves.
    new_nodes = [nodes[idx] for idx, tgt in enumerate(merge_indices) if tgt == idx]
    figures = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in new_nodes
    ]
    return figures
|
def get_figures(boxes, page_bbox, page_num, boxes_figures, page_width, page_height):
    """Merge the figure boxes of one page into figure regions.

    boxes -- all content boxes on the page (used for page-level statistics)
    page_bbox -- bounding box of the page layout
    page_num -- 1-based page number recorded in each output tuple
    boxes_figures -- the boxes detected as figures, one Node is built per box
    page_width, page_height -- page dimensions recorded in each output tuple

    Returns a list of (page_num, page_width, page_height, y0, x0, y1, x1)
    tuples, one per merged figure node, or [] when there is nothing to do.
    """
    # Bug fix: filter out zero-width/zero-height boxes before building the
    # page-level Node. A page containing only degenerate boxes previously
    # reached bound_elems with an empty sequence and raised
    # "ValueError: min() arg is an empty sequence".
    boxes = [b for b in boxes if b.x1 - b.x0 > 0 and b.y1 - b.y0 > 0]
    if len(boxes) == 0:
        log.warning("No boxes to get figures from on page {}.".format(page_num))
        return []
    plane = Plane(page_bbox)
    plane.extend(boxes)
    nodes_figures = []
    for fig_box in boxes_figures:
        node_fig = Node(fig_box)
        nodes_figures.append(node_fig)
    merge_indices = [i for i in range(len(nodes_figures))]
    # page_stat summarizes the whole page's boxes for the merge heuristics.
    page_stat = Node(boxes)
    nodes, merge_indices = merge_nodes(nodes_figures, plane, page_stat, merge_indices)
    ##Merging Nodes
    new_nodes = []
    for idx in range(len(merge_indices)):
        if merge_indices[idx] == idx:
            new_nodes.append(nodes[idx])
    figures = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in new_nodes
    ]
    return figures
|
https://github.com/HazyResearch/pdftotree/issues/20
|
Digitized PDF detected, building tree structure
float division by zero
float division by zero
float division by zero
(89.33853599999992, 89.33853599999992, 50)
Traceback (most recent call last):
File "chh_test.py", line 10, in <module>
pdftotree.parse(filename, htmlpath, model_path=None, favor_figures=False, visualize=True)
File "/home/chhenning/repos/pdftotree/pdftotree/core.py", line 57, in parse
pdf_tree = extractor.get_tree_structure(model, favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/TreeExtract.py", line 195, in get_tree_structure
favor_figures)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 615, in parse_tree_structure
figures_page = get_figures(mentions, elems.layout.bbox, page_num, boxes_figures, page_width, page_height)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/pdf_parsers.py", line 945, in get_figures
page_stat = Node(boxes)
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/node.py", line 44, in __init__
self.set_bbox(bound_elems(elems))
File "/home/chhenning/repos/pdftotree/pdftotree/pdf/vector_utils.py", line 101, in bound_elems
group_x0 = min(map(lambda l:l.x0,elems))
ValueError: min() arg is an empty sequence
|
ValueError
|
def extract_text_candidates(
    boxes,
    page_bbox,
    avg_font_pts,
    width,
    char_width,
    page_num,
    ref_page_seen,
    boxes_figures,
    page_width,
    page_height,
):
    """Cluster the text boxes of one page and classify the clusters.

    Boxes are first grouped into rows (shared horizontal alignment), rows
    are merged into paragraph-level clusters, clusters are split on
    whitespace gaps, and each resulting region is labelled as one of:
    Header, Section Header, Paragraph, Figure Caption, Table Caption,
    List (reference entries), or Figure.

    Returns a ``(tree, new_ref_page_seen)`` pair: ``tree`` maps a region
    type name to a list of ``(page_num, page_width, page_height, y0, x0,
    y1, x1)`` tuples, and ``new_ref_page_seen`` reports whether a
    references section has been seen on or before this page.
    """
    # Too many "." in the Table of Content pages - ignore because it takes a lot of time
    if len(boxes) == 0 or len(boxes) > 3500:
        return {}, False
    plane = Plane(page_bbox)
    plane.extend(boxes)
    # Row level clustering - identify objects that have same horizontal alignment
    rid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
    obj2rid = list(
        range(len(boxes))
    ) # default object map to cluster with its own index
    prev_clusters = obj2rid
    # NOTE(review): prev_clusters is bound to the same list object that the
    # loop mutates in place, so the equality check below is always true and
    # each of these clustering loops runs exactly one pass — confirm intended.
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if (
                    (abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
                    or (abs(box1[3] - box2[3]) < 0.11 * avg_font_pts)
                    or (
                        round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
                    )
                ):
                    min_i = min(i1, i2)
                    max_i = max(i1, i2)
                    rid1 = obj2rid[min_i]
                    rid2 = obj2rid[max_i]
                    for obj_iter in rid2obj[rid2]:
                        rid2obj[rid1].add(obj_iter)
                        obj2rid[obj_iter] = rid1
                    rid2obj[rid2] = set()
        if prev_clusters == obj2rid:
            break
        prev_clusters = obj2rid
    cid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
    obj2cid = list(
        range(len(boxes))
    ) # default object map to cluster with its own index
    prev_clusters = obj2cid
    # add the code for merging close text boxes in particular row
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if obj2rid[i1] == obj2rid[i2]:
                    if (
                        (b1.bbox[0] < b2.bbox[0])
                        and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
                    ) or (
                        (b2.bbox[0] < b1.bbox[0])
                        and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # vertical alignment code
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                if b1.bbox[1] < b2.bbox[1]:
                    box1 = b1.bbox
                    box2 = b2.bbox
                elif b2.bbox[1] < b1.bbox[1]:
                    box1 = b2.bbox
                    box2 = b1.bbox
                else:
                    # horizontally aligned
                    continue
                if abs((box2[3] - box2[1]) - (box1[3] - box1[1])) > 0.5 * avg_font_pts:
                    continue
                if (
                    box2[1] < box1[3]
                    or (box2[1] - box1[1] < 1.5 * avg_font_pts)
                    or (box2[3] - box1[3] < 1.5 * avg_font_pts)
                ): # can probably do better if we find the average space between words
                    if (
                        abs(box1[0] - box2[0]) < 3 * char_width
                        or abs(box1[2] - box2[2]) < 3 * char_width
                        or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        # move all objects from cluster cid2 to cid1
                        # reassign cluster ids for all such objects as well
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # get cluster spans
    cid2span = {}
    for cid in range(len(cid2obj)):
        cid2span[cid] = {}
        cid2span[cid]["min_x"] = float("Inf")
        cid2span[cid]["min_y"] = float("Inf")
        cid2span[cid]["max_x"] = float("-Inf")
        cid2span[cid]["max_y"] = float("-Inf")
        for obj in cid2obj[cid]:
            cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
            cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
            cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
            cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
    # Don't split up references
    references_bbox = []
    references_cid = set()
    for cid in range(len(cid2obj)):
        if len(cid2obj[cid]) == 1:
            if boxes[list(cid2obj[cid])[0]].get_text().lower() == "references":
                references_bbox = [
                    cid2span[cid]["min_x"],
                    cid2span[cid]["min_y"],
                    cid2span[cid]["max_x"],
                    cid2span[cid]["max_y"],
                ]
                # Every cluster left-aligned with and below the "references"
                # heading is treated as part of the reference list.
                for cid2 in range(len(cid2obj)):
                    if (
                        round(cid2span[cid]["min_x"]) == round(cid2span[cid2]["min_x"])
                        and cid2span[cid]["max_y"] < cid2span[cid2]["min_y"]
                    ):
                        references_cid.add(cid2)
                        cid2span[cid2]["min_x"] = cid2span[cid]["min_x"]
                        cid2span[cid2]["max_x"] = cid2span[cid]["max_x"]
    # get a list of empty cids
    empty_cids = [cid for cid in range(len(cid2obj)) if len(cid2obj[cid]) == 0]
    empty_idx = 0
    # Split paras based on whitespaces - seems to work
    if ref_page_seen == False:
        for cid in range(len(cid2obj)):
            if (
                len(cid2obj[cid]) > 0
                and cid not in empty_cids
                and cid not in references_cid
            ):
                cid_maxx = max([boxes[obj].bbox[2] for obj in cid2obj[cid]])
                cid_minx = min([boxes[obj].bbox[0] for obj in cid2obj[cid]])
                rid_list = set([obj2rid[obj] for obj in cid2obj[cid]])
                # Get min_y for each row
                rid_miny = {}
                for rid in rid_list:
                    rid_miny[rid] = min(
                        [
                            boxes[obj].bbox[1] if obj in cid2obj[cid] else 10000
                            for obj in rid2obj[rid]
                        ]
                    )
                sorted_rid_miny = sorted(
                    list(rid_miny.items()), key=operator.itemgetter(1)
                )
                last_rid = 0
                for i in range(len(sorted_rid_miny) - 1):
                    row1 = sorted_rid_miny[i][0]
                    row2 = sorted_rid_miny[i + 1][0]
                    row1_maxx = max(
                        [
                            boxes[obj].bbox[2] if obj in cid2obj[cid] else -1
                            for obj in rid2obj[row1]
                        ]
                    )
                    row2_minx = min(
                        [
                            boxes[obj].bbox[0] if obj in cid2obj[cid] else 10000
                            for obj in rid2obj[row2]
                        ]
                    )
                    if row1_maxx <= cid_maxx and (row2_minx - char_width) > cid_minx:
                        # split cluster cid
                        new_cid_idx = empty_cids[empty_idx]
                        empty_idx += 1
                        for i_iter in range(last_rid, i + 1):
                            obj_list = [
                                obj
                                for obj in rid2obj[sorted_rid_miny[i_iter][0]]
                                if obj2cid[obj] == cid
                            ]
                            for obj in obj_list:
                                cid2obj[cid].remove(obj)
                                cid2obj[new_cid_idx].add(obj)
                                obj2cid[obj] = new_cid_idx
                        last_rid = i + 1
    # Materialize the non-empty clusters as Nodes and merge overlapping ones.
    clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
    nodes = [Node(elems) for elems in clusters]
    node_indices = [i for i, x in enumerate(cid2obj) if x]
    merge_indices = [i for i in range(len(node_indices))]
    page_stat = Node(boxes)
    nodes, merge_indices = merge_nodes(nodes, plane, page_stat, merge_indices)
    ##Merging Nodes
    new_nodes = []
    new_node_indices = []
    for idx in range(len(merge_indices)):
        if merge_indices[idx] == idx:
            new_nodes.append(nodes[idx])
            new_node_indices.append(node_indices[idx])
    # Heuristics for Node type
    ref_nodes = []
    new_ref_page_seen = False
    if len(references_cid) > 0 or ref_page_seen or references_bbox != []:
        new_ref_page_seen = True
    ref_seen_in_node = False or ref_page_seen
    all_boxes = boxes + boxes_figures
    min_y_page = float("Inf")
    for idx, box in enumerate(all_boxes):
        min_y_page = min(min_y_page, box.bbox[1])
    if page_num == -1:
        # handle title, authors and abstract here
        log.error("TODO: no way to handle title authors abstract yet.")
    else:
        # eliminate header, footer, page number
        # sort other text and classify as header/paragraph
        new_nodes.sort(key=cmp_to_key(xy_reading_order))
        for idx, node in enumerate(new_nodes):
            if idx < len(new_nodes) - 1:
                if (
                    round(node.y0) == round(min_y_page)
                    or math.floor(node.y0) == math.floor(min_y_page)
                ) and node.y1 - node.y0 < 2 * avg_font_pts: # can be header
                    idx_new = idx + 1
                    if idx_new < len(new_nodes) - 1:
                        # Skip past other nodes on the same top line; the
                        # bounds check guards the whole or-condition so
                        # new_nodes[idx_new] can never index out of range.
                        while idx_new < len(new_nodes) - 1 and (
                            (round(node.y0) == round(new_nodes[idx_new].y0))
                            or (
                                math.floor(node.y0) == math.floor(new_nodes[idx_new].y0)
                            )
                        ):
                            idx_new += 1
                        if idx_new < len(new_nodes) - 1:
                            if new_nodes[idx_new].y0 - node.y0 > 1.5 * avg_font_pts:
                                node.type = "Header"
                                continue
            # get captions - first word is fig/figure/table
            first_elem = None
            for elem in node.elems:
                if round(elem.bbox[0]) == round(node.x0) and round(
                    elem.bbox[1]
                ) == round(node.y0):
                    first_elem = elem
                    break
            if first_elem != None:
                text = first_elem.get_text()
                if len(text) > 10:
                    text = first_elem.get_text()[0:10]
                if "Table" in text:
                    node.type = "Table Caption"
                    continue
                if "Fig" in text or "Figure" in text:
                    node.type = "Figure Caption"
                    continue
                if first_elem.get_text().lower() == "references":
                    node.type = "Section Header"
                    ref_seen_in_node = True
                    continue
            if ref_seen_in_node:
                node.type = "List"
                continue
            if references_bbox != [] or ref_seen_in_node:
                if (
                    node.y0 > references_bbox[3]
                    and node.x0 <= references_bbox[0]
                    and node.x1 > references_bbox[2]
                ):
                    node.type = "List"
                    continue
            if node.y1 - node.y0 <= 2.0 * avg_font_pts: # one lines - section
                node.type = "Section Header"
            else: # multiple lines - para
                node.type = "Paragraph"
    # handle references
    newer_nodes = []
    ref_indices = [False for idx in range(len(new_nodes))]
    for idx1, node1 in enumerate(new_nodes):
        if ref_indices[idx1] == True:
            continue
        if node1.type != "List":
            newer_nodes.append(node1)
            continue
        x0, y0, x1, y1 = node1.x0, node1.y0, node1.x1, node1.y1
        newer_node = node1
        ref_indices[idx1] = True
        # Merge horizontally overlapping List nodes into one reference block.
        for idx2, node2 in enumerate(new_nodes):
            if idx1 != idx2:
                if node2.type == "List" and ref_indices[idx2] == False:
                    if (node2.x0 <= x0 and node2.x1 >= x0) or (
                        x0 <= node2.x0 and x1 >= node2.x0
                    ):
                        newer_node.merge(node2)
                        ref_indices[idx2] = True
        newer_nodes.append(newer_node)
    # handle figures
    for fig_box in boxes_figures:
        node_fig = Node(fig_box)
        node_fig.type = "Figure"
        newer_nodes.append(node_fig)
    tree = {}
    tree["section_header"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Section Header"
    ]
    tree["header"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Header"
    ]
    tree["paragraph"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Paragraph"
    ]
    # tree["figure"] = [(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1) for node in newer_nodes if node.type=="Figure"]
    tree["figure_caption"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Figure Caption"
    ]
    tree["table_caption"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Table Caption"
    ]
    tree["list"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "List"
    ]
    return tree, new_ref_page_seen
|
def extract_text_candidates(
    boxes,
    page_bbox,
    avg_font_pts,
    width,
    char_width,
    page_num,
    ref_page_seen,
    boxes_figures,
    page_width,
    page_height,
):
    """Cluster the text boxes of one page and classify the clusters.

    Boxes are first grouped into rows (shared horizontal alignment), rows
    are merged into paragraph-level clusters, clusters are split on
    whitespace gaps, and each resulting region is labelled as one of:
    Header, Section Header, Paragraph, Figure Caption, Table Caption,
    List (reference entries), or Figure.

    Returns a ``(tree, new_ref_page_seen)`` pair: ``tree`` maps a region
    type name to a list of ``(page_num, page_width, page_height, y0, x0,
    y1, x1)`` tuples, and ``new_ref_page_seen`` reports whether a
    references section has been seen on or before this page.
    """
    # Too many "." in the Table of Content pages - ignore because it takes a lot of time
    if len(boxes) == 0 or len(boxes) > 3500:
        return {}, False
    plane = Plane(page_bbox)
    plane.extend(boxes)
    # Row level clustering - identify objects that have same horizontal alignment
    rid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
    obj2rid = list(
        range(len(boxes))
    ) # default object map to cluster with its own index
    prev_clusters = obj2rid
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if (
                    (abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
                    or (abs(box1[3] - box2[3]) < 0.11 * avg_font_pts)
                    or (
                        round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
                    )
                ):
                    min_i = min(i1, i2)
                    max_i = max(i1, i2)
                    rid1 = obj2rid[min_i]
                    rid2 = obj2rid[max_i]
                    for obj_iter in rid2obj[rid2]:
                        rid2obj[rid1].add(obj_iter)
                        obj2rid[obj_iter] = rid1
                    rid2obj[rid2] = set()
        if prev_clusters == obj2rid:
            break
        prev_clusters = obj2rid
    cid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
    obj2cid = list(
        range(len(boxes))
    ) # default object map to cluster with its own index
    prev_clusters = obj2cid
    # add the code for merging close text boxes in particular row
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                box1 = b1.bbox
                box2 = b2.bbox
                if obj2rid[i1] == obj2rid[i2]:
                    if (
                        (b1.bbox[0] < b2.bbox[0])
                        and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
                    ) or (
                        (b2.bbox[0] < b1.bbox[0])
                        and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # vertical alignment code
    while True:
        for i1, b1 in enumerate(boxes):
            for i2, b2 in enumerate(boxes):
                if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
                    continue
                if b1.bbox[1] < b2.bbox[1]:
                    box1 = b1.bbox
                    box2 = b2.bbox
                elif b2.bbox[1] < b1.bbox[1]:
                    box1 = b2.bbox
                    box2 = b1.bbox
                else:
                    # horizontally aligned
                    continue
                if abs((box2[3] - box2[1]) - (box1[3] - box1[1])) > 0.5 * avg_font_pts:
                    continue
                if (
                    box2[1] < box1[3]
                    or (box2[1] - box1[1] < 1.5 * avg_font_pts)
                    or (box2[3] - box1[3] < 1.5 * avg_font_pts)
                ): # can probably do better if we find the average space between words
                    if (
                        abs(box1[0] - box2[0]) < 3 * char_width
                        or abs(box1[2] - box2[2]) < 3 * char_width
                        or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
                    ):
                        min_i = min(i1, i2)
                        max_i = max(i1, i2)
                        cid1 = obj2cid[min_i]
                        cid2 = obj2cid[max_i]
                        # move all objects from cluster cid2 to cid1
                        # reassign cluster ids for all such objects as well
                        for obj_iter in cid2obj[cid2]:
                            cid2obj[cid1].add(obj_iter)
                            obj2cid[obj_iter] = cid1
                        cid2obj[cid2] = set()
        if prev_clusters == obj2cid:
            break
        prev_clusters = obj2cid
    # get cluster spans
    cid2span = {}
    for cid in range(len(cid2obj)):
        cid2span[cid] = {}
        cid2span[cid]["min_x"] = float("Inf")
        cid2span[cid]["min_y"] = float("Inf")
        cid2span[cid]["max_x"] = float("-Inf")
        cid2span[cid]["max_y"] = float("-Inf")
        for obj in cid2obj[cid]:
            cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
            cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
            cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
            cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
    # Don't split up references
    references_bbox = []
    references_cid = set()
    for cid in range(len(cid2obj)):
        if len(cid2obj[cid]) == 1:
            if boxes[list(cid2obj[cid])[0]].get_text().lower() == "references":
                references_bbox = [
                    cid2span[cid]["min_x"],
                    cid2span[cid]["min_y"],
                    cid2span[cid]["max_x"],
                    cid2span[cid]["max_y"],
                ]
                for cid2 in range(len(cid2obj)):
                    if (
                        round(cid2span[cid]["min_x"]) == round(cid2span[cid2]["min_x"])
                        and cid2span[cid]["max_y"] < cid2span[cid2]["min_y"]
                    ):
                        references_cid.add(cid2)
                        cid2span[cid2]["min_x"] = cid2span[cid]["min_x"]
                        cid2span[cid2]["max_x"] = cid2span[cid]["max_x"]
    # get a list of empty cids
    empty_cids = [cid for cid in range(len(cid2obj)) if len(cid2obj[cid]) == 0]
    empty_idx = 0
    # Split paras based on whitespaces - seems to work
    if ref_page_seen == False:
        for cid in range(len(cid2obj)):
            if (
                len(cid2obj[cid]) > 0
                and cid not in empty_cids
                and cid not in references_cid
            ):
                cid_maxx = max([boxes[obj].bbox[2] for obj in cid2obj[cid]])
                cid_minx = min([boxes[obj].bbox[0] for obj in cid2obj[cid]])
                rid_list = set([obj2rid[obj] for obj in cid2obj[cid]])
                # Get min_y for each row
                rid_miny = {}
                for rid in rid_list:
                    rid_miny[rid] = min(
                        [
                            boxes[obj].bbox[1] if obj in cid2obj[cid] else 10000
                            for obj in rid2obj[rid]
                        ]
                    )
                sorted_rid_miny = sorted(
                    list(rid_miny.items()), key=operator.itemgetter(1)
                )
                last_rid = 0
                for i in range(len(sorted_rid_miny) - 1):
                    row1 = sorted_rid_miny[i][0]
                    row2 = sorted_rid_miny[i + 1][0]
                    row1_maxx = max(
                        [
                            boxes[obj].bbox[2] if obj in cid2obj[cid] else -1
                            for obj in rid2obj[row1]
                        ]
                    )
                    row2_minx = min(
                        [
                            boxes[obj].bbox[0] if obj in cid2obj[cid] else 10000
                            for obj in rid2obj[row2]
                        ]
                    )
                    if row1_maxx <= cid_maxx and (row2_minx - char_width) > cid_minx:
                        # split cluster cid
                        new_cid_idx = empty_cids[empty_idx]
                        empty_idx += 1
                        for i_iter in range(last_rid, i + 1):
                            obj_list = [
                                obj
                                for obj in rid2obj[sorted_rid_miny[i_iter][0]]
                                if obj2cid[obj] == cid
                            ]
                            for obj in obj_list:
                                cid2obj[cid].remove(obj)
                                cid2obj[new_cid_idx].add(obj)
                                obj2cid[obj] = new_cid_idx
                        last_rid = i + 1
    clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
    nodes = [Node(elems) for elems in clusters]
    node_indices = [i for i, x in enumerate(cid2obj) if x]
    merge_indices = [i for i in range(len(node_indices))]
    page_stat = Node(boxes)
    nodes, merge_indices = merge_nodes(nodes, plane, page_stat, merge_indices)
    ##Merging Nodes
    new_nodes = []
    new_node_indices = []
    for idx in range(len(merge_indices)):
        if merge_indices[idx] == idx:
            new_nodes.append(nodes[idx])
            new_node_indices.append(node_indices[idx])
    # Heuristics for Node type
    ref_nodes = []
    new_ref_page_seen = False
    if len(references_cid) > 0 or ref_page_seen or references_bbox != []:
        new_ref_page_seen = True
    ref_seen_in_node = False or ref_page_seen
    all_boxes = boxes + boxes_figures
    min_y_page = float("Inf")
    for idx, box in enumerate(all_boxes):
        min_y_page = min(min_y_page, box.bbox[1])
    if page_num == -1:
        # handle title, authors and abstract here
        log.error("TODO: no way to handle title authors abstract yet.")
    else:
        # eliminate header, footer, page number
        # sort other text and classify as header/paragraph
        new_nodes.sort(key=cmp_to_key(xy_reading_order))
        for idx, node in enumerate(new_nodes):
            if idx < len(new_nodes) - 1:
                if (
                    round(node.y0) == round(min_y_page)
                    or math.floor(node.y0) == math.floor(min_y_page)
                ) and node.y1 - node.y0 < 2 * avg_font_pts: # can be header
                    idx_new = idx + 1
                    if idx_new < len(new_nodes) - 1:
                        # Bug fix: the bounds check must guard the WHOLE
                        # condition. The previous "A and B or C" form parsed
                        # as "(A and B) or C", so C could evaluate
                        # new_nodes[idx_new] with idx_new past the end of the
                        # list and raise IndexError.
                        while idx_new < len(new_nodes) - 1 and (
                            (round(node.y0) == round(new_nodes[idx_new].y0))
                            or (
                                math.floor(node.y0) == math.floor(new_nodes[idx_new].y0)
                            )
                        ):
                            idx_new += 1
                        if idx_new < len(new_nodes) - 1:
                            if new_nodes[idx_new].y0 - node.y0 > 1.5 * avg_font_pts:
                                node.type = "Header"
                                continue
            # get captions - first word is fig/figure/table
            first_elem = None
            for elem in node.elems:
                if round(elem.bbox[0]) == round(node.x0) and round(
                    elem.bbox[1]
                ) == round(node.y0):
                    first_elem = elem
                    break
            if first_elem != None:
                text = first_elem.get_text()
                if len(text) > 10:
                    text = first_elem.get_text()[0:10]
                if "Table" in text:
                    node.type = "Table Caption"
                    continue
                if "Fig" in text or "Figure" in text:
                    node.type = "Figure Caption"
                    continue
                if first_elem.get_text().lower() == "references":
                    node.type = "Section Header"
                    ref_seen_in_node = True
                    continue
            if ref_seen_in_node:
                node.type = "List"
                continue
            if references_bbox != [] or ref_seen_in_node:
                if (
                    node.y0 > references_bbox[3]
                    and node.x0 <= references_bbox[0]
                    and node.x1 > references_bbox[2]
                ):
                    node.type = "List"
                    continue
            if node.y1 - node.y0 <= 2.0 * avg_font_pts: # one lines - section
                node.type = "Section Header"
            else: # multiple lines - para
                node.type = "Paragraph"
    # handle references
    newer_nodes = []
    ref_indices = [False for idx in range(len(new_nodes))]
    for idx1, node1 in enumerate(new_nodes):
        if ref_indices[idx1] == True:
            continue
        if node1.type != "List":
            newer_nodes.append(node1)
            continue
        x0, y0, x1, y1 = node1.x0, node1.y0, node1.x1, node1.y1
        newer_node = node1
        ref_indices[idx1] = True
        # Merge horizontally overlapping List nodes into one reference block.
        for idx2, node2 in enumerate(new_nodes):
            if idx1 != idx2:
                if node2.type == "List" and ref_indices[idx2] == False:
                    if (node2.x0 <= x0 and node2.x1 >= x0) or (
                        x0 <= node2.x0 and x1 >= node2.x0
                    ):
                        newer_node.merge(node2)
                        ref_indices[idx2] = True
        newer_nodes.append(newer_node)
    # handle figures
    for fig_box in boxes_figures:
        node_fig = Node(fig_box)
        node_fig.type = "Figure"
        newer_nodes.append(node_fig)
    tree = {}
    tree["section_header"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Section Header"
    ]
    tree["header"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Header"
    ]
    tree["paragraph"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Paragraph"
    ]
    # tree["figure"] = [(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1) for node in newer_nodes if node.type=="Figure"]
    tree["figure_caption"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Figure Caption"
    ]
    tree["table_caption"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "Table Caption"
    ]
    tree["list"] = [
        (page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
        for node in newer_nodes
        if node.type == "List"
    ]
    return tree, new_ref_page_seen
|
https://github.com/HazyResearch/pdftotree/issues/22
|
$ pdftotree test.pdf
Traceback (most recent call last):
File "/home/lwhsiao/repos/pdftotree/.venv/bin/pdftotree", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/home/lwhsiao/repos/pdftotree/bin/pdftotree", line 82, in <module>
args.favor_figures, args.visualize)
File "/home/lwhsiao/repos/pdftotree/pdftotree/core.py", line 58, in parse
pdf_tree = extractor.get_tree_structure(model, favor_figures)
File "/home/lwhsiao/repos/pdftotree/pdftotree/TreeExtract.py", line 188, in get_tree_structure
favor_figures)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 734, in parse_tree_structure
ref_page_seen, boxes_figures, page_width, page_height)
File "/home/lwhsiao/repos/pdftotree/pdftotree/utils/pdf/pdf_parsers.py", line 1001, in extract_text_candidates
new_nodes[idx_new].y0))):
IndexError: list index out of range
|
IndexError
|
def audit(data_list, tags, labels, debug=False, **kwargs):
    """
    Runs secedit on the local machine and audits the return data
    with the CIS yaml processed by __virtual__

    data_list -- list of (profile, data) tuples of audit yaml to merge
    tags -- glob pattern matched (via fnmatch) against each audit tag
    labels -- labels passed to apply_labels to filter the merged data
    debug -- when True, log the merged data and tag map at debug level

    Returns a dict with "Success", "Failure" and "Controlled" lists of
    tag_data dicts; entries in "Failure" carry a "failure_reason" message.
    """
    __data__ = {}
    __secdata__ = _secedit_export()
    __sidaccounts__ = _get_account_sid()
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __data__ = apply_labels(__data__, labels)
    __tags__ = _get_tags(__data__)
    if debug:
        log.debug("secedit audit __data__:")
        log.debug(__data__)
        log.debug("secedit audit __tags__:")
        log.debug(__tags__)
    ret = {"Success": [], "Failure": [], "Controlled": []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                # Checks explicitly marked as controlled are not evaluated.
                if "control" in tag_data:
                    ret["Controlled"].append(tag_data)
                    continue
                name = tag_data["name"]
                audit_type = tag_data["type"]
                output = tag_data["match_output"].lower()
                # Blacklisted audit (do not include)
                if audit_type == "blacklist":
                    # "no one" means the key must be absent from secedit
                    # output entirely; otherwise the configured value must
                    # not match the blacklisted one.
                    if "no one" in output:
                        if name not in __secdata__:
                            ret["Success"].append(tag_data)
                        else:
                            tag_data["failure_reason"] = (
                                "No value/account should be configured "
                                "under '{0}', but atleast one value/account"
                                " is configured on the system.".format(name)
                            )
                            ret["Failure"].append(tag_data)
                    else:
                        if name in __secdata__:
                            secret = _translate_value_type(
                                __secdata__[name],
                                tag_data["value_type"],
                                tag_data["match_output"],
                            )
                            if secret:
                                tag_data["failure_reason"] = (
                                    "Value of the key '{0}' is configured to a "
                                    "blacklisted value '{1}({2})'".format(
                                        name,
                                        tag_data["match_output"],
                                        tag_data["value_type"],
                                    )
                                )
                                ret["Failure"].append(tag_data)
                            else:
                                ret["Success"].append(tag_data)
                # Whitelisted audit (must include)
                if audit_type == "whitelist":
                    if name in __secdata__:
                        sec_value = __secdata__[name]
                        tag_data["found_value"] = sec_value
                        # Registry-backed keys (MACHINE\...) need their
                        # expected value translated before comparison.
                        if "MACHINE\\" in name:
                            match_output = _reg_value_translator(
                                tag_data["match_output"]
                            )
                        else:
                            match_output = tag_data["match_output"]
                        # Comma-separated values containing "\" are
                        # compared element-wise as lists.
                        if "," in sec_value and "\\" in sec_value:
                            sec_value = sec_value.split(",")
                            match_output = match_output.split(",")
                        if "account" in tag_data["value_type"]:
                            secret = _translate_value_type(
                                sec_value,
                                tag_data["value_type"],
                                match_output,
                                __sidaccounts__,
                            )
                        else:
                            secret = _translate_value_type(
                                sec_value, tag_data["value_type"], match_output
                            )
                        if secret:
                            ret["Success"].append(tag_data)
                        else:
                            tag_data["failure_reason"] = (
                                "Value of the key '{0}' is configured to"
                                " invalid value '{1}'. It should be set to"
                                " '{2}({3})'".format(
                                    name,
                                    sec_value,
                                    match_output,
                                    tag_data["value_type"],
                                )
                            )
                            ret["Failure"].append(tag_data)
                    else:
                        # Key absent from secedit output: a whitelist check
                        # on it fails outright.
                        log.error("name {} was not in __secdata__".format(name))
                        tag_data["failure_reason"] = (
                            "Value of the key '{0}' could not be found in"
                            " the registry. It should be set to '{1}({2})'".format(
                                name, tag_data["match_output"], tag_data["value_type"]
                            )
                        )
                        ret["Failure"].append(tag_data)
    return ret
|
def audit(data_list, tags, labels, debug=False, **kwargs):
    """
    Runs secedit on the local machine and audits the return data
    with the CIS yaml processed by __virtual__

    data_list -- list of (profile, data) tuples of audit yaml to merge
    tags -- glob pattern matched (via fnmatch) against each audit tag
    labels -- labels passed to apply_labels to filter the merged data
    debug -- when True, log the merged data and tag map at debug level

    Returns a dict with "Success", "Failure" and "Controlled" lists of
    tag_data dicts; entries in "Failure" carry a "failure_reason" message.
    """
    __data__ = {}
    __secdata__ = _secedit_export()
    __sidaccounts__ = _get_account_sid()
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __data__ = apply_labels(__data__, labels)
    __tags__ = _get_tags(__data__)
    if debug:
        log.debug("secedit audit __data__:")
        log.debug(__data__)
        log.debug("secedit audit __tags__:")
        log.debug(__tags__)
    ret = {"Success": [], "Failure": [], "Controlled": []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                # Checks explicitly marked as controlled are not evaluated.
                if "control" in tag_data:
                    ret["Controlled"].append(tag_data)
                    continue
                name = tag_data["name"]
                audit_type = tag_data["type"]
                output = tag_data["match_output"].lower()
                # Blacklisted audit (do not include)
                if audit_type == "blacklist":
                    # "no one" means the key must be absent from secedit
                    # output entirely; otherwise the configured value must
                    # not match the blacklisted one.
                    if "no one" in output:
                        if name not in __secdata__:
                            ret["Success"].append(tag_data)
                        else:
                            tag_data["failure_reason"] = (
                                "No value/account should be configured "
                                "under '{0}', but atleast one value/account"
                                " is configured on the system.".format(name)
                            )
                            ret["Failure"].append(tag_data)
                    else:
                        if name in __secdata__:
                            secret = _translate_value_type(
                                __secdata__[name],
                                tag_data["value_type"],
                                tag_data["match_output"],
                            )
                            if secret:
                                tag_data["failure_reason"] = (
                                    "Value of the key '{0}' is configured to a "
                                    "blacklisted value '{1}({2})'".format(
                                        name,
                                        tag_data["match_output"],
                                        tag_data["value_type"],
                                    )
                                )
                                ret["Failure"].append(tag_data)
                            else:
                                ret["Success"].append(tag_data)
                # Whitelisted audit (must include)
                if audit_type == "whitelist":
                    if name in __secdata__:
                        sec_value = __secdata__[name]
                        tag_data["found_value"] = sec_value
                        # Registry-backed keys (MACHINE\...) need their
                        # expected value translated before comparison.
                        if "MACHINE\\" in name:
                            match_output = _reg_value_translator(
                                tag_data["match_output"]
                            )
                        else:
                            match_output = tag_data["match_output"]
                        # Comma-separated values containing "\" are
                        # compared element-wise as lists.
                        if "," in sec_value and "\\" in sec_value:
                            sec_value = sec_value.split(",")
                            match_output = match_output.split(",")
                        if "account" in tag_data["value_type"]:
                            secret = _translate_value_type(
                                sec_value,
                                tag_data["value_type"],
                                match_output,
                                __sidaccounts__,
                            )
                        else:
                            secret = _translate_value_type(
                                sec_value, tag_data["value_type"], match_output
                            )
                        if secret:
                            ret["Success"].append(tag_data)
                        else:
                            tag_data["failure_reason"] = (
                                "Value of the key '{0}' is configured to"
                                " invalid value '{1}'. It should be set to"
                                " '{2}({3})'".format(
                                    name,
                                    sec_value,
                                    match_output,
                                    tag_data["value_type"],
                                )
                            )
                            ret["Failure"].append(tag_data)
                    else:
                        log.error("name {} was not in __secdata__".format(name))
                        # Bug fix: use tag_data["match_output"] here. The
                        # local match_output is only bound inside the
                        # "name in __secdata__" branch above, so referencing
                        # it in this branch raised UnboundLocalError.
                        tag_data["failure_reason"] = (
                            "Value of the key '{0}' could not be found in"
                            " the registry. It should be set to '{1}({2})'".format(
                                name, tag_data["match_output"], tag_data["value_type"]
                            )
                        )
                        ret["Failure"].append(tag_data)
    return ret
|
https://github.com/hubblestack/hubble/issues/493
|
[ERROR ] name MACHINE\System\CurrentControlSet\Control\Lsa\pku2u\AllowOnlineID was not in __secdata__
[ERROR ] Exception occurred in nova module:
[ERROR ] Traceback (most recent call last):
File "C:\PROGRA~2\Hubble\hubblestack\extmods\modules\hubble.py", line 291, in _run_audit
ret = func(data_list, tags, labels, **kwargs)
File "C:\PROGRA~2\Hubble\hubblestack\files\hubblestack_nova\win_secedit.py", line 133, in audit
match_output,
UnboundLocalError: local variable 'match_output' referenced before assignment
{'Compliance': '15%',
'Errors': [{'\\win_secedit.py': {'data': "UnboundLocalError: local variable 'match_output' referenced before assignment",
'error': 'exception occurred'}}],
'Failure': [{'CIS-9.3.8': 'Ensure "Windows Firewall- Public- Logging- Size limit (KB)" is set to "16,384 KB or greater"'},
{'CIS-9.2.10': 'Ensure "Windows Firewall- Private- Logging- Log successful connections" is set to "Yes"'},
{'CIS-9.1.10': 'Ensure "Windows Firewall- Domain- Logging- Log successful connections" is set to "Yes"'},
|
UnboundLocalError
|
def load_config():
    """
    Load the config from configfile and load into imported salt modules.

    Side effects (there is no meaningful return value):
      - Mutates ``salt.config.DEFAULT_MINION_OPTS`` with Hubble defaults
        (Windows vs. Linux paths for cache, pidfile, log files and the
        osquery database/log directories).
      - Rebinds the module-level global ``__opts__`` from the parsed minion
        config overlaid with the command-line arguments.
      - May exit the process (``--version``) or daemonize it (``--daemonize``),
        creating/checking a pidfile either way.
      - Installs SIGTERM/SIGINT handlers, sets up console and logfile
        logging, chmods the log file and config file to 0o600, refreshes
        grains, and optionally attaches a Splunk logging handler.
    """
    # Parse arguments
    parsed_args = parse_args()
    # Let's find out the path of this module
    if "SETUP_DIRNAME" in globals():
        # This is from the exec() call in Salt's setup.py
        this_file = os.path.join(SETUP_DIRNAME, "salt", "syspaths.py") # pylint: disable=E0602
    else:
        this_file = __file__
    install_dir = os.path.dirname(os.path.realpath(this_file))
    # Load unique data for Windows or Linux
    if salt.utils.platform.is_windows():
        if parsed_args.get("configfile") is None:
            parsed_args["configfile"] = (
                "C:\\Program Files (x86)\\Hubble\\etc\\hubble\\hubble.conf"
            )
        salt.config.DEFAULT_MINION_OPTS["cachedir"] = (
            "C:\\Program Files (x86)\\hubble\\var\\cache"
        )
        salt.config.DEFAULT_MINION_OPTS["pidfile"] = (
            "C:\\Program Files (x86)\\hubble\\var\\run\\hubble.pid"
        )
        salt.config.DEFAULT_MINION_OPTS["log_file"] = (
            "C:\\Program Files (x86)\\hubble\\var\\log\\hubble.log"
        )
        salt.config.DEFAULT_MINION_OPTS["osquery_dbpath"] = (
            "C:\\Program Files (x86)\\hubble\\var\\hubble_osquery_db"
        )
        salt.config.DEFAULT_MINION_OPTS["osquerylogpath"] = (
            "C:\\Program Files (x86)\\hubble\\var\\log\\hubble_osquery"
        )
        salt.config.DEFAULT_MINION_OPTS["osquerylog_backupdir"] = (
            "C:\\Program Files (x86)\\hubble\\var\\log\\hubble_osquery\\backuplogs"
        )
    else:
        if parsed_args.get("configfile") is None:
            parsed_args["configfile"] = "/etc/hubble/hubble"
        salt.config.DEFAULT_MINION_OPTS["cachedir"] = "/var/cache/hubble"
        salt.config.DEFAULT_MINION_OPTS["pidfile"] = "/var/run/hubble.pid"
        salt.config.DEFAULT_MINION_OPTS["log_file"] = "/var/log/hubble"
        salt.config.DEFAULT_MINION_OPTS["osquery_dbpath"] = "/var/cache/hubble/osquery"
        salt.config.DEFAULT_MINION_OPTS["osquerylogpath"] = "/var/log/hubble_osquery"
        salt.config.DEFAULT_MINION_OPTS["osquerylog_backupdir"] = (
            "/var/log/hubble_osquery/backuplogs"
        )
    # Platform-independent defaults applied before reading the config file.
    salt.config.DEFAULT_MINION_OPTS["file_roots"] = {"base": []}
    salt.config.DEFAULT_MINION_OPTS["log_level"] = "error"
    salt.config.DEFAULT_MINION_OPTS["file_client"] = "local"
    salt.config.DEFAULT_MINION_OPTS["fileserver_update_frequency"] = 43200  # 12 hours
    salt.config.DEFAULT_MINION_OPTS["grains_refresh_frequency"] = 3600  # 1 hour
    salt.config.DEFAULT_MINION_OPTS["scheduler_sleep_frequency"] = 0.5
    salt.config.DEFAULT_MINION_OPTS["default_include"] = "hubble.d/*.conf"
    salt.config.DEFAULT_MINION_OPTS["logfile_maxbytes"] = 100000000  # 100MB
    salt.config.DEFAULT_MINION_OPTS["logfile_backups"] = 1  # maximum rotated logs
    salt.config.DEFAULT_MINION_OPTS["delete_inaccessible_azure_containers"] = False
    salt.config.DEFAULT_MINION_OPTS["enable_globbing_in_nebula_masking"] = (
        False  # Globbing will not be supported in nebula masking
    )
    salt.config.DEFAULT_MINION_OPTS["osquery_logfile_maxbytes"] = 50000000  # 50MB
    salt.config.DEFAULT_MINION_OPTS["osquery_logfile_maxbytes_toparse"] = (
        100000000  # 100MB
    )
    salt.config.DEFAULT_MINION_OPTS["osquery_backuplogs_count"] = 2
    # Build __opts__: parsed config file overlaid with CLI arguments.
    global __opts__
    __opts__ = salt.config.minion_config(parsed_args.get("configfile"))
    __opts__.update(parsed_args)
    __opts__["conf_file"] = parsed_args.get("configfile")
    __opts__["install_dir"] = install_dir
    if __opts__["version"]:
        # --version: print and exit before doing any real setup.
        print(__version__)
        clean_up_process(None, None)
        sys.exit(0)
    scan_proc = __opts__.get("scan_proc", False)
    if __opts__["daemonize"]:
        # before becoming a daemon, check for other procs and possibly send
        # then a signal 15 (otherwise refuse to run)
        if not __opts__.get("ignore_running", False):
            check_pidfile(kill_other=True, scan_proc=scan_proc)
        salt.utils.daemonize()
        create_pidfile()
    elif not __opts__["function"] and not __opts__["version"]:
        # check the pidfile and possibly refuse to run
        # (assuming this isn't a single function call)
        if not __opts__.get("ignore_running", False):
            check_pidfile(kill_other=False, scan_proc=scan_proc)
    signal.signal(signal.SIGTERM, clean_up_process)
    signal.signal(signal.SIGINT, clean_up_process)
    # Optional sleep to wait for network
    time.sleep(int(__opts__.get("startup_sleep", 0)))
    # Convert -vvv to log level
    if __opts__["log_level"] is None:
        # Default to 'error'
        __opts__["log_level"] = "error"
        # Default to more verbose if we're daemonizing
        if __opts__["daemonize"]:
            __opts__["log_level"] = "info"
    # Handle the explicit -vvv settings
    if __opts__["verbose"] == 1:
        __opts__["log_level"] = "warning"
    elif __opts__["verbose"] == 2:
        __opts__["log_level"] = "info"
    elif __opts__["verbose"] >= 3:
        __opts__["log_level"] = "debug"
    # Setup module/grain/returner dirs
    # Each loader dir list is extended with Hubble's bundled extmods path.
    module_dirs = __opts__.get("module_dirs", [])
    module_dirs.append(os.path.join(os.path.dirname(__file__), "extmods", "modules"))
    __opts__["module_dirs"] = module_dirs
    grains_dirs = __opts__.get("grains_dirs", [])
    grains_dirs.append(os.path.join(os.path.dirname(__file__), "extmods", "grains"))
    __opts__["grains_dirs"] = grains_dirs
    returner_dirs = __opts__.get("returner_dirs", [])
    returner_dirs.append(
        os.path.join(os.path.dirname(__file__), "extmods", "returners")
    )
    __opts__["returner_dirs"] = returner_dirs
    fileserver_dirs = __opts__.get("fileserver_dirs", [])
    fileserver_dirs.append(
        os.path.join(os.path.dirname(__file__), "extmods", "fileserver")
    )
    __opts__["fileserver_dirs"] = fileserver_dirs
    utils_dirs = __opts__.get("utils_dirs", [])
    utils_dirs.append(os.path.join(os.path.dirname(__file__), "extmods", "utils"))
    __opts__["utils_dirs"] = utils_dirs
    fdg_dirs = __opts__.get("fdg_dirs", [])
    fdg_dirs.append(os.path.join(os.path.dirname(__file__), "extmods", "fdg"))
    __opts__["fdg_dirs"] = fdg_dirs
    __opts__["file_roots"]["base"].insert(
        0, os.path.join(os.path.dirname(__file__), "files")
    )
    if "roots" not in __opts__["fileserver_backend"]:
        __opts__["fileserver_backend"].append("roots")
    # Disable all of salt's boto modules, they give nothing but trouble to the loader
    disable_modules = __opts__.get("disable_modules", [])
    disable_modules.extend(
        [
            "boto3_elasticache",
            "boto3_route53",
            "boto_apigateway",
            "boto_asg",
            "boto_cfn",
            "boto_cloudtrail",
            "boto_cloudwatch_event",
            "boto_cloudwatch",
            "boto_cognitoidentity",
            "boto_datapipeline",
            "boto_dynamodb",
            "boto_ec2",
            "boto_efs",
            "boto_elasticache",
            "boto_elasticsearch_domain",
            "boto_elb",
            "boto_elbv2",
            "boto_iam",
            "boto_iot",
            "boto_kinesis",
            "boto_kms",
            "boto_lambda",
            "boto_rds",
            "boto_route53",
            "boto_s3_bucket",
            "boto_secgroup",
            "boto_sns",
            "boto_sqs",
            "boto_vpc",
        ]
    )
    __opts__["disable_modules"] = disable_modules
    # Console logging is probably the same, but can be different
    console_logging_opts = {
        "log_level": __opts__.get("console_log_level", __opts__["log_level"]),
        "log_format": __opts__.get("console_log_format"),
        "date_format": __opts__.get("console_log_date_format"),
    }
    # remove early console logging from the handlers
    if early_log_handler in logging.root.handlers:
        logging.root.handlers.remove(early_log_handler)
    # Setup logging
    salt.log.setup.setup_console_logger(**console_logging_opts)
    salt.log.setup.setup_logfile_logger(
        __opts__["log_file"],
        __opts__["log_level"],
        max_bytes=__opts__.get("logfile_maxbytes", 100000000),
        backup_count=__opts__.get("logfile_backups", 1),
    )
    # 384 is 0o600 permissions, written without octal for python 2/3 compat
    os.chmod(__opts__["log_file"], 384)
    os.chmod(parsed_args.get("configfile"), 384)
    refresh_grains(initial=True)
    # splunk logs below warning, above info by default
    logging.SPLUNK = int(__opts__.get("splunk_log_level", 25))
    logging.addLevelName(logging.SPLUNK, "SPLUNK")
    # Add a logger.splunk(...) convenience method at the custom SPLUNK level.
    def splunk(self, message, *args, **kwargs):
        if self.isEnabledFor(logging.SPLUNK):
            self._log(logging.SPLUNK, message, args, **kwargs)
    logging.Logger.splunk = splunk
    if __salt__["config.get"]("splunklogging", False):
        root_logger = logging.getLogger()
        handler = hubblestack.splunklogging.SplunkHandler()
        handler.setLevel(logging.SPLUNK)
        root_logger.addHandler(handler)
        # Minimal stand-in for logging.LogRecord, used to push an initial
        # grains report through the Splunk handler.
        class MockRecord(object):
            def __init__(self, message, levelname, asctime, name):
                self.message = message
                self.levelname = levelname
                self.asctime = asctime
                self.name = name
        handler.emit(
            MockRecord(__grains__, "INFO", time.asctime(), "hubblestack.grains_report")
        )
|
def load_config():
    """
    Load the config from configfile and load into imported salt modules.

    Side effects (there is no meaningful return value):
      - Mutates ``salt.config.DEFAULT_MINION_OPTS`` with Hubble defaults
        (Windows vs. Linux paths for cache, pidfile and log file).
      - Rebinds the module-level global ``__opts__`` from the parsed minion
        config overlaid with the command-line arguments.
      - May exit the process (``--version``) or daemonize it (``--daemonize``),
        creating/checking a pidfile either way.
      - Installs SIGTERM/SIGINT handlers, sets up console and logfile
        logging, chmods the log file and config file to 0o600, refreshes
        grains, and optionally attaches a Splunk logging handler.
    """
    # Parse arguments
    parsed_args = parse_args()
    # Let's find out the path of this module
    if "SETUP_DIRNAME" in globals():
        # This is from the exec() call in Salt's setup.py
        this_file = os.path.join(SETUP_DIRNAME, "salt", "syspaths.py") # pylint: disable=E0602
    else:
        this_file = __file__
    install_dir = os.path.dirname(os.path.realpath(this_file))
    # Load unique data for Windows or Linux
    if salt.utils.platform.is_windows():
        if parsed_args.get("configfile") is None:
            parsed_args["configfile"] = (
                "C:\\Program Files (x86)\\Hubble\\etc\\hubble\\hubble.conf"
            )
        salt.config.DEFAULT_MINION_OPTS["cachedir"] = (
            "C:\\Program Files (x86)\\hubble\\var\\cache"
        )
        salt.config.DEFAULT_MINION_OPTS["pidfile"] = (
            "C:\\Program Files (x86)\\hubble\\var\\run\\hubble.pid"
        )
        salt.config.DEFAULT_MINION_OPTS["log_file"] = (
            "C:\\Program Files (x86)\\hubble\\var\\log\\hubble.log"
        )
    else:
        if parsed_args.get("configfile") is None:
            parsed_args["configfile"] = "/etc/hubble/hubble"
        salt.config.DEFAULT_MINION_OPTS["cachedir"] = "/var/cache/hubble"
        salt.config.DEFAULT_MINION_OPTS["pidfile"] = "/var/run/hubble.pid"
        salt.config.DEFAULT_MINION_OPTS["log_file"] = "/var/log/hubble"
    # Platform-independent defaults applied before reading the config file.
    salt.config.DEFAULT_MINION_OPTS["file_roots"] = {"base": []}
    salt.config.DEFAULT_MINION_OPTS["log_level"] = "error"
    salt.config.DEFAULT_MINION_OPTS["file_client"] = "local"
    salt.config.DEFAULT_MINION_OPTS["fileserver_update_frequency"] = 43200  # 12 hours
    salt.config.DEFAULT_MINION_OPTS["grains_refresh_frequency"] = 3600  # 1 hour
    salt.config.DEFAULT_MINION_OPTS["scheduler_sleep_frequency"] = 0.5
    salt.config.DEFAULT_MINION_OPTS["default_include"] = "hubble.d/*.conf"
    salt.config.DEFAULT_MINION_OPTS["logfile_maxbytes"] = 100000000  # 100MB
    salt.config.DEFAULT_MINION_OPTS["logfile_backups"] = 1  # maximum rotated logs
    salt.config.DEFAULT_MINION_OPTS["delete_inaccessible_azure_containers"] = False
    # Build __opts__: parsed config file overlaid with CLI arguments.
    global __opts__
    __opts__ = salt.config.minion_config(parsed_args.get("configfile"))
    __opts__.update(parsed_args)
    __opts__["conf_file"] = parsed_args.get("configfile")
    __opts__["install_dir"] = install_dir
    if __opts__["version"]:
        # --version: print and exit before doing any real setup.
        print(__version__)
        clean_up_process(None, None)
        sys.exit(0)
    scan_proc = __opts__.get("scan_proc", False)
    if __opts__["daemonize"]:
        # before becoming a daemon, check for other procs and possibly send
        # then a signal 15 (otherwise refuse to run)
        if not __opts__.get("ignore_running", False):
            check_pidfile(kill_other=True, scan_proc=scan_proc)
        salt.utils.daemonize()
        create_pidfile()
    elif not __opts__["function"] and not __opts__["version"]:
        # check the pidfile and possibly refuse to run
        # (assuming this isn't a single function call)
        if not __opts__.get("ignore_running", False):
            check_pidfile(kill_other=False, scan_proc=scan_proc)
    signal.signal(signal.SIGTERM, clean_up_process)
    signal.signal(signal.SIGINT, clean_up_process)
    # Optional sleep to wait for network
    time.sleep(int(__opts__.get("startup_sleep", 0)))
    # Convert -vvv to log level
    if __opts__["log_level"] is None:
        # Default to 'error'
        __opts__["log_level"] = "error"
        # Default to more verbose if we're daemonizing
        if __opts__["daemonize"]:
            __opts__["log_level"] = "info"
    # Handle the explicit -vvv settings
    if __opts__["verbose"] == 1:
        __opts__["log_level"] = "warning"
    elif __opts__["verbose"] == 2:
        __opts__["log_level"] = "info"
    elif __opts__["verbose"] >= 3:
        __opts__["log_level"] = "debug"
    # Setup module/grain/returner dirs
    # Each loader dir list is extended with Hubble's bundled extmods path.
    module_dirs = __opts__.get("module_dirs", [])
    module_dirs.append(os.path.join(os.path.dirname(__file__), "extmods", "modules"))
    __opts__["module_dirs"] = module_dirs
    grains_dirs = __opts__.get("grains_dirs", [])
    grains_dirs.append(os.path.join(os.path.dirname(__file__), "extmods", "grains"))
    __opts__["grains_dirs"] = grains_dirs
    returner_dirs = __opts__.get("returner_dirs", [])
    returner_dirs.append(
        os.path.join(os.path.dirname(__file__), "extmods", "returners")
    )
    __opts__["returner_dirs"] = returner_dirs
    fileserver_dirs = __opts__.get("fileserver_dirs", [])
    fileserver_dirs.append(
        os.path.join(os.path.dirname(__file__), "extmods", "fileserver")
    )
    __opts__["fileserver_dirs"] = fileserver_dirs
    utils_dirs = __opts__.get("utils_dirs", [])
    utils_dirs.append(os.path.join(os.path.dirname(__file__), "extmods", "utils"))
    __opts__["utils_dirs"] = utils_dirs
    fdg_dirs = __opts__.get("fdg_dirs", [])
    fdg_dirs.append(os.path.join(os.path.dirname(__file__), "extmods", "fdg"))
    __opts__["fdg_dirs"] = fdg_dirs
    __opts__["file_roots"]["base"].insert(
        0, os.path.join(os.path.dirname(__file__), "files")
    )
    if "roots" not in __opts__["fileserver_backend"]:
        __opts__["fileserver_backend"].append("roots")
    # Disable all of salt's boto modules, they give nothing but trouble to the loader
    disable_modules = __opts__.get("disable_modules", [])
    disable_modules.extend(
        [
            "boto3_elasticache",
            "boto3_route53",
            "boto_apigateway",
            "boto_asg",
            "boto_cfn",
            "boto_cloudtrail",
            "boto_cloudwatch_event",
            "boto_cloudwatch",
            "boto_cognitoidentity",
            "boto_datapipeline",
            "boto_dynamodb",
            "boto_ec2",
            "boto_efs",
            "boto_elasticache",
            "boto_elasticsearch_domain",
            "boto_elb",
            "boto_elbv2",
            "boto_iam",
            "boto_iot",
            "boto_kinesis",
            "boto_kms",
            "boto_lambda",
            "boto_rds",
            "boto_route53",
            "boto_s3_bucket",
            "boto_secgroup",
            "boto_sns",
            "boto_sqs",
            "boto_vpc",
        ]
    )
    __opts__["disable_modules"] = disable_modules
    # Console logging is probably the same, but can be different
    console_logging_opts = {
        "log_level": __opts__.get("console_log_level", __opts__["log_level"]),
        "log_format": __opts__.get("console_log_format"),
        "date_format": __opts__.get("console_log_date_format"),
    }
    # remove early console logging from the handlers
    if early_log_handler in logging.root.handlers:
        logging.root.handlers.remove(early_log_handler)
    # Setup logging
    salt.log.setup.setup_console_logger(**console_logging_opts)
    salt.log.setup.setup_logfile_logger(
        __opts__["log_file"],
        __opts__["log_level"],
        max_bytes=__opts__.get("logfile_maxbytes", 100000000),
        backup_count=__opts__.get("logfile_backups", 1),
    )
    # 384 is 0o600 permissions, written without octal for python 2/3 compat
    os.chmod(__opts__["log_file"], 384)
    os.chmod(parsed_args.get("configfile"), 384)
    refresh_grains(initial=True)
    # splunk logs below warning, above info by default
    logging.SPLUNK = int(__opts__.get("splunk_log_level", 25))
    logging.addLevelName(logging.SPLUNK, "SPLUNK")
    # Add a logger.splunk(...) convenience method at the custom SPLUNK level.
    def splunk(self, message, *args, **kwargs):
        if self.isEnabledFor(logging.SPLUNK):
            self._log(logging.SPLUNK, message, args, **kwargs)
    logging.Logger.splunk = splunk
    if __salt__["config.get"]("splunklogging", False):
        root_logger = logging.getLogger()
        handler = hubblestack.splunklogging.SplunkHandler()
        handler.setLevel(logging.SPLUNK)
        root_logger.addHandler(handler)
        # Minimal stand-in for logging.LogRecord, used to push an initial
        # grains report through the Splunk handler.
        class MockRecord(object):
            def __init__(self, message, levelname, asctime, name):
                self.message = message
                self.levelname = levelname
                self.asctime = asctime
                self.name = name
        handler.emit(
            MockRecord(__grains__, "INFO", time.asctime(), "hubblestack.grains_report")
        )
|
https://github.com/hubblestack/hubble/issues/493
|
[ERROR ] name MACHINE\System\CurrentControlSet\Control\Lsa\pku2u\AllowOnlineID was not in __secdata__
[ERROR ] Exception occurred in nova module:
[ERROR ] Traceback (most recent call last):
File "C:\PROGRA~2\Hubble\hubblestack\extmods\modules\hubble.py", line 291, in _run_audit
ret = func(data_list, tags, labels, **kwargs)
File "C:\PROGRA~2\Hubble\hubblestack\files\hubblestack_nova\win_secedit.py", line 133, in audit
match_output,
UnboundLocalError: local variable 'match_output' referenced before assignment
{'Compliance': '15%',
'Errors': [{'\\win_secedit.py': {'data': "UnboundLocalError: local variable 'match_output' referenced before assignment",
'error': 'exception occurred'}}],
'Failure': [{'CIS-9.3.8': 'Ensure "Windows Firewall- Public- Logging- Size limit (KB)" is set to "16,384 KB or greater"'},
{'CIS-9.2.10': 'Ensure "Windows Firewall- Private- Logging- Log successful connections" is set to "Yes"'},
{'CIS-9.1.10': 'Ensure "Windows Firewall- Domain- Logging- Log successful connections" is set to "Yes"'},
|
UnboundLocalError
|
def _mask_object(object_to_be_masked, topfile):
    """
    Given an object with potential secrets (or other data that should not be
    returned), mask the contents of that object as configured in the mask
    configuration file. The mask configuration file used is defined by the
    top data in the ``topfile`` argument.

    If multiple mask.yaml files are matched in the topfile, the data within
    them will be recursively merged.

    If no matching mask_files are found in the top.mask file, no masking will
    happen.

    Note that this function has side effects: alterations to
    ``object_to_be_masked`` will be made in place. The function returns
    ``True`` (also after a swallowed exception); it returns ``None`` only
    when a referenced salt:// mask file cannot be cached.

    object_to_be_masked
        list of osquery results (daemon events carry an ``action`` key;
        interactive-shell results are dicts of ``{query_name: {"data": [...]}}``)
    topfile
        path to the top.mask file; defaults to
        ``salt://hubblestack_nebula_v2/top_v2.mask`` when ``None``

    Sample mask.yaml data (with inline documentation):

    .. code-block:: yaml

        # Pattern that will replace whatever is masked
        mask_with: '***masked*by*hubble***'

        # Target and mask strings based on regex patterns
        # Can limit search specific queries and columns

        # Some osquery results are formed as lists of dicts. We can mask
        # based on variable names within these dicts.
        blacklisted_objects:
            - query_names:
              - 'running_procs'
              - 'listening_procs'     # List of name(s) of the osquery to be masked.
                                      # Put '*' to match all queries. Note
                                      # that query_names doesn't support
                                      # full globbing. '*' is just given
                                      # special treatment.
              column: 'environment'   # Column name in the osquery to be masked. No regex or glob support
              attribute_to_check: 'variable_name'  # Optional attribute
                                      # In the inner dict, this is the key
                                      # to check for blacklisted_patterns
                                      # Will skipped if column specified is of type 'String'
              attributes_to_mask:     # Optional attribute, Values under these keys in the dict will be
              - 'value'               # masked, assuming one of the blacklisted_patterns
                                      # is found under attribute_to_check in the same dict
                                      # Will be skipped if column specified is of type 'String'
              blacklisted_patterns:   # Strings to look for under attribute_to_check. Conditional Globbing support.
              - 'ETCDCTL_READ_PASSWORD'
              - 'ETCDCTL_WRITE_PASSWORD'
              - '*PASSWORD*'          # Enable globbing by setting 'enable_globbing_in_nebula_masking' to True, default False

    blacklisted_patterns (for blacklisted_objects)

        For objects, the pattern applies to the variable name, and doesn't
        support regex. For example, you might have data formed like this::

            [{ value: 'SOME_PASSWORD', variable_name: 'ETCDCTL_READ_PASSWORD' }]

        The attribute_to_check would be ``variable_name`` and the pattern would
        be ``ETCDCTL_READ_PASSWORD``. The attribute_to_mask would be ``value``.
        All dicts with ``variable_name`` in the list of blacklisted_patterns
        would have the value under their ``value`` key masked.
    """
    try:
        mask = {}
        if topfile is None:
            # We will maintain backward compatibility by keeping two versions of top files and mask files for now
            # Once all hubble servers are updated, we can remove old version of top file and mask file
            # Similar to what we have for nebula and nebula_v2 for older versions and newer versions of profiles
            topfile = "salt://hubblestack_nebula_v2/top_v2.mask"
        mask_files = _get_top_data(topfile)
        mask_files = [
            "salt://hubblestack_nebula_v2/" + mask_file.replace(".", "/") + ".yaml"
            for mask_file in mask_files
        ]
        if not mask_files:
            mask_files = []
        for fh in mask_files:
            if "salt://" in fh:
                orig_fh = fh
                fh = __salt__["cp.cache_file"](fh)
                if fh is None:
                    log.error("Could not find file {0}.".format(orig_fh))
                    return None
            if os.path.isfile(fh):
                with open(fh, "r") as f:
                    f_data = yaml.safe_load(f)
                    if not isinstance(f_data, dict):
                        raise CommandExecutionError(
                            "File data is not formed as a dict {0}".format(f_data)
                        )
                    # Recursively merge every matched mask file into one dict.
                    mask = _dict_update(
                        mask, f_data, recursive_update=True, merge_lists=True
                    )
        log.debug("Masking data: {}".format(mask))
        # Backwards compatibility with mask_by
        mask_with = mask.get("mask_with", mask.get("mask_by", "******"))
        log.info(
            "Total number of results to check for masking: {0}".format(
                len(object_to_be_masked)
            )
        )
        globbing_enabled = __opts__.get("enable_globbing_in_nebula_masking")
        for blacklisted_object in mask.get("blacklisted_objects", []):
            query_names = blacklisted_object["query_names"]
            column = blacklisted_object[
                "column"
            ] # Can be converted to list as well in future if need be
            if "*" in query_names:
                # This means wildcard is specified and each event should be masked, if applicable
                for r in object_to_be_masked:
                    if "action" in r:
                        # This means data is generated by osquery daemon
                        _mask_event_data(
                            r,
                            None,
                            column,
                            blacklisted_object,
                            mask_with,
                            globbing_enabled,
                        )
                    else:
                        # This means data is generated by osquery interactive shell
                        for query_name, query_ret in r.iteritems():
                            for query_result in query_ret["data"]:
                                # NOTE(review): this breaks when the column holds a
                                # NON-empty string, which skips the string-masking
                                # branch below — condition looks inverted; confirm intent.
                                if column not in query_result or (
                                    isinstance(query_result[column], basestring)
                                    and query_result[column].strip() != ""
                                ):
                                    # No error here, since we didn't reference a specific query
                                    break
                                if isinstance(query_result[column], basestring):
                                    # If column is of 'string' type, then replace pattern in-place
                                    # No need for recursion here
                                    value = query_result[column]
                                    # NOTE(review): assumes each pattern contains two capture
                                    # groups (prefix/suffix); "()" appends an empty third group
                                    # so \3 always resolves — confirm mask profiles follow this.
                                    for pattern in blacklisted_object[
                                        "blacklisted_patterns"
                                    ]:
                                        value = re.sub(
                                            pattern + "()",
                                            r"\1" + mask_with + r"\3",
                                            value,
                                        )
                                    query_result[column] = value
                                else:
                                    _recursively_mask_objects(
                                        query_result[column],
                                        blacklisted_object,
                                        mask_with,
                                        globbing_enabled,
                                    )
            else:
                # Perform masking on results of specific queries specified in 'query_names'
                for query_name in query_names:
                    for r in object_to_be_masked:
                        if "action" in r:
                            # This means data is generated by osquery daemon
                            _mask_event_data(
                                r,
                                query_name,
                                column,
                                blacklisted_object,
                                mask_with,
                                globbing_enabled,
                            )
                        else:
                            # This means data is generated by osquery interactive shell
                            for query_result in r.get(query_name, {"data": []})["data"]:
                                # NOTE(review): same possibly-inverted non-empty-string
                                # break condition as in the wildcard branch above.
                                if column not in query_result or (
                                    isinstance(query_result[column], basestring)
                                    and query_result[column].strip() != ""
                                ):
                                    # if the column in not present in one data-object, it will
                                    # not be present in others as well. Break in that case.
                                    # This will happen only if mask.yaml is malformed
                                    log.error(
                                        "masking data references a missing column {0} in query {1}".format(
                                            column, query_name
                                        )
                                    )
                                    break
                                if isinstance(query_result[column], basestring):
                                    # If column is of 'string' type, then replace pattern in-place
                                    # No need for recursion here
                                    value = query_result[column]
                                    for pattern in blacklisted_object[
                                        "blacklisted_patterns"
                                    ]:
                                        value = re.sub(
                                            pattern + "()",
                                            r"\1" + mask_with + r"\3",
                                            value,
                                        )
                                    query_result[column] = value
                                else:
                                    _recursively_mask_objects(
                                        query_result[column],
                                        blacklisted_object,
                                        mask_with,
                                        globbing_enabled,
                                    )
    except Exception as e:
        # Best-effort: never let masking failures break result reporting.
        log.exception("An error occured while masking the passwords: {}".format(e))
    # Object masked in place, so we don't need to return the object
    return True
|
def _mask_object(object_to_be_masked, topfile):
    """
    Given an object with potential secrets (or other data that should not be
    returned), mask the contents of that object as configured in the mask
    configuration file. The mask configuration file used is defined by the
    top data in the ``topfile`` argument.

    If multiple mask.yaml files are matched in the topfile, the data within
    them will be recursively merged.

    If no matching mask_files are found in the top.mask file, no masking will
    happen.

    Note that this function has side effects: alterations to
    ``object_to_be_masked`` will be made in place. The function returns
    ``True`` (also after a swallowed exception); it returns ``None`` only
    when a referenced salt:// mask file cannot be cached.

    object_to_be_masked
        list of osquery interactive-shell results, each a dict of
        ``{query_name: {"data": [...]}}``
    topfile
        path to the top.mask file; defaults to
        ``salt://hubblestack_nebula_v2/top.mask`` when ``None``

    Sample mask.yaml data (with inline documentation):

    .. code-block:: yaml

        # Pattern that will replace whatever is masked
        mask_with: '***masked*by*hubble***'

        # Target and mask strings based on regex patterns
        # Can limit search specific queries and columns
        blacklisted_strings:
            - query_name: 'running_procs'  # Name of the osquery to be masked.
                                           # Put '*' to match all queries. Note
                                           # that query_name doesn't support
                                           # full globbing. '*' is just given
                                           # special treatment.
              column: 'command_line'       # Column name in the osquery to be masked. No regex or glob support
              # See below for documentation of these blacklisted patterns
              blacklisted_patterns:
                - '(prefix)(password)(suffix)'

        # Some osquery results are formed as lists of dicts. We can mask
        # based on variable names within these dicts.
        blacklisted_objects:
            - query_name: 'running_procs'  # Name of the osquery to be masked.
                                           # Put '*' to match all queries. Note
                                           # that query_name doesn't support
                                           # full globbing. '*' is just given
                                           # special treatment.
              column: 'environment'        # Column name in the osquery to be masked. No regex or glob support
              attribute_to_check: 'variable_name'  # In the inner dict, this is the key
                                                   # to check for blacklisted_patterns
              attributes_to_mask:          # Values under these keys in the dict will be
                - 'value'                  # masked, assuming one of the blacklisted_patterns
                                           # is found under attribute_to_check in the same dict
              blacklisted_patterns:        # Strings to look for under attribute_to_check. Globbing support
                - 'ETCDCTL_READ_PASSWORD'
                - 'ETCDCTL_WRITE_PASSWORD'
                - '*PASSWORD*'

    blacklisted_patterns (for blacklisted_strings)

        Blacklisted patterns are regular expressions, and have a prefix, a
        secret, and a suffix. Nebula uses regex groups to maintain the prefix
        and suffix, *which are not masked*. Only the password is masked.

        If you don't need a suffix or a prefix, leave those sets of parenthesis
        blank. Do not remove any parenthesis, or else your password could
        remain unmasked!

        blacklisted_patterns is formed as a list. These patterns are processed
        (and substituted) in order.

    blacklisted_patterns (for blacklisted_objects)

        For objects, the pattern applies to the variable name, and doesn't
        support regex. For example, you might have data formed like this::

            [{ value: 'SOME_PASSWORD', variable_name: 'ETCDCTL_READ_PASSWORD' }]

        The attribute_to_check would be ``variable_name`` and the pattern would
        be ``ETCDCTL_READ_PASSWORD``. The attribute_to_mask would be ``value``.
        All dicts with ``variable_name`` in the list of blacklisted_patterns
        would have the value under their ``value`` key masked.
    """
    try:
        mask = {}
        if topfile is None:
            topfile = "salt://hubblestack_nebula_v2/top.mask"
        mask_files = _get_top_data(topfile)
        mask_files = [
            "salt://hubblestack_nebula_v2/" + mask_file.replace(".", "/") + ".yaml"
            for mask_file in mask_files
        ]
        if not mask_files:
            mask_files = []
        for fh in mask_files:
            if "salt://" in fh:
                orig_fh = fh
                fh = __salt__["cp.cache_file"](fh)
                if fh is None:
                    log.error("Could not find file {0}.".format(orig_fh))
                    return None
            if os.path.isfile(fh):
                with open(fh, "r") as f:
                    f_data = yaml.safe_load(f)
                    if not isinstance(f_data, dict):
                        raise CommandExecutionError(
                            "File data is not formed as a dict {0}".format(f_data)
                        )
                    # Recursively merge every matched mask file into one dict.
                    mask = _dict_update(
                        mask, f_data, recursive_update=True, merge_lists=True
                    )
        log.debug("Masking data: {}".format(mask))
        # Backwards compatibility with mask_by
        mask_with = mask.get("mask_with", mask.get("mask_by", "******"))
        # We can blacklist strings based on their pattern
        for blacklisted_string in mask.get("blacklisted_strings", []):
            query_name = blacklisted_string["query_name"]
            column = blacklisted_string["column"]
            if query_name != "*":
                for r in object_to_be_masked:
                    for query_result in r.get(query_name, {"data": []})["data"]:
                        if column not in query_result or not isinstance(
                            query_result[column], basestring
                        ):
                            # if the column in not present in one data-object, it will
                            # not be present in others as well. Break in that case.
                            # This will happen only if mask.yaml is malformed
                            log.error(
                                "masking data references a missing column {0} in query {1}".format(
                                    column, query_name
                                )
                            )
                            break
                        value = query_result[column]
                        # NOTE(review): assumes each pattern contains two capture
                        # groups (prefix/suffix); "()" appends an empty third group
                        # so \3 always resolves — see docstring above.
                        for pattern in blacklisted_string["blacklisted_patterns"]:
                            value = re.sub(
                                pattern + "()", r"\1" + mask_with + r"\3", value
                            )
                        query_result[column] = value
            else:
                # '*' query name: scan every query's rows.
                for r in object_to_be_masked:
                    for query_name, query_ret in r.iteritems():
                        for query_result in query_ret["data"]:
                            if column not in query_result or not isinstance(
                                query_result[column], basestring
                            ):
                                # No error here, since we didn't reference a specific query
                                break
                            value = query_result[column]
                            for pattern in blacklisted_string["blacklisted_patterns"]:
                                value = re.sub(
                                    pattern + "()", r"\1" + mask_with + r"\3", value
                                )
                            query_result[column] = value
        for blacklisted_object in mask.get("blacklisted_objects", []):
            query_name = blacklisted_object["query_name"]
            column = blacklisted_object["column"]
            if query_name != "*":
                for r in object_to_be_masked:
                    for query_result in r.get(query_name, {"data": []})["data"]:
                        # NOTE(review): this breaks when the column holds a NON-empty
                        # string — condition looks inverted relative to the comment
                        # below; confirm intent.
                        if column not in query_result or (
                            isinstance(query_result[column], basestring)
                            and query_result[column].strip() != ""
                        ):
                            # if the column in not present in one data-object, it will
                            # not be present in others as well. Break in that case.
                            # This will happen only if mask.yaml is malformed
                            log.error(
                                "masking data references a missing column {0} in query {1}".format(
                                    column, query_name
                                )
                            )
                            break
                        _recursively_mask_objects(
                            query_result[column], blacklisted_object, mask_with
                        )
            else:
                # '*' query name: scan every query's rows.
                for r in object_to_be_masked:
                    for query_name, query_ret in r.iteritems():
                        for query_result in query_ret["data"]:
                            if column not in query_result or (
                                isinstance(query_result[column], basestring)
                                and query_result[column].strip() != ""
                            ):
                                # No error here, since we didn't reference a specific query
                                break
                            _recursively_mask_objects(
                                query_result[column], blacklisted_object, mask_with
                            )
    except Exception as e:
        # Best-effort: never let masking failures break result reporting.
        log.exception("An error occured while masking the passwords: {}".format(e))
    # Object masked in place, so we don't need to return the object
    return True
|
https://github.com/hubblestack/hubble/issues/493
|
[ERROR ] name MACHINE\System\CurrentControlSet\Control\Lsa\pku2u\AllowOnlineID was not in __secdata__
[ERROR ] Exception occurred in nova module:
[ERROR ] Traceback (most recent call last):
File "C:\PROGRA~2\Hubble\hubblestack\extmods\modules\hubble.py", line 291, in _run_audit
ret = func(data_list, tags, labels, **kwargs)
File "C:\PROGRA~2\Hubble\hubblestack\files\hubblestack_nova\win_secedit.py", line 133, in audit
match_output,
UnboundLocalError: local variable 'match_output' referenced before assignment
{'Compliance': '15%',
'Errors': [{'\\win_secedit.py': {'data': "UnboundLocalError: local variable 'match_output' referenced before assignment",
'error': 'exception occurred'}}],
'Failure': [{'CIS-9.3.8': 'Ensure "Windows Firewall- Public- Logging- Size limit (KB)" is set to "16,384 KB or greater"'},
{'CIS-9.2.10': 'Ensure "Windows Firewall- Private- Logging- Log successful connections" is set to "Yes"'},
{'CIS-9.1.10': 'Ensure "Windows Firewall- Domain- Logging- Log successful connections" is set to "Yes"'},
|
UnboundLocalError
|
def _recursively_mask_objects(
    object_to_mask, blacklisted_object, mask_with, globbing_enabled
):
    """
    Walk an osquery data structure (typically nested lists of dicts) and,
    in place, replace secret values with ``mask_with``. Used by
    ``_mask_object()``.

    object_to_mask
        data structure to mask recursively
    blacklisted_object
        the blacklisted_objects entry from the mask.yaml
    mask_with
        masked values are replaced with this string
    globbing_enabled
        enable globbing in specified blacklisted patterns of mask file
    """
    if isinstance(object_to_mask, list):
        # Lists are containers: descend into every element.
        for element in object_to_mask:
            log.debug("Recursing object {0}".format(element))
            _recursively_mask_objects(
                element, blacklisted_object, mask_with, globbing_enabled
            )
        return
    check_key = blacklisted_object["attribute_to_check"]
    if check_key not in object_to_mask:
        # Nothing to check on this leaf.
        return
    candidate = object_to_mask[check_key]
    if globbing_enabled:
        # Glob match: mask when any pattern matches the checked attribute.
        should_mask = False
        for pattern in blacklisted_object["blacklisted_patterns"]:
            if fnmatch.fnmatch(candidate, pattern):
                should_mask = True
                log.info("Attribute {0} will be masked.".format(candidate))
                break
    else:
        # Exact match only: the attribute must equal a blacklisted pattern.
        should_mask = candidate in blacklisted_object["blacklisted_patterns"]
    if should_mask:
        for target_key in blacklisted_object["attributes_to_mask"]:
            if target_key in object_to_mask:
                object_to_mask[target_key] = mask_with
|
def _recursively_mask_objects(object_to_mask, blacklisted_object, mask_with):
"""
This function is used by ``_mask_object()`` to mask passwords contained in
an osquery data structure (formed as a list of dicts, usually). Since the
lists can sometimes be nested, recurse through the lists.
object_to_mask
data structure to mask recursively
blacklisted_object
the blacklisted_objects entry from the mask.yaml
mask_with
masked values are replaced with this string
"""
if isinstance(object_to_mask, list):
for child in object_to_mask:
_recursively_mask_objects(child, blacklisted_object, mask_with)
elif blacklisted_object["attribute_to_check"] in object_to_mask:
mask = False
for blacklisted_pattern in blacklisted_object["blacklisted_patterns"]:
if fnmatch.fnmatch(
object_to_mask[blacklisted_object["attribute_to_check"]],
blacklisted_pattern,
):
mask = True
break
if mask:
for key in blacklisted_object["attributes_to_mask"]:
if key in object_to_mask:
object_to_mask[key] = mask_with
|
https://github.com/hubblestack/hubble/issues/493
|
[ERROR ] name MACHINE\System\CurrentControlSet\Control\Lsa\pku2u\AllowOnlineID was not in __secdata__
[ERROR ] Exception occurred in nova module:
[ERROR ] Traceback (most recent call last):
File "C:\PROGRA~2\Hubble\hubblestack\extmods\modules\hubble.py", line 291, in _run_audit
ret = func(data_list, tags, labels, **kwargs)
File "C:\PROGRA~2\Hubble\hubblestack\files\hubblestack_nova\win_secedit.py", line 133, in audit
match_output,
UnboundLocalError: local variable 'match_output' referenced before assignment
{'Compliance': '15%',
'Errors': [{'\\win_secedit.py': {'data': "UnboundLocalError: local variable 'match_output' referenced before assignment",
'error': 'exception occurred'}}],
'Failure': [{'CIS-9.3.8': 'Ensure "Windows Firewall- Public- Logging- Size limit (KB)" is set to "16,384 KB or greater"'},
{'CIS-9.2.10': 'Ensure "Windows Firewall- Private- Logging- Log successful connections" is set to "Yes"'},
{'CIS-9.1.10': 'Ensure "Windows Firewall- Domain- Logging- Log successful connections" is set to "Yes"'},
|
UnboundLocalError
|
def osqueryd_monitor(
    configfile=None,
    flagfile=None,
    logdir=None,
    databasepath=None,
    pidfile=None,
    hashfile=None,
    daemonize=True,
):
    """
    This function will monitor whether osqueryd is running on the system or not.
    Whenever it detects that osqueryd is not running, it will start the osqueryd.
    Also, it checks for conditions that would require osqueryd to restart (such as
    changes in flag file content). On such conditions, osqueryd will get
    restarted, thereby loading new files.

    configfile
        Path to osquery configuration file.
    flagfile
        Path to osquery flag file
    logdir
        Path to log directory where osquery daemon/service will write logs
    databasepath
        Path to osquery database
    pidfile
        pidfile path where osquery daemon will write pid info
    hashfile
        path to hashfile where osquery flagfile's hash would be stored
    daemonize
        daemonize osquery daemon. Default is True. Applicable for posix system
        only.
        NOTE(review): this argument is accepted for interface compatibility but
        is not consumed anywhere in this function — presumably handled further
        down by ``_start_osqueryd``; confirm before removing.
    """
    log.info("Starting osqueryd monitor")
    saltenv = __salt__["config.get"]("hubblestack:nova:saltenv", "base")
    # cp.cache_dir is called for its side effect: populating the local file
    # cache with the osquery profile files. The return value is not needed.
    __salt__["cp.cache_dir"]("salt://osquery", saltenv=saltenv)
    log.info("Cached osqueryd files to cachedir")
    base_path = os.path.join(__opts__.get("cachedir"), "files", saltenv, "osquery")
    servicename = "hubble_osqueryd"

    # Fill in every unset path with its default. These defaults are identical
    # on Windows and posix, so compute them once instead of duplicating the
    # whole block in each platform branch (as the previous version did).
    if not logdir:
        logdir = __opts__.get("osquerylogpath")
    if not databasepath:
        databasepath = __opts__.get("osquery_dbpath")
    if not pidfile:
        pidfile = os.path.join(base_path, "hubble_osqueryd.pidfile")
    if not configfile:
        configfile = os.path.join(base_path, "osquery.conf")
    if not flagfile:
        flagfile = os.path.join(base_path, "osquery.flags")
    if not hashfile:
        hashfile = os.path.join(base_path, "hash_of_flagfile.txt")

    # Only the liveness probe differs per platform.
    if salt.utils.platform.is_windows():
        osqueryd_running = _osqueryd_running_status_windows(servicename)
    else:
        osqueryd_running = _osqueryd_running_status(pidfile, servicename)

    if not osqueryd_running:
        _start_osqueryd(pidfile, configfile, flagfile, logdir, databasepath, servicename)
    elif _osqueryd_restart_required(hashfile, flagfile):
        # Flag file changed since the stored hash — restart to pick it up.
        _restart_osqueryd(
            pidfile, configfile, flagfile, logdir, databasepath, hashfile, servicename
        )
|
def osqueryd_monitor(
    configfile=None,
    flagfile=None,
    logdir=None,
    databasepath=None,
    pidfile=None,
    hashfile=None,
    daemonize=True,
):
    """
    Keep the osqueryd daemon/service alive.

    Checks whether osqueryd is running; starts it when it is not, and restarts
    it when a restart condition is detected (e.g. the flag file content
    changed, detected via a stored hash).

    configfile
        Path to osquery configuration file.
    flagfile
        Path to osquery flag file
    logdir
        Path to log directory where osquery daemon/service will write logs
    databasepath
        Path to osquery database
    pidfile
        pidfile path where osquery daemon will write pid info
    hashfile
        path to hashfile where osquery flagfile's hash would be stored
    daemonize
        daemonize osquery daemon. Default is True. Applicable for posix
        system only (not referenced on this code path).
    """
    log.info("Starting osqueryd monitor")
    saltenv = __salt__["config.get"]("hubblestack:nova:saltenv", "base")
    # Populate the local cache with the osquery profile files (side effect).
    __salt__["cp.cache_dir"]("salt://osquery", saltenv=saltenv)
    log.info("Cached osqueryd files to cachedir")
    base_path = os.path.join(__opts__.get("cachedir"), "files", saltenv, "osquery")
    servicename = "hubble_osqueryd"

    # Default every unset path. The defaults are the same on both platforms,
    # so they are resolved once up front.
    logdir = logdir or __opts__.get("osquerylogpath")
    databasepath = databasepath or __opts__.get("osquery_dbpath")
    pidfile = pidfile or os.path.join(base_path, "osqueryd.pidfile")
    configfile = configfile or os.path.join(base_path, "osquery.conf")
    flagfile = flagfile or os.path.join(base_path, "osquery.flags")
    hashfile = hashfile or os.path.join(base_path, "hash_of_flagfile.txt")

    # The liveness check is the only platform-specific step.
    if salt.utils.platform.is_windows():
        alive = _osqueryd_running_status_windows(servicename)
    else:
        alive = _osqueryd_running_status(pidfile, servicename)

    if not alive:
        _start_osqueryd(pidfile, configfile, flagfile, logdir, databasepath, servicename)
    elif _osqueryd_restart_required(hashfile, flagfile):
        _restart_osqueryd(
            pidfile, configfile, flagfile, logdir, databasepath, hashfile, servicename
        )
|
https://github.com/hubblestack/hubble/issues/493
|
[ERROR ] name MACHINE\System\CurrentControlSet\Control\Lsa\pku2u\AllowOnlineID was not in __secdata__
[ERROR ] Exception occurred in nova module:
[ERROR ] Traceback (most recent call last):
File "C:\PROGRA~2\Hubble\hubblestack\extmods\modules\hubble.py", line 291, in _run_audit
ret = func(data_list, tags, labels, **kwargs)
File "C:\PROGRA~2\Hubble\hubblestack\files\hubblestack_nova\win_secedit.py", line 133, in audit
match_output,
UnboundLocalError: local variable 'match_output' referenced before assignment
{'Compliance': '15%',
'Errors': [{'\\win_secedit.py': {'data': "UnboundLocalError: local variable 'match_output' referenced before assignment",
'error': 'exception occurred'}}],
'Failure': [{'CIS-9.3.8': 'Ensure "Windows Firewall- Public- Logging- Size limit (KB)" is set to "16,384 KB or greater"'},
{'CIS-9.2.10': 'Ensure "Windows Firewall- Private- Logging- Log successful connections" is set to "Yes"'},
{'CIS-9.1.10': 'Ensure "Windows Firewall- Domain- Logging- Log successful connections" is set to "Yes"'},
|
UnboundLocalError
|
def _mask_object(object_to_be_masked, topfile):
    """
    Given an object with potential secrets (or other data that should not be
    returned), mask the contents of that object as configured in the mask
    configuration file. The mask configuration file used is defined by the
    top data in the ``topfile`` argument.

    If multiple mask.yaml files are matched in the topfile, the data within
    them will be recursively merged.

    If no matching mask_files are found in the top.mask file, no masking will
    happen.

    Note that this function has side effects: alterations to
    ``object_to_be_masked`` will be made in place.

    Sample mask.yaml data (with inline documentation):

    .. code-block:: yaml

        # Pattern that will replace whatever is masked
        mask_with: '***masked*by*hubble***'
        blacklisted_objects:
            - query_names:
              - 'running_procs'
              - 'listening_procs'       # osquery names to mask; '*' masks all
                                        # queries (no other globbing here)
              column: 'environment'     # column to mask (no regex/glob)
              custom_mask_column: 'environment'  # column holding env vars
              custom_mask_key: '__hubble_mask__' # env var that supplies extra
                                                 # blacklisted patterns
              attribute_to_check: 'variable_name'  # key checked against
                                                   # blacklisted_patterns
              attributes_to_mask:       # values under these keys are masked
              - 'value'                 # when a blacklisted pattern matched
              blacklisted_patterns:     # strings to look for; globbing is
              - 'ETCDCTL_READ_PASSWORD' # honored only when
              - '*PASSWORD*'            # enable_globbing_in_nebula_masking
                                        # is set in the daemon opts

    Returns True (the object is masked in place), or None when a referenced
    mask file cannot be cached locally.
    """
    try:
        mask = {}
        if topfile is None:
            # We will maintain backward compatibility by keeping two versions
            # of top files and mask files for now. Once all hubble servers are
            # updated, we can remove the old version of top file and mask file
            # (similar to the nebula / nebula_v2 split for profiles).
            topfile = "salt://hubblestack_nebula_v2/top_v2.mask"
        mask_files = _get_top_data(topfile)
        mask_files = [
            "salt://hubblestack_nebula_v2/" + mask_file.replace(".", "/") + ".yaml"
            for mask_file in mask_files
        ]
        if not mask_files:
            mask_files = []
        # Recursively merge every matched mask file into one config dict.
        for fh in mask_files:
            if "salt://" in fh:
                orig_fh = fh
                fh = __salt__["cp.cache_file"](fh)
                if fh is None:
                    log.error("Could not find file {0}.".format(orig_fh))
                    return None
            if os.path.isfile(fh):
                with open(fh, "r") as f:
                    f_data = yaml.safe_load(f)
                    if not isinstance(f_data, dict):
                        raise CommandExecutionError(
                            "File data is not formed as a dict {0}".format(f_data)
                        )
                    mask = _dict_update(
                        mask, f_data, recursive_update=True, merge_lists=True
                    )
        log.debug("Masking data: {}".format(mask))
        # Backwards compatibility with mask_by
        mask_with = mask.get("mask_with", mask.get("mask_by", "******"))
        log.info(
            "Total number of results to check for masking: {0}".format(
                len(object_to_be_masked)
            )
        )
        globbing_enabled = __opts__.get("enable_globbing_in_nebula_masking")
        for blacklisted_object in mask.get("blacklisted_objects", []):
            query_names = blacklisted_object["query_names"]
            # Can be converted to list as well in future if need be
            column = blacklisted_object["column"]
            # Name of column that stores environment variables
            custom_mask_column = blacklisted_object.get("custom_mask_column", "")
            if "*" in query_names:
                # Wildcard: every event should be masked, if applicable.
                for r in object_to_be_masked:
                    if "action" in r:
                        # Data generated by the osquery daemon
                        _mask_event_data(
                            r,
                            None,
                            column,
                            blacklisted_object,
                            mask_with,
                            globbing_enabled,
                        )
                    else:
                        # Data generated by the osquery interactive shell.
                        # NOTE: iteritems()/basestring make this Python-2 only.
                        for query_name, query_ret in r.iteritems():
                            for query_result in query_ret["data"]:
                                _build_custom_blacklist(
                                    blacklisted_object,
                                    query_result,
                                    custom_mask_column,
                                )
                                if column not in query_result or (
                                    isinstance(query_result[column], basestring)
                                    and query_result[column].strip() != ""
                                ):
                                    # No error here, since we didn't reference
                                    # a specific query.
                                    # NOTE(review): this also breaks when the
                                    # column holds a NON-empty string, leaving
                                    # the string branch below reachable only
                                    # for empty strings — confirm the intended
                                    # condition.
                                    break
                                if isinstance(query_result[column], basestring):
                                    # String column: replace patterns in place,
                                    # no recursion needed.
                                    _mask_string_in_place(
                                        query_result,
                                        column,
                                        blacklisted_object,
                                        mask_with,
                                    )
                                else:
                                    _perform_masking(
                                        query_result[column],
                                        blacklisted_object,
                                        mask_with,
                                        globbing_enabled,
                                    )
            else:
                # Mask only results of the queries named in 'query_names'.
                for query_name in query_names:
                    for r in object_to_be_masked:
                        if "action" in r:
                            # Data generated by the osquery daemon
                            _mask_event_data(
                                r,
                                query_name,
                                column,
                                blacklisted_object,
                                mask_with,
                                globbing_enabled,
                            )
                        else:
                            # Data generated by the osquery interactive shell
                            for query_result in r.get(query_name, {"data": []})["data"]:
                                _build_custom_blacklist(
                                    blacklisted_object,
                                    query_result,
                                    custom_mask_column,
                                )
                                if column not in query_result or (
                                    isinstance(query_result[column], basestring)
                                    and query_result[column].strip() != ""
                                ):
                                    # If the column is not present in one
                                    # data-object it will not be present in the
                                    # others either; only happens with a
                                    # malformed mask.yaml.
                                    log.error(
                                        "masking data references a missing column {0} in query {1}".format(
                                            column, query_name
                                        )
                                    )
                                    break
                                if isinstance(query_result[column], basestring):
                                    _mask_string_in_place(
                                        query_result,
                                        column,
                                        blacklisted_object,
                                        mask_with,
                                    )
                                else:
                                    _perform_masking(
                                        query_result[column],
                                        blacklisted_object,
                                        mask_with,
                                        globbing_enabled,
                                    )
    except Exception as e:
        # Fixed log message typo: 'occured' -> 'occurred'.
        log.exception("An error occurred while masking the passwords: {}".format(e))
    # Object masked in place, so we don't need to return the object
    return True


def _build_custom_blacklist(blacklisted_object, query_result, custom_mask_column):
    """
    Populate ``blacklisted_object['custom_blacklist']`` with patterns taken
    from the environment of the queried process.

    The env-var column of ``query_result`` is scanned for a variable whose
    name equals ``blacklisted_object['custom_mask_key']``; its comma-separated
    value (minus the key itself) becomes the custom blacklist. This logic was
    previously duplicated verbatim in both masking branches of
    ``_mask_object()``.
    """
    if not custom_mask_column or custom_mask_column not in query_result:
        return
    log.debug("Checking if custom mask patterns are set in environment")
    mask_column = query_result[custom_mask_column]
    if not (mask_column and isinstance(mask_column, list)):
        return
    for column_field in mask_column:
        try:
            if (
                "variable_name" in column_field
                and "value" in column_field
                and column_field["variable_name"]
                == blacklisted_object["custom_mask_key"]
            ):
                # Message de-indented: the old continuation-line form embedded
                # a run of indentation spaces inside the logged string.
                log.debug(
                    "Constructing custom blacklisted patterns based on "
                    "environment variable '{0}'".format(
                        blacklisted_object["custom_mask_key"]
                    )
                )
                blacklisted_object["custom_blacklist"] = [
                    p.strip()
                    for p in column_field["value"].split(",")
                    if p.strip() != blacklisted_object["custom_mask_key"]
                ]
            else:
                log.debug(
                    "Custom mask variable not set in environment. "
                    "Custom mask key used: {0}".format(
                        blacklisted_object["custom_mask_key"]
                    )
                )
        except Exception as e:
            log.error(
                "Failed to generate custom blacklisted patterns based on hubble mask key"
            )
            log.error("Got error: {0}".format(e))


def _mask_string_in_place(query_result, column, blacklisted_object, mask_with):
    """
    Mask a plain-string column of ``query_result`` in place by substituting
    each blacklisted regex pattern with ``mask_with``.

    NOTE(review): the replacement references groups \\1 and \\3 while "()"
    appends one extra (empty) group — patterns are presumably written with two
    capture groups around the secret; confirm against the shipped mask.yaml.
    """
    value = query_result[column]
    for pattern in blacklisted_object["blacklisted_patterns"]:
        value = re.sub(pattern + "()", r"\1" + mask_with + r"\3", value)
    query_result[column] = value
|
def _mask_object(object_to_be_masked, topfile):
    """
    Given an object with potential secrets (or other data that should not be
    returned), mask the contents of that object as configured in the mask
    configuration file. The mask configuration file used is defined by the
    top data in the ``topfile`` argument.

    If multiple mask.yaml files are matched in the topfile, the data within
    them will be recursively merged.

    If no matching mask_files are found in the top.mask file, no masking will
    happen.

    Note that this function has side effects: alterations to
    ``object_to_be_masked`` will be made in place.

    Sample mask.yaml data (with inline documentation):

    .. code-block:: yaml

        # Pattern that will replace whatever is masked
        mask_with: '***masked*by*hubble***'
        blacklisted_objects:
            - query_names:
              - 'running_procs'
              - 'listening_procs'     # osquery names to be masked; '*' masks
                                      # all queries (no other globbing here)
              column: 'environment'   # column to mask (no regex/glob support)
              attribute_to_check: 'variable_name'  # key checked against
                                                   # blacklisted_patterns
                                                   # (skipped for string cols)
              attributes_to_mask:     # values under these keys get masked
              - 'value'               # when a blacklisted pattern is found
                                      # under attribute_to_check in same dict
              blacklisted_patterns:   # strings to look for; glob matching is
              - 'ETCDCTL_READ_PASSWORD'  # enabled via the
              - '*PASSWORD*'             # enable_globbing_in_nebula_masking
                                         # daemon option

    blacklisted_patterns (for blacklisted_objects)
        For objects, the pattern applies to the variable name, and doesn't
        support regex. For example, you might have data formed like this::

            [{ value: 'SOME_PASSWORD', variable_name: 'ETCDCTL_READ_PASSWORD' }]

        The attribute_to_check would be ``variable_name`` and the pattern would
        be ``ETCDCTL_READ_PASSWORD``. The attribute_to_mask would be ``value``.
        All dicts with ``variable_name`` in the list of blacklisted_patterns
        would have the value under their ``value`` key masked.

    Returns True (masking is done in place), or None when a referenced mask
    file cannot be cached locally.
    """
    try:
        mask = {}
        if topfile is None:
            # We will maintain backward compatibility by keeping two versions of top files and mask files for now
            # Once all hubble servers are updated, we can remove old version of top file and mask file
            # Similar to what we have for nebula and nebula_v2 for older versions and newer versions of profiles
            topfile = "salt://hubblestack_nebula_v2/top_v2.mask"
        mask_files = _get_top_data(topfile)
        mask_files = [
            "salt://hubblestack_nebula_v2/" + mask_file.replace(".", "/") + ".yaml"
            for mask_file in mask_files
        ]
        if not mask_files:
            mask_files = []
        # Merge every matched mask file into a single config dict.
        for fh in mask_files:
            if "salt://" in fh:
                orig_fh = fh
                fh = __salt__["cp.cache_file"](fh)
                if fh is None:
                    log.error("Could not find file {0}.".format(orig_fh))
                    return None
            if os.path.isfile(fh):
                with open(fh, "r") as f:
                    f_data = yaml.safe_load(f)
                    if not isinstance(f_data, dict):
                        raise CommandExecutionError(
                            "File data is not formed as a dict {0}".format(f_data)
                        )
                    mask = _dict_update(
                        mask, f_data, recursive_update=True, merge_lists=True
                    )
        log.debug("Masking data: {}".format(mask))
        # Backwards compatibility with mask_by
        mask_with = mask.get("mask_with", mask.get("mask_by", "******"))
        log.info(
            "Total number of results to check for masking: {0}".format(
                len(object_to_be_masked)
            )
        )
        globbing_enabled = __opts__.get("enable_globbing_in_nebula_masking")
        for blacklisted_object in mask.get("blacklisted_objects", []):
            query_names = blacklisted_object["query_names"]
            column = blacklisted_object[
                "column"
            ]  # Can be converted to list as well in future if need be
            if "*" in query_names:
                # This means wildcard is specified and each event should be masked, if applicable
                for r in object_to_be_masked:
                    if "action" in r:
                        # This means data is generated by osquery daemon
                        _mask_event_data(
                            r,
                            None,
                            column,
                            blacklisted_object,
                            mask_with,
                            globbing_enabled,
                        )
                    else:
                        # This means data is generated by osquery interactive shell
                        # NOTE: iteritems()/basestring make this Python-2 only.
                        for query_name, query_ret in r.iteritems():
                            for query_result in query_ret["data"]:
                                if column not in query_result or (
                                    isinstance(query_result[column], basestring)
                                    and query_result[column].strip() != ""
                                ):
                                    # No error here, since we didn't reference a specific query
                                    # NOTE(review): this also breaks when the
                                    # column holds a NON-empty string, so the
                                    # string branch below is reachable only for
                                    # empty strings — confirm intent.
                                    break
                                if isinstance(query_result[column], basestring):
                                    # If column is of 'string' type, then replace pattern in-place
                                    # No need for recursion here
                                    value = query_result[column]
                                    for pattern in blacklisted_object[
                                        "blacklisted_patterns"
                                    ]:
                                        # NOTE(review): replacement uses \1/\3
                                        # with an appended empty group — the
                                        # patterns presumably contain two
                                        # capture groups; verify in mask.yaml.
                                        value = re.sub(
                                            pattern + "()",
                                            r"\1" + mask_with + r"\3",
                                            value,
                                        )
                                    query_result[column] = value
                                else:
                                    _recursively_mask_objects(
                                        query_result[column],
                                        blacklisted_object,
                                        mask_with,
                                        globbing_enabled,
                                    )
            else:
                # Perform masking on results of specific queries specified in 'query_names'
                for query_name in query_names:
                    for r in object_to_be_masked:
                        if "action" in r:
                            # This means data is generated by osquery daemon
                            _mask_event_data(
                                r,
                                query_name,
                                column,
                                blacklisted_object,
                                mask_with,
                                globbing_enabled,
                            )
                        else:
                            # This means data is generated by osquery interactive shell
                            for query_result in r.get(query_name, {"data": []})["data"]:
                                if column not in query_result or (
                                    isinstance(query_result[column], basestring)
                                    and query_result[column].strip() != ""
                                ):
                                    # if the column in not present in one data-object, it will
                                    # not be present in others as well. Break in that case.
                                    # This will happen only if mask.yaml is malformed
                                    log.error(
                                        "masking data references a missing column {0} in query {1}".format(
                                            column, query_name
                                        )
                                    )
                                    break
                                if isinstance(query_result[column], basestring):
                                    # If column is of 'string' type, then replace pattern in-place
                                    # No need for recursion here
                                    value = query_result[column]
                                    for pattern in blacklisted_object[
                                        "blacklisted_patterns"
                                    ]:
                                        value = re.sub(
                                            pattern + "()",
                                            r"\1" + mask_with + r"\3",
                                            value,
                                        )
                                    query_result[column] = value
                                else:
                                    _recursively_mask_objects(
                                        query_result[column],
                                        blacklisted_object,
                                        mask_with,
                                        globbing_enabled,
                                    )
    except Exception as e:
        log.exception("An error occured while masking the passwords: {}".format(e))
    # Object masked in place, so we don't need to return the object
    return True
|
https://github.com/hubblestack/hubble/issues/493
|
[ERROR ] name MACHINE\System\CurrentControlSet\Control\Lsa\pku2u\AllowOnlineID was not in __secdata__
[ERROR ] Exception occurred in nova module:
[ERROR ] Traceback (most recent call last):
File "C:\PROGRA~2\Hubble\hubblestack\extmods\modules\hubble.py", line 291, in _run_audit
ret = func(data_list, tags, labels, **kwargs)
File "C:\PROGRA~2\Hubble\hubblestack\files\hubblestack_nova\win_secedit.py", line 133, in audit
match_output,
UnboundLocalError: local variable 'match_output' referenced before assignment
{'Compliance': '15%',
'Errors': [{'\\win_secedit.py': {'data': "UnboundLocalError: local variable 'match_output' referenced before assignment",
'error': 'exception occurred'}}],
'Failure': [{'CIS-9.3.8': 'Ensure "Windows Firewall- Public- Logging- Size limit (KB)" is set to "16,384 KB or greater"'},
{'CIS-9.2.10': 'Ensure "Windows Firewall- Private- Logging- Log successful connections" is set to "Yes"'},
{'CIS-9.1.10': 'Ensure "Windows Firewall- Domain- Logging- Log successful connections" is set to "Yes"'},
|
UnboundLocalError
|
def _mask_event_data(
    object_to_be_masked,
    query_name,
    column,
    blacklisted_object,
    mask_with,
    globbing_enabled,
):
    """
    This method is responsible for masking potential secrets in event data generated by
    osquery daemon. This will handle logs format of both differential and snapshot types.
    Logs generated by 'osqueryi' would not reach here due to checks in the parent method.

    object_to_be_masked
        data structure to mask recursively
    query_name
        Perform masking only if query name in 'object_to_be_masked' matches the
        'query_name'; when falsy, the event's own 'name' field is used
    column
        column in which masking is to be performed
    blacklisted_object
        the blacklisted_objects entry from the mask.yaml (may gain a
        'custom_blacklist' key as a side effect, built from the queried
        process's environment)
    mask_with
        masked values are replaced with this string
    globbing_enabled
        enable globbing in specified blacklisted patterns of mask file

    NOTE: uses the Python-2-only ``basestring`` builtin.
    """
    if not query_name:
        query_name = object_to_be_masked["name"]
    custom_mask_column = blacklisted_object.get(
        "custom_mask_column", ""
    )  # Name of column that stores environment variables
    if (
        object_to_be_masked["action"] == "snapshot"
        and query_name == object_to_be_masked["name"]
    ):
        # This means we have event data of type 'snapshot'
        for snap_object in object_to_be_masked["snapshot"]:
            if custom_mask_column and custom_mask_column in snap_object:
                log.debug("Checking if custom mask patterns are set in environment")
                mask_column = snap_object[custom_mask_column]
                if mask_column and isinstance(mask_column, list):
                    for column_field in mask_column:
                        try:
                            # The custom_mask_key env var carries a
                            # comma-separated list of extra patterns to mask.
                            if (
                                "variable_name" in column_field
                                and "value" in column_field
                                and column_field["variable_name"]
                                == blacklisted_object["custom_mask_key"]
                            ):
                                log.debug(
                                    "Constructing custom blacklisted patterns based on \
                                    environment variable '{0}'".format(
                                        blacklisted_object["custom_mask_key"]
                                    )
                                )
                                blacklisted_object["custom_blacklist"] = [
                                    p.strip()
                                    for p in column_field["value"].split(",")
                                    if p.strip()
                                    != blacklisted_object["custom_mask_key"]
                                ]
                            else:
                                log.debug(
                                    "Custom mask variable not set in environment. \
                                    Custom mask key used: {0}".format(
                                        blacklisted_object["custom_mask_key"]
                                    )
                                )
                        except Exception as e:
                            log.error(
                                "Failed to generate custom blacklisted patterns based on hubble mask key"
                            )
                            log.error("Got error: {0}".format(e))
            if column not in snap_object or (
                isinstance(snap_object[column], basestring)
                and snap_object[column].strip() != ""
            ):
                # NOTE(review): this error/break also triggers when the column
                # holds a NON-empty string — the string-masking branch below is
                # then reachable only for empty strings; confirm intent.
                log.error(
                    "masking data references a missing column {0} in query {1}".format(
                        column, query_name
                    )
                )
                break
            if isinstance(snap_object[column], basestring):
                # String column: substitute blacklisted regex patterns in place.
                value = snap_object[column]
                for pattern in blacklisted_object["blacklisted_patterns"]:
                    value = re.sub(pattern + "()", r"\1" + mask_with + r"\3", value)
                snap_object[column] = value
            else:
                # Structured column: recurse through nested lists/dicts.
                _perform_masking(
                    snap_object[column], blacklisted_object, mask_with, globbing_enabled
                )
    elif query_name == object_to_be_masked["name"]:
        # Differential-format event: a single 'columns' dict.
        q_result = object_to_be_masked["columns"]
        if custom_mask_column and custom_mask_column in q_result:
            log.debug("Checking if custom mask patterns are set in environment")
            mask_column = q_result[custom_mask_column]
            if mask_column and isinstance(mask_column, list):
                for column_field in mask_column:
                    try:
                        if (
                            "variable_name" in column_field
                            and "value" in column_field
                            and column_field["variable_name"]
                            == blacklisted_object["custom_mask_key"]
                        ):
                            log.debug(
                                "Constructing custom blacklisted patterns based on \
                                environment variable '{0}'".format(
                                    blacklisted_object["custom_mask_key"]
                                )
                            )
                            blacklisted_object["custom_blacklist"] = [
                                p.strip()
                                for p in column_field["value"].split(",")
                                if p.strip() != blacklisted_object["custom_mask_key"]
                            ]
                        else:
                            log.debug(
                                "Custom mask variable not set in environment. \
                                Custom mask key used: {0}".format(
                                    blacklisted_object["custom_mask_key"]
                                )
                            )
                    except Exception as e:
                        log.error(
                            "Failed to generate custom blacklisted patterns based on hubble mask key"
                        )
                        log.error("Got error: {0}".format(e))
        if column not in q_result or (
            isinstance(q_result[column], basestring) and q_result[column].strip() != ""
        ):
            # NOTE(review): unlike the snapshot branch there is no early exit
            # here — if 'column' is truly missing, q_result[column] below
            # raises KeyError; confirm whether a return/continue was intended.
            log.error(
                "masking data references a missing column {0} in query {1}".format(
                    column, query_name
                )
            )
        if isinstance(q_result[column], basestring):
            value = q_result[column]
            for pattern in blacklisted_object["blacklisted_patterns"]:
                value = re.sub(pattern + "()", r"\1" + mask_with + r"\3", value)
            q_result[column] = value
        else:
            _perform_masking(
                q_result[column], blacklisted_object, mask_with, globbing_enabled
            )
    else:
        # Unable to match query_name
        log.debug(
            "Skipping masking, as event data is not for query: {0}".format(query_name)
        )
|
def _mask_event_data(
    object_to_be_masked,
    query_name,
    column,
    blacklisted_object,
    mask_with,
    globbing_enabled,
):
    """
    This method is responsible for masking potential secrets in event data generated by
    osquery daemon. This will handle logs format of both differential and snapshot types.
    Logs generated by 'osqueryi' would not reach here due to checks in the parent method.

    object_to_be_masked
        data structure to mask recursively
    query_name
        Perform masking only if query name in 'object_to_be_masked' matches the
        'query_name'; when falsy, the event's own 'name' field is used
    column
        column in which masking is to be performed
    blacklisted_object
        the blacklisted_objects entry from the mask.yaml
    mask_with
        masked values are replaced with this string
    globbing_enabled
        enable globbing in specified blacklisted patterns of mask file

    NOTE: uses the Python-2-only ``basestring`` builtin.
    """
    if not query_name:
        query_name = object_to_be_masked["name"]
    if (
        object_to_be_masked["action"] == "snapshot"
        and query_name == object_to_be_masked["name"]
    ):
        # This means we have event data of type 'snapshot'
        for snap_object in object_to_be_masked["snapshot"]:
            if column not in snap_object or (
                isinstance(snap_object[column], basestring)
                and snap_object[column].strip() != ""
            ):
                # NOTE(review): this error/break also fires when the column
                # holds a NON-empty string, so the string-masking branch below
                # only runs for empty strings — confirm the intended check.
                log.error(
                    "masking data references a missing column {0} in query {1}".format(
                        column, query_name
                    )
                )
                break
            if isinstance(snap_object[column], basestring):
                # String column: substitute blacklisted regex patterns in place.
                value = snap_object[column]
                for pattern in blacklisted_object["blacklisted_patterns"]:
                    value = re.sub(pattern + "()", r"\1" + mask_with + r"\3", value)
                snap_object[column] = value
            else:
                # Structured column: recurse through nested lists/dicts.
                _recursively_mask_objects(
                    snap_object[column], blacklisted_object, mask_with, globbing_enabled
                )
    elif query_name == object_to_be_masked["name"]:
        # Differential-format event: a single 'columns' dict.
        q_result = object_to_be_masked["columns"]
        if column not in q_result or (
            isinstance(q_result[column], basestring) and q_result[column].strip() != ""
        ):
            # NOTE(review): no early exit here — a truly missing 'column'
            # makes q_result[column] below raise KeyError; confirm whether a
            # return was intended after the error log.
            log.error(
                "masking data references a missing column {0} in query {1}".format(
                    column, query_name
                )
            )
        if isinstance(q_result[column], basestring):
            value = q_result[column]
            for pattern in blacklisted_object["blacklisted_patterns"]:
                value = re.sub(pattern + "()", r"\1" + mask_with + r"\3", value)
            q_result[column] = value
        else:
            _recursively_mask_objects(
                q_result[column], blacklisted_object, mask_with, globbing_enabled
            )
    else:
        # Unable to match query_name
        log.debug(
            "Skipping masking, as event data is not for query: {0}".format(query_name)
        )
|
https://github.com/hubblestack/hubble/issues/493
|
[ERROR ] name MACHINE\System\CurrentControlSet\Control\Lsa\pku2u\AllowOnlineID was not in __secdata__
[ERROR ] Exception occurred in nova module:
[ERROR ] Traceback (most recent call last):
File "C:\PROGRA~2\Hubble\hubblestack\extmods\modules\hubble.py", line 291, in _run_audit
ret = func(data_list, tags, labels, **kwargs)
File "C:\PROGRA~2\Hubble\hubblestack\files\hubblestack_nova\win_secedit.py", line 133, in audit
match_output,
UnboundLocalError: local variable 'match_output' referenced before assignment
{'Compliance': '15%',
'Errors': [{'\\win_secedit.py': {'data': "UnboundLocalError: local variable 'match_output' referenced before assignment",
'error': 'exception occurred'}}],
'Failure': [{'CIS-9.3.8': 'Ensure "Windows Firewall- Public- Logging- Size limit (KB)" is set to "16,384 KB or greater"'},
{'CIS-9.2.10': 'Ensure "Windows Firewall- Private- Logging- Log successful connections" is set to "Yes"'},
{'CIS-9.1.10': 'Ensure "Windows Firewall- Domain- Logging- Log successful connections" is set to "Yes"'},
|
UnboundLocalError
|
def _recursively_mask_objects(
object_to_mask,
blacklisted_object,
blacklisted_patterns,
mask_with,
globbing_enabled,
):
"""
This function is used by ``_mask_object()`` to mask passwords contained in
an osquery data structure (formed as a list of dicts, usually). Since the
lists can sometimes be nested, recurse through the lists.
object_to_mask
data structure to mask recursively
blacklisted_object
the blacklisted_objects entry from the mask.yaml
blacklisted_patterns
List of blacklisted patterns which will be used to identify if a field is to be masked
mask_with
masked values are replaced with this string
globbing_enabled
enable globbing in specified blacklisted patterns of mask file
"""
if isinstance(object_to_mask, list):
for child in object_to_mask:
log.debug("Recursing object {0}".format(child))
_recursively_mask_objects(
child,
blacklisted_object,
blacklisted_patterns,
mask_with,
globbing_enabled,
)
elif (
globbing_enabled and blacklisted_object["attribute_to_check"] in object_to_mask
):
mask = False
for blacklisted_pattern in blacklisted_patterns:
if fnmatch.fnmatch(
object_to_mask[blacklisted_object["attribute_to_check"]],
blacklisted_pattern,
):
mask = True
log.info(
"Attribute {0} will be masked.".format(
object_to_mask[blacklisted_object["attribute_to_check"]]
)
)
break
if mask:
for key in blacklisted_object["attributes_to_mask"]:
if key in object_to_mask:
object_to_mask[key] = mask_with
elif (
(not globbing_enabled)
and blacklisted_object["attribute_to_check"] in object_to_mask
and object_to_mask[blacklisted_object["attribute_to_check"]]
in blacklisted_patterns
):
for key in blacklisted_object["attributes_to_mask"]:
if key in object_to_mask:
object_to_mask[key] = mask_with
|
def _recursively_mask_objects(
object_to_mask, blacklisted_object, mask_with, globbing_enabled
):
"""
This function is used by ``_mask_object()`` to mask passwords contained in
an osquery data structure (formed as a list of dicts, usually). Since the
lists can sometimes be nested, recurse through the lists.
object_to_mask
data structure to mask recursively
blacklisted_object
the blacklisted_objects entry from the mask.yaml
mask_with
masked values are replaced with this string
globbing_enabled
enable globbing in specified blacklisted patterns of mask file
"""
if isinstance(object_to_mask, list):
for child in object_to_mask:
log.debug("Recursing object {0}".format(child))
_recursively_mask_objects(
child, blacklisted_object, mask_with, globbing_enabled
)
elif (
globbing_enabled and blacklisted_object["attribute_to_check"] in object_to_mask
):
mask = False
for blacklisted_pattern in blacklisted_object["blacklisted_patterns"]:
if fnmatch.fnmatch(
object_to_mask[blacklisted_object["attribute_to_check"]],
blacklisted_pattern,
):
mask = True
log.info(
"Attribute {0} will be masked.".format(
object_to_mask[blacklisted_object["attribute_to_check"]]
)
)
break
if mask:
for key in blacklisted_object["attributes_to_mask"]:
if key in object_to_mask:
object_to_mask[key] = mask_with
elif (
(not globbing_enabled)
and blacklisted_object["attribute_to_check"] in object_to_mask
and object_to_mask[blacklisted_object["attribute_to_check"]]
in blacklisted_object["blacklisted_patterns"]
):
for key in blacklisted_object["attributes_to_mask"]:
if key in object_to_mask:
object_to_mask[key] = mask_with
|
https://github.com/hubblestack/hubble/issues/493
|
[ERROR ] name MACHINE\System\CurrentControlSet\Control\Lsa\pku2u\AllowOnlineID was not in __secdata__
[ERROR ] Exception occurred in nova module:
[ERROR ] Traceback (most recent call last):
File "C:\PROGRA~2\Hubble\hubblestack\extmods\modules\hubble.py", line 291, in _run_audit
ret = func(data_list, tags, labels, **kwargs)
File "C:\PROGRA~2\Hubble\hubblestack\files\hubblestack_nova\win_secedit.py", line 133, in audit
match_output,
UnboundLocalError: local variable 'match_output' referenced before assignment
{'Compliance': '15%',
'Errors': [{'\\win_secedit.py': {'data': "UnboundLocalError: local variable 'match_output' referenced before assignment",
'error': 'exception occurred'}}],
'Failure': [{'CIS-9.3.8': 'Ensure "Windows Firewall- Public- Logging- Size limit (KB)" is set to "16,384 KB or greater"'},
{'CIS-9.2.10': 'Ensure "Windows Firewall- Private- Logging- Log successful connections" is set to "Yes"'},
{'CIS-9.1.10': 'Ensure "Windows Firewall- Domain- Logging- Log successful connections" is set to "Yes"'},
|
UnboundLocalError
|
def update():
    """
    Update caches of the storage containers.

    Compares the md5 of the files on disk to the md5 of the blobs in the
    container, and only updates if necessary.

    Also processes deletions by walking the container caches and comparing
    with the list of blobs in the container.
    """
    for container in __opts__["azurefs"]:
        path = _get_container_path(container)
        # Ensure the cache location exists and is a directory; anything
        # else in its place is removed and replaced.
        try:
            if not os.path.exists(path):
                os.makedirs(path)
            elif not os.path.isdir(path):
                shutil.rmtree(path)
                os.makedirs(path)
        except Exception as exc:
            log.exception("Error occurred creating cache directory for azurefs")
            continue
        blob_service = _get_container_service(container)
        name = container["container_name"]
        try:
            blob_list = blob_service.list_blobs(name)
        except Exception as exc:
            log.exception("Error occurred fetching blob list for azurefs")
            continue
        # Walk the cache directory searching for deletions
        blob_names = [blob.name for blob in blob_list]
        blob_set = set(blob_names)
        for root, dirs, files in os.walk(path):
            for f in files:
                fname = os.path.join(root, f)
                relpath = os.path.relpath(fname, path)
                # Cached file no longer present in the container: remove
                # it under the fileserver lock (best-effort unlink).
                if relpath not in blob_set:
                    salt.fileserver.wait_lock(fname + ".lk", fname)
                    try:
                        os.unlink(fname)
                    except Exception:
                        pass
            # Prune directories left empty by the deletions above.
            if not dirs and not files:
                shutil.rmtree(root)
        for blob in blob_list:
            fname = os.path.join(path, blob.name)
            # NOTE: this local flag shadows the module-level update()
            # function name within the loop body.
            update = False
            if os.path.exists(fname):
                # File exists, check the hashes
                source_md5 = blob.properties.content_settings.content_md5
                local_md5 = base64.b64encode(
                    salt.utils.get_hash(fname, "md5").decode("hex")
                )
                if local_md5 != source_md5:
                    update = True
            else:
                update = True
            if update:
                if not os.path.exists(os.path.dirname(fname)):
                    os.makedirs(os.path.dirname(fname))
                # Lock writes
                lk_fn = fname + ".lk"
                salt.fileserver.wait_lock(lk_fn, fname)
                with salt.utils.fopen(lk_fn, "w+") as fp_:
                    fp_.write("")
                try:
                    blob_service.get_blob_to_path(name, blob.name, fname)
                except Exception as exc:
                    log.exception("Error occurred fetching blob from azurefs")
                    continue
                # Unlock writes
                try:
                    os.unlink(lk_fn)
                except Exception:
                    pass
        # Write out file list
        container_list = path + ".list"
        lk_fn = container_list + ".lk"
        salt.fileserver.wait_lock(lk_fn, container_list)
        with salt.utils.fopen(lk_fn, "w+") as fp_:
            fp_.write("")
        with salt.utils.fopen(container_list, "w") as fp_:
            fp_.write(json.dumps(blob_names))
        try:
            os.unlink(lk_fn)
        except Exception:
            pass
    # Invalidate the hash cache so hashes are recomputed on next use;
    # skip silently when the cache directory was never created.
    try:
        hash_cachedir = os.path.join(__opts__["cachedir"], "azurefs", "hashes")
        if os.path.exists(hash_cachedir):
            shutil.rmtree(hash_cachedir)
    except Exception:
        log.exception("Problem occurred trying to invalidate hash cach for azurefs")
|
def update():
    """
    Update caches of the storage containers.

    Compares the md5 of the files on disk to the md5 of the blobs in the
    container, and only updates if necessary.

    Also processes deletions by walking the container caches and comparing
    with the list of blobs in the container.
    """
    for container in __opts__["azurefs"]:
        path = _get_container_path(container)
        # Ensure the cache location exists and is a directory; anything
        # else in its place is removed and replaced.
        try:
            if not os.path.exists(path):
                os.makedirs(path)
            elif not os.path.isdir(path):
                shutil.rmtree(path)
                os.makedirs(path)
        except Exception as exc:
            log.exception("Error occurred creating cache directory for azurefs")
            continue
        blob_service = _get_container_service(container)
        name = container["container_name"]
        try:
            blob_list = blob_service.list_blobs(name)
        except Exception as exc:
            log.exception("Error occurred fetching blob list for azurefs")
            continue
        # Walk the cache directory searching for deletions
        blob_names = [blob.name for blob in blob_list]
        blob_set = set(blob_names)
        for root, dirs, files in os.walk(path):
            for f in files:
                fname = os.path.join(root, f)
                relpath = os.path.relpath(fname, path)
                # Cached file no longer present in the container: remove
                # it under the fileserver lock (best-effort unlink).
                if relpath not in blob_set:
                    salt.fileserver.wait_lock(fname + ".lk", fname)
                    try:
                        os.unlink(fname)
                    except Exception:
                        pass
            # Prune directories left empty by the deletions above.
            if not dirs and not files:
                shutil.rmtree(root)
        for blob in blob_list:
            fname = os.path.join(path, blob.name)
            update = False
            if os.path.exists(fname):
                # File exists, check the hashes
                source_md5 = blob.properties.content_settings.content_md5
                local_md5 = base64.b64encode(
                    salt.utils.get_hash(fname, "md5").decode("hex")
                )
                if local_md5 != source_md5:
                    update = True
            else:
                update = True
            if update:
                if not os.path.exists(os.path.dirname(fname)):
                    os.makedirs(os.path.dirname(fname))
                # Lock writes
                lk_fn = fname + ".lk"
                salt.fileserver.wait_lock(lk_fn, fname)
                with salt.utils.fopen(lk_fn, "w+") as fp_:
                    fp_.write("")
                try:
                    blob_service.get_blob_to_path(name, blob.name, fname)
                except Exception as exc:
                    log.exception("Error occurred fetching blob from azurefs")
                    continue
                # Unlock writes
                try:
                    os.unlink(lk_fn)
                except Exception:
                    pass
        # Write out file list
        container_list = path + ".list"
        lk_fn = container_list + ".lk"
        salt.fileserver.wait_lock(lk_fn, container_list)
        with salt.utils.fopen(lk_fn, "w+") as fp_:
            fp_.write("")
        with salt.utils.fopen(container_list, "w") as fp_:
            fp_.write(json.dumps(blob_names))
        try:
            os.unlink(lk_fn)
        except Exception:
            pass
    try:
        hash_cachedir = os.path.join(__opts__["cachedir"], "azurefs", "hashes")
        # BUG FIX: rmtree raised "OSError: [Errno 2] No such file or
        # directory" (and logged an error) on every run in which the hash
        # cache directory had not been created yet; only remove it when
        # it actually exists.
        if os.path.exists(hash_cachedir):
            shutil.rmtree(hash_cachedir)
    except Exception:
        log.exception("Problem occurred trying to invalidate hash cach for azurefs")
|
https://github.com/hubblestack/hubble/issues/248
|
[root@ip-10-249-71-213 ~]# hubble hubble.audit
[ERROR ] Problem occurred trying to invalidate hash cach for azurefs
Traceback (most recent call last):
File "/opt/hubble/hubble-libs/hubblestack/extmods/fileserver/azurefs.py", line 268, in update
shutil.rmtree(hash_cachedir)
File "shutil.py", line 239, in rmtree
File "shutil.py", line 237, in rmtree
OSError: [Errno 2] No such file or directory: '/var/cache/hubble/azurefs/hashes'
[ERROR ] Problem occurred trying to invalidate hash cach for azurefs
Traceback (most recent call last):
File "/opt/hubble/hubble-libs/hubblestack/extmods/fileserver/azurefs.py", line 268, in update
shutil.rmtree(hash_cachedir)
File "shutil.py", line 239, in rmtree
File "shutil.py", line 237, in rmtree
OSError: [Errno 2] No such file or directory: '/var/cache/hubble/azurefs/hashes'
[ERROR ] Problem occurred trying to invalidate hash cach for azurefs
Traceback (most recent call last):
File "/opt/hubble/hubble-libs/hubblestack/extmods/fileserver/azurefs.py", line 268, in update
shutil.rmtree(hash_cachedir)
File "shutil.py", line 239, in rmtree
File "shutil.py", line 237, in rmtree
OSError: [Errno 2] No such file or directory: '/var/cache/hubble/azurefs/hashes'
[ERROR ] Problem occurred trying to invalidate hash cach for azurefs
Traceback (most recent call last):
File "/opt/hubble/hubble-libs/hubblestack/extmods/fileserver/azurefs.py", line 268, in update
shutil.rmtree(hash_cachedir)
File "shutil.py", line 239, in rmtree
File "shutil.py", line 237, in rmtree
OSError: [Errno 2] No such file or directory: '/var/cache/hubble/azurefs/hashes'
[ERROR ] Problem occurred trying to invalidate hash cach for azurefs
Traceback (most recent call last):
File "/opt/hubble/hubble-libs/hubblestack/extmods/fileserver/azurefs.py", line 268, in update
shutil.rmtree(hash_cachedir)
File "shutil.py", line 239, in rmtree
File "shutil.py", line 237, in rmtree
OSError: [Errno 2] No such file or directory: '/var/cache/hubble/azurefs/hashes'
[ERROR ] Problem occurred trying to invalidate hash cach for azurefs
Traceback (most recent call last):
File "/opt/hubble/hubble-libs/hubblestack/extmods/fileserver/azurefs.py", line 268, in update
shutil.rmtree(hash_cachedir)
File "shutil.py", line 239, in rmtree
File "shutil.py", line 237, in rmtree
OSError: [Errno 2] No such file or directory: '/var/cache/hubble/azurefs/hashes'
{'Compliance': '97%',
'Controlled': [{'CIS-1.1.8': 'AMI does not have this partition, will fix in hubble'},
{'CIS-1.1.3': 'AMI does not have this partition, will fix in hubble'},
{'CIS-1.1.5': 'AMI does not have this partition, will fix in hubble'},
{'CIS-1.1.4': 'AMI does not have this partition, will fix in hubble'},
{'CIS-1.1.9': 'AMI does not have this partition, will fix in hubble'},
{'CIS-5.2.15': 'Disabling it as per sos ticket 1307'},
{'CIS-4.2.1.4': 'Needs investigation spartans/hubble#4'},
{'CIS-4.2.2.3': 'We do not use syslog-ng in Image factory'},
{'CIS-1.1.10': 'AMI does not have this partition, will fix in hubble'},
{'CIS-2.2.1.3': 'We do not use chrony in AMI factory, we use ntpd'},
{'CIS-1.3.2': 'We use Hubble'},
{'CIS-1.1.14': 'AMI does not have this partition, will fix in hubble'},
{'CIS-4.2.2.1': 'We do not use syslog-ng in Image factory'},
{'CIS-4.2.4': 'Logged bug as sos 1441'},
{'CIS-2.2.15': 'Bug in Hubble - spartans/hubble#1'},
{'CIS-3.3.1': 'Seems bug in Hubble - spartans/hubble#5'},
{'CIS-3.3.2': 'Seems bug in Hubble - spartans/hubble#5'}],
'Failure': [{'CIS-5.2.16': 'Ensure SSH warning banner is configured'},
{'CIS-5.2.1': 'Ensure permissions on /etc/ssh/sshd_config are configured'},
{'CIS-5.2.12': 'Ensure only approved MAC algorithms are used'},
{'CIS-6.2.6': 'Ensure root PATH Integrity'}],
'Success': [{'CIS-2.2.5': 'Ensure DHCP Server is not enabled'},
{'CIS-2.3.1': 'Ensure NIS Client is not installed'},
{'CIS-2.3.2': 'Ensure rsh client is not installed'},
{'CIS-2.3.3': 'Ensure talk client is not installed'},
{'CIS-2.3.4': 'Ensure telnet client is not installed'},
{'CIS-2.3.5': 'Ensure LDAP client is not installed'},
{'CIS-2.2.3': 'Ensure Avahi Server is not enabled'},
{'CIS-2.2.4': 'Ensure CUPS is not enabled'},
{'CIS-2.1.9': 'Ensure tftp server is not enabled'},
{'CIS-2.2.2': 'Ensure X Window System is not installed'},
{'CIS-2.2.9': 'Ensure FTP Server is not enabled'},
{'CIS-1.5.4': 'Ensure prelink is disabled'},
{'CIS-2.2.6': 'Ensure LDAP server is not enabled'},
{'CIS-2.1.6': 'Ensure rsh server is not enabled'},
{'CIS-2.1.7': 'Ensure talk server is not enabled'},
{'CIS-3.6.1': 'Ensure iptables is installed'},
{'CIS-2.1.8': 'Ensure telnet server is not enabled'},
{'CIS-2.2.16': 'Ensure NIS Server is not enabled'},
{'CIS-1.3.1': 'Ensure AIDE is installed'},
{'CIS-3.4.1': 'Ensure TCP Wrappers is installed'},
{'CIS-2.1.11': 'Ensure xinetd is not enabled'},
{'CIS-5.2.2': 'Ensure SSH Protocol is set to 2'},
{'CIS-5.3.4': 'Ensure password hashing algorithm is SHA-512'},
{'CIS-5.3.3': 'Ensure password reuse is limited'},
{'CIS-5.2.7': 'Ensure SSH HostbasedAuthentication is disabled'},
{'CIS-5.3.1': 'Ensure password creation requirements are configured'},
{'CIS-5.2.5': 'Ensure SSH MaxAuthTries is set to 4 or less'},
{'CIS-5.2.9': 'Ensure SSH PermitEmptyPasswords is disabled'},
{'CIS-1.4.2': 'Ensure authentication required for single user mode'},
{'CIS-1.4.3': 'Ensure interactive boot is not enabled'},
{'CIS-1.5.1': 'Ensure core dumps are restricted'},
{'CIS-5.2.6': 'Ensure SSH IgnoreRhosts is enabled'},
{'CIS-5.3.2': 'Ensure lockout for failed password attempts is configured'},
{'CIS-5.2.14': 'Ensure SSH LoginGraceTime is set to one minute or less'},
{'CIS-6.2.3': 'Ensure no legacy "+" entries exist in /etc/shadow'},
{'CIS-6.2.2': 'Ensure no legacy "+" entries exist in /etc/passwd'},
{'CIS-5.2.10': 'Ensure SSH PermitUserEnvironment is disabled'},
{'CIS-6.2.4': 'Ensure no legacy "+" entries exist in /etc/group'},
{'CIS-5.2.13': 'Ensure SSH Idle Timeout Interval is configured'},
{'CIS-5.2.8': 'Ensure SSH root login is disabled'},
{'CIS-3.4.2': 'Ensure /etc/hosts.allow is configured'},
{'CIS-3.4.3': 'Ensure /etc/hosts.deny is configured'},
{'CIS-1.7.1.1': 'Ensure message of the day is configured properly'},
{'CIS-5.4.1.1': 'Ensure password expiration is 90 days or less'},
{'CIS-4.2.1.3': 'Ensure rsyslog default file permissions configured'},
{'CIS-5.4.1.3': 'Ensure password expiration warning days is 7 or more'},
{'CIS-5.4.1.2': 'Ensure minimum days between password changes is 7 or more'},
{'CIS-1.1.1.4': 'Ensure mounting of hfs filesystems is disabled'},
{'CIS-1.1.1.8': 'Ensure mounting of FAT filesystems is disabled'},
{'CIS-1.1.1.6': 'Ensure mounting of squashfs filesystems is disabled'},
{'CIS-1.1.1.7': 'Ensure mounting of udf filesystems is disabled'},
{'CIS-1.1.1.1': 'Ensure mounting of cramfs filesystems is disabled'},
{'CIS-1.1.1.2': 'Ensure mounting of freevxfs filesystems is disabled'},
{'CIS-1.1.1.3': 'Ensure mounting of jffs2 filesystems is disabled'},
{'CIS-3.6.2': 'Ensure default deny firewall policy'},
{'CIS-3.6.3': 'Ensure loopback traffic is configured'},
{'CIS-1.2.3': 'Ensure gpgcheck is globally activated'},
{'CIS-1.1.1.5': 'Ensure mounting of hfsplus filesystems is disabled'},
{'CIS-5.2.4': 'Ensure SSH X11 forwarding is disabled'},
{'CIS-2.2.1.2': 'Ensure ntp is configured'},
{'CIS-5.4.1.4': 'Ensure inactive password lock is 30 days or less'},
{'CIS-5.5': 'Ensure access to the su command is restricted'},
{'CIS-5.4.4': 'Ensure default user umask is 027 or more restrictive'},
{'CIS-5.2.3': 'Ensure SSH LogLevel is set to INFO'},
{'CIS-1.1.15': 'Ensure nodev option set on /dev/shm partition'},
{'CIS-1.1.17': 'Ensure noexec option set on /dev/shm partition'},
{'CIS-1.1.16': 'Ensure nosuid option set on /dev/shm partition'},
{'CIS-1.1.19': 'Disable Automounting'},
{'CIS-2.2.7': 'Ensure NFS and RPC are not enabled'},
{'CIS-4.2.1.1': 'Ensure rsyslog Service is enabled'},
{'CIS-2.2.8': 'Ensure DNS Server is not enabled'},
{'CIS-2.1.2': 'Ensure daytime services are not enabled'},
{'CIS-2.1.3': 'Ensure discard services are not enabled'},
{'CIS-5.1.1': 'Ensure cron daemon is enabled'},
{'CIS-2.1.1': 'Ensure chargen services are not enabled'},
{'CIS-2.1.10': 'Ensure rsync service is not enabled'},
{'CIS-2.1.4': 'Ensure echo services are not enabled'},
{'CIS-2.1.5': 'Ensure time services are not enabled'},
{'CIS-2.2.14': 'Ensure SNMP Server is not enabled'},
{'CIS-2.2.11': 'Ensure IMAP and POP3 server is not enabled'},
{'CIS-2.2.10': 'Ensure HTTP server is not enabled'},
{'CIS-2.2.13': 'Ensure HTTP Proxy Server is not enabled'},
{'CIS-2.2.12': 'Ensure Samba is not enabled'},
{'CIS-6.1.9': 'Ensure permissions on /etc/gshadow- are configured'},
{'CIS-6.1.8': 'Ensure permissions on /etc/group- are configured'},
{'CIS-1.4.1': 'Ensure permissions on bootloader config are configured'},
{'CIS-3.4.4': 'Ensure permissions on /etc/hosts.allow are configured'},
{'CIS-5.1.2': 'Ensure permissions on /etc/crontab are configured'},
{'CIS-3.4.5': 'Ensure permissions on /etc/hosts.deny are configured'},
{'CIS-6.1.3': 'Ensure permissions on /etc/shadow are configured'},
{'CIS-5.1.5': 'Ensure permissions on /etc/cron.weekly are configured'},
{'CIS-5.1.4': 'Ensure permissions on /etc/cron.daily are configured'},
{'CIS-5.1.7': 'Ensure permissions on /etc/cron.d are configured'},
{'CIS-5.1.6': 'Ensure permissions on /etc/cron.monthly are configured'},
{'CIS-5.1.3': 'Ensure permissions on /etc/cron.hourly are configured'},
{'CIS-1.7.1.5': 'Ensure permissions on /etc/issue are configured'},
{'CIS-6.1.6': 'Ensure permissions on /etc/passwd- are configured'},
{'CIS-6.1.7': 'Ensure permissions on /etc/shadow- are configured'},
{'CIS-6.1.4': 'Ensure permissions on /etc/group are configured'},
{'CIS-6.1.5': 'Ensure permissions on /etc/gshadow are configured'},
{'CIS-6.1.2': 'Ensure permissions on /etc/passwd are configured'},
{'CIS-5.1.8': 'Ensure at/cron is restricted to authorized users'},
{'CIS-4.2.3': 'Ensure rsyslog or syslog-ng is installed'},
{'CIS-6.2.1': 'Ensure password fields are not empty'},
{'CIS-6.2.5': 'Ensure root is the only UID 0 account'},
{'CIS-5.2.11': 'Ensure only approved ciphers are used'},
{'CIS-6.2.9': 'Ensure users own their home directories'},
{'CIS-6.2.8': "Ensure users' home directories permissions are 750 or more restrictive"},
{'CIS-1.1.18': 'Ensure sticky bit is set on all world-writable directories'},
{'CIS-5.4.2': 'Ensure system accounts are non-login'},
{'CIS-5.4.3': 'Ensure default group for the root account is GID 0'},
{'CIS-6.1.10': 'Ensure no world writable files exist'},
{'CIS-6.1.11': 'Ensure no unowned files or directories exist'},
{'CIS-6.1.12': 'Ensure no ungrouped files or directories exist'},
{'CIS-3.2.4': 'Ensure suspicious packets are logged'},
{'CIS-3.2.5': 'Ensure broadcast ICMP requests are ignored'},
{'CIS-3.2.6': 'Ensure bogus ICMP responses are ignored'},
{'CIS-3.2.7': 'Ensure Reverse Path Filtering is enabled'},
{'CIS-3.2.1': 'Ensure source routed packets are not accepted'},
{'CIS-3.2.2': 'Ensure ICMP redirects are not accepted'},
{'CIS-3.2.3': 'Ensure secure ICMP redirects are not accepted'},
{'CIS-3.1.2': 'Ensure packet redirect sending is disabled'},
{'CIS-3.1.1': 'Ensure IP forwarding is disabled'},
{'CIS-3.2.8': 'Ensure TCP SYN Cookies is enabled'},
{'CIS-1.5.3': 'Ensure address space layout randomization (ASLR) is enabled'}]}
|
OSError
|
def queries(query_group, query_file=None, verbose=False, report_version_with_day=True):
    """
    Run the set of queries represented by ``query_group`` from the
    configuration in the file query_file

    query_group
        Group of queries to run
    query_file
        salt:// file which will be parsed for osquery queries
    verbose
        Defaults to False. If set to True, more information (such as the query
        which was run) will be included in the result.
    report_version_with_day
        Defaults to True. When True and query_group is "day", the hubble
        version report is appended to the results.

    Returns a list of per-query result dicts, or None when there is
    nothing to run (missing file, no queries for the group, or osquery
    unavailable outside the "day" group).

    CLI Examples:
    .. code_block:: bash
        salt '*' nebula.queries day
        salt '*' nebula.queries hour verbose=True
        salt '*' nebula.queries hour pillar_key=sec_osqueries
    """
    query_data = {}
    # Cap handed to osqueryi's --read_max option (100 MiB).
    MAX_FILE_SIZE = 104857600
    if query_file is None:
        if salt.utils.is_windows():
            query_file = "salt://hubblestack_nebula/hubblestack_nebula_win_queries.yaml"
        else:
            query_file = "salt://hubblestack_nebula/hubblestack_nebula_queries.yaml"
    if not isinstance(query_file, list):
        query_file = [query_file]
    # Merge every provided query file into one dict of query groups.
    for fh in query_file:
        if "salt://" in fh:
            orig_fh = fh
            fh = __salt__["cp.cache_file"](fh)
        if fh is None:
            log.error("Could not find file {0}.".format(orig_fh))
            return None
        if os.path.isfile(fh):
            with open(fh, "r") as f:
                f_data = yaml.safe_load(f)
                if not isinstance(f_data, dict):
                    raise CommandExecutionError(
                        "File data is not formed as a dict {0}".format(f_data)
                    )
                query_data = _dict_update(
                    query_data, f_data, recursive_update=True, merge_lists=True
                )
    # osquery missing entirely: the "day" group still reports baseline
    # facts gathered from grains/salt modules instead.
    if "osquerybinpath" not in __grains__:
        if query_group == "day":
            log.warning("osquery not installed on this host. Returning baseline data")
            # Match the formatting of normal osquery results. Not super
            # readable, but just add new dictionaries to the list as we need
            # more data
            ret = []
            ret.append(
                {
                    "fallback_osfinger": {
                        "data": [
                            {
                                "osfinger": __grains__.get(
                                    "osfinger", __grains__.get("osfullname")
                                ),
                                "osrelease": __grains__.get(
                                    "osrelease", __grains__.get("lsb_distrib_release")
                                ),
                            }
                        ],
                        "result": True,
                    }
                }
            )
            if "pkg.list_pkgs" in __salt__:
                ret.append(
                    {
                        "fallback_pkgs": {
                            "data": [
                                {"name": k, "version": v}
                                for k, v in __salt__["pkg.list_pkgs"]().iteritems()
                            ],
                            "result": True,
                        }
                    }
                )
            uptime = __salt__["status.uptime"]()
            if isinstance(uptime, dict):
                uptime = uptime.get("seconds", __salt__["cmd.run"]("uptime"))
            ret.append(
                {"fallback_uptime": {"data": [{"uptime": uptime}], "result": True}}
            )
            if report_version_with_day:
                ret.append(hubble_versions())
            return ret
        else:
            log.debug("osquery not installed on this host. Skipping.")
            return None
    # osquery is present but unsupported on pre-2008 Windows releases.
    if salt.utils.is_windows():
        win_version = __grains__["osfullname"]
        if (
            "2008" not in win_version
            and "2012" not in win_version
            and "2016" not in win_version
        ):
            log.error(
                "osquery does not run on windows versions earlier than Server 2008 and Windows 7"
            )
            if query_group == "day":
                ret = []
                ret.append(
                    {
                        "fallback_osfinger": {
                            "data": [
                                {
                                    "osfinger": __grains__.get(
                                        "osfinger", __grains__.get("osfullname")
                                    ),
                                    "osrelease": __grains__.get(
                                        "osrelease",
                                        __grains__.get("lsb_distrib_release"),
                                    ),
                                }
                            ],
                            "result": True,
                        }
                    }
                )
                ret.append(
                    {
                        "fallback_error": {
                            "data": "osqueryi is installed but not compatible with this version of windows",
                            "result": True,
                        }
                    }
                )
                return ret
            else:
                return None
    query_data = query_data.get(query_group, [])
    if not query_data:
        return None
    ret = []
    for query in query_data:
        name = query.get("query_name")
        query_sql = query.get("query")
        if not query_sql:
            continue
        # Run the osqueryi query
        query_ret = {
            "result": True,
        }
        cmd = [
            __grains__["osquerybinpath"],
            "--read_max",
            MAX_FILE_SIZE,
            "--json",
            query_sql,
        ]
        res = __salt__["cmd.run_all"](cmd)
        if res["retcode"] == 0:
            query_ret["data"] = json.loads(res["stdout"])
        else:
            # Failed queries carry "error" instead of "data".
            query_ret["result"] = False
            query_ret["error"] = res["stderr"]
        if verbose:
            tmp = copy.deepcopy(query)
            tmp["query_result"] = query_ret
            ret.append(tmp)
        else:
            ret.append({name: query_ret})
    if query_group == "day" and report_version_with_day:
        ret.append(hubble_versions())
    # Expand any __JSONIFY__-prefixed string values back into objects;
    # failed queries have no "data" key, hence the guard.
    for r in ret:
        for query_name, query_ret in r.iteritems():
            if "data" in query_ret:
                for result in query_ret["data"]:
                    for key, value in result.iteritems():
                        if (
                            value
                            and isinstance(value, basestring)
                            and value.startswith("__JSONIFY__")
                        ):
                            result[key] = json.loads(value[len("__JSONIFY__") :])
    return ret
|
def queries(query_group, query_file=None, verbose=False, report_version_with_day=True):
    """
    Run the set of queries represented by ``query_group`` from the
    configuration in the file query_file

    query_group
        Group of queries to run
    query_file
        salt:// file which will be parsed for osquery queries
    verbose
        Defaults to False. If set to True, more information (such as the query
        which was run) will be included in the result.
    report_version_with_day
        Defaults to True. When True and query_group is "day", the hubble
        version report is appended to the results.

    Returns a list of per-query result dicts, or None when there is
    nothing to run (missing file, no queries for the group, or osquery
    unavailable outside the "day" group).

    CLI Examples:
    .. code_block:: bash
        salt '*' nebula.queries day
        salt '*' nebula.queries hour verbose=True
        salt '*' nebula.queries hour pillar_key=sec_osqueries
    """
    query_data = {}
    # Cap handed to osqueryi's --read_max option (100 MiB).
    MAX_FILE_SIZE = 104857600
    if query_file is None:
        if salt.utils.is_windows():
            query_file = "salt://hubblestack_nebula/hubblestack_nebula_win_queries.yaml"
        else:
            query_file = "salt://hubblestack_nebula/hubblestack_nebula_queries.yaml"
    if not isinstance(query_file, list):
        query_file = [query_file]
    # Merge every provided query file into one dict of query groups.
    for fh in query_file:
        if "salt://" in fh:
            orig_fh = fh
            fh = __salt__["cp.cache_file"](fh)
        if fh is None:
            log.error("Could not find file {0}.".format(orig_fh))
            return None
        if os.path.isfile(fh):
            with open(fh, "r") as f:
                f_data = yaml.safe_load(f)
                if not isinstance(f_data, dict):
                    raise CommandExecutionError(
                        "File data is not formed as a dict {0}".format(f_data)
                    )
                query_data = _dict_update(
                    query_data, f_data, recursive_update=True, merge_lists=True
                )
    # osquery missing entirely: the "day" group still reports baseline
    # facts gathered from grains/salt modules instead.
    if "osquerybinpath" not in __grains__:
        if query_group == "day":
            log.warning("osquery not installed on this host. Returning baseline data")
            # Match the formatting of normal osquery results. Not super
            # readable, but just add new dictionaries to the list as we need
            # more data
            ret = []
            ret.append(
                {
                    "fallback_osfinger": {
                        "data": [
                            {
                                "osfinger": __grains__.get(
                                    "osfinger", __grains__.get("osfullname")
                                ),
                                "osrelease": __grains__.get(
                                    "osrelease", __grains__.get("lsb_distrib_release")
                                ),
                            }
                        ],
                        "result": True,
                    }
                }
            )
            if "pkg.list_pkgs" in __salt__:
                ret.append(
                    {
                        "fallback_pkgs": {
                            "data": [
                                {"name": k, "version": v}
                                for k, v in __salt__["pkg.list_pkgs"]().iteritems()
                            ],
                            "result": True,
                        }
                    }
                )
            uptime = __salt__["status.uptime"]()
            if isinstance(uptime, dict):
                uptime = uptime.get("seconds", __salt__["cmd.run"]("uptime"))
            ret.append(
                {"fallback_uptime": {"data": [{"uptime": uptime}], "result": True}}
            )
            if report_version_with_day:
                ret.append(hubble_versions())
            return ret
        else:
            log.debug("osquery not installed on this host. Skipping.")
            return None
    # osquery is present but unsupported on pre-2008 Windows releases.
    if salt.utils.is_windows():
        win_version = __grains__["osfullname"]
        if (
            "2008" not in win_version
            and "2012" not in win_version
            and "2016" not in win_version
        ):
            log.error(
                "osquery does not run on windows versions earlier than Server 2008 and Windows 7"
            )
            if query_group == "day":
                ret = []
                ret.append(
                    {
                        "fallback_osfinger": {
                            "data": [
                                {
                                    "osfinger": __grains__.get(
                                        "osfinger", __grains__.get("osfullname")
                                    ),
                                    "osrelease": __grains__.get(
                                        "osrelease",
                                        __grains__.get("lsb_distrib_release"),
                                    ),
                                }
                            ],
                            "result": True,
                        }
                    }
                )
                ret.append(
                    {
                        "fallback_error": {
                            "data": "osqueryi is installed but not compatible with this version of windows",
                            "result": True,
                        }
                    }
                )
                return ret
            else:
                return None
    query_data = query_data.get(query_group, [])
    if not query_data:
        return None
    ret = []
    for query in query_data:
        name = query.get("query_name")
        query_sql = query.get("query")
        if not query_sql:
            continue
        # Run the osqueryi query
        query_ret = {
            "result": True,
        }
        cmd = [
            __grains__["osquerybinpath"],
            "--read_max",
            MAX_FILE_SIZE,
            "--json",
            query_sql,
        ]
        res = __salt__["cmd.run_all"](cmd)
        if res["retcode"] == 0:
            query_ret["data"] = json.loads(res["stdout"])
        else:
            # Failed queries carry "error" instead of "data".
            query_ret["result"] = False
            query_ret["error"] = res["stderr"]
        if verbose:
            tmp = copy.deepcopy(query)
            tmp["query_result"] = query_ret
            ret.append(tmp)
        else:
            ret.append({name: query_ret})
    if query_group == "day" and report_version_with_day:
        ret.append(hubble_versions())
    # Expand any __JSONIFY__-prefixed string values back into objects.
    for r in ret:
        for query_name, query_ret in r.iteritems():
            # BUG FIX: failed queries have no "data" key (only "result"
            # and "error"), which previously raised KeyError here.
            if "data" in query_ret:
                for result in query_ret["data"]:
                    for key, value in result.iteritems():
                        if (
                            value
                            and isinstance(value, basestring)
                            and value.startswith("__JSONIFY__")
                        ):
                            result[key] = json.loads(value[len("__JSONIFY__") :])
    return ret
|
https://github.com/hubblestack/hubble/issues/210
|
2017-09-26 20:15:08 [hubblestack.daemon][ERROR ] Error executing schedule
Traceback (most recent call last):
File "hubblestack/daemon.py", line 94, in main
File "hubblestack/daemon.py", line 209, in schedule
File "/opt/hubble/hubble-libs/hubblestack/extmods/modules/nebula_osquery.py", line 202, in queries
for result in query_ret['data']:
KeyError: 'data'
|
KeyError
|
def report_progress(self, s):
    """
    Render a progress line for the download status dict ``s``.

    s
        status dict; ``s["status"]`` is ``"finished"`` or
        ``"downloading"``. Byte counts, speed, eta and elapsed keys may
        be absent or None; human-readable ``_*_str`` entries are filled
        into ``s`` here and the message template is %-formatted against
        it before being handed to ``_report_progress_status()``.
    """
    if s["status"] == "finished":
        if self.params.get("noprogress", False):
            self.to_screen("[download] Download completed")
        else:
            # total_bytes can be missing (e.g. live/unknown-length
            # downloads), so only report the size when we have it.
            if s.get("total_bytes") is not None:
                s["_total_bytes_str"] = format_bytes(s["total_bytes"])
                msg_template = "100%% of %(_total_bytes_str)s"
            else:
                msg_template = "Completed"
            if s.get("elapsed") is not None:
                s["_elapsed_str"] = self.format_seconds(s["elapsed"])
                msg_template += " in %(_elapsed_str)s"
            self._report_progress_status(msg_template % s, is_last_line=True)
    if self.params.get("noprogress"):
        return
    if s["status"] != "downloading":
        return
    if s.get("eta") is not None:
        s["_eta_str"] = self.format_eta(s["eta"])
    else:
        s["_eta_str"] = "Unknown ETA"
    # Percentage: prefer the exact total, fall back to the estimate.
    if s.get("total_bytes") and s.get("downloaded_bytes") is not None:
        s["_percent_str"] = self.format_percent(
            100 * s["downloaded_bytes"] / s["total_bytes"]
        )
    elif s.get("total_bytes_estimate") and s.get("downloaded_bytes") is not None:
        s["_percent_str"] = self.format_percent(
            100 * s["downloaded_bytes"] / s["total_bytes_estimate"]
        )
    else:
        if s.get("downloaded_bytes") == 0:
            s["_percent_str"] = self.format_percent(0)
        else:
            s["_percent_str"] = "Unknown %"
    if s.get("speed") is not None:
        s["_speed_str"] = self.format_speed(s["speed"])
    else:
        s["_speed_str"] = "Unknown speed"
    if s.get("total_bytes") is not None:
        s["_total_bytes_str"] = format_bytes(s["total_bytes"])
        msg_template = "%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s"
    elif s.get("total_bytes_estimate") is not None:
        s["_total_bytes_estimate_str"] = format_bytes(s["total_bytes_estimate"])
        msg_template = "%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s"
    else:
        if s.get("downloaded_bytes") is not None:
            s["_downloaded_bytes_str"] = format_bytes(s["downloaded_bytes"])
            if s.get("elapsed"):
                s["_elapsed_str"] = self.format_seconds(s["elapsed"])
                msg_template = (
                    "%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)"
                )
            else:
                msg_template = "%(_downloaded_bytes_str)s at %(_speed_str)s"
        else:
            # BUG FIX: the template previously contained a stray " % "
            # ("%(_percent_str)s % at ..."), which printf-style
            # formatting parses as the invalid conversion "% a" and
            # raises ValueError when this branch is reached;
            # _percent_str already carries its own "%" suffix.
            msg_template = "%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s"
    self._report_progress_status(msg_template % s)
|
def report_progress(self, s):
    """Render a human-readable progress line for a progress-hook dict *s*.

    *s* must contain a "status" key ("finished" or "downloading"); any
    other status is ignored.  Helper strings are written back into *s*
    under "_"-prefixed keys so the %-style templates below can use them.
    The rendered line goes to self._report_progress_status().

    NOTE(review): format_bytes is a module-level helper defined elsewhere
    in this file.
    """
    if s["status"] == "finished":
        if self.params.get("noprogress", False):
            self.to_screen("[download] Download completed")
        else:
            # total_bytes may legitimately be missing (e.g. output piped
            # to stdout, or a downloader that reports no size) — guard
            # before formatting it instead of raising KeyError.
            if s.get("total_bytes") is not None:
                s["_total_bytes_str"] = format_bytes(s["total_bytes"])
                msg_template = "100%% of %(_total_bytes_str)s"
            else:
                msg_template = "Completed"
            if s.get("elapsed") is not None:
                s["_elapsed_str"] = self.format_seconds(s["elapsed"])
                msg_template += " in %(_elapsed_str)s"
            self._report_progress_status(msg_template % s, is_last_line=True)
    if self.params.get("noprogress"):
        return
    if s["status"] != "downloading":
        return
    if s.get("eta") is not None:
        s["_eta_str"] = self.format_eta(s["eta"])
    else:
        s["_eta_str"] = "Unknown ETA"
    if s.get("total_bytes") and s.get("downloaded_bytes") is not None:
        s["_percent_str"] = self.format_percent(
            100 * s["downloaded_bytes"] / s["total_bytes"]
        )
    elif s.get("total_bytes_estimate") and s.get("downloaded_bytes") is not None:
        s["_percent_str"] = self.format_percent(
            100 * s["downloaded_bytes"] / s["total_bytes_estimate"]
        )
    else:
        if s.get("downloaded_bytes") == 0:
            s["_percent_str"] = self.format_percent(0)
        else:
            s["_percent_str"] = "Unknown %"
    if s.get("speed") is not None:
        s["_speed_str"] = self.format_speed(s["speed"])
    else:
        s["_speed_str"] = "Unknown speed"
    if s.get("total_bytes") is not None:
        s["_total_bytes_str"] = format_bytes(s["total_bytes"])
        msg_template = "%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s"
    elif s.get("total_bytes_estimate") is not None:
        s["_total_bytes_estimate_str"] = format_bytes(s["total_bytes_estimate"])
        msg_template = "%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s"
    else:
        if s.get("downloaded_bytes") is not None:
            s["_downloaded_bytes_str"] = format_bytes(s["downloaded_bytes"])
            if s.get("elapsed"):
                s["_elapsed_str"] = self.format_seconds(s["elapsed"])
                msg_template = (
                    "%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)"
                )
            else:
                msg_template = "%(_downloaded_bytes_str)s at %(_speed_str)s"
        else:
            # The former stray bare "% " in this template made
            # "msg_template % s" raise, since a keyless "%" spec is
            # invalid when formatting with a mapping.
            msg_template = "%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s"
    self._report_progress_status(msg_template % s)
|
https://github.com/ytdl-org/youtube-dl/issues/10809
|
pb3:Downloads jhawk$ youtube-dl --get-filename 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/'
The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4
pb3:Downloads jhawk$ youtube-dl -v -o - 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/' | tee 'The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4' | /Applications/VLC.app/Contents/MacOS/VLC -
VLC media player 2.2.4 Weatherwax (revision 2.2.4-3-g2fc51dd)
[0000000100219ee8] core libvlc: Running vlc with the default interface. Use 'cvlc' to use vlc without interface.
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'-v', u'-o', u'-', u'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/']
[debug] Encodings: locale UTF-8, fs utf-8, out None, pref UTF-8
[debug] youtube-dl version 2016.09.27
[debug] Python version 2.7.10 - Darwin-14.5.0-x86_64-i386-64bit
[debug] exe versions: ffmpeg 3.1.3, ffprobe 3.1.3, rtmpdump 2.4
[debug] Proxy map: {}
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading XML
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading StreamPack SMIL data
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading m3u8 information
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading RTMP SMIL data
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading OnceURL SMIL data
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Checking video URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Downloading m3u8 information
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-1200 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-1200 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-4400 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-4400 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-2000 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-2000 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-764 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-764 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-512 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-512 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-264 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-264 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-60 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-60 video format URL is invalid, skipping
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading JSON metadata
[debug] Invoking downloader on u'http://once-aws-us-east-1-lb.unicornmedia.com/now/media/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/468fb310-a585-11e4-bfdb-005056837bc7/b7c06387-13d6-4f72-b6f6-b8719eeb8a9f/0/0/2483/content.m3u8?visitguid=f266b2cd-c23d-4e80-9d95-5974e1bd323c&segmentlength=10&adsegmentlength=0&protocolversion=3'
[download] Destination: -
[debug] ffmpeg command line: ffmpeg -y -headers 'Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7
Accept-Language: en-us,en;q=0.5
Accept-Encoding: gzip, deflate
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)
' -i 'http://once-aws-us-east-1-lb.unicornmedia.com/now/media/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/468fb310-a585-11e4-bfdb-005056837bc7/b7c06387-13d6-4f72-b6f6-b8719eeb8a9f/0/0/2483/content.m3u8?visitguid=f266b2cd-c23d-4e80-9d95-5974e1bd323c&segmentlength=10&adsegmentlength=0&protocolversion=3' -c copy -f mpegts -
ffmpeg version 3.1.3 Copyright (c) 2000-2016 the FFmpeg developers
built with Apple LLVM version 7.0.2 (clang-700.1.81)
configuration: --prefix=/usr/local/Cellar/ffmpeg/3.1.3 --enable-shared --enable-pthreads --enable-gpl --enable-version3 --enable-hardcoded-tables --enable-avresample --cc=clang --host-cflags= --host-ldflags= --enable-opencl --enable-libx264 --enable-libmp3lame --enable-libxvid --enable-openssl --disable-lzma --enable-nonfree --enable-vda
libavutil 55. 28.100 / 55. 28.100
libavcodec 57. 48.101 / 57. 48.101
libavformat 57. 41.100 / 57. 41.100
libavdevice 57. 0.101 / 57. 0.101
libavfilter 6. 47.100 / 6. 47.100
libavresample 3. 0. 0 / 3. 0. 0
libswscale 4. 1.100 / 4. 1.100
libswresample 2. 1.100 / 2. 1.100
libpostproc 54. 0.100 / 54. 0.100
Input #0, hls,applehttp, from 'http://once-aws-us-east-1-lb.unicornmedia.com/now/media/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/468fb310-a585-11e4-bfdb-005056837bc7/b7c06387-13d6-4f72-b6f6-b8719eeb8a9f/0/0/2483/content.m3u8?visitguid=f266b2cd-c23d-4e80-9d95-5974e1bd323c&segmentlength=10&adsegmentlength=0&protocolversion=3':
Duration: 00:41:22.51, start: 1.466733, bitrate: 0 kb/s
Program 0
Metadata:
variant_bitrate : 0
Stream #0:0: Video: h264 (High) ([27][0][0][0] / 0x001B), yuv420p(tv, bt709), 1280x720 [SAR 1:1 DAR 16:9], 29.97 fps, 29.97 tbr, 90k tbn, 59.94 tbc
Stream #0:1: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 263 kb/s
[mpegts @ 0x7faf5c048200] Using AVStream.codec to pass codec parameters to muxers is deprecated, use AVStream.codecpar instead.
Last message repeated 1 times
Output #0, mpegts, to 'pipe:':
Metadata:
encoder : Lavf57.41.100
Stream #0:0: Video: h264 ([27][0][0][0] / 0x001B), yuv420p(tv, bt709), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 29.97 fps, 29.97 tbr, 90k tbn, 90k tbc
Stream #0:1: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, 263 kb/s
Stream mapping:
Stream #0:0 -> #0:0 (copy)
Stream #0:1 -> #0:1 (copy)
Press [q] to stop, [?] for help
[00000001077d4598] ts demux: MPEG-4 descriptor not found for pid 0x101 type 0xf
[00000001008df0b8] packetizer_mpeg4audio packetizer: AAC channels: 2 samplerate: 48000
shader program 1: WARNING: Output of vertex shader 'TexCoord1' not read by fragment shader
WARNING: Output of vertex shader 'TexCoord2' not read by fragment shader
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 300 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[h264 @ 0x101886800] Missing reference picture, default is 0
[h264 @ 0x101886800] Missing reference picture, default is 0
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1159 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[0000000102876eb8] clock decoder error: Timestamp conversion failed (delay 1159346, buffering 100000, bound 3000000)
[0000000102876eb8] core decoder error: Could not convert timestamp 2175695376599
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1235 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 04718.2kbits/s speed=0.963x
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1322 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1347 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1418 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1504 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[h264 @ 0x101886800] Missing reference picture, default is 0
[h264 @ 0x101886800] decode_slice_header error
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1616 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1667 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[h264 @ 0x101886800] Missing reference picture, default is 0
[h264 @ 0x101886800] decode_slice_header error
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1884 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[h264 @ 0x101886800] Missing reference picture, default is 0
[h264 @ 0x101886800] decode_slice_header error
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1950 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 2530 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[0000000102876eb8] clock decoder error: Timestamp conversion failed (delay 2530271, buffering 100000, bound 3000000)
[0000000102876eb8] core decoder error: Could not convert timestamp 2177570154622
[0000000102876eb8] clock decoder error: Timestamp conversion failed (delay 2530271, buffering 100000, bound 3000000)
[0000000102876eb8] core decoder error: Could not convert timestamp 2177668052247
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 3252 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[0000000102876eb8] clock decoder error: Timestamp conversion failed (delay 3252327, buffering 100000, bound 3000000)
[0000000102876eb8] core decoder error: Could not convert timestamp 2178285079164
frame=74401 fps= 29 q=-1.0 Lsize= 1428642kB time=00:41:22.53 bitrate=4714.3kbits/s speed=0.954x
video:1236706kB audio:78588kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 8.617682%
ERROR: unable to download video
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1791, in download
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 705, in extract_info
return self.process_ie_result(ie_result, download, extra_info)
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 751, in process_ie_result
return self.process_video_result(ie_result, download=download)
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1435, in process_video_result
self.process_info(new_info)
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1704, in process_info
raise UnavailableVideoError(err)
UnavailableVideoError: [Errno 2] No such file or directory: '-'
^Z
[1]+ Stopped youtube-dl -v -o - 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/' | tee 'The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4' | /Applications/VLC.app/Contents/MacOS/VLC -
pb3:Downloads jhawk$ jobs
[1]+ Stopped youtube-dl -v -o - 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/' | tee 'The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4' | /Applications/VLC.app/Contents/MacOS/VLC -
pb3:Downloads jhawk$ jobs -l
[1]+ 25017 Exit 1 youtube-dl -v -o - 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/'
25018 Done | tee 'The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4'
25019 Suspended: 18 | /Applications/VLC.app/Contents/MacOS/VLC -
pb3:Downloads jhawk$
|
UnavailableVideoError
|
def real_download(self, filename, info_dict):
    """Run the external downloader and fire the "finished" progress hook.

    Returns True on success, False when the external tool exited with a
    non-zero status.  When writing to stdout (filename == "-") no file
    exists on disk, so size/rename bookkeeping is skipped.
    """
    self.report_destination(filename)
    tmpfilename = self.temp_name(filename)
    try:
        retval = self._call_downloader(tmpfilename, info_dict)
    except KeyboardInterrupt:
        # For live streams a user interrupt is the normal way to stop,
        # so treat it as a clean finish; otherwise propagate.
        if not info_dict.get("is_live"):
            raise
        retval = 0
        self.to_screen("[%s] Interrupted by user" % self.get_basename())
    if retval != 0:
        self.to_stderr("\n")
        self.report_error("%s exited with code %d" % (self.get_basename(), retval))
        return False
    if filename == "-":
        # Output went straight to stdout: nothing to stat or rename.
        self._hook_progress(
            {
                "filename": filename,
                "status": "finished",
            }
        )
        return True
    size = os.path.getsize(encodeFilename(tmpfilename))
    self.to_screen("\r[%s] Downloaded %s bytes" % (self.get_basename(), size))
    self.try_rename(tmpfilename, filename)
    self._hook_progress(
        {
            "downloaded_bytes": size,
            "total_bytes": size,
            "filename": filename,
            "status": "finished",
        }
    )
    return True
|
def real_download(self, filename, info_dict):
    """Run the external downloader and fire the "finished" progress hook.

    Returns True on success, False if the downloader exited non-zero.

    Fix: when downloading to stdout (filename == "-") there is no file
    on disk, so the unconditional os.path.getsize()/try_rename() raised
    "[Errno 2] No such file or directory: '-'" (seen with "youtube-dl
    -o -").  In that case only the progress hook is fired.
    """
    self.report_destination(filename)
    tmpfilename = self.temp_name(filename)
    try:
        retval = self._call_downloader(tmpfilename, info_dict)
    except KeyboardInterrupt:
        if not info_dict.get("is_live"):
            raise
        # Live stream downloading cancellation should be considered as
        # correct and expected termination thus all postprocessing
        # should take place
        retval = 0
        self.to_screen("[%s] Interrupted by user" % self.get_basename())
    if retval == 0:
        if filename == "-":
            # Nothing was written to disk; skip size/rename bookkeeping.
            self._hook_progress(
                {
                    "filename": filename,
                    "status": "finished",
                }
            )
        else:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen("\r[%s] Downloaded %s bytes" % (self.get_basename(), fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress(
                {
                    "downloaded_bytes": fsize,
                    "total_bytes": fsize,
                    "filename": filename,
                    "status": "finished",
                }
            )
        return True
    else:
        self.to_stderr("\n")
        self.report_error("%s exited with code %d" % (self.get_basename(), retval))
        return False
|
https://github.com/ytdl-org/youtube-dl/issues/10809
|
pb3:Downloads jhawk$ youtube-dl --get-filename 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/'
The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4
pb3:Downloads jhawk$ youtube-dl -v -o - 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/' | tee 'The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4' | /Applications/VLC.app/Contents/MacOS/VLC -
VLC media player 2.2.4 Weatherwax (revision 2.2.4-3-g2fc51dd)
[0000000100219ee8] core libvlc: Running vlc with the default interface. Use 'cvlc' to use vlc without interface.
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'-v', u'-o', u'-', u'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/']
[debug] Encodings: locale UTF-8, fs utf-8, out None, pref UTF-8
[debug] youtube-dl version 2016.09.27
[debug] Python version 2.7.10 - Darwin-14.5.0-x86_64-i386-64bit
[debug] exe versions: ffmpeg 3.1.3, ffprobe 3.1.3, rtmpdump 2.4
[debug] Proxy map: {}
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading XML
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading StreamPack SMIL data
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading m3u8 information
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading RTMP SMIL data
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading OnceURL SMIL data
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Checking video URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Downloading m3u8 information
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-1200 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-1200 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-4400 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-4400 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-2000 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-2000 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-764 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-764 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-512 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-512 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-264 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-264 video format URL is invalid, skipping
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: Checking http-60 video format URL
[CBS] b7c06387-13d6-4f72-b6f6-b8719eeb8a9f: http-60 video format URL is invalid, skipping
[CBS] Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ: Downloading JSON metadata
[debug] Invoking downloader on u'http://once-aws-us-east-1-lb.unicornmedia.com/now/media/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/468fb310-a585-11e4-bfdb-005056837bc7/b7c06387-13d6-4f72-b6f6-b8719eeb8a9f/0/0/2483/content.m3u8?visitguid=f266b2cd-c23d-4e80-9d95-5974e1bd323c&segmentlength=10&adsegmentlength=0&protocolversion=3'
[download] Destination: -
[debug] ffmpeg command line: ffmpeg -y -headers 'Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7
Accept-Language: en-us,en;q=0.5
Accept-Encoding: gzip, deflate
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)
' -i 'http://once-aws-us-east-1-lb.unicornmedia.com/now/media/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/468fb310-a585-11e4-bfdb-005056837bc7/b7c06387-13d6-4f72-b6f6-b8719eeb8a9f/0/0/2483/content.m3u8?visitguid=f266b2cd-c23d-4e80-9d95-5974e1bd323c&segmentlength=10&adsegmentlength=0&protocolversion=3' -c copy -f mpegts -
ffmpeg version 3.1.3 Copyright (c) 2000-2016 the FFmpeg developers
built with Apple LLVM version 7.0.2 (clang-700.1.81)
configuration: --prefix=/usr/local/Cellar/ffmpeg/3.1.3 --enable-shared --enable-pthreads --enable-gpl --enable-version3 --enable-hardcoded-tables --enable-avresample --cc=clang --host-cflags= --host-ldflags= --enable-opencl --enable-libx264 --enable-libmp3lame --enable-libxvid --enable-openssl --disable-lzma --enable-nonfree --enable-vda
libavutil 55. 28.100 / 55. 28.100
libavcodec 57. 48.101 / 57. 48.101
libavformat 57. 41.100 / 57. 41.100
libavdevice 57. 0.101 / 57. 0.101
libavfilter 6. 47.100 / 6. 47.100
libavresample 3. 0. 0 / 3. 0. 0
libswscale 4. 1.100 / 4. 1.100
libswresample 2. 1.100 / 2. 1.100
libpostproc 54. 0.100 / 54. 0.100
Input #0, hls,applehttp, from 'http://once-aws-us-east-1-lb.unicornmedia.com/now/media/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/468fb310-a585-11e4-bfdb-005056837bc7/b7c06387-13d6-4f72-b6f6-b8719eeb8a9f/0/0/2483/content.m3u8?visitguid=f266b2cd-c23d-4e80-9d95-5974e1bd323c&segmentlength=10&adsegmentlength=0&protocolversion=3':
Duration: 00:41:22.51, start: 1.466733, bitrate: 0 kb/s
Program 0
Metadata:
variant_bitrate : 0
Stream #0:0: Video: h264 (High) ([27][0][0][0] / 0x001B), yuv420p(tv, bt709), 1280x720 [SAR 1:1 DAR 16:9], 29.97 fps, 29.97 tbr, 90k tbn, 59.94 tbc
Stream #0:1: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 263 kb/s
[mpegts @ 0x7faf5c048200] Using AVStream.codec to pass codec parameters to muxers is deprecated, use AVStream.codecpar instead.
Last message repeated 1 times
Output #0, mpegts, to 'pipe:':
Metadata:
encoder : Lavf57.41.100
Stream #0:0: Video: h264 ([27][0][0][0] / 0x001B), yuv420p(tv, bt709), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 29.97 fps, 29.97 tbr, 90k tbn, 90k tbc
Stream #0:1: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, 263 kb/s
Stream mapping:
Stream #0:0 -> #0:0 (copy)
Stream #0:1 -> #0:1 (copy)
Press [q] to stop, [?] for help
[00000001077d4598] ts demux: MPEG-4 descriptor not found for pid 0x101 type 0xf
[00000001008df0b8] packetizer_mpeg4audio packetizer: AAC channels: 2 samplerate: 48000
shader program 1: WARNING: Output of vertex shader 'TexCoord1' not read by fragment shader
WARNING: Output of vertex shader 'TexCoord2' not read by fragment shader
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 300 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[h264 @ 0x101886800] Missing reference picture, default is 0
[h264 @ 0x101886800] Missing reference picture, default is 0
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1159 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[0000000102876eb8] clock decoder error: Timestamp conversion failed (delay 1159346, buffering 100000, bound 3000000)
[0000000102876eb8] core decoder error: Could not convert timestamp 2175695376599
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1235 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 04718.2kbits/s speed=0.963x
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1322 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1347 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1418 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1504 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[h264 @ 0x101886800] Missing reference picture, default is 0
[h264 @ 0x101886800] decode_slice_header error
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1616 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1667 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[h264 @ 0x101886800] Missing reference picture, default is 0
[h264 @ 0x101886800] decode_slice_header error
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1884 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[h264 @ 0x101886800] Missing reference picture, default is 0
[h264 @ 0x101886800] decode_slice_header error
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 1950 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 2530 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[0000000102876eb8] clock decoder error: Timestamp conversion failed (delay 2530271, buffering 100000, bound 3000000)
[0000000102876eb8] core decoder error: Could not convert timestamp 2177570154622
[0000000102876eb8] clock decoder error: Timestamp conversion failed (delay 2530271, buffering 100000, bound 3000000)
[0000000102876eb8] core decoder error: Could not convert timestamp 2177668052247
[00000001077c27c8] core input error: ES_OUT_SET_(GROUP_)PCR is called too late (pts_delay increased to 3252 ms)
[00000001077c27c8] core input error: ES_OUT_RESET_PCR called
[00000001018b50b8] core decoder error: Could not convert timestamp 0
[0000000102876eb8] clock decoder error: Timestamp conversion failed (delay 3252327, buffering 100000, bound 3000000)
[0000000102876eb8] core decoder error: Could not convert timestamp 2178285079164
frame=74401 fps= 29 q=-1.0 Lsize= 1428642kB time=00:41:22.53 bitrate=4714.3kbits/s speed=0.954x
video:1236706kB audio:78588kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 8.617682%
ERROR: unable to download video
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1791, in download
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 705, in extract_info
return self.process_ie_result(ie_result, download, extra_info)
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 751, in process_ie_result
return self.process_video_result(ie_result, download=download)
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1435, in process_video_result
self.process_info(new_info)
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1704, in process_info
raise UnavailableVideoError(err)
UnavailableVideoError: [Errno 2] No such file or directory: '-'
^Z
[1]+ Stopped youtube-dl -v -o - 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/' | tee 'The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4' | /Applications/VLC.app/Contents/MacOS/VLC -
pb3:Downloads jhawk$ jobs
[1]+ Stopped youtube-dl -v -o - 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/' | tee 'The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4' | /Applications/VLC.app/Contents/MacOS/VLC -
pb3:Downloads jhawk$ jobs -l
[1]+ 25017 Exit 1 youtube-dl -v -o - 'http://www.cbs.com/shows/the-late-show-with-stephen-colbert/video/Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ/the-late-show-9-29-2016-morgan-freeman-judith-light-jimmy-eat-world-/'
25018 Done | tee 'The Late Show - 9_29_2016 (Morgan Freeman, Judith Light, Jimmy Eat World)-Kj139uP5fQfkmaoQ4tMW2BSq7vPfhmnJ.mp4'
25019 Suspended: 18 | /Applications/VLC.app/Contents/MacOS/VLC -
pb3:Downloads jhawk$
|
UnavailableVideoError
|
def js_to_json(code):
COMMENT_RE = r"/\*(?:(?!\*/).)*?\*/|//[^\n]*"
SKIP_RE = r"\s*(?:{comment})?\s*".format(comment=COMMENT_RE)
INTEGER_TABLE = (
(r"(?s)^(0[xX][0-9a-fA-F]+){skip}:?$".format(skip=SKIP_RE), 16),
(r"(?s)^(0+[0-7]+){skip}:?$".format(skip=SKIP_RE), 8),
)
def fix_kv(m):
v = m.group(0)
if v in ("true", "false", "null"):
return v
elif v.startswith("/*") or v.startswith("//") or v == ",":
return ""
if v[0] in ("'", '"'):
v = re.sub(
r'(?s)\\.|"',
lambda m: {
'"': '\\"',
"\\'": "'",
"\\\n": "",
"\\x": "\\u00",
}.get(m.group(0), m.group(0)),
v[1:-1],
)
for regex, base in INTEGER_TABLE:
im = re.match(regex, v)
if im:
i = int(im.group(1), base)
return '"%d":' % i if v.endswith(":") else "%d" % i
return '"%s"' % v
return re.sub(
r"""(?sx)
"(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
{comment}|,(?={skip}[\]}}])|
(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
\b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
[0-9]+(?={skip}:)
""".format(comment=COMMENT_RE, skip=SKIP_RE),
fix_kv,
code,
)
|
def js_to_json(code):
    """Translate a JavaScript object/array literal into parseable JSON.

    Quotes bare identifiers, rewrites single-quoted strings, strips
    comments and trailing commas, and converts hex/octal integer
    literals to decimal.

    Fix: the bare-identifier alternative used to be
    ``[a-zA-Z_][.a-zA-Z_0-9]*``, which swallowed the "e" of
    scientific-notation floats ("1.23e7" -> '1.23"e7"'), yielding
    invalid JSON (twitch clip metadata, issue #14789).  It now only
    matches e/E when not preceded by a digit.
    """
    COMMENT_RE = r"/\*(?:(?!\*/).)*?\*/|//[^\n]*"
    SKIP_RE = r"\s*(?:{comment})?\s*".format(comment=COMMENT_RE)
    INTEGER_TABLE = (
        (r"(?s)^(0[xX][0-9a-fA-F]+){skip}:?$".format(skip=SKIP_RE), 16),
        (r"(?s)^(0+[0-7]+){skip}:?$".format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        # JSON keywords pass through unchanged.
        if v in ("true", "false", "null"):
            return v
        # Comments and trailing commas are dropped entirely.
        elif v.startswith("/*") or v.startswith("//") or v == ",":
            return ""
        if v[0] in ("'", '"'):
            # Normalize quoting and escape sequences inside the literal.
            v = re.sub(
                r'(?s)\\.|"',
                lambda m: {
                    '"': '\\"',
                    "\\'": "'",
                    "\\\n": "",
                    "\\x": "\\u00",
                }.get(m.group(0), m.group(0)),
                v[1:-1],
            )
        # Hex/octal integers become decimal; a trailing ":" marks a key.
        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(1), base)
                return '"%d":' % i if v.endswith(":") else "%d" % i
        return '"%s"' % v

    return re.sub(
        r"""(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)
        """.format(comment=COMMENT_RE, skip=SKIP_RE),
        fix_kv,
        code,
    )
|
https://github.com/ytdl-org/youtube-dl/issues/14789
|
$ python -m youtube_dl --verbose https://clips.twitch.tv/CarelessZealousKangarooNerfBlueBlaster
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['--verbose', 'https://clips.twitch.tv/CarelessZealousKangarooNerfBlueBlaster']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2017.11.15
[debug] Git HEAD: f610dbb05
[debug] Python version 3.6.3 - Linux-4.12.13-1-ARCH-x86_64-with-arch
[debug] exe versions: ffmpeg 3.4, ffprobe 3.4, rtmpdump 2.4
[debug] Proxy map: {}
[twitch:clips] CarelessZealousKangarooNerfBlueBlaster: Downloading webpage
ERROR: CarelessZealousKangarooNerfBlueBlaster: Failed to parse JSON (caused by JSONDecodeError("Expecting ',' delimiter: line 38 column 78 (char 2178)",)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/tmp/youtube-dl/youtube_dl/extractor/common.py", line 686, in _parse_json
return json.loads(json_string)
File "/usr/lib/python3.6/json/__init__.py", line 354, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.6/json/decoder.py", line 339, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.6/json/decoder.py", line 355, in raw_decode
obj, end = self.scan_once(s, idx)
json.decoder.JSONDecodeError: Expecting ',' delimiter: line 38 column 78 (char 2178)
Traceback (most recent call last):
File "/tmp/youtube-dl/youtube_dl/extractor/common.py", line 686, in _parse_json
return json.loads(json_string)
File "/usr/lib/python3.6/json/__init__.py", line 354, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.6/json/decoder.py", line 339, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.6/json/decoder.py", line 355, in raw_decode
obj, end = self.scan_once(s, idx)
json.decoder.JSONDecodeError: Expecting ',' delimiter: line 38 column 78 (char 2178)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/youtube-dl/youtube_dl/YoutubeDL.py", line 784, in extract_info
ie_result = ie.extract(url)
File "/tmp/youtube-dl/youtube_dl/extractor/common.py", line 437, in extract
ie_result = self._real_extract(url)
File "/tmp/youtube-dl/youtube_dl/extractor/twitch.py", line 610, in _real_extract
video_id, transform_source=js_to_json)
File "/tmp/youtube-dl/youtube_dl/extractor/common.py", line 690, in _parse_json
raise ExtractorError(errmsg, cause=ve)
youtube_dl.utils.ExtractorError: CarelessZealousKangarooNerfBlueBlaster: Failed to parse JSON (caused by JSONDecodeError("Expecting ',' delimiter: line 38 column 78 (char 2178)",)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
|
json.decoder.JSONDecodeError
|
def _real_extract(self, url):
    """Extract an Openload video.

    Tries both the /embed/ and /f/ page variants in turn, since a video
    may be reachable under only one of them; the /f/ attempt is fatal
    because it is the last resort.  The stream URL is decoded by running
    the page's JS through PhantomJS.
    """
    video_id = self._match_id(url)
    url_pattern = "https://openload.co/%%s/%s/" % video_id
    headers = {
        "User-Agent": self._USER_AGENT,
    }
    for path in ("embed", "f"):
        page_url = url_pattern % path
        # Only the final ("f") attempt is fatal on download failure.
        last = path == "f"
        webpage = self._download_webpage(
            page_url,
            video_id,
            "Downloading %s webpage" % path,
            headers=headers,
            fatal=last,
        )
        if not webpage:
            # Non-fatal download failure; try the next page variant.
            continue
        if "File not found" in webpage or "deleted by the owner" in webpage:
            if not last:
                continue
            raise ExtractorError("File not found", expected=True, video_id=video_id)
        break
    # Run the page through PhantomJS so the obfuscated stream id is decoded
    # into the #streamurl element.
    phantom = PhantomJSwrapper(self, required_version="2.0")
    webpage, _ = phantom.get(page_url, html=webpage, video_id=video_id, headers=headers)
    decoded_id = get_element_by_id("streamurl", webpage)
    video_url = "https://openload.co/stream/%s?mime=true" % decoded_id
    # Title fallbacks: OpenGraph -> <span class="title"> -> meta description.
    title = (
        self._og_search_title(webpage, default=None)
        or self._search_regex(
            r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)',
            webpage,
            "title",
            default=None,
        )
        or self._html_search_meta("description", webpage, "title", fatal=True)
    )
    entries = self._parse_html5_media_entries(page_url, webpage, video_id)
    entry = entries[0] if entries else {}
    subtitles = entry.get("subtitles")
    info_dict = {
        "id": video_id,
        "title": title,
        "thumbnail": entry.get("thumbnail")
        or self._og_search_thumbnail(webpage, default=None),
        "url": video_url,
        # Seems all videos have extensions in their titles
        "ext": determine_ext(title, "mp4"),
        "subtitles": subtitles,
        "http_headers": headers,
    }
    return info_dict
|
def _real_extract(self, url):
    """Extract an Openload video.

    Fix: the old code only fetched the /embed/ page and raised
    'File not found' if it was missing, even though the same video could
    still be available under /f/.  Now both variants are tried in turn;
    only the final (/f/) attempt is fatal.
    """
    video_id = self._match_id(url)
    url_pattern = "https://openload.co/%%s/%s/" % video_id
    headers = {
        "User-Agent": self._USER_AGENT,
    }
    for path in ("embed", "f"):
        page_url = url_pattern % path
        # Only the last attempt is fatal on download failure.
        last = path == "f"
        webpage = self._download_webpage(
            page_url,
            video_id,
            "Downloading %s webpage" % path,
            headers=headers,
            fatal=last,
        )
        if not webpage:
            # Non-fatal download failure; try the next page variant.
            continue
        if "File not found" in webpage or "deleted by the owner" in webpage:
            if not last:
                continue
            raise ExtractorError("File not found", expected=True, video_id=video_id)
        break
    # Run the page through PhantomJS so the obfuscated stream id is decoded
    # into the #streamurl element.
    phantom = PhantomJSwrapper(self, required_version="2.0")
    webpage, _ = phantom.get(page_url, html=webpage, video_id=video_id, headers=headers)
    decoded_id = get_element_by_id("streamurl", webpage)
    video_url = "https://openload.co/stream/%s?mime=true" % decoded_id
    # Title fallbacks: OpenGraph -> <span class="title"> -> meta description.
    title = (
        self._og_search_title(webpage, default=None)
        or self._search_regex(
            r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)',
            webpage,
            "title",
            default=None,
        )
        or self._html_search_meta("description", webpage, "title", fatal=True)
    )
    entries = self._parse_html5_media_entries(page_url, webpage, video_id)
    entry = entries[0] if entries else {}
    subtitles = entry.get("subtitles")
    info_dict = {
        "id": video_id,
        "title": title,
        "thumbnail": entry.get("thumbnail")
        or self._og_search_thumbnail(webpage, default=None),
        "url": video_url,
        # Seems all videos have extensions in their titles
        "ext": determine_ext(title, "mp4"),
        "subtitles": subtitles,
        "http_headers": headers,
    }
    return info_dict
|
https://github.com/ytdl-org/youtube-dl/issues/14665
|
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: [u'https://openload.co/f/e-Ixz9ZR5L0/', u'-v']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2017.10.29
[debug] Python version 2.7.10
[debug] exe versions: ffmpeg 3.4, ffprobe 3.4, phantomjs 2.1.1
[debug] Proxy map: {}
[Openload] e-Ixz9ZR5L0: Downloading webpage
ERROR: e-Ixz9ZR5L0: File not found
Traceback (most recent call last):
File "youtube_dl/YoutubeDL.py", line 784, in extract_info
ie_result = ie.extract(url)
File "outube_dl/extractor/common.py", line 434, in extract
ie_result = self._real_extract(url)
File "youtube_dl/extractor/openload.py", line 309, in _real_extract
raise ExtractorError('File not found', expected=True, video_id=video_id)
ExtractorError: e-Ixz9ZR5L0: File not found
|
ExtractorError
|
def process_ie_result(self, ie_result, download=True, extra_info={}):
    """
    Take the result of the ie(may be modified) and resolve all unresolved
    references (URLs, playlist items).
    It will also download the videos if 'download'.
    Returns the resolved ie_result.
    """
    # NOTE(review): extra_info={} is a shared mutable default; it is only
    # read here, but confirm before ever mutating it.
    result_type = ie_result.get("_type", "video")
    if result_type in ("url", "url_transparent"):
        ie_result["url"] = sanitize_url(ie_result["url"])
        extract_flat = self.params.get("extract_flat", False)
        # With --flat-playlist, URL results inside playlists are returned
        # as-is instead of being resolved recursively.
        if (
            extract_flat == "in_playlist" and "playlist" in extra_info
        ) or extract_flat is True:
            if self.params.get("forcejson", False):
                self.to_stdout(json.dumps(ie_result))
            return ie_result
    if result_type == "video":
        self.add_extra_info(ie_result, extra_info)
        return self.process_video_result(ie_result, download=download)
    elif result_type == "url":
        # We have to add extra_info to the results because it may be
        # contained in a playlist
        return self.extract_info(
            ie_result["url"],
            download,
            ie_key=ie_result.get("ie_key"),
            extra_info=extra_info,
        )
    elif result_type == "url_transparent":
        # Use the information from the embedding page
        info = self.extract_info(
            ie_result["url"],
            ie_key=ie_result.get("ie_key"),
            extra_info=extra_info,
            download=False,
            process=False,
        )
        # extract_info may return None when ignoreerrors is enabled and
        # extraction failed with an error, don't crash and return early
        # in this case
        if not info:
            return info
        # Non-None outer fields override the inner result, except for
        # identity fields which must come from the inner extraction.
        force_properties = dict((k, v) for k, v in ie_result.items() if v is not None)
        for f in ("_type", "url", "id", "extractor", "extractor_key", "ie_key"):
            if f in force_properties:
                del force_properties[f]
        new_result = info.copy()
        new_result.update(force_properties)
        # Extracted info may not be a video result (i.e.
        # info.get('_type', 'video') != video) but rather an url or
        # url_transparent. In such cases outer metadata (from ie_result)
        # should be propagated to inner one (info). For this to happen
        # _type of info should be overridden with url_transparent. This
        # fixes issue from https://github.com/rg3/youtube-dl/pull/11163.
        if new_result.get("_type") == "url":
            new_result["_type"] = "url_transparent"
        return self.process_ie_result(
            new_result, download=download, extra_info=extra_info
        )
    elif result_type in ("playlist", "multi_video"):
        # We process each entry in the playlist
        playlist = ie_result.get("title") or ie_result.get("id")
        self.to_screen("[download] Downloading playlist: %s" % playlist)
        playlist_results = []
        playliststart = self.params.get("playliststart", 1) - 1
        playlistend = self.params.get("playlistend")
        # For backwards compatibility, interpret -1 as whole list
        if playlistend == -1:
            playlistend = None
        playlistitems_str = self.params.get("playlist_items")
        playlistitems = None
        if playlistitems_str is not None:
            # Expand "--playlist-items 1,3-5" into individual 1-based indices.
            def iter_playlistitems(format):
                for string_segment in format.split(","):
                    if "-" in string_segment:
                        start, end = string_segment.split("-")
                        for item in range(int(start), int(end) + 1):
                            yield int(item)
                    else:
                        yield int(string_segment)
            playlistitems = iter_playlistitems(playlistitems_str)
        ie_entries = ie_result["entries"]
        # Select the requested 1-based indices, silently dropping any that
        # fall outside the playlist (negative indices count from the end).
        def make_playlistitems_entries(list_ie_entries):
            num_entries = len(list_ie_entries)
            return [
                list_ie_entries[i - 1]
                for i in playlistitems
                if -num_entries <= i - 1 < num_entries
            ]
        def report_download(num_entries):
            self.to_screen(
                "[%s] playlist %s: Downloading %d videos"
                % (ie_result["extractor"], playlist, num_entries)
            )
        # Three entry sources: a concrete list, a lazily-paged PagedList,
        # or an arbitrary iterable.
        if isinstance(ie_entries, list):
            n_all_entries = len(ie_entries)
            if playlistitems:
                entries = make_playlistitems_entries(ie_entries)
            else:
                entries = ie_entries[playliststart:playlistend]
            n_entries = len(entries)
            self.to_screen(
                "[%s] playlist %s: Collected %d video ids (downloading %d of them)"
                % (ie_result["extractor"], playlist, n_all_entries, n_entries)
            )
        elif isinstance(ie_entries, PagedList):
            if playlistitems:
                entries = []
                for item in playlistitems:
                    entries.extend(ie_entries.getslice(item - 1, item))
            else:
                entries = ie_entries.getslice(playliststart, playlistend)
            n_entries = len(entries)
            report_download(n_entries)
        else:  # iterable
            if playlistitems:
                entries = make_playlistitems_entries(list(ie_entries))
            else:
                entries = list(itertools.islice(ie_entries, playliststart, playlistend))
            n_entries = len(entries)
            report_download(n_entries)
        if self.params.get("playlistreverse", False):
            entries = entries[::-1]
        if self.params.get("playlistrandom", False):
            random.shuffle(entries)
        x_forwarded_for = ie_result.get("__x_forwarded_for_ip")
        for i, entry in enumerate(entries, 1):
            self.to_screen("[download] Downloading video %s of %s" % (i, n_entries))
            # This __x_forwarded_for_ip thing is a bit ugly but requires
            # minimal changes
            if x_forwarded_for:
                entry["__x_forwarded_for_ip"] = x_forwarded_for
            extra = {
                "n_entries": n_entries,
                "playlist": playlist,
                "playlist_id": ie_result.get("id"),
                "playlist_title": ie_result.get("title"),
                "playlist_index": i + playliststart,
                "extractor": ie_result["extractor"],
                "webpage_url": ie_result["webpage_url"],
                "webpage_url_basename": url_basename(ie_result["webpage_url"]),
                "extractor_key": ie_result["extractor_key"],
            }
            # Apply --match-title/--dateafter/etc. filters before recursing.
            reason = self._match_entry(entry, incomplete=True)
            if reason is not None:
                self.to_screen("[download] " + reason)
                continue
            entry_result = self.process_ie_result(
                entry, download=download, extra_info=extra
            )
            playlist_results.append(entry_result)
        ie_result["entries"] = playlist_results
        self.to_screen("[download] Finished downloading playlist: %s" % playlist)
        return ie_result
    elif result_type == "compat_list":
        self.report_warning(
            "Extractor %s returned a compat_list result. "
            "It needs to be updated." % ie_result.get("extractor")
        )
        # Propagate the playlist-level metadata onto each legacy entry.
        def _fixup(r):
            self.add_extra_info(
                r,
                {
                    "extractor": ie_result["extractor"],
                    "webpage_url": ie_result["webpage_url"],
                    "webpage_url_basename": url_basename(ie_result["webpage_url"]),
                    "extractor_key": ie_result["extractor_key"],
                },
            )
            return r
        ie_result["entries"] = [
            self.process_ie_result(_fixup(r), download, extra_info)
            for r in ie_result["entries"]
        ]
        return ie_result
    else:
        raise Exception("Invalid result type: %s" % result_type)
|
def process_ie_result(self, ie_result, download=True, extra_info={}):
    """
    Take the result of the ie(may be modified) and resolve all unresolved
    references (URLs, playlist items).
    It will also download the videos if 'download'.
    Returns the resolved ie_result.

    Fix: when the playlist entries are a plain iterable and
    --playlist-items is given, out-of-range indices used to raise
    IndexError; they are now bounds-checked and silently dropped, the
    same way the list branch already handled them.
    """
    # NOTE(review): extra_info={} is a shared mutable default; it is only
    # read here, but confirm before ever mutating it.
    result_type = ie_result.get("_type", "video")
    if result_type in ("url", "url_transparent"):
        ie_result["url"] = sanitize_url(ie_result["url"])
        extract_flat = self.params.get("extract_flat", False)
        # With --flat-playlist, URL results inside playlists are returned
        # as-is instead of being resolved recursively.
        if (
            extract_flat == "in_playlist" and "playlist" in extra_info
        ) or extract_flat is True:
            if self.params.get("forcejson", False):
                self.to_stdout(json.dumps(ie_result))
            return ie_result
    if result_type == "video":
        self.add_extra_info(ie_result, extra_info)
        return self.process_video_result(ie_result, download=download)
    elif result_type == "url":
        # We have to add extra_info to the results because it may be
        # contained in a playlist
        return self.extract_info(
            ie_result["url"],
            download,
            ie_key=ie_result.get("ie_key"),
            extra_info=extra_info,
        )
    elif result_type == "url_transparent":
        # Use the information from the embedding page
        info = self.extract_info(
            ie_result["url"],
            ie_key=ie_result.get("ie_key"),
            extra_info=extra_info,
            download=False,
            process=False,
        )
        # extract_info may return None when ignoreerrors is enabled and
        # extraction failed with an error, don't crash and return early
        # in this case
        if not info:
            return info
        # Non-None outer fields override the inner result, except for
        # identity fields which must come from the inner extraction.
        force_properties = dict((k, v) for k, v in ie_result.items() if v is not None)
        for f in ("_type", "url", "id", "extractor", "extractor_key", "ie_key"):
            if f in force_properties:
                del force_properties[f]
        new_result = info.copy()
        new_result.update(force_properties)
        # Extracted info may not be a video result (i.e.
        # info.get('_type', 'video') != video) but rather an url or
        # url_transparent. In such cases outer metadata (from ie_result)
        # should be propagated to inner one (info). For this to happen
        # _type of info should be overridden with url_transparent. This
        # fixes issue from https://github.com/rg3/youtube-dl/pull/11163.
        if new_result.get("_type") == "url":
            new_result["_type"] = "url_transparent"
        return self.process_ie_result(
            new_result, download=download, extra_info=extra_info
        )
    elif result_type in ("playlist", "multi_video"):
        # We process each entry in the playlist
        playlist = ie_result.get("title") or ie_result.get("id")
        self.to_screen("[download] Downloading playlist: %s" % playlist)
        playlist_results = []
        playliststart = self.params.get("playliststart", 1) - 1
        playlistend = self.params.get("playlistend")
        # For backwards compatibility, interpret -1 as whole list
        if playlistend == -1:
            playlistend = None
        playlistitems_str = self.params.get("playlist_items")
        playlistitems = None
        if playlistitems_str is not None:
            # Expand "--playlist-items 1,3-5" into individual 1-based indices.
            def iter_playlistitems(format):
                for string_segment in format.split(","):
                    if "-" in string_segment:
                        start, end = string_segment.split("-")
                        for item in range(int(start), int(end) + 1):
                            yield int(item)
                    else:
                        yield int(string_segment)
            playlistitems = iter_playlistitems(playlistitems_str)
        ie_entries = ie_result["entries"]
        # Three entry sources: a concrete list, a lazily-paged PagedList,
        # or an arbitrary iterable.
        if isinstance(ie_entries, list):
            n_all_entries = len(ie_entries)
            if playlistitems:
                entries = [
                    ie_entries[i - 1]
                    for i in playlistitems
                    if -n_all_entries <= i - 1 < n_all_entries
                ]
            else:
                entries = ie_entries[playliststart:playlistend]
            n_entries = len(entries)
            self.to_screen(
                "[%s] playlist %s: Collected %d video ids (downloading %d of them)"
                % (ie_result["extractor"], playlist, n_all_entries, n_entries)
            )
        elif isinstance(ie_entries, PagedList):
            if playlistitems:
                entries = []
                for item in playlistitems:
                    entries.extend(ie_entries.getslice(item - 1, item))
            else:
                entries = ie_entries.getslice(playliststart, playlistend)
            n_entries = len(entries)
            self.to_screen(
                "[%s] playlist %s: Downloading %d videos"
                % (ie_result["extractor"], playlist, n_entries)
            )
        else:  # iterable
            if playlistitems:
                entry_list = list(ie_entries)
                num_entries = len(entry_list)
                # Bounds-check the requested indices (negative values count
                # from the end) instead of raising IndexError for
                # out-of-range --playlist-items values.
                entries = [
                    entry_list[i - 1]
                    for i in playlistitems
                    if -num_entries <= i - 1 < num_entries
                ]
            else:
                entries = list(itertools.islice(ie_entries, playliststart, playlistend))
            n_entries = len(entries)
            self.to_screen(
                "[%s] playlist %s: Downloading %d videos"
                % (ie_result["extractor"], playlist, n_entries)
            )
        if self.params.get("playlistreverse", False):
            entries = entries[::-1]
        if self.params.get("playlistrandom", False):
            random.shuffle(entries)
        x_forwarded_for = ie_result.get("__x_forwarded_for_ip")
        for i, entry in enumerate(entries, 1):
            self.to_screen("[download] Downloading video %s of %s" % (i, n_entries))
            # This __x_forwarded_for_ip thing is a bit ugly but requires
            # minimal changes
            if x_forwarded_for:
                entry["__x_forwarded_for_ip"] = x_forwarded_for
            extra = {
                "n_entries": n_entries,
                "playlist": playlist,
                "playlist_id": ie_result.get("id"),
                "playlist_title": ie_result.get("title"),
                "playlist_index": i + playliststart,
                "extractor": ie_result["extractor"],
                "webpage_url": ie_result["webpage_url"],
                "webpage_url_basename": url_basename(ie_result["webpage_url"]),
                "extractor_key": ie_result["extractor_key"],
            }
            # Apply --match-title/--dateafter/etc. filters before recursing.
            reason = self._match_entry(entry, incomplete=True)
            if reason is not None:
                self.to_screen("[download] " + reason)
                continue
            entry_result = self.process_ie_result(
                entry, download=download, extra_info=extra
            )
            playlist_results.append(entry_result)
        ie_result["entries"] = playlist_results
        self.to_screen("[download] Finished downloading playlist: %s" % playlist)
        return ie_result
    elif result_type == "compat_list":
        self.report_warning(
            "Extractor %s returned a compat_list result. "
            "It needs to be updated." % ie_result.get("extractor")
        )
        # Propagate the playlist-level metadata onto each legacy entry.
        def _fixup(r):
            self.add_extra_info(
                r,
                {
                    "extractor": ie_result["extractor"],
                    "webpage_url": ie_result["webpage_url"],
                    "webpage_url_basename": url_basename(ie_result["webpage_url"]),
                    "extractor_key": ie_result["extractor_key"],
                },
            )
            return r
        ie_result["entries"] = [
            self.process_ie_result(_fixup(r), download, extra_info)
            for r in ie_result["entries"]
        ]
        return ie_result
    else:
        raise Exception("Invalid result type: %s" % result_type)
|
https://github.com/ytdl-org/youtube-dl/issues/14425
|
[youtube:playlist] Downloading playlist PLbIzmw8qvgXRPK9ozTWhHfyzFvG8fN-Qz - add --no-playlist to just download video qEkj2c1HCJs
[youtube:playlist] PLbIzmw8qvgXRPK9ozTWhHfyzFvG8fN-Qz: Downloading webpage
[download] Downloading playlist: Amansız Övücüler
Traceback (most recent call last):
File "<stdin>", line 11, in <module>
File "/home/ubuntu/.virtualenvs/putio/lib/python3.5/site-packages/youtube_dl/YoutubeDL.py", line 1974, in download
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
File "/home/ubuntu/.virtualenvs/putio/lib/python3.5/site-packages/youtube_dl/YoutubeDL.py", line 794, in extract_info
return self.process_ie_result(ie_result, download, extra_info)
File "/home/ubuntu/.virtualenvs/putio/lib/python3.5/site-packages/youtube_dl/YoutubeDL.py", line 943, in process_ie_result
entries = [entry_list[i - 1] for i in playlistitems]
File "/home/ubuntu/.virtualenvs/putio/lib/python3.5/site-packages/youtube_dl/YoutubeDL.py", line 943, in <listcomp>
entries = [entry_list[i - 1] for i in playlistitems]
IndexError: list index out of range
list index out of range
|
IndexError
|
def _real_extract(self, url):
    """Extract a CDA video.

    Forces the HTML5 player via a cookie, handles the premium-only and
    age-confirmation pages, then collects one format per quality variant
    by re-downloading the page for each quality link.
    """
    video_id = self._match_id(url)
    # Force the HTML5 player so player_data JSON is embedded in the page.
    self._set_cookie("cda.pl", "cda.player", "html5")
    webpage = self._download_webpage(self._BASE_URL + "/video/" + video_id, video_id)
    if "Ten film jest dostępny dla użytkowników premium" in webpage:
        raise ExtractorError(
            "This video is only available for premium users.", expected=True
        )
    need_confirm_age = False
    # Age-gated videos present a birthday form; confirm it and reload.
    if self._html_search_regex(
        r'(<form[^>]+action="/a/validatebirth")',
        webpage,
        "birthday validate form",
        default=None,
    ):
        webpage = self._download_age_confirm_page(url, video_id, note="Confirming age")
        need_confirm_age = True
    formats = []
    uploader = self._search_regex(
        r"""(?x)
        <(span|meta)[^>]+itemprop=(["\'])author\2[^>]*>
        (?:<\1[^>]*>[^<]*</\1>|(?!</\1>)(?:.|\n))*?
        <(span|meta)[^>]+itemprop=(["\'])name\4[^>]*>(?P<uploader>[^<]+)</\3>
        """,
        webpage,
        "uploader",
        default=None,
        group="uploader",
    )
    view_count = self._search_regex(
        r"Odsłony:(?:\s| )*([0-9]+)", webpage, "view_count", default=None
    )
    average_rating = self._search_regex(
        r'<(?:span|meta)[^>]+itemprop=(["\'])ratingValue\1[^>]*>(?P<rating_value>[0-9.]+)',
        webpage,
        "rating",
        fatal=False,
        group="rating_value",
    )
    # formats/duration are filled in incrementally by extract_format below.
    info_dict = {
        "id": video_id,
        "title": self._og_search_title(webpage),
        "description": self._og_search_description(webpage),
        "uploader": uploader,
        "view_count": int_or_none(view_count),
        "average_rating": float_or_none(average_rating),
        "thumbnail": self._og_search_thumbnail(webpage),
        "formats": formats,
        "duration": None,
        "age_limit": 18 if need_confirm_age else 0,
    }
    # Parse one page variant's player_data JSON and append its format.
    # _html_search_regex is used (not _search_regex) so HTML entities in
    # the attribute value are unescaped before JSON parsing.
    def extract_format(page, version):
        json_str = self._html_search_regex(
            r'player_data=(\\?["\'])(?P<player_data>.+?)\1',
            page,
            "%s player_json" % version,
            fatal=False,
            group="player_data",
        )
        if not json_str:
            return
        player_data = self._parse_json(
            json_str, "%s player_data" % version, fatal=False
        )
        if not player_data:
            return
        video = player_data.get("video")
        if not video or "file" not in video:
            self.report_warning("Unable to extract %s version information" % version)
            return
        # "uggc" is ROT13 for "http": the file URL is lightly obfuscated.
        if video["file"].startswith("uggc"):
            video["file"] = codecs.decode(video["file"], "rot_13")
        if video["file"].endswith("adc.mp4"):
            video["file"] = video["file"].replace("adc.mp4", ".mp4")
        f = {
            "url": video["file"],
        }
        m = re.search(
            r'<a[^>]+data-quality="(?P<format_id>[^"]+)"[^>]+href="[^"]+"[^>]+class="[^"]*quality-btn-active[^"]*">(?P<height>[0-9]+)p',
            page,
        )
        if m:
            f.update(
                {
                    "format_id": m.group("format_id"),
                    "height": int(m.group("height")),
                }
            )
        info_dict["formats"].append(f)
        if not info_dict["duration"]:
            info_dict["duration"] = parse_duration(video.get("duration"))
    extract_format(webpage, "default")
    # Each additional quality has its own page; download and parse each.
    for href, resolution in re.findall(
        r'<a[^>]+data-quality="[^"]+"[^>]+href="([^"]+)"[^>]+class="quality-btn"[^>]*>([0-9]+p)',
        webpage,
    ):
        if need_confirm_age:
            handler = self._download_age_confirm_page
        else:
            handler = self._download_webpage
        webpage = handler(
            self._BASE_URL + href,
            video_id,
            "Downloading %s version information" % resolution,
            fatal=False,
        )
        if not webpage:
            # Manually report warning because empty page is returned when
            # invalid version is requested.
            self.report_warning(
                "Unable to download %s version information" % resolution
            )
            continue
        extract_format(webpage, resolution)
    self._sort_formats(formats)
    return info_dict
|
def _real_extract(self, url):
    """Extract a CDA video.

    Fix: extract_format now uses _html_search_regex instead of
    _search_regex for the player_data attribute.  The attribute value is
    HTML-escaped in the page, so without entity unescaping the JSON
    parse failed with 'Expecting property name enclosed in double
    quotes' and no formats were found (issue #13935).
    """
    video_id = self._match_id(url)
    # Force the HTML5 player so player_data JSON is embedded in the page.
    self._set_cookie("cda.pl", "cda.player", "html5")
    webpage = self._download_webpage(self._BASE_URL + "/video/" + video_id, video_id)
    if "Ten film jest dostępny dla użytkowników premium" in webpage:
        raise ExtractorError(
            "This video is only available for premium users.", expected=True
        )
    need_confirm_age = False
    # Age-gated videos present a birthday form; confirm it and reload.
    if self._html_search_regex(
        r'(<form[^>]+action="/a/validatebirth")',
        webpage,
        "birthday validate form",
        default=None,
    ):
        webpage = self._download_age_confirm_page(url, video_id, note="Confirming age")
        need_confirm_age = True
    formats = []
    uploader = self._search_regex(
        r"""(?x)
        <(span|meta)[^>]+itemprop=(["\'])author\2[^>]*>
        (?:<\1[^>]*>[^<]*</\1>|(?!</\1>)(?:.|\n))*?
        <(span|meta)[^>]+itemprop=(["\'])name\4[^>]*>(?P<uploader>[^<]+)</\3>
        """,
        webpage,
        "uploader",
        default=None,
        group="uploader",
    )
    view_count = self._search_regex(
        r"Odsłony:(?:\s| )*([0-9]+)", webpage, "view_count", default=None
    )
    average_rating = self._search_regex(
        r'<(?:span|meta)[^>]+itemprop=(["\'])ratingValue\1[^>]*>(?P<rating_value>[0-9.]+)',
        webpage,
        "rating",
        fatal=False,
        group="rating_value",
    )
    # formats/duration are filled in incrementally by extract_format below.
    info_dict = {
        "id": video_id,
        "title": self._og_search_title(webpage),
        "description": self._og_search_description(webpage),
        "uploader": uploader,
        "view_count": int_or_none(view_count),
        "average_rating": float_or_none(average_rating),
        "thumbnail": self._og_search_thumbnail(webpage),
        "formats": formats,
        "duration": None,
        "age_limit": 18 if need_confirm_age else 0,
    }
    # Parse one page variant's player_data JSON and append its format.
    def extract_format(page, version):
        # _html_search_regex unescapes HTML entities (&quot; -> ") in the
        # matched attribute value, which is required for valid JSON.
        json_str = self._html_search_regex(
            r'player_data=(\\?["\'])(?P<player_data>.+?)\1',
            page,
            "%s player_json" % version,
            fatal=False,
            group="player_data",
        )
        if not json_str:
            return
        player_data = self._parse_json(
            json_str, "%s player_data" % version, fatal=False
        )
        if not player_data:
            return
        video = player_data.get("video")
        if not video or "file" not in video:
            self.report_warning("Unable to extract %s version information" % version)
            return
        # "uggc" is ROT13 for "http": the file URL is lightly obfuscated.
        if video["file"].startswith("uggc"):
            video["file"] = codecs.decode(video["file"], "rot_13")
        if video["file"].endswith("adc.mp4"):
            video["file"] = video["file"].replace("adc.mp4", ".mp4")
        f = {
            "url": video["file"],
        }
        m = re.search(
            r'<a[^>]+data-quality="(?P<format_id>[^"]+)"[^>]+href="[^"]+"[^>]+class="[^"]*quality-btn-active[^"]*">(?P<height>[0-9]+)p',
            page,
        )
        if m:
            f.update(
                {
                    "format_id": m.group("format_id"),
                    "height": int(m.group("height")),
                }
            )
        info_dict["formats"].append(f)
        if not info_dict["duration"]:
            info_dict["duration"] = parse_duration(video.get("duration"))
    extract_format(webpage, "default")
    # Each additional quality has its own page; download and parse each.
    for href, resolution in re.findall(
        r'<a[^>]+data-quality="[^"]+"[^>]+href="([^"]+)"[^>]+class="quality-btn"[^>]*>([0-9]+p)',
        webpage,
    ):
        if need_confirm_age:
            handler = self._download_age_confirm_page
        else:
            handler = self._download_webpage
        webpage = handler(
            self._BASE_URL + href,
            video_id,
            "Downloading %s version information" % resolution,
            fatal=False,
        )
        if not webpage:
            # Manually report warning because empty page is returned when
            # invalid version is requested.
            self.report_warning(
                "Unable to download %s version information" % resolution
            )
            continue
        extract_format(webpage, resolution)
    self._sort_formats(formats)
    return info_dict
|
https://github.com/ytdl-org/youtube-dl/issues/13935
|
youtube-dl -gvv https://www.cda.pl/video/9443700b
[debug] System config: [u'--prefer-free-formats']
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: [u'-gvv', u'https://www.cda.pl/video/9443700b']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2017.08.13
[debug] Python version 2.7.13 - Linux-4.11.12-200.fc25.x86_64-x86_64-with-fedora-25-Gurgle
[debug] exe versions: ffmpeg 3.1.9, ffprobe 3.1.9
[debug] Proxy map: {}
WARNING: [CDA] default player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
WARNING: [CDA] 480p player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
WARNING: [CDA] 720p player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
WARNING: [CDA] 1080p player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
ERROR: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 776, in extract_info
ie_result = ie.extract(url)
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 433, in extract
ie_result = self._real_extract(url)
File "/usr/bin/youtube-dl/youtube_dl/extractor/cda.py", line 180, in _real_extract
self._sort_formats(formats)
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 1057, in _sort_formats
raise ExtractorError('No video formats found')
ExtractorError: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
|
ExtractorError
|
def extract_format(page, version):
    """Parse the player_data JSON embedded in *page* and append the
    resulting format to info_dict['formats']; fills in the duration
    the first time it becomes available.

    NOTE(review): relies on enclosing-scope names self, info_dict,
    parse_duration — this is a closure extracted from _real_extract.
    """
    raw_player = self._html_search_regex(
        r'player_data=(\\?["\'])(?P<player_data>.+?)\1',
        page,
        "%s player_json" % version,
        fatal=False,
        group="player_data",
    )
    if not raw_player:
        return
    player_data = self._parse_json(raw_player, "%s player_data" % version, fatal=False)
    if not player_data:
        return
    video = player_data.get("video")
    if not video or "file" not in video:
        self.report_warning("Unable to extract %s version information" % version)
        return
    # "uggc" is ROT13 for "http": the file URL is lightly obfuscated.
    if video["file"].startswith("uggc"):
        video["file"] = codecs.decode(video["file"], "rot_13")
    if video["file"].endswith("adc.mp4"):
        video["file"] = video["file"].replace("adc.mp4", ".mp4")
    fmt = {
        "url": video["file"],
    }
    quality_match = re.search(
        r'<a[^>]+data-quality="(?P<format_id>[^"]+)"[^>]+href="[^"]+"[^>]+class="[^"]*quality-btn-active[^"]*">(?P<height>[0-9]+)p',
        page,
    )
    if quality_match:
        fmt["format_id"] = quality_match.group("format_id")
        fmt["height"] = int(quality_match.group("height"))
    info_dict["formats"].append(fmt)
    if not info_dict["duration"]:
        info_dict["duration"] = parse_duration(video.get("duration"))
|
def extract_format(page, version):
    """Parse the player_data JSON embedded in *page* and append the
    resulting format to info_dict['formats']; fills in the duration
    the first time it becomes available.

    Fix: uses _html_search_regex instead of _search_regex so HTML
    entities in the player_data attribute value (&quot; etc.) are
    unescaped before JSON parsing; the raw value failed to parse and
    no formats were extracted (issue #13935).

    NOTE(review): relies on enclosing-scope names self, info_dict,
    parse_duration — this is a closure extracted from _real_extract.
    """
    json_str = self._html_search_regex(
        r'player_data=(\\?["\'])(?P<player_data>.+?)\1',
        page,
        "%s player_json" % version,
        fatal=False,
        group="player_data",
    )
    if not json_str:
        return
    player_data = self._parse_json(json_str, "%s player_data" % version, fatal=False)
    if not player_data:
        return
    video = player_data.get("video")
    if not video or "file" not in video:
        self.report_warning("Unable to extract %s version information" % version)
        return
    # "uggc" is ROT13 for "http": the file URL is lightly obfuscated.
    if video["file"].startswith("uggc"):
        video["file"] = codecs.decode(video["file"], "rot_13")
    if video["file"].endswith("adc.mp4"):
        video["file"] = video["file"].replace("adc.mp4", ".mp4")
    f = {
        "url": video["file"],
    }
    m = re.search(
        r'<a[^>]+data-quality="(?P<format_id>[^"]+)"[^>]+href="[^"]+"[^>]+class="[^"]*quality-btn-active[^"]*">(?P<height>[0-9]+)p',
        page,
    )
    if m:
        f.update(
            {
                "format_id": m.group("format_id"),
                "height": int(m.group("height")),
            }
        )
    info_dict["formats"].append(f)
    if not info_dict["duration"]:
        info_dict["duration"] = parse_duration(video.get("duration"))
|
https://github.com/ytdl-org/youtube-dl/issues/13935
|
youtube-dl -gvv https://www.cda.pl/video/9443700b
[debug] System config: [u'--prefer-free-formats']
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: [u'-gvv', u'https://www.cda.pl/video/9443700b']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2017.08.13
[debug] Python version 2.7.13 - Linux-4.11.12-200.fc25.x86_64-x86_64-with-fedora-25-Gurgle
[debug] exe versions: ffmpeg 3.1.9, ffprobe 3.1.9
[debug] Proxy map: {}
WARNING: [CDA] default player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
WARNING: [CDA] 480p player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
WARNING: [CDA] 720p player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
WARNING: [CDA] 1080p player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
ERROR: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 776, in extract_info
ie_result = ie.extract(url)
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 433, in extract
ie_result = self._real_extract(url)
File "/usr/bin/youtube-dl/youtube_dl/extractor/cda.py", line 180, in _real_extract
self._sort_formats(formats)
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 1057, in _sort_formats
raise ExtractorError('No video formats found')
ExtractorError: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
|
ExtractorError
|
def unescapeHTML(s):
    """Decode HTML character references in *s* and return the result.

    ``None`` passes through unchanged; any other input must already be a
    text (``compat_str``) object.
    """
    if s is None:
        return None
    assert type(s) == compat_str

    def _decode_entity(mobj):
        # Hand the matched reference (name/number plus trailing ';',
        # without the leading '&') to the shared transform helper.
        return _htmlentity_transform(mobj.group(1))

    # An entity is '&' followed by a run of chars that contains neither
    # '&' nor ';', terminated by ';' — so a bare '&' never swallows a
    # later entity.
    return re.sub(r"&([^&;]+;)", _decode_entity, s)
|
def unescapeHTML(s):
    """Decode HTML character references in *s* and return the result.

    ``None`` passes through unchanged; any other input must already be a
    text (``compat_str``) object.
    """
    if s is None:
        return None
    assert type(s) == compat_str
    # Exclude '&' from the entity body: with the old class ([^;]+) a bare
    # ampersand would greedily match across a following real entity
    # (e.g. 'a & b &amp; c' fed 'a & b &amp' to the transform), producing
    # garbage output. [^&;]+ restarts matching at each '&' instead.
    return re.sub(r"&([^&;]+;)", lambda m: _htmlentity_transform(m.group(1)), s)
|
https://github.com/ytdl-org/youtube-dl/issues/13935
|
youtube-dl -gvv https://www.cda.pl/video/9443700b
[debug] System config: [u'--prefer-free-formats']
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: [u'-gvv', u'https://www.cda.pl/video/9443700b']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2017.08.13
[debug] Python version 2.7.13 - Linux-4.11.12-200.fc25.x86_64-x86_64-with-fedora-25-Gurgle
[debug] exe versions: ffmpeg 3.1.9, ffprobe 3.1.9
[debug] Proxy map: {}
WARNING: [CDA] default player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
WARNING: [CDA] 480p player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
WARNING: [CDA] 720p player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
WARNING: [CDA] 1080p player_data: Failed to parse JSON Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
ERROR: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 776, in extract_info
ie_result = ie.extract(url)
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 433, in extract
ie_result = self._real_extract(url)
File "/usr/bin/youtube-dl/youtube_dl/extractor/cda.py", line 180, in _real_extract
self._sort_formats(formats)
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 1057, in _sort_formats
raise ExtractorError('No video formats found')
ExtractorError: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
|
ExtractorError
|
def _real_extract(self, url):
    """Extract metadata and the direct media URL for one Niconico video.

    Flow: download the watch page (mainly for session cookies), prefer the
    HTML5 player's ``data-api-data`` JSON for the direct ("smile") URL and
    metadata, and otherwise fall back to the legacy getflv API plus the
    getthumbinfo XML endpoint.  Returns a single-video info dict.
    """
    video_id = self._match_id(url)
    # Get video webpage. We are not actually interested in it for normal
    # cases, but need the cookies in order to be able to download the
    # info webpage
    webpage, handle = self._download_webpage_handle(
        "http://www.nicovideo.jp/watch/" + video_id, video_id
    )
    if video_id.startswith("so"):
        # "so"-prefixed (channel) ids redirect; re-extract the id from the
        # final URL after following the redirect.
        video_id = self._match_id(handle.geturl())
    # JSON embedded for the HTML5 player; defaults to {} when absent so the
    # try_get below simply yields None.
    api_data = self._parse_json(
        self._html_search_regex(
            'data-api-data="([^"]+)"', webpage, "API data", default="{}"
        ),
        video_id,
    )
    video_real_url = try_get(api_data, lambda x: x["video"]["smileInfo"]["url"])
    if video_real_url:
        # HTML5 path: metadata lookups go against the api_data JSON.
        def get_video_info(items):
            return dict_get(api_data["video"], items)
    else:
        # Get flv info
        flv_info_webpage = self._download_webpage(
            "http://flapi.nicovideo.jp/api/getflv/" + video_id + "?as3=1",
            video_id,
            "Downloading flv info",
        )
        flv_info = compat_urlparse.parse_qs(flv_info_webpage)
        if "url" not in flv_info:
            # Translate the getflv failure markers into user-facing errors.
            if "deleted" in flv_info:
                raise ExtractorError("The video has been deleted.", expected=True)
            elif "closed" in flv_info:
                raise ExtractorError(
                    "Niconico videos now require logging in", expected=True
                )
            elif "error" in flv_info:
                raise ExtractorError(
                    "%s reports error: %s" % (self.IE_NAME, flv_info["error"][0]),
                    expected=True,
                )
            else:
                raise ExtractorError("Unable to find video URL")
        video_real_url = flv_info["url"][0]
        video_info_xml = self._download_xml(
            "http://ext.nicovideo.jp/api/getthumbinfo/" + video_id,
            video_id,
            note="Downloading video info page",
        )
        # Legacy path: metadata lookups go against the getthumbinfo XML;
        # returns the first non-empty match among the given tag name(s).
        def get_video_info(items):
            if not isinstance(items, list):
                items = [items]
            for item in items:
                ret = xpath_text(video_info_xml, ".//" + item)
                if ret:
                    return ret
    # Start extracting information
    title = get_video_info("title")
    if not title:
        title = self._og_search_title(webpage, default=None)
    if not title:
        title = self._html_search_regex(
            r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
            webpage,
            "video title",
        )
    # Older pages embed a JSON blob in #watchAPIDataContainer; used only as
    # a further fallback source (video_detail) below.
    watch_api_data_string = self._html_search_regex(
        r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
        webpage,
        "watch api data",
        default=None,
    )
    watch_api_data = (
        self._parse_json(watch_api_data_string, video_id)
        if watch_api_data_string
        else {}
    )
    video_detail = watch_api_data.get("videoDetail", {})
    extension = get_video_info(["movie_type", "movieType"])
    if not extension:
        extension = determine_ext(video_real_url)
    thumbnail = (
        get_video_info(["thumbnail_url", "thumbnailURL"])
        or self._html_search_meta("image", webpage, "thumbnail", default=None)
        or video_detail.get("thumbnail")
    )
    description = get_video_info("description")
    timestamp = parse_iso8601(get_video_info("first_retrieve")) or unified_timestamp(
        get_video_info("postedDateTime")
    )
    if not timestamp:
        match = self._html_search_meta(
            "datePublished", webpage, "date published", default=None
        )
        if match:
            # Inserts ":00" before the "+hh:mm" offset — the meta value
            # apparently lacks a seconds field; verify against a live page.
            timestamp = parse_iso8601(match.replace("+", ":00+"))
    if not timestamp and video_detail.get("postedAt"):
        # postedAt uses "YYYY/MM/DD hh:mm:ss" in UTC+9 (presumably JST).
        timestamp = parse_iso8601(
            video_detail["postedAt"].replace("/", "-"),
            delimiter=" ",
            timezone=datetime.timedelta(hours=9),
        )
    view_count = int_or_none(get_video_info(["view_counter", "viewCount"]))
    if not view_count:
        match = self._html_search_regex(
            r">Views: <strong[^>]*>([^<]+)</strong>",
            webpage,
            "view count",
            default=None,
        )
        if match:
            # Strip thousands separators before the int conversion.
            view_count = int_or_none(match.replace(",", ""))
    view_count = view_count or video_detail.get("viewCount")
    comment_count = (
        int_or_none(get_video_info("comment_num"))
        or video_detail.get("commentCount")
        or try_get(api_data, lambda x: x["thread"]["commentCount"])
    )
    if not comment_count:
        match = self._html_search_regex(
            r">Comments: <strong[^>]*>([^<]+)</strong>",
            webpage,
            "comment count",
            default=None,
        )
        if match:
            comment_count = int_or_none(match.replace(",", ""))
    duration = (
        parse_duration(
            get_video_info("length")
            or self._html_search_meta(
                "video:duration", webpage, "video duration", default=None
            )
        )
        or video_detail.get("length")
        or get_video_info("duration")
    )
    webpage_url = get_video_info("watch_url") or url
    owner = api_data.get("owner", {})
    uploader_id = get_video_info(["ch_id", "user_id"]) or owner.get("id")
    uploader = get_video_info(["ch_name", "user_nickname"]) or owner.get("nickname")
    return {
        "id": video_id,
        "url": video_real_url,
        "title": title,
        "ext": extension,
        # URLs ending in "low" are the reduced-bitrate "economy" variant.
        "format_id": "economy" if video_real_url.endswith("low") else "normal",
        "thumbnail": thumbnail,
        "description": description,
        "uploader": uploader,
        "timestamp": timestamp,
        "uploader_id": uploader_id,
        "view_count": view_count,
        "comment_count": comment_count,
        "duration": duration,
        "webpage_url": webpage_url,
    }
|
def _real_extract(self, url):
    """Extract metadata and the direct media URL for one Niconico video.

    Legacy flow: download the watch page (mainly for session cookies),
    fetch metadata from the getthumbinfo XML endpoint, and obtain the
    direct video URL from the getflv API.  Returns a single-video info
    dict.  NOTE(review): there is no HTML5 ``data-api-data`` fallback
    here, so videos served only via the newer API cannot be resolved.
    """
    video_id = self._match_id(url)
    # Get video webpage. We are not actually interested in it for normal
    # cases, but need the cookies in order to be able to download the
    # info webpage
    webpage, handle = self._download_webpage_handle(
        "http://www.nicovideo.jp/watch/" + video_id, video_id
    )
    if video_id.startswith("so"):
        # "so"-prefixed (channel) ids redirect; re-extract the id from the
        # final URL after following the redirect.
        video_id = self._match_id(handle.geturl())
    video_info = self._download_xml(
        "http://ext.nicovideo.jp/api/getthumbinfo/" + video_id,
        video_id,
        note="Downloading video info page",
    )
    # Get flv info
    flv_info_webpage = self._download_webpage(
        "http://flapi.nicovideo.jp/api/getflv/" + video_id + "?as3=1",
        video_id,
        "Downloading flv info",
    )
    flv_info = compat_urlparse.parse_qs(flv_info_webpage)
    if "url" not in flv_info:
        # Translate the getflv failure markers into user-facing errors.
        if "deleted" in flv_info:
            raise ExtractorError("The video has been deleted.", expected=True)
        elif "closed" in flv_info:
            raise ExtractorError(
                "Niconico videos now require logging in", expected=True
            )
        elif "error" in flv_info:
            raise ExtractorError(
                "%s reports error: %s" % (self.IE_NAME, flv_info["error"][0]),
                expected=True,
            )
        else:
            raise ExtractorError("Unable to find video URL")
    video_real_url = flv_info["url"][0]
    # Start extracting information
    title = xpath_text(video_info, ".//title")
    if not title:
        title = self._og_search_title(webpage, default=None)
    if not title:
        title = self._html_search_regex(
            r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
            webpage,
            "video title",
        )
    # Older pages embed a JSON blob in #watchAPIDataContainer; used only as
    # a further fallback source (video_detail) below.
    watch_api_data_string = self._html_search_regex(
        r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
        webpage,
        "watch api data",
        default=None,
    )
    watch_api_data = (
        self._parse_json(watch_api_data_string, video_id)
        if watch_api_data_string
        else {}
    )
    video_detail = watch_api_data.get("videoDetail", {})
    extension = xpath_text(video_info, ".//movie_type")
    if not extension:
        extension = determine_ext(video_real_url)
    thumbnail = (
        xpath_text(video_info, ".//thumbnail_url")
        or self._html_search_meta("image", webpage, "thumbnail", default=None)
        or video_detail.get("thumbnail")
    )
    description = xpath_text(video_info, ".//description")
    timestamp = parse_iso8601(xpath_text(video_info, ".//first_retrieve"))
    if not timestamp:
        match = self._html_search_meta(
            "datePublished", webpage, "date published", default=None
        )
        if match:
            # Inserts ":00" before the "+hh:mm" offset — the meta value
            # apparently lacks a seconds field; verify against a live page.
            timestamp = parse_iso8601(match.replace("+", ":00+"))
    if not timestamp and video_detail.get("postedAt"):
        # postedAt uses "YYYY/MM/DD hh:mm:ss" in UTC+9 (presumably JST).
        timestamp = parse_iso8601(
            video_detail["postedAt"].replace("/", "-"),
            delimiter=" ",
            timezone=datetime.timedelta(hours=9),
        )
    view_count = int_or_none(xpath_text(video_info, ".//view_counter"))
    if not view_count:
        match = self._html_search_regex(
            r">Views: <strong[^>]*>([^<]+)</strong>",
            webpage,
            "view count",
            default=None,
        )
        if match:
            # Strip thousands separators before the int conversion.
            view_count = int_or_none(match.replace(",", ""))
    view_count = view_count or video_detail.get("viewCount")
    comment_count = int_or_none(xpath_text(video_info, ".//comment_num"))
    if not comment_count:
        match = self._html_search_regex(
            r">Comments: <strong[^>]*>([^<]+)</strong>",
            webpage,
            "comment count",
            default=None,
        )
        if match:
            comment_count = int_or_none(match.replace(",", ""))
    comment_count = comment_count or video_detail.get("commentCount")
    duration = parse_duration(
        xpath_text(video_info, ".//length")
        or self._html_search_meta(
            "video:duration", webpage, "video duration", default=None
        )
    ) or video_detail.get("length")
    webpage_url = xpath_text(video_info, ".//watch_url") or url
    # Uploader: channel tags take precedence over user tags; both absent
    # means anonymous/unknown.
    if video_info.find(".//ch_id") is not None:
        uploader_id = video_info.find(".//ch_id").text
        uploader = video_info.find(".//ch_name").text
    elif video_info.find(".//user_id") is not None:
        uploader_id = video_info.find(".//user_id").text
        uploader = video_info.find(".//user_nickname").text
    else:
        uploader_id = uploader = None
    return {
        "id": video_id,
        "url": video_real_url,
        "title": title,
        "ext": extension,
        # URLs ending in "low" are the reduced-bitrate "economy" variant.
        "format_id": "economy" if video_real_url.endswith("low") else "normal",
        "thumbnail": thumbnail,
        "description": description,
        "uploader": uploader,
        "timestamp": timestamp,
        "uploader_id": uploader_id,
        "view_count": view_count,
        "comment_count": comment_count,
        "duration": duration,
        "webpage_url": webpage_url,
    }
|
https://github.com/ytdl-org/youtube-dl/issues/13806
|
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['-u', 'PRIVATE', '-p', 'PRIVATE', '-v', '--user-agen
t', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)
Chrome/59.0.3071.112 Safari/537.36 Vivaldi/1.91.867.48', 'http://www.nicovideo.j
p/watch/sm1151009']
[debug] Encodings: locale cp932, fs mbcs, out cp932, pref cp932
[debug] youtube-dl version 2017.07.30.1
[debug] Python version 3.4.4 - Windows-7-6.1.7601-SP1
[debug] exe versions: ffmpeg N-86537-gae6f6d4, ffprobe N-86537-gae6f6d4
[debug] Proxy map: {}
[niconico] Logging in
[niconico] sm1151009: Downloading webpage
[niconico] sm1151009: Downloading video info page
[niconico] sm1151009: Downloading flv info
[debug] Default format spec: bestvideo+bestaudio/best
[debug] Invoking downloader on 'http://smile-cca61.nicovideo.jp/smile?v=1151009.
96367'
ERROR: unable to download video data: HTTP Error 403: Forbidden
Traceback (most recent call last):
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpfdfxmx88\bu
ild\youtube_dl\YoutubeDL.py", line 1863, in process_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpfdfxmx88\bu
ild\youtube_dl\YoutubeDL.py", line 1805, in dl
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpfdfxmx88\bu
ild\youtube_dl\downloader\common.py", line 361, in download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpfdfxmx88\bu
ild\youtube_dl\downloader\http.py", line 61, in real_download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpfdfxmx88\bu
ild\youtube_dl\YoutubeDL.py", line 2166, in urlopen
File "C:\Python\Python34\lib\urllib\request.py", line 470, in open
File "C:\Python\Python34\lib\urllib\request.py", line 580, in http_response
File "C:\Python\Python34\lib\urllib\request.py", line 508, in error
File "C:\Python\Python34\lib\urllib\request.py", line 442, in _call_chain
File "C:\Python\Python34\lib\urllib\request.py", line 588, in http_error_defau
lt
urllib.error.HTTPError: HTTP Error 403: Forbidden
...
<end of log>
|
urllib.error.HTTPError
|
def _real_extract(self, url):
if url.startswith("//"):
return {
"_type": "url",
"url": self.http_scheme() + url,
}
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self._downloader.params.get("default_search")
if default_search is None:
default_search = "fixup_error"
if default_search in ("auto", "auto_warning", "fixup_error"):
if "/" in url:
self._downloader.report_warning(
"The url doesn't specify the protocol, trying with http"
)
return self.url_result("http://" + url)
elif default_search != "fixup_error":
if default_search == "auto_warning":
if re.match(r"^(?:url|URL)$", url):
raise ExtractorError(
'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" '
% url,
expected=True,
)
else:
self._downloader.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.'
% url
)
return self.url_result("ytsearch:" + url)
if default_search in ("error", "fixup_error"):
raise ExtractorError(
"%r is not a valid URL. "
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
% (url, url),
expected=True,
)
else:
if ":" not in default_search:
default_search += ":"
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get("to_generic")
if smuggled_data and "force_videoid" in smuggled_data:
force_videoid = smuggled_data["force_videoid"]
video_id = force_videoid
else:
video_id = self._generic_id(url)
self.to_screen("%s: Requesting header" % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req,
video_id,
note=False,
errnote="Could not send HEAD request to %s" % url,
fatal=False,
)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(new_url, {"force_videoid": force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = sanitized_Request(url)
request.add_header("Accept-Encoding", "*")
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
"id": video_id,
"title": self._generic_title(url),
"upload_date": unified_strdate(head_response.headers.get("Last-Modified")),
}
# Check for direct link to a video
content_type = head_response.headers.get("Content-Type", "").lower()
m = re.match(
r"^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)",
content_type,
)
if m:
format_id = m.group("format_id")
if format_id.endswith("mpegurl"):
formats = self._extract_m3u8_formats(url, video_id, "mp4")
elif format_id == "f4m":
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [
{
"format_id": m.group("format_id"),
"url": url,
"vcodec": "none" if m.group("type") == "audio" else None,
}
]
info_dict["direct"] = True
self._sort_formats(formats)
info_dict["formats"] = formats
return info_dict
if not self._downloader.params.get("test", False) and not is_intentional:
force = self._downloader.params.get("force_generic_extractor", False)
self._downloader.report_warning(
"%s on generic information extractor."
% ("Forcing" if force else "Falling back")
)
if not full_response:
request = sanitized_Request(url)
# Some webservers may serve compressed content of rather big size (e.g. gzipped flac)
# making it impossible to download only chunk of the file (yet we need only 512kB to
# test whether it's HTML or not). According to youtube-dl default Accept-Encoding
# that will always result in downloading the whole file that is not desirable.
# Therefore for extraction pass we have to override Accept-Encoding to any in order
# to accept raw bytes and being able to download only a chunk.
# It may probably better to solve this by checking Content-Type for application/octet-stream
# after HEAD request finishes, but not sure if we can rely on this.
request.add_header("Accept-Encoding", "*")
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b"#EXTM3U"):
info_dict["formats"] = self._extract_m3u8_formats(url, video_id, "mp4")
self._sort_formats(info_dict["formats"])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self._downloader.report_warning(
"URL could be a direct video link, returning it as such."
)
info_dict.update(
{
"direct": True,
"url": url,
}
)
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes
)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or a MPD manifest?
try:
doc = compat_etree_fromstring(webpage.encode("utf-8"))
if doc.tag == "rss":
return self._extract_rss(url, video_id, doc)
elif doc.tag == "SmoothStreamingMedia":
info_dict["formats"] = self._parse_ism_formats(doc, url)
self._sort_formats(info_dict["formats"])
return info_dict
elif re.match(r"^(?:{[^}]+})?smil$", doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil["formats"])
return smil
elif doc.tag == "{http://xspf.org/ns/0/}playlist":
return self.playlist_result(self._parse_xspf(doc, video_id), video_id)
elif re.match(r"(?i)^(?:{[^}]+})?MPD$", doc.tag):
info_dict["formats"] = self._parse_mpd_formats(
doc,
video_id,
mpd_base_url=full_response.geturl().rpartition("/")[0],
mpd_url=url,
)
self._sort_formats(info_dict["formats"])
return info_dict
elif re.match(r"^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$", doc.tag):
info_dict["formats"] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict["formats"])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
# Sometimes embedded video player is hidden behind percent encoding
# (e.g. https://github.com/rg3/youtube-dl/issues/2448)
# Unescaping the whole page allows to handle those cases in a generic way
webpage = compat_urllib_parse_unquote(webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None
) or self._html_search_regex(
r"(?s)<title>(.*?)</title>", webpage, "video title", default="video"
)
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r"^(?:https?://)?([^/]*)/.*", url, "video uploader"
)
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
# Helper method
def _playlist_from_matches(matches, getter=None, ie=None):
urlrs = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches
)
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title
)
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
self.to_screen("Brightcove video detected.")
entries = [
{
"_type": "url",
"url": smuggle_url(bc_url, {"Referer": url}),
"ie_key": "BrightcoveLegacy",
}
for bc_url in bc_urls
]
return {
"_type": "playlist",
"title": video_title,
"id": video_id,
"entries": entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(webpage)
if bc_urls:
return _playlist_from_matches(bc_urls, ie="BrightcoveNew")
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return _playlist_from_matches(tp_urls, ie="ThePlatform")
# Look for Vessel embeds
vessel_urls = VesselIE._extract_urls(webpage)
if vessel_urls:
return _playlist_from_matches(vessel_urls, ie=VesselIE.ie_key())
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"',
webpage,
)
if matches:
return _playlist_from_matches(matches, ie="RtlNl")
vimeo_urls = VimeoIE._extract_urls(url, webpage)
if vimeo_urls:
return _playlist_from_matches(vimeo_urls, ie=VimeoIE.ie_key())
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage,
"vid.me embed",
default=None,
)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, "Vidme")
# Look for embedded YouTube player
matches = re.findall(
r"""(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/.+?)
\1""",
webpage,
)
if matches:
return _playlist_from_matches(matches, lambda m: unescapeHTML(m[1]))
# Look for lazyYT YouTube embed
matches = re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)
if matches:
return _playlist_from_matches(matches, lambda m: unescapeHTML(m))
# Look for Wordpress "YouTube Video Importer" plugin
matches = re.findall(
r"""(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)""",
webpage,
)
if matches:
return _playlist_from_matches(matches, lambda m: m[-1])
matches = DailymotionIE._extract_urls(webpage)
if matches:
return _playlist_from_matches(matches)
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1',
webpage,
)
if m:
playlists = re.findall(
r"list\[\]=/playlist/([^/]+)/", unescapeHTML(m.group("url"))
)
if playlists:
return _playlist_from_matches(
playlists, lambda p: "//dailymotion.com/playlist/%s" % p
)
# Look for embedded Wistia player
match = re.search(
r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1',
webpage,
)
if match:
embed_url = self._proto_relative_url(unescapeHTML(match.group("url")))
return {
"_type": "url_transparent",
"url": embed_url,
"ie_key": "Wistia",
"uploader": video_uploader,
}
match = re.search(
r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)',
webpage,
)
if match:
return {
"_type": "url_transparent",
"url": "wistia:%s" % match.group("id"),
"ie_key": "Wistia",
"uploader": video_uploader,
}
match = re.search(
r"""(?sx)
<script[^>]+src=(["'])(?:https?:)?//fast\.wistia\.com/assets/external/E-v1\.js\1[^>]*>.*?
<div[^>]+class=(["']).*?\bwistia_async_(?P<id>[a-z0-9]+)\b.*?\2
""",
webpage,
)
if match:
return self.url_result(
self._proto_relative_url("wistia:%s" % match.group("id")), "Wistia"
)
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, "SVT")
# Look for embedded condenast player
matches = re.findall(
r'<iframe\s+(?:[a-zA-Z-]+="[^"]+"\s+)*?src="(https?://player\.cnevids\.com/embed/[^"]+")',
webpage,
)
if matches:
return {
"_type": "playlist",
"entries": [
{
"_type": "url",
"ie_key": "CondeNast",
"url": ma,
}
for ma in matches
],
"title": video_title,
"id": video_id,
}
# Look for Bandcamp pages with custom domain
mobj = re.search(
r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage
)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for Ooyala videos
mobj = (
re.search(
r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)',
webpage,
)
or re.search(
r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage
)
or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage)
)
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
webpage,
"ooyala embed token",
default=None,
)
return OoyalaIE._build_url_result(
smuggle_url(
mobj.group("ec"),
{
"domain": url,
"embed_token": embed_token,
},
)
)
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r"SBN\.VideoLinkset\.entryGroup\((\[.*?\])", webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return _playlist_from_matches(
embeds,
getter=lambda v: OoyalaIE._url_for_embed_code(
smuggle_url(v["provider_video_id"], {"domain": url})
),
ie="Ooyala",
)
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), "Aparat")
# Look for MPORA videos
mobj = re.search(
r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage
)
if mobj is not None:
return self.url_result(mobj.group(1), "Mpora")
# Look for embedded NovaMov-based player
mobj = re.search(
r"""(?x)<(?:pagespeed_)?iframe[^>]+?src=(["\'])
(?P<url>http://(?:(?:embed|www)\.)?
(?:novamov\.com|
nowvideo\.(?:ch|sx|eu|at|ag|co)|
videoweed\.(?:es|com)|
movshare\.(?:net|sx|ag)|
divxstage\.(?:eu|net|ch|co|at|ag))
/embed\.php.+?)\1""",
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for embedded Facebook player
facebook_url = FacebookIE._extract_url(webpage)
if facebook_url is not None:
return self.url_result(facebook_url, "Facebook")
# Look for embedded VK player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "VK")
# Look for embedded Odnoklassniki player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Odnoklassniki")
# Look for embedded ivi player
mobj = re.search(
r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Ivi")
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "HuffPost")
# Look for embed.ly
mobj = re.search(
r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage
)
if mobj is not None:
return self.url_result(mobj.group("url"))
mobj = re.search(
r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage
)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group("url")))
# Look for funnyordie embed
matches = re.findall(
r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage
)
if matches:
return _playlist_from_matches(matches, getter=unescapeHTML, ie="FunnyOrDie")
# Look for BBC iPlayer embed
matches = re.findall(
r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)',
webpage,
)
if matches:
return _playlist_from_matches(matches, ie="BBCCoUk")
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, "RUTV")
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, "TVC")
# Look for embedded SportBox player
sportbox_urls = SportBoxEmbedIE._extract_urls(webpage)
if sportbox_urls:
return _playlist_from_matches(sportbox_urls, ie="SportBoxEmbed")
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return _playlist_from_matches(xhamster_urls, ie="XHamsterEmbed")
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return _playlist_from_matches(tnaflix_urls, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded PornHub player
pornhub_urls = PornHubIE._extract_urls(webpage)
if pornhub_urls:
return _playlist_from_matches(pornhub_urls, ie=PornHubIE.ie_key())
# Look for embedded DrTuber player
drtuber_urls = DrTuberIE._extract_urls(webpage)
if drtuber_urls:
return _playlist_from_matches(drtuber_urls, ie=DrTuberIE.ie_key())
# Look for embedded RedTube player
redtube_urls = RedTubeIE._extract_urls(webpage)
if redtube_urls:
return _playlist_from_matches(redtube_urls, ie=RedTubeIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Tvigle")
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "TED")
# Look for embedded Ustream videos
ustream_url = UstreamIE._extract_url(webpage)
if ustream_url:
return self.url_result(ustream_url, UstreamIE.ie_key())
# Look for embedded arte.tv player
mobj = re.search(
r'<(?:script|iframe) [^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "ArteTVEmbed")
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
return self.url_result(smotri_url, "Smotri")
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudIE._extract_urls(webpage)
if soundcloud_urls:
return _playlist_from_matches(
soundcloud_urls, getter=unescapeHTML, ie=SoundcloudIE.ie_key()
)
# Look for tunein player
tunein_urls = TuneInBaseIE._extract_urls(webpage)
if tunein_urls:
return _playlist_from_matches(tunein_urls)
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie="MTVServicesEmbedded")
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Yahoo")
# Look for embedded sbs.com.au player
mobj = re.search(
r"""(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1""",
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "SBS")
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Cinchcast")
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage,
)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)', webpage
)
if mobj is not None:
return self.url_result(mobj.group("url"), "MLB")
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage,
)
if mobj is not None:
return self.url_result(
self._proto_relative_url(mobj.group("url"), scheme="http:"), "CondeNast"
)
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Livestream")
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Zapiks")
# Look for Kaltura embeds
kaltura_url = KalturaIE._extract_url(webpage)
if kaltura_url:
return self.url_result(
smuggle_url(kaltura_url, {"source_url": url}), KalturaIE.ie_key()
)
# Look for Eagle.Platform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(eagleplatform_url, EaglePlatformIE.ie_key())
# Look for ClipYou (uses Eagle.Platform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"',
webpage,
)
if mobj is not None:
return self.url_result(
"eagleplatform:%(host)s:%(id)s" % mobj.groupdict(), "EaglePlatform"
)
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Webcaster embeds
webcaster_url = WebcasterFeedIE._extract_url(self, webpage)
if webcaster_url:
return self.url_result(webcaster_url, ie=WebcasterFeedIE.ie_key())
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?',
webpage,
)
if mobj is not None:
return self.url_result("5min:%s" % mobj.group("id"), "FiveMin")
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, "NBCSportsVPlayer")
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1',
webpage,
)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group("url"), "NBCNews")
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, "GoogleDrive")
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL,
webpage,
)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group("url")), "UDNEmbed"
)
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, "SenateISVP")
# Look for Dailymotion Cloud videos
dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
if dmcloud_url:
return self.url_result(dmcloud_url, "DailymotionCloud")
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_url = JWPlatformIE._extract_url(webpage)
if jwplatform_url:
return self.url_result(jwplatform_url, "JWPlatform")
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(
self._proto_relative_url(digiteka_url), DigitekaIE.ie_key()
)
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Piksel embeds
piksel_url = PikselIE._extract_url(webpage)
if piksel_url:
return self.url_result(piksel_url, PikselIE.ie_key())
# Look for Limelight embeds
mobj = re.search(
r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P<id>[a-z0-9]{32})',
webpage,
)
if mobj:
lm = {
"Media": "media",
"Channel": "channel",
"ChannelList": "channel_list",
}
return self.url_result(
smuggle_url(
"limelight:%s:%s" % (lm[mobj.group(1)], mobj.group(2)),
{"source_url": url},
),
"Limelight%s" % mobj.group(1),
mobj.group(2),
)
mobj = re.search(
r"""(?sx)
<object[^>]+class=(["\'])LimelightEmbeddedPlayerFlash\1[^>]*>.*?
<param[^>]+
name=(["\'])flashVars\2[^>]+
value=(["\'])(?:(?!\3).)*mediaId=(?P<id>[a-z0-9]{32})
""",
webpage,
)
if mobj:
return self.url_result(
smuggle_url("limelight:media:%s" % mobj.group("id"), {"source_url": url}),
"LimelightMedia",
mobj.group("id"),
)
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage,
)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), "AdobeTVVideo"
)
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage,
)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), "Vine"
)
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vod-platform\.net/[eE]mbed/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group("url"))), "VODPlatform"
)
# Look for Mangomolo embeds
mobj = re.search(
r"""(?x)<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?admin\.mangomolo\.com/analytics/index\.php/customers/embed/
(?:
video\?.*?\bid=(?P<video_id>\d+)|
index\?.*?\bchannelid=(?P<channel_id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)
).+?)\1""",
webpage,
)
if mobj is not None:
info = {
"_type": "url_transparent",
"url": self._proto_relative_url(unescapeHTML(mobj.group("url"))),
"title": video_title,
"description": video_description,
"thumbnail": video_thumbnail,
"uploader": video_uploader,
}
video_id = mobj.group("video_id")
if video_id:
info.update(
{
"ie_key": "MangomoloVideo",
"id": video_id,
}
)
else:
info.update(
{
"ie_key": "MangomoloLive",
"id": mobj.group("channel_id"),
}
)
return info
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key()
)
# Look for LiveLeak embeds
liveleak_url = LiveLeakIE._extract_url(webpage)
if liveleak_url:
return self.url_result(liveleak_url, "LiveLeak")
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
"_type": "url_transparent",
"ie_key": ThreeQSDNIE.ie_key(),
"url": self._proto_relative_url(threeqsdn_url),
"title": video_title,
"description": video_description,
"thumbnail": video_thumbnail,
"uploader": video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Look for DBTV embeds
dbtv_urls = DBTVIE._extract_urls(webpage)
if dbtv_urls:
return _playlist_from_matches(dbtv_urls, ie=DBTVIE.ie_key())
# Look for Videa embeds
videa_urls = VideaIE._extract_urls(webpage)
if videa_urls:
return _playlist_from_matches(videa_urls, ie=VideaIE.ie_key())
# Look for 20 minuten embeds
twentymin_urls = TwentyMinutenIE._extract_urls(webpage)
if twentymin_urls:
return _playlist_from_matches(twentymin_urls, ie=TwentyMinutenIE.ie_key())
# Look for Openload embeds
openload_urls = OpenloadIE._extract_urls(webpage)
if openload_urls:
return _playlist_from_matches(openload_urls, ie=OpenloadIE.ie_key())
# Look for VideoPress embeds
videopress_urls = VideoPressIE._extract_urls(webpage)
if videopress_urls:
return _playlist_from_matches(videopress_urls, ie=VideoPressIE.ie_key())
# Look for Rutube embeds
rutube_urls = RutubeIE._extract_urls(webpage)
if rutube_urls:
return _playlist_from_matches(rutube_urls, ie=RutubeIE.ie_key())
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type="VideoObject"
)
if json_ld.get("url"):
info_dict.update(
{
"title": video_title or info_dict["title"],
"description": video_description,
"thumbnail": video_thumbnail,
"age_limit": age_limit,
}
)
info_dict.update(json_ld)
return info_dict
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id="hls")
if entries:
for entry in entries:
entry.update(
{
"id": video_id,
"title": video_title,
}
)
self._sort_formats(entry["formats"])
return self.playlist_result(entries)
jwplayer_data_str = self._find_jwplayer_data(webpage)
if jwplayer_data_str:
try:
jwplayer_data = self._parse_json(
jwplayer_data_str, video_id, transform_source=js_to_json
)
info = self._parse_jwplayer_data(
jwplayer_data, video_id, require_title=False
)
if not info.get("title"):
info["title"] = video_title
except ExtractorError:
pass
def check_video(vurl):
    """Return True when *vurl* plausibly points at an actual video resource."""
    # YouTube and RTMP URLs are always accepted without inspecting the path.
    if YoutubeIE.suitable(vurl) or RtmpIE.suitable(vurl):
        return True
    # Otherwise require a path carrying a file extension that is not a
    # known non-video type (flash containers, images, subtitles, scripts).
    vpath = compat_urlparse.urlparse(vurl).path
    non_video_exts = (
        "swf",
        "png",
        "jpg",
        "srt",
        "sbv",
        "sub",
        "vtt",
        "ttml",
        "js",
    )
    return "." in vpath and determine_ext(vpath) not in non_video_exts
def filter_video(urls):
    """Keep only the candidate URLs that pass the check_video heuristic."""
    return [candidate for candidate in urls if check_video(candidate)]
# Start with something easy: JW Player in SWFObject
found = filter_video(
re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
)
if not found:
# Look for gorilla-vid style embedding
found = filter_video(
re.findall(
r"""(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']""",
webpage,
)
)
if not found:
# Broaden the search a little bit
found = filter_video(
re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
)
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(
re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']',
webpage,
)
)
if not found:
# Flow player
found = filter_video(
re.findall(
r"""(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
""",
webpage,
)
)
if not found:
# Cinerama player
found = re.findall(r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
# twitter:player:stream should be checked before twitter:player since
# it is expected to contain a raw stream (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
found = filter_video(
re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"',
webpage,
)
)
if not found:
# We look for Open Graph info:
# We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
m_video_type = re.findall(
r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage
)
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
if m_video_type is not None:
found = filter_video(
re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
)
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage,
)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get("Refresh")
if refresh_header:
# In python 2 response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode("iso-8859-1")
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
self.report_following_redirect(new_url)
return {
"_type": "url",
"url": new_url,
}
if not found:
# twitter:player is a https URL to iframe player that may or may not
# be supported by youtube-dl thus this is checked the very last (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
embed_url = self._html_search_meta("twitter:player", webpage, default=None)
if embed_url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace("\\/", "/")
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, "Youtube"))
continue
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
"id": video_id,
"uploader": video_uploader,
"title": video_title,
"age_limit": age_limit,
}
if RtmpIE.suitable(video_url):
entry_info_dict.update(
{
"_type": "url_transparent",
"ie_key": RtmpIE.ie_key(),
"url": video_url,
}
)
entries.append(entry_info_dict)
continue
ext = determine_ext(video_url)
if ext == "smil":
entry_info_dict["formats"] = self._extract_smil_formats(video_url, video_id)
elif ext == "xspf":
return self.playlist_result(
self._extract_xspf_playlist(video_url, video_id), video_id
)
elif ext == "m3u8":
entry_info_dict["formats"] = self._extract_m3u8_formats(
video_url, video_id, ext="mp4"
)
elif ext == "mpd":
entry_info_dict["formats"] = self._extract_mpd_formats(video_url, video_id)
elif ext == "f4m":
entry_info_dict["formats"] = self._extract_f4m_formats(video_url, video_id)
elif re.search(r"(?i)\.(?:ism|smil)/manifest", video_url) and video_url != url:
# Just matching .ism/manifest is not enough to be reliably sure
# whether it's actually an ISM manifest or some other streaming
# manifest since there are various streaming URL formats
# possible (see [1]) as well as some other shenanigans like
# .smil/manifest URLs that actually serve an ISM (see [2]) and
# so on.
# Thus the most reasonable way to solve this is to delegate
# to generic extractor in order to look into the contents of
# the manifest itself.
# 1. https://azure.microsoft.com/en-us/documentation/articles/media-services-deliver-content-overview/#streaming-url-formats
# 2. https://svs.itworkscdn.net/lbcivod/smil:itwfcdn/lbci/170976.smil/Manifest
entry_info_dict = self.url_result(
smuggle_url(video_url, {"to_generic": True}), GenericIE.ie_key()
)
else:
entry_info_dict["url"] = video_url
if entry_info_dict.get("formats"):
self._sort_formats(entry_info_dict["formats"])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get("title") is not None:
e["title"] = "%s (%d)" % (e["title"], num)
return {
"_type": "playlist",
"entries": entries,
}
|
def _real_extract(self, url):
if url.startswith("//"):
return {
"_type": "url",
"url": self.http_scheme() + url,
}
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self._downloader.params.get("default_search")
if default_search is None:
default_search = "fixup_error"
if default_search in ("auto", "auto_warning", "fixup_error"):
if "/" in url:
self._downloader.report_warning(
"The url doesn't specify the protocol, trying with http"
)
return self.url_result("http://" + url)
elif default_search != "fixup_error":
if default_search == "auto_warning":
if re.match(r"^(?:url|URL)$", url):
raise ExtractorError(
'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" '
% url,
expected=True,
)
else:
self._downloader.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.'
% url
)
return self.url_result("ytsearch:" + url)
if default_search in ("error", "fixup_error"):
raise ExtractorError(
"%r is not a valid URL. "
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
% (url, url),
expected=True,
)
else:
if ":" not in default_search:
default_search += ":"
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get("to_generic")
if smuggled_data and "force_videoid" in smuggled_data:
force_videoid = smuggled_data["force_videoid"]
video_id = force_videoid
else:
video_id = self._generic_id(url)
self.to_screen("%s: Requesting header" % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req,
video_id,
note=False,
errnote="Could not send HEAD request to %s" % url,
fatal=False,
)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(new_url, {"force_videoid": force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = sanitized_Request(url)
request.add_header("Accept-Encoding", "*")
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
"id": video_id,
"title": self._generic_title(url),
"upload_date": unified_strdate(head_response.headers.get("Last-Modified")),
}
# Check for direct link to a video
content_type = head_response.headers.get("Content-Type", "").lower()
m = re.match(
r"^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)",
content_type,
)
if m:
format_id = m.group("format_id")
if format_id.endswith("mpegurl"):
formats = self._extract_m3u8_formats(url, video_id, "mp4")
elif format_id == "f4m":
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [
{
"format_id": m.group("format_id"),
"url": url,
"vcodec": "none" if m.group("type") == "audio" else None,
}
]
info_dict["direct"] = True
self._sort_formats(formats)
info_dict["formats"] = formats
return info_dict
if not self._downloader.params.get("test", False) and not is_intentional:
force = self._downloader.params.get("force_generic_extractor", False)
self._downloader.report_warning(
"%s on generic information extractor."
% ("Forcing" if force else "Falling back")
)
if not full_response:
request = sanitized_Request(url)
# Some webservers may serve compressed content of rather big size (e.g. gzipped flac)
# making it impossible to download only chunk of the file (yet we need only 512kB to
# test whether it's HTML or not). According to youtube-dl default Accept-Encoding
# that will always result in downloading the whole file that is not desirable.
# Therefore for extraction pass we have to override Accept-Encoding to any in order
# to accept raw bytes and being able to download only a chunk.
# It may probably better to solve this by checking Content-Type for application/octet-stream
# after HEAD request finishes, but not sure if we can rely on this.
request.add_header("Accept-Encoding", "*")
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b"#EXTM3U"):
info_dict["formats"] = self._extract_m3u8_formats(url, video_id, "mp4")
self._sort_formats(info_dict["formats"])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self._downloader.report_warning(
"URL could be a direct video link, returning it as such."
)
info_dict.update(
{
"direct": True,
"url": url,
}
)
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes
)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or a MPD manifest?
try:
doc = compat_etree_fromstring(webpage.encode("utf-8"))
if doc.tag == "rss":
return self._extract_rss(url, video_id, doc)
elif doc.tag == "SmoothStreamingMedia":
info_dict["formats"] = self._parse_ism_formats(doc, url)
self._sort_formats(info_dict["formats"])
return info_dict
elif re.match(r"^(?:{[^}]+})?smil$", doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil["formats"])
return smil
elif doc.tag == "{http://xspf.org/ns/0/}playlist":
return self.playlist_result(self._parse_xspf(doc, video_id), video_id)
elif re.match(r"(?i)^(?:{[^}]+})?MPD$", doc.tag):
info_dict["formats"] = self._parse_mpd_formats(
doc,
video_id,
mpd_base_url=full_response.geturl().rpartition("/")[0],
mpd_url=url,
)
self._sort_formats(info_dict["formats"])
return info_dict
elif re.match(r"^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$", doc.tag):
info_dict["formats"] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict["formats"])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
# Sometimes embedded video player is hidden behind percent encoding
# (e.g. https://github.com/rg3/youtube-dl/issues/2448)
# Unescaping the whole page allows to handle those cases in a generic way
webpage = compat_urllib_parse_unquote(webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None
) or self._html_search_regex(
r"(?s)<title>(.*?)</title>", webpage, "video title", default="video"
)
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r"^(?:https?://)?([^/]*)/.*", url, "video uploader"
)
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
# Helper method
def _playlist_from_matches(matches, getter=None, ie=None):
    """Turn a sequence of embed-URL matches into a playlist result.

    Each match is optionally transformed by *getter*, made protocol-absolute,
    wrapped as a url_result for extractor *ie*, and de-duplicated while
    preserving the original order.
    """
    entries = orderedSet(
        self.url_result(
            self._proto_relative_url(m if getter is None else getter(m)), ie
        )
        for m in matches
    )
    return self.playlist_result(
        entries, playlist_id=video_id, playlist_title=video_title
    )
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
self.to_screen("Brightcove video detected.")
entries = [
{
"_type": "url",
"url": smuggle_url(bc_url, {"Referer": url}),
"ie_key": "BrightcoveLegacy",
}
for bc_url in bc_urls
]
return {
"_type": "playlist",
"title": video_title,
"id": video_id,
"entries": entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(webpage)
if bc_urls:
return _playlist_from_matches(bc_urls, ie="BrightcoveNew")
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return _playlist_from_matches(tp_urls, ie="ThePlatform")
# Look for Vessel embeds
vessel_urls = VesselIE._extract_urls(webpage)
if vessel_urls:
return _playlist_from_matches(vessel_urls, ie=VesselIE.ie_key())
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"',
webpage,
)
if matches:
return _playlist_from_matches(matches, ie="RtlNl")
vimeo_urls = VimeoIE._extract_urls(url, webpage)
if vimeo_urls:
return _playlist_from_matches(vimeo_urls, ie=VimeoIE.ie_key())
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage,
"vid.me embed",
default=None,
)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, "Vidme")
# Look for embedded YouTube player
matches = re.findall(
r"""(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/.+?)
\1""",
webpage,
)
if matches:
return _playlist_from_matches(matches, lambda m: unescapeHTML(m[1]))
# Look for lazyYT YouTube embed
matches = re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)
if matches:
return _playlist_from_matches(matches, lambda m: unescapeHTML(m))
# Look for Wordpress "YouTube Video Importer" plugin
matches = re.findall(
r"""(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)""",
webpage,
)
if matches:
return _playlist_from_matches(matches, lambda m: m[-1])
matches = DailymotionIE._extract_urls(webpage)
if matches:
return _playlist_from_matches(matches)
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1',
webpage,
)
if m:
playlists = re.findall(
r"list\[\]=/playlist/([^/]+)/", unescapeHTML(m.group("url"))
)
if playlists:
return _playlist_from_matches(
playlists, lambda p: "//dailymotion.com/playlist/%s" % p
)
# Look for embedded Wistia player
match = re.search(
r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1',
webpage,
)
if match:
embed_url = self._proto_relative_url(unescapeHTML(match.group("url")))
return {
"_type": "url_transparent",
"url": embed_url,
"ie_key": "Wistia",
"uploader": video_uploader,
}
match = re.search(
r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)',
webpage,
)
if match:
return {
"_type": "url_transparent",
"url": "wistia:%s" % match.group("id"),
"ie_key": "Wistia",
"uploader": video_uploader,
}
match = re.search(
r"""(?sx)
<script[^>]+src=(["'])(?:https?:)?//fast\.wistia\.com/assets/external/E-v1\.js\1[^>]*>.*?
<div[^>]+class=(["']).*?\bwistia_async_(?P<id>[a-z0-9]+)\b.*?\2
""",
webpage,
)
if match:
return self.url_result(
self._proto_relative_url("wistia:%s" % match.group("id")), "Wistia"
)
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, "SVT")
# Look for embedded condenast player
matches = re.findall(
r'<iframe\s+(?:[a-zA-Z-]+="[^"]+"\s+)*?src="(https?://player\.cnevids\.com/embed/[^"]+")',
webpage,
)
if matches:
return {
"_type": "playlist",
"entries": [
{
"_type": "url",
"ie_key": "CondeNast",
"url": ma,
}
for ma in matches
],
"title": video_title,
"id": video_id,
}
# Look for Bandcamp pages with custom domain
mobj = re.search(
r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage
)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for Ooyala videos
mobj = (
re.search(
r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)',
webpage,
)
or re.search(
r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage
)
or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage)
)
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
webpage,
"ooyala embed token",
default=None,
)
return OoyalaIE._build_url_result(
smuggle_url(
mobj.group("ec"),
{
"domain": url,
"embed_token": embed_token,
},
)
)
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r"SBN\.VideoLinkset\.entryGroup\((\[.*?\])", webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return _playlist_from_matches(
embeds,
getter=lambda v: OoyalaIE._url_for_embed_code(
smuggle_url(v["provider_video_id"], {"domain": url})
),
ie="Ooyala",
)
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), "Aparat")
# Look for MPORA videos
mobj = re.search(
r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage
)
if mobj is not None:
return self.url_result(mobj.group(1), "Mpora")
# Look for embedded NovaMov-based player
mobj = re.search(
r"""(?x)<(?:pagespeed_)?iframe[^>]+?src=(["\'])
(?P<url>http://(?:(?:embed|www)\.)?
(?:novamov\.com|
nowvideo\.(?:ch|sx|eu|at|ag|co)|
videoweed\.(?:es|com)|
movshare\.(?:net|sx|ag)|
divxstage\.(?:eu|net|ch|co|at|ag))
/embed\.php.+?)\1""",
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for embedded Facebook player
facebook_url = FacebookIE._extract_url(webpage)
if facebook_url is not None:
return self.url_result(facebook_url, "Facebook")
# Look for embedded VK player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "VK")
# Look for embedded Odnoklassniki player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Odnoklassniki")
# Look for embedded ivi player
mobj = re.search(
r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Ivi")
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "HuffPost")
# Look for embed.ly
mobj = re.search(
r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage
)
if mobj is not None:
return self.url_result(mobj.group("url"))
mobj = re.search(
r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage
)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group("url")))
# Look for funnyordie embed
matches = re.findall(
r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage
)
if matches:
return _playlist_from_matches(matches, getter=unescapeHTML, ie="FunnyOrDie")
# Look for BBC iPlayer embed
matches = re.findall(
r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)',
webpage,
)
if matches:
return _playlist_from_matches(matches, ie="BBCCoUk")
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, "RUTV")
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, "TVC")
# Look for embedded SportBox player
sportbox_urls = SportBoxEmbedIE._extract_urls(webpage)
if sportbox_urls:
return _playlist_from_matches(sportbox_urls, ie="SportBoxEmbed")
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return _playlist_from_matches(xhamster_urls, ie="XHamsterEmbed")
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return _playlist_from_matches(tnaflix_urls, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded PornHub player
pornhub_urls = PornHubIE._extract_urls(webpage)
if pornhub_urls:
return _playlist_from_matches(pornhub_urls, ie=PornHubIE.ie_key())
# Look for embedded DrTuber player
drtuber_urls = DrTuberIE._extract_urls(webpage)
if drtuber_urls:
return _playlist_from_matches(drtuber_urls, ie=DrTuberIE.ie_key())
# Look for embedded RedTube player
redtube_urls = RedTubeIE._extract_urls(webpage)
if redtube_urls:
return _playlist_from_matches(redtube_urls, ie=RedTubeIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Tvigle")
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "TED")
# Look for embedded Ustream videos
ustream_url = UstreamIE._extract_url(webpage)
if ustream_url:
return self.url_result(ustream_url, UstreamIE.ie_key())
# Look for embedded arte.tv player
mobj = re.search(
r'<(?:script|iframe) [^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "ArteTVEmbed")
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
return self.url_result(smotri_url, "Smotri")
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudIE._extract_urls(webpage)
if soundcloud_urls:
return _playlist_from_matches(
soundcloud_urls, getter=unescapeHTML, ie=SoundcloudIE.ie_key()
)
# Look for tunein player
tunein_urls = TuneInBaseIE._extract_urls(webpage)
if tunein_urls:
return _playlist_from_matches(tunein_urls)
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie="MTVServicesEmbedded")
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Yahoo")
# Look for embedded sbs.com.au player
mobj = re.search(
r"""(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1""",
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "SBS")
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Cinchcast")
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage,
)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)', webpage
)
if mobj is not None:
return self.url_result(mobj.group("url"), "MLB")
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage,
)
if mobj is not None:
return self.url_result(
self._proto_relative_url(mobj.group("url"), scheme="http:"), "CondeNast"
)
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Livestream")
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"), "Zapiks")
# Look for Kaltura embeds
kaltura_url = KalturaIE._extract_url(webpage)
if kaltura_url:
return self.url_result(
smuggle_url(kaltura_url, {"source_url": url}), KalturaIE.ie_key()
)
# Look for Eagle.Platform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(eagleplatform_url, EaglePlatformIE.ie_key())
# Look for ClipYou (uses Eagle.Platform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"',
webpage,
)
if mobj is not None:
return self.url_result(
"eagleplatform:%(host)s:%(id)s" % mobj.groupdict(), "EaglePlatform"
)
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Webcaster embeds
webcaster_url = WebcasterFeedIE._extract_url(self, webpage)
if webcaster_url:
return self.url_result(webcaster_url, ie=WebcasterFeedIE.ie_key())
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?',
webpage,
)
if mobj is not None:
return self.url_result("5min:%s" % mobj.group("id"), "FiveMin")
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(mobj.group("url"))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, "NBCSportsVPlayer")
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1',
webpage,
)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group("url"), "NBCNews")
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, "GoogleDrive")
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL,
webpage,
)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group("url")), "UDNEmbed"
)
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, "SenateISVP")
# Look for Dailymotion Cloud videos
dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
if dmcloud_url:
return self.url_result(dmcloud_url, "DailymotionCloud")
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_url = JWPlatformIE._extract_url(webpage)
if jwplatform_url:
return self.url_result(jwplatform_url, "JWPlatform")
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(
self._proto_relative_url(digiteka_url), DigitekaIE.ie_key()
)
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Piksel embeds
piksel_url = PikselIE._extract_url(webpage)
if piksel_url:
return self.url_result(piksel_url, PikselIE.ie_key())
# Look for Limelight embeds
mobj = re.search(
r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P<id>[a-z0-9]{32})',
webpage,
)
if mobj:
lm = {
"Media": "media",
"Channel": "channel",
"ChannelList": "channel_list",
}
return self.url_result(
smuggle_url(
"limelight:%s:%s" % (lm[mobj.group(1)], mobj.group(2)),
{"source_url": url},
),
"Limelight%s" % mobj.group(1),
mobj.group(2),
)
mobj = re.search(
r"""(?sx)
<object[^>]+class=(["\'])LimelightEmbeddedPlayerFlash\1[^>]*>.*?
<param[^>]+
name=(["\'])flashVars\2[^>]+
value=(["\'])(?:(?!\3).)*mediaId=(?P<id>[a-z0-9]{32})
""",
webpage,
)
if mobj:
return self.url_result(
smuggle_url("limelight:media:%s" % mobj.group("id"), {"source_url": url}),
"LimelightMedia",
mobj.group("id"),
)
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage,
)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), "AdobeTVVideo"
)
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage,
)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), "Vine"
)
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vod-platform\.net/[eE]mbed/.+?)\1',
webpage,
)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group("url"))), "VODPlatform"
)
# Look for Mangomolo embeds
mobj = re.search(
r"""(?x)<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?admin\.mangomolo\.com/analytics/index\.php/customers/embed/
(?:
video\?.*?\bid=(?P<video_id>\d+)|
index\?.*?\bchannelid=(?P<channel_id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)
).+?)\1""",
webpage,
)
if mobj is not None:
info = {
"_type": "url_transparent",
"url": self._proto_relative_url(unescapeHTML(mobj.group("url"))),
"title": video_title,
"description": video_description,
"thumbnail": video_thumbnail,
"uploader": video_uploader,
}
video_id = mobj.group("video_id")
if video_id:
info.update(
{
"ie_key": "MangomoloVideo",
"id": video_id,
}
)
else:
info.update(
{
"ie_key": "MangomoloLive",
"id": mobj.group("channel_id"),
}
)
return info
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key()
)
# Look for LiveLeak embeds
liveleak_url = LiveLeakIE._extract_url(webpage)
if liveleak_url:
return self.url_result(liveleak_url, "LiveLeak")
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
"_type": "url_transparent",
"ie_key": ThreeQSDNIE.ie_key(),
"url": self._proto_relative_url(threeqsdn_url),
"title": video_title,
"description": video_description,
"thumbnail": video_thumbnail,
"uploader": video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Look for DBTV embeds
dbtv_urls = DBTVIE._extract_urls(webpage)
if dbtv_urls:
return _playlist_from_matches(dbtv_urls, ie=DBTVIE.ie_key())
# Look for Videa embeds
videa_urls = VideaIE._extract_urls(webpage)
if videa_urls:
return _playlist_from_matches(videa_urls, ie=VideaIE.ie_key())
# Look for 20 minuten embeds
twentymin_urls = TwentyMinutenIE._extract_urls(webpage)
if twentymin_urls:
return _playlist_from_matches(twentymin_urls, ie=TwentyMinutenIE.ie_key())
# Look for Openload embeds
openload_urls = OpenloadIE._extract_urls(webpage)
if openload_urls:
return _playlist_from_matches(openload_urls, ie=OpenloadIE.ie_key())
# Look for VideoPress embeds
videopress_urls = VideoPressIE._extract_urls(webpage)
if videopress_urls:
return _playlist_from_matches(videopress_urls, ie=VideoPressIE.ie_key())
# Look for Rutube embeds
rutube_urls = RutubeIE._extract_urls(webpage)
if rutube_urls:
return _playlist_from_matches(rutube_urls, ie=RutubeIE.ie_key())
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type="VideoObject"
)
if json_ld.get("url"):
info_dict.update(
{
"title": video_title or info_dict["title"],
"description": video_description,
"thumbnail": video_thumbnail,
"age_limit": age_limit,
}
)
info_dict.update(json_ld)
return info_dict
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id="hls")
if entries:
for entry in entries:
entry.update(
{
"id": video_id,
"title": video_title,
}
)
self._sort_formats(entry["formats"])
return self.playlist_result(entries)
jwplayer_data_str = self._find_jwplayer_data(webpage)
if jwplayer_data_str:
try:
jwplayer_data = self._parse_json(
jwplayer_data_str, video_id, transform_source=js_to_json
)
return self._parse_jwplayer_data(jwplayer_data, video_id)
except ExtractorError:
pass
def check_video(vurl):
    """Heuristic: decide whether *vurl* plausibly points at a video.

    YouTube and RTMP URLs are always accepted; any other URL must have
    a dotted path whose extension is not a known non-video type
    (images, subtitles, flash, scripts).
    """
    # Known extractors take precedence over the extension heuristic.
    if YoutubeIE.suitable(vurl) or RtmpIE.suitable(vurl):
        return True
    url_path = compat_urlparse.urlparse(vurl).path
    non_video_exts = (
        "swf",
        "png",
        "jpg",
        "srt",
        "sbv",
        "sub",
        "vtt",
        "ttml",
        "js",
    )
    return "." in url_path and determine_ext(url_path) not in non_video_exts
def filter_video(urls):
    # Keep only the URLs that check_video() accepts as plausible videos.
    return [candidate for candidate in urls if check_video(candidate)]
# Start with something easy: JW Player in SWFObject
found = filter_video(
re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
)
if not found:
# Look for gorilla-vid style embedding
found = filter_video(
re.findall(
r"""(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']""",
webpage,
)
)
if not found:
# Broaden the search a little bit
found = filter_video(
re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
)
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(
re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']',
webpage,
)
)
if not found:
# Flow player
found = filter_video(
re.findall(
r"""(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
""",
webpage,
)
)
if not found:
# Cinerama player
found = re.findall(r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
# twitter:player:stream should be checked before twitter:player since
# it is expected to contain a raw stream (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
found = filter_video(
re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"',
webpage,
)
)
if not found:
# We look for Open Graph info:
# We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
m_video_type = re.findall(
r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage
)
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
if m_video_type is not None:
found = filter_video(
re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
)
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage,
)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get("Refresh")
if refresh_header:
# In python 2 response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode("iso-8859-1")
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
self.report_following_redirect(new_url)
return {
"_type": "url",
"url": new_url,
}
if not found:
# twitter:player is a https URL to iframe player that may or may not
# be supported by youtube-dl thus this is checked the very last (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
embed_url = self._html_search_meta("twitter:player", webpage, default=None)
if embed_url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace("\\/", "/")
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, "Youtube"))
continue
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
"id": video_id,
"uploader": video_uploader,
"title": video_title,
"age_limit": age_limit,
}
if RtmpIE.suitable(video_url):
entry_info_dict.update(
{
"_type": "url_transparent",
"ie_key": RtmpIE.ie_key(),
"url": video_url,
}
)
entries.append(entry_info_dict)
continue
ext = determine_ext(video_url)
if ext == "smil":
entry_info_dict["formats"] = self._extract_smil_formats(video_url, video_id)
elif ext == "xspf":
return self.playlist_result(
self._extract_xspf_playlist(video_url, video_id), video_id
)
elif ext == "m3u8":
entry_info_dict["formats"] = self._extract_m3u8_formats(
video_url, video_id, ext="mp4"
)
elif ext == "mpd":
entry_info_dict["formats"] = self._extract_mpd_formats(video_url, video_id)
elif ext == "f4m":
entry_info_dict["formats"] = self._extract_f4m_formats(video_url, video_id)
elif re.search(r"(?i)\.(?:ism|smil)/manifest", video_url) and video_url != url:
# Just matching .ism/manifest is not enough to be reliably sure
# whether it's actually an ISM manifest or some other streaming
# manifest since there are various streaming URL formats
# possible (see [1]) as well as some other shenanigans like
# .smil/manifest URLs that actually serve an ISM (see [2]) and
# so on.
# Thus the most reasonable way to solve this is to delegate
# to generic extractor in order to look into the contents of
# the manifest itself.
# 1. https://azure.microsoft.com/en-us/documentation/articles/media-services-deliver-content-overview/#streaming-url-formats
# 2. https://svs.itworkscdn.net/lbcivod/smil:itwfcdn/lbci/170976.smil/Manifest
entry_info_dict = self.url_result(
smuggle_url(video_url, {"to_generic": True}), GenericIE.ie_key()
)
else:
entry_info_dict["url"] = video_url
if entry_info_dict.get("formats"):
self._sort_formats(entry_info_dict["formats"])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get("title") is not None:
e["title"] = "%s (%d)" % (e["title"], num)
return {
"_type": "playlist",
"entries": entries,
}
|
https://github.com/ytdl-org/youtube-dl/issues/12410
|
pb3:youtube-dl jhawk$ git checkout b898f0a173fa040ddf95dbd97650cec07a8f19f5
Previous HEAD position was a50862b73... [downloader/external] Add missing import and PEP8
HEAD is now at b898f0a17... [elpais] Fix typo and improve extraction (closes #12139)
pb3:youtube-dl jhawk$ python test/test_download.py TestDownload.test_Generic_61
[generic] sjc: Requesting header
[generic] sjc: Downloading webpage
[generic] sjc: Extracting information
[info] Writing video description metadata as JSON to: sjclive.info.json
.
----------------------------------------------------------------------
Ran 1 test in 0.810s
OK
pb3:youtube-dl jhawk$ git checkout a4a554a79354981fcab55de8eaab7b95a40bbb48
Previous HEAD position was b898f0a17... [elpais] Fix typo and improve extraction (closes #12139)
HEAD is now at a4a554a79... [generic] Try parsing JWPlayer embedded videos (closes #12030)
pb3:youtube-dl jhawk$ python test/test_download.py TestDownload.test_Generic_61
[generic] sjc: Requesting header
[generic] sjc: Downloading webpage
[generic] sjc: Extracting information
ERROR: An extractor error has occurred. (caused by KeyError(u'title',)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/Users/jhawk/src/youtube-dl/youtube_dl/extractor/common.py", line 370, in extract
return self._real_extract(url)
File "/Users/jhawk/src/youtube-dl/youtube_dl/extractor/generic.py", line 2507, in _real_extract
return self._parse_jwplayer_data(jwplayer_data, video_id)
File "/Users/jhawk/src/youtube-dl/youtube_dl/extractor/common.py", line 2181, in _parse_jwplayer_data
'title': video_data['title'] if require_title else video_data.get('title'),
KeyError: u'title'
Traceback (most recent call last):
File "/Users/jhawk/src/youtube-dl/youtube_dl/YoutubeDL.py", line 696, in extract_info
ie_result = ie.extract(url)
File "/Users/jhawk/src/youtube-dl/youtube_dl/extractor/common.py", line 376, in extract
raise ExtractorError('An extractor error has occurred.', cause=e)
ExtractorError: An extractor error has occurred. (caused by KeyError(u'title',)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
E
======================================================================
ERROR: test_Generic_61 (__main__.TestDownload)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test/test_download.py", line 142, in test_template
force_generic_extractor=params.get('force_generic_extractor', False))
File "/Users/jhawk/src/youtube-dl/youtube_dl/YoutubeDL.py", line 711, in extract_info
self.report_error(compat_str(e), e.format_traceback())
File "/Users/jhawk/src/youtube-dl/youtube_dl/YoutubeDL.py", line 570, in report_error
self.trouble(error_message, tb)
File "/Users/jhawk/src/youtube-dl/youtube_dl/YoutubeDL.py", line 540, in trouble
raise DownloadError(message, exc_info)
DownloadError: ERROR: An extractor error has occurred. (caused by KeyError(u'title',)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
----------------------------------------------------------------------
Ran 1 test in 0.781s
FAILED (errors=1)
|
KeyError
|
def __init__(self, code=None, msg=None):
    """Proxy failure: *code* is a protocol error code, *msg* its description.

    When only a code is supplied, derive a human-readable message from
    the class's CODES table, falling back to "unknown error" for codes
    the table does not know.
    """
    if msg is None and code is not None:
        looked_up = self.CODES.get(code)
        msg = looked_up if looked_up else "unknown error"
    super(ProxyError, self).__init__(code, msg)
|
def __init__(self, code=None, msg=None):
    """Proxy failure: *code* is a protocol error code, *msg* its description.

    When only a code is supplied, look up a human-readable message in
    self.CODES, falling back to "unknown error" for unknown codes.
    """
    if code is not None and msg is None:
        # Bug fix: the original used `and`, which inverted the lookup —
        # a *known* code produced the literal "unknown error" and an
        # unknown code produced a falsy message. `or` lets the table
        # entry win and uses the fallback only on a miss.
        msg = self.CODES.get(code) or "unknown error"
    super(ProxyError, self).__init__(code, msg)
|
https://github.com/ytdl-org/youtube-dl/issues/11355
|
$ youtube-dl -v --proxy socks5://127.0.0.1:9050/ http://www.spiegel.tv/filme/bbc-gefaehrliche-strassen-sibirien/
[debug] System config: []
[debug] User config: [u'--restrict-filenames', u'--no-mtime', u'--no-part', u'-x', u'-k', u'--audio-format', u'mp3', u'--audio-quality', u'0']
[debug] Command-line args: [u'-v', u'--proxy', u'socks5://127.0.0.1:9050/', u'http://www.spiegel.tv/filme/bbc-gefaehrliche-strassen-sibirien/']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2016.12.01
[debug] Python version 2.7.12 - Linux-4.1.34-33-default-x86_64-with-SuSE-42.1-x86_64
[debug] exe versions: ffmpeg 3.0.2, ffprobe 3.0.2, rtmpdump 2.4
[debug] Proxy map: {u'http': u'socks5://127.0.0.1:9050/', u'https': u'socks5://127.0.0.1:9050/'}
[Spiegeltv] bbc-gefaehrliche-strassen-sibirien: Downloading webpage
[Spiegeltv] bbc-gefaehrliche-strassen-sibirien: Downloading version information
[Spiegeltv] bbc-gefaehrliche-strassen-sibirien: Downloading object information
[Spiegeltv] bbc-gefaehrliche-strassen-sibirien: Downloading media information
[Spiegeltv] bbc-gefaehrliche-strassen-sibirien: Downloading server information
[Spiegeltv] bbc-gefaehrliche-strassen-sibirien: Checking hls video format URL
[Spiegeltv] bbc-gefaehrliche-strassen-sibirien: Checking hls video format URL
Traceback (most recent call last):
File "/usr/lib64/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/lib64/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/usr/bin/youtube-dl/__main__.py", line 19, in <module>
File "/usr/bin/youtube-dl/youtube_dl/__init__.py", line 444, in main
File "/usr/bin/youtube-dl/youtube_dl/__init__.py", line 434, in _real_main
File "/usr/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1791, in download
File "/usr/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 694, in extract_info
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 357, in extract
File "/usr/bin/youtube-dl/youtube_dl/extractor/spiegeltv.py", line 94, in _real_extract
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 1004, in _check_formats
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 1003, in <lambda>
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 1022, in _is_valid_url
File "/usr/bin/youtube-dl/youtube_dl/extractor/common.py", line 404, in _request_webpage
File "/usr/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 2001, in urlopen
File "/usr/lib64/python2.7/urllib2.py", line 429, in open
response = self._open(req, data)
File "/usr/lib64/python2.7/urllib2.py", line 447, in _open
'_open', req)
File "/usr/lib64/python2.7/urllib2.py", line 407, in _call_chain
result = func(*args)
File "/usr/bin/youtube-dl/youtube_dl/utils.py", line 872, in http_open
File "/usr/lib64/python2.7/urllib2.py", line 1195, in do_open
h.request(req.get_method(), req.get_selector(), req.data, headers)
File "/usr/lib64/python2.7/httplib.py", line 1057, in request
self._send_request(method, url, body, headers)
File "/usr/lib64/python2.7/httplib.py", line 1097, in _send_request
self.endheaders(body)
File "/usr/lib64/python2.7/httplib.py", line 1053, in endheaders
self._send_output(message_body)
File "/usr/lib64/python2.7/httplib.py", line 897, in _send_output
self.send(msg)
File "/usr/lib64/python2.7/httplib.py", line 859, in send
self.connect()
File "/usr/bin/youtube-dl/youtube_dl/utils.py", line 1002, in connect
File "/usr/bin/youtube-dl/youtube_dl/socks.py", line 269, in connect
def __negotiatesocks4(self,destaddr,destport):
File "/usr/bin/youtube-dl/youtube_dl/socks.py", line 265, in _make_proxy
machine (note: getproxypeername returns the proxy)
File "/usr/bin/youtube-dl/youtube_dl/socks.py", line 240, in _setup_socks5
else:
youtube_dl.socks.Socks5Error: [Errno 4] unknown error
|
youtube_dl.socks.Socks5Error
|
def write_xattr(path, key, value):
    """Write extended attribute *key* = *value* (bytes) on the file at *path*.

    Tries, in order: the ``xattr``/``pyxattr`` Python modules, NTFS
    Alternate Data Streams on Windows, then the ``setfattr``/``xattr``
    command-line tools.  Raises XAttrMetadataError when a tool fails and
    XAttrUnavailableError when no usable tool can be found.
    """
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr
        # Two distinct PyPI packages install as `xattr`: python-pyxattr
        # exposes xattr.set(), the `xattr` package exposes xattr.setxattr().
        if hasattr(xattr, "set"):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/rg3/youtube-dl/issues/5498
            pyxattr_required_version = "0.5.0"
            if version_tuple(xattr.__version__) < version_tuple(
                pyxattr_required_version
            ):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    "python-pyxattr is detected but is too old. "
                    "youtube-dl requires %s or above while your version is %s. "
                    "Falling back to other xattr implementations"
                    % (pyxattr_required_version, xattr.__version__)
                )
            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr
        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            # Normalize OS-level failures into the project's error type.
            raise XAttrMetadataError(e.errno, e.strerror)
    except ImportError:
        if compat_os_name == "nt":
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ":" not in key
            assert os.path.exists(path)
            # "file.ext:key" opens the named ADS of the existing file.
            ads_fn = path + ":" + key
            try:
                with open(ads_fn, "wb") as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            # No Python module available: fall back to CLI tools.
            user_has_setfattr = check_executable("setfattr", ["--version"])
            user_has_xattr = check_executable("xattr", ["-h"])
            if user_has_setfattr or user_has_xattr:
                # CLI tools take the value as a text argument, so decode
                # the bytes value before building the command line.
                value = value.decode("utf-8")
                if user_has_setfattr:
                    executable = "setfattr"
                    opts = ["-n", key, "-v", value]
                elif user_has_xattr:
                    executable = "xattr"
                    opts = ["-w", key, value]
                cmd = (
                    [encodeFilename(executable, True)]
                    + [encodeArgument(o) for o in opts]
                    + [encodeFilename(path, True)]
                )
                try:
                    p = subprocess.Popen(
                        cmd,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        stdin=subprocess.PIPE,
                    )
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate()
                stderr = stderr.decode("utf-8", "replace")
                if p.returncode != 0:
                    # Surface the tool's own diagnostic text to the caller.
                    raise XAttrMetadataError(p.returncode, stderr)
            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith("linux"):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool)."
                    )
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary."
                    )
|
def write_xattr(path, key, value):
    """Write extended attribute *key* = *value* (bytes) on the file at *path*.

    Tries, in order: the 'pyxattr'/'xattr' Python modules, NTFS Alternate
    Data Streams on Windows, and the setfattr/xattr command-line tools.
    Raises XAttrMetadataError when a write fails and XAttrUnavailableError
    when no usable backend can be found.
    """
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr
        # Both python-pyxattr and the incompatible standalone 'xattr'
        # package install a module named 'xattr'.  Only pyxattr exposes
        # xattr.set(); the standalone module exposes xattr.setxattr()
        # instead, so pick the right entry point here to avoid
        # AttributeError: 'module' object has no attribute 'set'
        # (see https://github.com/ytdl-org/youtube-dl/issues/9054).
        if hasattr(xattr, "set"):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/rg3/youtube-dl/issues/5498
            pyxattr_required_version = "0.5.0"
            if version_tuple(xattr.__version__) < version_tuple(
                pyxattr_required_version
            ):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    "python-pyxattr is detected but is too old. "
                    "youtube-dl requires %s or above while your version is %s. "
                    "Falling back to other xattr implementations"
                    % (pyxattr_required_version, xattr.__version__)
                )
            setxattr = xattr.set
        else:  # standalone xattr module
            setxattr = xattr.setxattr
        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)
    except ImportError:
        if compat_os_name == "nt":
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ":" not in key
            assert os.path.exists(path)
            ads_fn = path + ":" + key
            try:
                with open(ads_fn, "wb") as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable("setfattr", ["--version"])
            user_has_xattr = check_executable("xattr", ["-h"])
            if user_has_setfattr or user_has_xattr:
                # The CLI tools take the value as text, not bytes.
                value = value.decode("utf-8")
                if user_has_setfattr:
                    executable = "setfattr"
                    opts = ["-n", key, "-v", value]
                elif user_has_xattr:
                    executable = "xattr"
                    opts = ["-w", key, value]
                cmd = (
                    [encodeFilename(executable, True)]
                    + [encodeArgument(o) for o in opts]
                    + [encodeFilename(path, True)]
                )
                try:
                    p = subprocess.Popen(
                        cmd,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        stdin=subprocess.PIPE,
                    )
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate()
                stderr = stderr.decode("utf-8", "replace")
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)
            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith("linux"):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool)."
                    )
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary."
                    )
|
https://github.com/ytdl-org/youtube-dl/issues/9054
|
$ youtube-dl --write-auto-sub --sub-format srt --include-ads --console-title --newline --embed-subs --xattrs --add-metadata -v https://www.youtube.com/watch?v=Bfktt22nUG4
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'--write-auto-sub', u'--sub-format', u'srt', u'--include-ads', u'--console-title', u'--newline', u'--embed-subs', u'--xattrs', u'--add-metadata', u'-v', u'https://www.youtube.com/watch?v=Bfktt22nUG4']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2016.04.01
[debug] Python version 2.7.10 - Darwin-14.5.0-x86_64-i386-64bit
[debug] exe versions: ffmpeg 2.7.1, ffprobe 2.7.1
[debug] Proxy map: {}
[youtube] Bfktt22nUG4: Downloading webpage
[youtube] Bfktt22nUG4: Downloading video info webpage
[youtube] Bfktt22nUG4: Extracting video information
[youtube] Bfktt22nUG4: Looking for automatic captions
[youtube] Bfktt22nUG4: Downloading MPD manifest
WARNING: No subtitle format found matching "srt" for language en, using vtt
[info] Writing video subtitles to: iPhone SE (parody)-Bfktt22nUG4.en.vtt
WARNING: Requested formats are incompatible for merge and will be merged into mkv.
[download] iPhone SE (parody)-Bfktt22nUG4.mkv has already been downloaded and merged
[ffmpeg] Adding metadata to 'iPhone SE (parody)-Bfktt22nUG4.mkv'
[debug] ffmpeg command line: ffmpeg -y -i 'file:iPhone SE (parody)-Bfktt22nUG4.mkv' -c copy -metadata 'comment=Previous sketch: https://www.youtube.com/watch?v=--Fg3YbwG8E&index=1&list=PLiWL8lZPZ2_kafFTewyr06GuSPFd0m8hf
Help me make fun of everything! http://bit.ly/1A0Crdb
TWITTER: http://www.twitter.com/jacksfilms
FACEBOOK: http://www.facebook.com/jacksfilmsfans
LIVE CHAT every FRIDAY: http://www.younow.com/jacksfilms
SNAPCHAT: realjacksfilms
MERCH: http://www.jacksfilms.spreadshirt.com
PERISCOPE: http://www.periscope.tv/jacksfilms' -metadata 'description=Previous sketch: https://www.youtube.com/watch?v=--Fg3YbwG8E&index=1&list=PLiWL8lZPZ2_kafFTewyr06GuSPFd0m8hf
Help me make fun of everything! http://bit.ly/1A0Crdb
TWITTER: http://www.twitter.com/jacksfilms
FACEBOOK: http://www.facebook.com/jacksfilmsfans
LIVE CHAT every FRIDAY: http://www.younow.com/jacksfilms
SNAPCHAT: realjacksfilms
MERCH: http://www.jacksfilms.spreadshirt.com
PERISCOPE: http://www.periscope.tv/jacksfilms' -metadata artist=jacksfilms -metadata 'title=iPhone SE (parody)' -metadata date=20160321 -metadata 'purl=https://www.youtube.com/watch?v=Bfktt22nUG4' 'file:iPhone SE (parody)-Bfktt22nUG4.temp.mkv'
[ffmpeg] Embedding subtitles in 'iPhone SE (parody)-Bfktt22nUG4.mkv'
[debug] ffmpeg command line: ffmpeg -y -i 'file:iPhone SE (parody)-Bfktt22nUG4.mkv' -i 'file:iPhone SE (parody)-Bfktt22nUG4.en.vtt' -map 0 -c copy -map -0:s -map 1:0 -metadata:s:s:0 language=eng 'file:iPhone SE (parody)-Bfktt22nUG4.temp.mkv'
Deleting original file iPhone SE (parody)-Bfktt22nUG4.en.vtt (pass -k to keep)
[metadata] Writing metadata to file's xattrs
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/usr/local/bin/youtube-dl/__main__.py", line 19, in <module>
File "/usr/local/bin/youtube-dl/youtube_dl/__init__.py", line 419, in main
File "/usr/local/bin/youtube-dl/youtube_dl/__init__.py", line 409, in _real_main
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1725, in download
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 680, in extract_info
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 725, in process_ie_result
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1371, in process_video_result
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1707, in process_info
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1771, in post_process
File "/usr/local/bin/youtube-dl/youtube_dl/postprocessor/xattrpp.py", line 158, in run
File "/usr/local/bin/youtube-dl/youtube_dl/postprocessor/xattrpp.py", line 72, in write_xattr
AttributeError: 'module' object has no attribute 'set'
|
AttributeError
|
def real_download(self, filename, info_dict):
    """Download all DASH segments listed in info_dict and concatenate them
    into *filename*.

    Honors the 'test' (first segment only), 'fragment_retries' and
    'skip_unavailable_fragments' downloader params.  Returns True on
    success, False when a fragment ultimately cannot be downloaded and
    skipping is not allowed.
    """
    base_url = info_dict["url"]
    # In test mode only the first segment is fetched.
    segment_urls = (
        [info_dict["segment_urls"][0]]
        if self.params.get("test", False)
        else info_dict["segment_urls"]
    )
    initialization_url = info_dict.get("initialization_url")
    ctx = {
        "filename": filename,
        "total_frags": len(segment_urls) + (1 if initialization_url else 0),
    }
    self._prepare_and_start_frag_download(ctx)
    def combine_url(base_url, target_url):
        # Segment URLs may be absolute or relative to the manifest URL.
        if re.match(r"^https?://", target_url):
            return target_url
        return "%s%s%s" % (base_url, "" if base_url.endswith("/") else "/", target_url)
    segments_filenames = []
    fragment_retries = self.params.get("fragment_retries", 0)
    skip_unavailable_fragments = self.params.get("skip_unavailable_fragments", True)
    def append_url_to_file(target_url, tmp_filename, segment_name):
        # Download one segment (with retries) and append it to the output
        # stream.  Returns False on fatal failure, otherwise None (segment
        # appended, or skipped after exhausting retries).
        target_filename = "%s-%s" % (tmp_filename, segment_name)
        count = 0
        while count <= fragment_retries:
            try:
                success = ctx["dl"].download(
                    target_filename, {"url": combine_url(base_url, target_url)}
                )
                if not success:
                    return False
                down, target_sanitized = sanitize_open(target_filename, "rb")
                ctx["dest_stream"].write(down.read())
                down.close()
                segments_filenames.append(target_sanitized)
                break
            except compat_urllib_error.HTTPError:
                # YouTube may often return 404 HTTP error for a fragment causing the
                # whole download to fail. However if the same fragment is immediately
                # retried with the same request data this usually succeeds (1-2 attemps
                # is usually enough) thus allowing to download the whole file successfully.
                # To be future-proof we will retry all fragments that fail with any
                # HTTP error.
                count += 1
                if count <= fragment_retries:
                    self.report_retry_fragment(segment_name, count, fragment_retries)
        if count > fragment_retries:
            # Retries exhausted: either skip this fragment or abort.
            if skip_unavailable_fragments:
                self.report_skip_fragment(segment_name)
                return
            self.report_error("giving up after %s fragment retries" % fragment_retries)
            return False
    if initialization_url:
        append_url_to_file(initialization_url, ctx["tmpfilename"], "Init")
    for i, segment_url in enumerate(segment_urls):
        append_url_to_file(segment_url, ctx["tmpfilename"], "Seg%d" % i)
    self._finish_frag_download(ctx)
    # Remove the per-segment temporary files once concatenation is done.
    for segment_file in segments_filenames:
        os.remove(encodeFilename(segment_file))
    return True
|
def real_download(self, filename, info_dict):
    """Download all DASH segments listed in info_dict and concatenate them
    into *filename*.

    Returns True on success, False on fatal failure.  Honors the 'test',
    'fragment_retries' and 'skip_unavailable_fragments' downloader params.
    """
    base_url = info_dict["url"]
    # In test mode only the first segment is fetched.
    segment_urls = (
        [info_dict["segment_urls"][0]]
        if self.params.get("test", False)
        else info_dict["segment_urls"]
    )
    initialization_url = info_dict.get("initialization_url")
    ctx = {
        "filename": filename,
        "total_frags": len(segment_urls) + (1 if initialization_url else 0),
    }
    self._prepare_and_start_frag_download(ctx)
    def combine_url(base_url, target_url):
        # Segment URLs may be absolute or relative to the manifest URL.
        if re.match(r"^https?://", target_url):
            return target_url
        return "%s%s%s" % (base_url, "" if base_url.endswith("/") else "/", target_url)
    segments_filenames = []
    fragment_retries = self.params.get("fragment_retries", 0)
    skip_unavailable_fragments = self.params.get("skip_unavailable_fragments", True)
    def append_url_to_file(target_url, tmp_filename, segment_name):
        # Download one segment (with retries) and append it to the output
        # stream.  Returns False on fatal failure, otherwise None.
        target_filename = "%s-%s" % (tmp_filename, segment_name)
        count = 0
        while count <= fragment_retries:
            try:
                success = ctx["dl"].download(
                    target_filename, {"url": combine_url(base_url, target_url)}
                )
                if not success:
                    return False
                down, target_sanitized = sanitize_open(target_filename, "rb")
                ctx["dest_stream"].write(down.read())
                down.close()
                segments_filenames.append(target_sanitized)
                break
            except compat_urllib_error.HTTPError:
                # Servers may return transient HTTP errors -- not only 404
                # (e.g. 400, see
                # https://github.com/ytdl-org/youtube-dl/issues/10448) --
                # for a fragment that succeeds when immediately retried with
                # the same request data (1-2 attempts is usually enough), so
                # retry every fragment that fails with any HTTP error.
                count += 1
                if count <= fragment_retries:
                    self.report_retry_fragment(segment_name, count, fragment_retries)
        if count > fragment_retries:
            # Retries exhausted: either skip this fragment or abort the
            # whole download, depending on user preference.
            if skip_unavailable_fragments:
                self.report_skip_fragment(segment_name)
                return
            self.report_error("giving up after %s fragment retries" % fragment_retries)
            return False
    if initialization_url:
        append_url_to_file(initialization_url, ctx["tmpfilename"], "Init")
    for i, segment_url in enumerate(segment_urls):
        append_url_to_file(segment_url, ctx["tmpfilename"], "Seg%d" % i)
    self._finish_frag_download(ctx)
    # Remove the per-segment temporary files once concatenation is done.
    for segment_file in segments_filenames:
        os.remove(encodeFilename(segment_file))
    return True
|
https://github.com/ytdl-org/youtube-dl/issues/10448
|
PS C:\dev\youtube-dl\master> youtube-dl.exe https://www.twitch.tv/naysayer88/v/85713845 -
v
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['https://www.twitch.tv/naysayer88/v/85713845', '-v']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2016.08.24.1
[debug] Python version 3.4.4 - Windows-10-10.0.10240
[debug] exe versions: ffmpeg N-81192-g04da20e, ffprobe N-81192-g04da20e, rtmpdump 2.4
[debug] Proxy map: {}
[twitch:vod] 85713845: Downloading vod info JSON
[twitch:vod] 85713845: Downloading vod access token
[twitch:vod] 85713845: Downloading m3u8 information
[debug] Invoking downloader on 'http://vod.edgecast.hls.ttvnw.net/v1/AUTH_system/vods_60f4/naysayer88_23003365072_506378255/chunked/index-dvr.m3u8'
[hlsnative] Downloading m3u8 manifest
[hlsnative] Total fragments: 43
[download] Destination: Gammin' - Obduction Spoilerama-v85713845.mp4
[download] 34.9% of ~13.53MiB at Unknown speed ETA Unknown ETAERROR: unable to download video data: HTTP Error 400: Bad Request
Traceback (most recent call last):
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\YoutubeDL.py", line 1694, in process_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\YoutubeDL.py", line 1636, in dl
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\common.py", line 354, in download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\hls.py", line 102, in real_download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\common.py", line 354, in download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\http.py", line 58, in real_download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\YoutubeDL.py", line 1996, in urlopen
File "C:\Python\Python34\lib\urllib\request.py", line 470, in open
response = meth(req, response)
File "C:\Python\Python34\lib\urllib\request.py", line 580, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python\Python34\lib\urllib\request.py", line 508, in error
return self._call_chain(*args)
File "C:\Python\Python34\lib\urllib\request.py", line 442, in _call_chain
result = func(*args)
File "C:\Python\Python34\lib\urllib\request.py", line 588, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 400: Bad Request
|
urllib.error.HTTPError
|
def append_url_to_file(target_url, tmp_filename, segment_name):
    """Fetch one segment (retrying on HTTP errors) and append its bytes to
    the destination stream.

    Returns False on a fatal failure, otherwise None (the segment was
    either appended or -- when retries run out and skipping is allowed --
    reported as skipped).
    """
    fragment_path = "%s-%s" % (tmp_filename, segment_name)
    attempt = 0
    appended = False
    while attempt <= fragment_retries:
        try:
            if not ctx["dl"].download(
                fragment_path, {"url": combine_url(base_url, target_url)}
            ):
                return False
            src, sanitized_path = sanitize_open(fragment_path, "rb")
            ctx["dest_stream"].write(src.read())
            src.close()
            segments_filenames.append(sanitized_path)
            appended = True
            break
        except compat_urllib_error.HTTPError:
            # Fragments that fail with an HTTP error (YouTube is known to
            # return spurious 404s) often succeed when retried immediately
            # with identical request data (1-2 attempts is usually enough),
            # so retry on any HTTP error to be future-proof.
            attempt += 1
            if attempt <= fragment_retries:
                self.report_retry_fragment(segment_name, attempt, fragment_retries)
    if not appended:
        # Retries exhausted: skip or abort, per user preference.
        if skip_unavailable_fragments:
            self.report_skip_fragment(segment_name)
            return
        self.report_error("giving up after %s fragment retries" % fragment_retries)
        return False
|
def append_url_to_file(target_url, tmp_filename, segment_name):
    """Download one segment (with retries) and append it to the destination
    stream.

    Returns False on fatal failure, otherwise None.
    """
    target_filename = "%s-%s" % (tmp_filename, segment_name)
    count = 0
    while count <= fragment_retries:
        try:
            success = ctx["dl"].download(
                target_filename, {"url": combine_url(base_url, target_url)}
            )
            if not success:
                return False
            down, target_sanitized = sanitize_open(target_filename, "rb")
            ctx["dest_stream"].write(down.read())
            down.close()
            segments_filenames.append(target_sanitized)
            break
        except compat_urllib_error.HTTPError:
            # Fragment requests may fail with transient HTTP errors other
            # than 404 (e.g. 400, see
            # https://github.com/ytdl-org/youtube-dl/issues/10448) and
            # usually succeed when immediately retried with the same request
            # data, so retry on any HTTP error instead of re-raising
            # everything that is not a 404.
            count += 1
            if count <= fragment_retries:
                self.report_retry_fragment(segment_name, count, fragment_retries)
    if count > fragment_retries:
        self.report_error("giving up after %s fragment retries" % fragment_retries)
        return False
|
https://github.com/ytdl-org/youtube-dl/issues/10448
|
PS C:\dev\youtube-dl\master> youtube-dl.exe https://www.twitch.tv/naysayer88/v/85713845 -
v
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['https://www.twitch.tv/naysayer88/v/85713845', '-v']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2016.08.24.1
[debug] Python version 3.4.4 - Windows-10-10.0.10240
[debug] exe versions: ffmpeg N-81192-g04da20e, ffprobe N-81192-g04da20e, rtmpdump 2.4
[debug] Proxy map: {}
[twitch:vod] 85713845: Downloading vod info JSON
[twitch:vod] 85713845: Downloading vod access token
[twitch:vod] 85713845: Downloading m3u8 information
[debug] Invoking downloader on 'http://vod.edgecast.hls.ttvnw.net/v1/AUTH_system/vods_60f4/naysayer88_23003365072_506378255/chunked/index-dvr.m3u8'
[hlsnative] Downloading m3u8 manifest
[hlsnative] Total fragments: 43
[download] Destination: Gammin' - Obduction Spoilerama-v85713845.mp4
[download] 34.9% of ~13.53MiB at Unknown speed ETA Unknown ETAERROR: unable to download video data: HTTP Error 400: Bad Request
Traceback (most recent call last):
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\YoutubeDL.py", line 1694, in process_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\YoutubeDL.py", line 1636, in dl
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\common.py", line 354, in download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\hls.py", line 102, in real_download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\common.py", line 354, in download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\http.py", line 58, in real_download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\YoutubeDL.py", line 1996, in urlopen
File "C:\Python\Python34\lib\urllib\request.py", line 470, in open
response = meth(req, response)
File "C:\Python\Python34\lib\urllib\request.py", line 580, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python\Python34\lib\urllib\request.py", line 508, in error
return self._call_chain(*args)
File "C:\Python\Python34\lib\urllib\request.py", line 442, in _call_chain
result = func(*args)
File "C:\Python\Python34\lib\urllib\request.py", line 588, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 400: Bad Request
|
urllib.error.HTTPError
|
def real_download(self, filename, info_dict):
    """Natively download an HLS (m3u8) stream into *filename*.

    Delegates to FFmpegFD when the playlist uses features this downloader
    does not support.  Handles AES-128 encrypted fragments and honors the
    'fragment_retries' / 'skip_unavailable_fragments' / 'test' downloader
    params.  Returns True on success, False on fatal failure.
    """
    man_url = info_dict["url"]
    self.to_screen("[%s] Downloading m3u8 manifest" % self.FD_NAME)
    manifest = self.ydl.urlopen(man_url).read()
    s = manifest.decode("utf-8", "ignore")
    if not self.can_download(s):
        self.report_warning(
            "hlsnative has detected features it does not support, "
            "extraction will be delegated to ffmpeg"
        )
        fd = FFmpegFD(self.ydl, self.params)
        for ph in self._progress_hooks:
            fd.add_progress_hook(ph)
        return fd.real_download(filename, info_dict)
    # Non-comment, non-blank playlist lines are the media fragments.
    total_frags = 0
    for line in s.splitlines():
        line = line.strip()
        if line and not line.startswith("#"):
            total_frags += 1
    ctx = {
        "filename": filename,
        "total_frags": total_frags,
    }
    self._prepare_and_start_frag_download(ctx)
    fragment_retries = self.params.get("fragment_retries", 0)
    skip_unavailable_fragments = self.params.get("skip_unavailable_fragments", True)
    test = self.params.get("test", False)
    extra_query = None
    extra_param_to_segment_url = info_dict.get("extra_param_to_segment_url")
    if extra_param_to_segment_url:
        extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)
    i = 0
    media_sequence = 0
    decrypt_info = {"METHOD": "NONE"}
    frags_filenames = []
    for line in s.splitlines():
        line = line.strip()
        if line:
            if not line.startswith("#"):
                # Media fragment line: download (with retries), decrypt if
                # needed, and append to the destination stream.
                frag_url = (
                    line
                    if re.match(r"^https?://", line)
                    else compat_urlparse.urljoin(man_url, line)
                )
                frag_name = "Frag%d" % i
                frag_filename = "%s-%s" % (ctx["tmpfilename"], frag_name)
                if extra_query:
                    frag_url = update_url_query(frag_url, extra_query)
                count = 0
                while count <= fragment_retries:
                    try:
                        success = ctx["dl"].download(frag_filename, {"url": frag_url})
                        if not success:
                            return False
                        down, frag_sanitized = sanitize_open(frag_filename, "rb")
                        frag_content = down.read()
                        down.close()
                        break
                    except compat_urllib_error.HTTPError:
                        # Unavailable (possibly temporary) fragments may be served.
                        # First we try to retry then either skip or abort.
                        # See https://github.com/rg3/youtube-dl/issues/10165,
                        # https://github.com/rg3/youtube-dl/issues/10448).
                        count += 1
                        if count <= fragment_retries:
                            self.report_retry_fragment(
                                frag_name, count, fragment_retries
                            )
                if count > fragment_retries:
                    # Retries exhausted: skip or abort, per user preference.
                    if skip_unavailable_fragments:
                        i += 1
                        media_sequence += 1
                        self.report_skip_fragment(frag_name)
                        continue
                    self.report_error(
                        "giving up after %s fragment retries" % fragment_retries
                    )
                    return False
                if decrypt_info["METHOD"] == "AES-128":
                    # When the playlist gives no IV, the media sequence
                    # number (big-endian) is used instead.
                    iv = decrypt_info.get("IV") or compat_struct_pack(
                        ">8xq", media_sequence
                    )
                    frag_content = AES.new(
                        decrypt_info["KEY"], AES.MODE_CBC, iv
                    ).decrypt(frag_content)
                ctx["dest_stream"].write(frag_content)
                frags_filenames.append(frag_sanitized)
                # We only download the first fragment during the test
                if test:
                    break
                i += 1
                media_sequence += 1
            elif line.startswith("#EXT-X-KEY"):
                # Encryption key declaration applying to subsequent fragments.
                decrypt_info = parse_m3u8_attributes(line[11:])
                if decrypt_info["METHOD"] == "AES-128":
                    if "IV" in decrypt_info:
                        # IV is given as a 0x-prefixed hex string.
                        decrypt_info["IV"] = binascii.unhexlify(
                            decrypt_info["IV"][2:].zfill(32)
                        )
                    if not re.match(r"^https?://", decrypt_info["URI"]):
                        decrypt_info["URI"] = compat_urlparse.urljoin(
                            man_url, decrypt_info["URI"]
                        )
                    if extra_query:
                        decrypt_info["URI"] = update_url_query(
                            decrypt_info["URI"], extra_query
                        )
                    decrypt_info["KEY"] = self.ydl.urlopen(decrypt_info["URI"]).read()
            elif line.startswith("#EXT-X-MEDIA-SEQUENCE"):
                media_sequence = int(line[22:])
    self._finish_frag_download(ctx)
    # Remove the per-fragment temporary files once concatenation is done.
    for frag_file in frags_filenames:
        os.remove(encodeFilename(frag_file))
    return True
|
def real_download(self, filename, info_dict):
    """Natively download an HLS (m3u8) stream into *filename*.

    Delegates to FFmpegFD when the playlist uses features this downloader
    does not support.  Handles AES-128 encrypted fragments.  Returns True
    on success, False on fatal failure.
    """
    man_url = info_dict["url"]
    self.to_screen("[%s] Downloading m3u8 manifest" % self.FD_NAME)
    manifest = self.ydl.urlopen(man_url).read()
    s = manifest.decode("utf-8", "ignore")
    if not self.can_download(s):
        self.report_warning(
            "hlsnative has detected features it does not support, "
            "extraction will be delegated to ffmpeg"
        )
        fd = FFmpegFD(self.ydl, self.params)
        for ph in self._progress_hooks:
            fd.add_progress_hook(ph)
        return fd.real_download(filename, info_dict)
    # Non-comment, non-blank playlist lines are the media fragments.
    total_frags = 0
    for line in s.splitlines():
        line = line.strip()
        if line and not line.startswith("#"):
            total_frags += 1
    ctx = {
        "filename": filename,
        "total_frags": total_frags,
    }
    self._prepare_and_start_frag_download(ctx)
    fragment_retries = self.params.get("fragment_retries", 0)
    skip_unavailable_fragments = self.params.get("skip_unavailable_fragments", True)
    test = self.params.get("test", False)
    extra_query = None
    extra_param_to_segment_url = info_dict.get("extra_param_to_segment_url")
    if extra_param_to_segment_url:
        extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)
    i = 0
    media_sequence = 0
    decrypt_info = {"METHOD": "NONE"}
    frags_filenames = []
    for line in s.splitlines():
        line = line.strip()
        if line:
            if not line.startswith("#"):
                frag_url = (
                    line
                    if re.match(r"^https?://", line)
                    else compat_urlparse.urljoin(man_url, line)
                )
                frag_name = "Frag%d" % i
                frag_filename = "%s-%s" % (ctx["tmpfilename"], frag_name)
                if extra_query:
                    frag_url = update_url_query(frag_url, extra_query)
                count = 0
                while count <= fragment_retries:
                    try:
                        success = ctx["dl"].download(frag_filename, {"url": frag_url})
                        if not success:
                            return False
                        down, frag_sanitized = sanitize_open(frag_filename, "rb")
                        frag_content = down.read()
                        down.close()
                        break
                    except compat_urllib_error.HTTPError:
                        # Unavailable (possibly temporary) fragments may be
                        # served with transient HTTP errors (e.g. 400); a
                        # previously failing fragment often succeeds when
                        # retried immediately, so retry first, then either
                        # skip or abort (see
                        # https://github.com/ytdl-org/youtube-dl/issues/10165,
                        # https://github.com/ytdl-org/youtube-dl/issues/10448).
                        count += 1
                        if count <= fragment_retries:
                            self.report_retry_fragment(
                                frag_name, count, fragment_retries
                            )
                if count > fragment_retries:
                    # Retries exhausted: skip or abort, per user preference.
                    if skip_unavailable_fragments:
                        i += 1
                        media_sequence += 1
                        self.report_skip_fragment(frag_name)
                        continue
                    self.report_error(
                        "giving up after %s fragment retries" % fragment_retries
                    )
                    return False
                if decrypt_info["METHOD"] == "AES-128":
                    # When the playlist gives no IV, the media sequence
                    # number (big-endian) is used instead.
                    iv = decrypt_info.get("IV") or compat_struct_pack(
                        ">8xq", media_sequence
                    )
                    frag_content = AES.new(
                        decrypt_info["KEY"], AES.MODE_CBC, iv
                    ).decrypt(frag_content)
                ctx["dest_stream"].write(frag_content)
                frags_filenames.append(frag_sanitized)
                # We only download the first fragment during the test
                if test:
                    break
                i += 1
                media_sequence += 1
            elif line.startswith("#EXT-X-KEY"):
                # Encryption key declaration applying to subsequent fragments.
                decrypt_info = parse_m3u8_attributes(line[11:])
                if decrypt_info["METHOD"] == "AES-128":
                    if "IV" in decrypt_info:
                        # IV is given as a 0x-prefixed hex string.
                        decrypt_info["IV"] = binascii.unhexlify(
                            decrypt_info["IV"][2:].zfill(32)
                        )
                    if not re.match(r"^https?://", decrypt_info["URI"]):
                        decrypt_info["URI"] = compat_urlparse.urljoin(
                            man_url, decrypt_info["URI"]
                        )
                    if extra_query:
                        decrypt_info["URI"] = update_url_query(
                            decrypt_info["URI"], extra_query
                        )
                    decrypt_info["KEY"] = self.ydl.urlopen(decrypt_info["URI"]).read()
            elif line.startswith("#EXT-X-MEDIA-SEQUENCE"):
                media_sequence = int(line[22:])
    self._finish_frag_download(ctx)
    # Remove the per-fragment temporary files once concatenation is done.
    for frag_file in frags_filenames:
        os.remove(encodeFilename(frag_file))
    return True
|
https://github.com/ytdl-org/youtube-dl/issues/10448
|
PS C:\dev\youtube-dl\master> youtube-dl.exe https://www.twitch.tv/naysayer88/v/85713845 -
v
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['https://www.twitch.tv/naysayer88/v/85713845', '-v']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2016.08.24.1
[debug] Python version 3.4.4 - Windows-10-10.0.10240
[debug] exe versions: ffmpeg N-81192-g04da20e, ffprobe N-81192-g04da20e, rtmpdump 2.4
[debug] Proxy map: {}
[twitch:vod] 85713845: Downloading vod info JSON
[twitch:vod] 85713845: Downloading vod access token
[twitch:vod] 85713845: Downloading m3u8 information
[debug] Invoking downloader on 'http://vod.edgecast.hls.ttvnw.net/v1/AUTH_system/vods_60f4/naysayer88_23003365072_506378255/chunked/index-dvr.m3u8'
[hlsnative] Downloading m3u8 manifest
[hlsnative] Total fragments: 43
[download] Destination: Gammin' - Obduction Spoilerama-v85713845.mp4
[download] 34.9% of ~13.53MiB at Unknown speed ETA Unknown ETAERROR: unable to download video data: HTTP Error 400: Bad Request
Traceback (most recent call last):
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\YoutubeDL.py", line 1694, in process_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\YoutubeDL.py", line 1636, in dl
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\common.py", line 354, in download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\hls.py", line 102, in real_download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\common.py", line 354, in download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\downloader\http.py", line 58, in real_download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpvarrvgfb\build\youtube_dl\YoutubeDL.py", line 1996, in urlopen
File "C:\Python\Python34\lib\urllib\request.py", line 470, in open
response = meth(req, response)
File "C:\Python\Python34\lib\urllib\request.py", line 580, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python\Python34\lib\urllib\request.py", line 508, in error
return self._call_chain(*args)
File "C:\Python\Python34\lib\urllib\request.py", line 442, in _call_chain
result = func(*args)
File "C:\Python\Python34\lib\urllib\request.py", line 588, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 400: Bad Request
|
urllib.error.HTTPError
|
def _real_extract(self, url):
    """Extract Vimeo video metadata and formats for *url*.

    Handles smuggled HTTP headers, embed-only/password-protected videos,
    both the classic and the react-based page layouts, and progressive +
    HLS formats.  Returns a standard info dict (or a url_result for VOD
    feature redirection).
    """
    url, data = unsmuggle_url(url, {})
    # Copy so that the shared std_headers dict is never mutated.
    headers = std_headers.copy()
    if "http_headers" in data:
        headers.update(data["http_headers"])
    if "Referer" not in headers:
        headers["Referer"] = url
    # Extract ID from URL
    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group("id")
    orig_url = url
    if mobj.group("pro") or mobj.group("player"):
        url = "https://player.vimeo.com/video/" + video_id
    else:
        url = "https://vimeo.com/" + video_id
    # Retrieve video webpage to extract further information
    request = sanitized_Request(url, headers=headers)
    try:
        webpage = self._download_webpage(request, video_id)
    except ExtractorError as ee:
        if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
            errmsg = ee.cause.read()
            if (
                b"Because of its privacy settings, this video cannot be played here"
                in errmsg
            ):
                raise ExtractorError(
                    "Cannot download embed-only video without embedding "
                    "URL. Please call youtube-dl with the URL of the page "
                    "that embeds this video.",
                    expected=True,
                )
        raise
    # Now we begin extracting as much information as we can from what we
    # retrieved. First we extract the information common to all extractors,
    # and latter we extract those that are Vimeo specific.
    self.report_extraction(video_id)
    vimeo_config = self._search_regex(
        r"vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));",
        webpage,
        "vimeo config",
        default=None,
    )
    if vimeo_config:
        seed_status = self._parse_json(vimeo_config, video_id).get("seed_status", {})
        if seed_status.get("state") == "failed":
            raise ExtractorError(
                "%s said: %s" % (self.IE_NAME, seed_status["title"]), expected=True
            )
    # Extract the config JSON
    try:
        try:
            config_url = self._html_search_regex(
                r' data-config-url="(.+?)"', webpage, "config URL", default=None
            )
            if not config_url:
                # Sometimes new react-based page is served instead of old one that require
                # different config URL extraction approach (see
                # https://github.com/rg3/youtube-dl/pull/7209)
                vimeo_clip_page_config = self._search_regex(
                    r"vimeo\.clip_page_config\s*=\s*({.+?});",
                    webpage,
                    "vimeo clip page config",
                )
                config_url = self._parse_json(vimeo_clip_page_config, video_id)[
                    "player"
                ]["config_url"]
            config_json = self._download_webpage(config_url, video_id)
            config = json.loads(config_json)
        except RegexNotFoundError:
            # For pro videos or player.vimeo.com urls
            # We try to find out to which variable is assigned the config dic
            m_variable_name = re.search("(\w)\.video\.id", webpage)
            if m_variable_name is not None:
                config_re = r"%s=({[^}].+?});" % re.escape(m_variable_name.group(1))
            else:
                config_re = [r" = {config:({.+?}),assets:", r"(?:[abc])=({.+?});"]
            config = self._search_regex(
                config_re, webpage, "info section", flags=re.DOTALL
            )
            config = json.loads(config)
    except Exception as e:
        # Config extraction failed entirely -- diagnose the common causes
        # (domain-restricted embed, password protection) before giving up.
        if re.search(
            "The creator of this video has not given you permission to embed it on this domain.",
            webpage,
        ):
            raise ExtractorError(
                'The author has restricted the access to this video, try with the "--referer" option'
            )
        if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
            # Password-protected: verify the password once and re-run the
            # extraction; the smuggled marker prevents infinite recursion.
            if "_video_password_verified" in data:
                raise ExtractorError("video password verification failed!")
            self._verify_video_password(url, video_id, webpage)
            return self._real_extract(
                smuggle_url(url, {"_video_password_verified": "verified"})
            )
        else:
            raise ExtractorError("Unable to extract info section", cause=e)
    else:
        if config.get("view") == 4:
            config = self._verify_player_video_password(url, video_id)
    if ">You rented this title.<" in webpage:
        feature_id = config.get("video", {}).get("vod", {}).get("feature_id")
        if feature_id and not data.get("force_feature_id", False):
            return self.url_result(
                smuggle_url(
                    "https://player.vimeo.com/player/%s" % feature_id,
                    {"force_feature_id": True},
                ),
                "Vimeo",
            )
    # Extract title
    video_title = config["video"]["title"]
    # Extract uploader, uploader_url and uploader_id
    video_uploader = config["video"].get("owner", {}).get("name")
    video_uploader_url = config["video"].get("owner", {}).get("url")
    video_uploader_id = (
        video_uploader_url.split("/")[-1] if video_uploader_url else None
    )
    # Extract video thumbnail
    video_thumbnail = config["video"].get("thumbnail")
    if video_thumbnail is None:
        video_thumbs = config["video"].get("thumbs")
        if video_thumbs and isinstance(video_thumbs, dict):
            # Pick the thumbnail with the largest numeric width key.
            _, video_thumbnail = sorted(
                (int(width if width.isdigit() else 0), t_url)
                for (width, t_url) in video_thumbs.items()
            )[-1]
    # Extract video description
    video_description = self._html_search_regex(
        r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
        webpage,
        "description",
        default=None,
    )
    if not video_description:
        video_description = self._html_search_meta("description", webpage, default=None)
    if not video_description and mobj.group("pro"):
        orig_webpage = self._download_webpage(
            orig_url, video_id, note="Downloading webpage for description", fatal=False
        )
        if orig_webpage:
            video_description = self._html_search_meta(
                "description", orig_webpage, default=None
            )
    if not video_description and not mobj.group("player"):
        self._downloader.report_warning("Cannot find video description")
    # Extract video duration
    video_duration = int_or_none(config["video"].get("duration"))
    # Extract upload date
    video_upload_date = None
    # NOTE: mobj is rebound here; the _VALID_URL groups read above are no
    # longer accessible past this point.
    mobj = re.search(r'<time[^>]+datetime="([^"]+)"', webpage)
    if mobj is not None:
        video_upload_date = unified_strdate(mobj.group(1))
    try:
        view_count = int(self._search_regex(r"UserPlays:(\d+)", webpage, "view count"))
        like_count = int(self._search_regex(r"UserLikes:(\d+)", webpage, "like count"))
        comment_count = int(
            self._search_regex(r"UserComments:(\d+)", webpage, "comment count")
        )
    except RegexNotFoundError:
        # This info is only available in vimeo.com/{id} urls
        view_count = None
        like_count = None
        comment_count = None
    formats = []
    # Original-quality source file download (best-effort, may be absent).
    download_request = sanitized_Request(
        "https://vimeo.com/%s?action=load_download_config" % video_id,
        headers={"X-Requested-With": "XMLHttpRequest"},
    )
    download_data = self._download_json(download_request, video_id, fatal=False)
    if download_data:
        source_file = download_data.get("source_file")
        if isinstance(source_file, dict):
            download_url = source_file.get("download_url")
            if (
                download_url
                and not source_file.get("is_cold")
                and not source_file.get("is_defrosting")
            ):
                source_name = source_file.get("public_name", "Original")
                if self._is_valid_url(download_url, video_id, "%s video" % source_name):
                    ext = source_file.get(
                        "extension", determine_ext(download_url)
                    ).lower()
                    formats.append(
                        {
                            "url": download_url,
                            "ext": ext,
                            "width": int_or_none(source_file.get("width")),
                            "height": int_or_none(source_file.get("height")),
                            "filesize": parse_filesize(source_file.get("size")),
                            "format_id": source_name,
                            "preference": 1,
                        }
                    )
    config_files = config["video"].get("files") or config["request"].get("files", {})
    for f in config_files.get("progressive", []):
        video_url = f.get("url")
        if not video_url:
            continue
        formats.append(
            {
                "url": video_url,
                "format_id": "http-%s" % f.get("quality"),
                "width": int_or_none(f.get("width")),
                "height": int_or_none(f.get("height")),
                "fps": int_or_none(f.get("fps")),
                "tbr": int_or_none(f.get("bitrate")),
            }
        )
    m3u8_url = config_files.get("hls", {}).get("url")
    if m3u8_url:
        formats.extend(
            self._extract_m3u8_formats(
                m3u8_url, video_id, "mp4", "m3u8_native", m3u8_id="hls", fatal=False
            )
        )
    # Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
    # at the same time without actual units specified. This lead to wrong sorting.
    self._sort_formats(
        formats, field_preference=("preference", "height", "width", "fps", "format_id")
    )
    subtitles = {}
    text_tracks = config["request"].get("text_tracks")
    if text_tracks:
        for tt in text_tracks:
            subtitles[tt["lang"]] = [
                {
                    "ext": "vtt",
                    "url": "https://vimeo.com" + tt["url"],
                }
            ]
    return {
        "id": video_id,
        "uploader": video_uploader,
        "uploader_url": video_uploader_url,
        "uploader_id": video_uploader_id,
        "upload_date": video_upload_date,
        "title": video_title,
        "thumbnail": video_thumbnail,
        "description": video_description,
        "duration": video_duration,
        "formats": formats,
        "webpage_url": url,
        "view_count": view_count,
        "like_count": like_count,
        "comment_count": comment_count,
        "subtitles": subtitles,
    }
|
def _real_extract(self, url):
    """Extract metadata and formats for a Vimeo video.

    Accepts plain, pro and player.vimeo.com URLs (possibly smuggled with
    extra ``http_headers`` / password-verification state) and returns the
    standard info dict: id, title, uploader fields, thumbnail, description,
    duration, formats, subtitles and counters.
    """
    url, data = unsmuggle_url(url, {})
    # NOTE(review): headers aliases the shared std_headers dict here; the
    # copy below happens only when smuggled http_headers are present, so
    # the Referer assignment can mutate the module-wide defaults — confirm.
    headers = std_headers
    if "http_headers" in data:
        headers = headers.copy()
        headers.update(data["http_headers"])
    if "Referer" not in headers:
        headers["Referer"] = url
    # Extract ID from URL
    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group("id")
    orig_url = url
    # Pro/player pages embed the config differently; normalize the URL.
    if mobj.group("pro") or mobj.group("player"):
        url = "https://player.vimeo.com/video/" + video_id
    else:
        url = "https://vimeo.com/" + video_id
    # Retrieve video webpage to extract further information
    request = sanitized_Request(url, None, headers)
    try:
        webpage = self._download_webpage(request, video_id)
    except ExtractorError as ee:
        # A 403 whose body mentions privacy settings means the video is
        # embed-only; anything else is re-raised unchanged.
        if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
            errmsg = ee.cause.read()
            if (
                b"Because of its privacy settings, this video cannot be played here"
                in errmsg
            ):
                raise ExtractorError(
                    "Cannot download embed-only video without embedding "
                    "URL. Please call youtube-dl with the URL of the page "
                    "that embeds this video.",
                    expected=True,
                )
        raise
    # Now we begin extracting as much information as we can from what we
    # retrieved. First we extract the information common to all extractors,
    # and latter we extract those that are Vimeo specific.
    self.report_extraction(video_id)
    # Surface server-side transcode failures early via seed_status.
    vimeo_config = self._search_regex(
        r"vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));",
        webpage,
        "vimeo config",
        default=None,
    )
    if vimeo_config:
        seed_status = self._parse_json(vimeo_config, video_id).get("seed_status", {})
        if seed_status.get("state") == "failed":
            raise ExtractorError(
                "%s said: %s" % (self.IE_NAME, seed_status["title"]), expected=True
            )
    # Extract the config JSON
    try:
        try:
            config_url = self._html_search_regex(
                r' data-config-url="(.+?)"', webpage, "config URL", default=None
            )
            if not config_url:
                # Sometimes new react-based page is served instead of old one that require
                # different config URL extraction approach (see
                # https://github.com/rg3/youtube-dl/pull/7209)
                vimeo_clip_page_config = self._search_regex(
                    r"vimeo\.clip_page_config\s*=\s*({.+?});",
                    webpage,
                    "vimeo clip page config",
                )
                config_url = self._parse_json(vimeo_clip_page_config, video_id)[
                    "player"
                ]["config_url"]
            config_json = self._download_webpage(config_url, video_id)
            config = json.loads(config_json)
        except RegexNotFoundError:
            # For pro videos or player.vimeo.com urls
            # We try to find out to which variable is assigned the config dic
            # NOTE(review): pattern below is not a raw string; "\w" and "\."
            # rely on Python passing unknown escapes through — confirm intent.
            m_variable_name = re.search("(\w)\.video\.id", webpage)
            if m_variable_name is not None:
                config_re = r"%s=({[^}].+?});" % re.escape(m_variable_name.group(1))
            else:
                config_re = [r" = {config:({.+?}),assets:", r"(?:[abc])=({.+?});"]
            config = self._search_regex(
                config_re, webpage, "info section", flags=re.DOTALL
            )
            config = json.loads(config)
    except Exception as e:
        if re.search(
            "The creator of this video has not given you permission to embed it on this domain.",
            webpage,
        ):
            raise ExtractorError(
                'The author has restricted the access to this video, try with the "--referer" option'
            )
        # Password-protected video: verify once, then retry the whole
        # extraction with a smuggled marker so a second failure is fatal.
        if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
            if "_video_password_verified" in data:
                raise ExtractorError("video password verification failed!")
            self._verify_video_password(url, video_id, webpage)
            return self._real_extract(
                smuggle_url(url, {"_video_password_verified": "verified"})
            )
        else:
            raise ExtractorError("Unable to extract info section", cause=e)
    else:
        # view == 4 marks a password-protected player config.
        if config.get("view") == 4:
            config = self._verify_player_video_password(url, video_id)
    # Rented VOD titles must be fetched through the feature player URL.
    if ">You rented this title.<" in webpage:
        feature_id = config.get("video", {}).get("vod", {}).get("feature_id")
        if feature_id and not data.get("force_feature_id", False):
            return self.url_result(
                smuggle_url(
                    "https://player.vimeo.com/player/%s" % feature_id,
                    {"force_feature_id": True},
                ),
                "Vimeo",
            )
    # Extract title
    video_title = config["video"]["title"]
    # Extract uploader, uploader_url and uploader_id
    video_uploader = config["video"].get("owner", {}).get("name")
    video_uploader_url = config["video"].get("owner", {}).get("url")
    video_uploader_id = (
        video_uploader_url.split("/")[-1] if video_uploader_url else None
    )
    # Extract video thumbnail
    video_thumbnail = config["video"].get("thumbnail")
    if video_thumbnail is None:
        # Fall back to the widest entry of the width->url thumbs mapping.
        video_thumbs = config["video"].get("thumbs")
        if video_thumbs and isinstance(video_thumbs, dict):
            _, video_thumbnail = sorted(
                (int(width if width.isdigit() else 0), t_url)
                for (width, t_url) in video_thumbs.items()
            )[-1]
    # Extract video description
    video_description = self._html_search_regex(
        r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
        webpage,
        "description",
        default=None,
    )
    if not video_description:
        video_description = self._html_search_meta("description", webpage, default=None)
    if not video_description and mobj.group("pro"):
        orig_webpage = self._download_webpage(
            orig_url, video_id, note="Downloading webpage for description", fatal=False
        )
        if orig_webpage:
            video_description = self._html_search_meta(
                "description", orig_webpage, default=None
            )
    if not video_description and not mobj.group("player"):
        self._downloader.report_warning("Cannot find video description")
    # Extract video duration
    video_duration = int_or_none(config["video"].get("duration"))
    # Extract upload date
    video_upload_date = None
    mobj = re.search(r'<time[^>]+datetime="([^"]+)"', webpage)
    if mobj is not None:
        video_upload_date = unified_strdate(mobj.group(1))
    try:
        view_count = int(self._search_regex(r"UserPlays:(\d+)", webpage, "view count"))
        like_count = int(self._search_regex(r"UserLikes:(\d+)", webpage, "like count"))
        comment_count = int(
            self._search_regex(r"UserComments:(\d+)", webpage, "comment count")
        )
    except RegexNotFoundError:
        # This info is only available in vimeo.com/{id} urls
        view_count = None
        like_count = None
        comment_count = None
    formats = []
    # Original-quality source file download (best-effort, may be absent).
    download_request = sanitized_Request(
        "https://vimeo.com/%s?action=load_download_config" % video_id,
        headers={"X-Requested-With": "XMLHttpRequest"},
    )
    download_data = self._download_json(download_request, video_id, fatal=False)
    if download_data:
        source_file = download_data.get("source_file")
        if isinstance(source_file, dict):
            download_url = source_file.get("download_url")
            if (
                download_url
                and not source_file.get("is_cold")
                and not source_file.get("is_defrosting")
            ):
                source_name = source_file.get("public_name", "Original")
                if self._is_valid_url(download_url, video_id, "%s video" % source_name):
                    ext = source_file.get(
                        "extension", determine_ext(download_url)
                    ).lower()
                    formats.append(
                        {
                            "url": download_url,
                            "ext": ext,
                            "width": int_or_none(source_file.get("width")),
                            "height": int_or_none(source_file.get("height")),
                            "filesize": parse_filesize(source_file.get("size")),
                            "format_id": source_name,
                            "preference": 1,
                        }
                    )
    config_files = config["video"].get("files") or config["request"].get("files", {})
    for f in config_files.get("progressive", []):
        video_url = f.get("url")
        if not video_url:
            continue
        formats.append(
            {
                "url": video_url,
                "format_id": "http-%s" % f.get("quality"),
                "width": int_or_none(f.get("width")),
                "height": int_or_none(f.get("height")),
                "fps": int_or_none(f.get("fps")),
                "tbr": int_or_none(f.get("bitrate")),
            }
        )
    m3u8_url = config_files.get("hls", {}).get("url")
    if m3u8_url:
        formats.extend(
            self._extract_m3u8_formats(
                m3u8_url, video_id, "mp4", "m3u8_native", m3u8_id="hls", fatal=False
            )
        )
    # Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
    # at the same time without actual units specified. This lead to wrong sorting.
    self._sort_formats(
        formats, field_preference=("preference", "height", "width", "fps", "format_id")
    )
    subtitles = {}
    text_tracks = config["request"].get("text_tracks")
    if text_tracks:
        for tt in text_tracks:
            subtitles[tt["lang"]] = [
                {
                    "ext": "vtt",
                    "url": "https://vimeo.com" + tt["url"],
                }
            ]
    return {
        "id": video_id,
        "uploader": video_uploader,
        "uploader_url": video_uploader_url,
        "uploader_id": video_uploader_id,
        "upload_date": video_upload_date,
        "title": video_title,
        "thumbnail": video_thumbnail,
        "description": video_description,
        "duration": video_duration,
        "formats": formats,
        "webpage_url": url,
        "view_count": view_count,
        "like_count": like_count,
        "comment_count": comment_count,
        "subtitles": subtitles,
    }
|
https://github.com/ytdl-org/youtube-dl/issues/8778
|
[vimeo] 149274392: Downloading webpage
[vimeo] 149274392: Extracting information
[vimeo] 149274392: Downloading webpage
[vimeo] 149274392: Downloading JSON metadata
[vimeo] 149274392: Downloading m3u8 information
[tudou] ayXy8TTcG0M: Downloading JSON metadata
[tudou] ayXy8TTcG0M: found 26 parts
[tudou] 400753010: Opening the info XML page
Traceback (most recent call last):
File "C:\Python35\lib\site-packages\django\core\handlers\base.py", line 149, in get_response
response = self.process_exception_by_middleware(e, request)
File "C:\Python35\lib\site-packages\django\core\handlers\base.py", line 147, in get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "D:/Python projects/getvideo/getvideo\getvideo\views.py", line 175, in search
tudou_info = downloader.extract_info('http://www.tudou.com/programs/view/ayXy8TTcG0M/', download=False)
File "C:\Python35\lib\site-packages\youtube_dl\YoutubeDL.py", line 666, in extract_info
ie_result = ie.extract(url)
File "C:\Python35\lib\site-packages\youtube_dl\extractor\common.py", line 316, in extract
return self._real_extract(url)
File "C:\Python35\lib\site-packages\youtube_dl\extractor\tudou.py", line 85, in _real_extract
ext = (final_url.split('?')[0]).split('.')[-1]
AttributeError: 'NoneType' object has no attribute 'split'
|
AttributeError
|
def _login(self):
    """Authenticate against safaribooksonline.com with the configured account.

    Downloads the login form, extracts the CSRF token, posts the
    credentials, and raises ExtractorError if the success marker is
    missing from the response page.
    """
    (username, password) = self._get_login_info()
    if username is None:
        self.raise_login_required("safaribooksonline.com account is required")

    # Work on a private copy so the shared default headers stay untouched.
    request_headers = std_headers.copy()
    request_headers.setdefault("Referer", self._LOGIN_URL)

    form_page_request = sanitized_Request(self._LOGIN_URL, headers=request_headers)
    form_page = self._download_webpage(
        form_page_request, None, "Downloading login form"
    )
    # The CSRF token must be echoed back in the POST body.
    csrf_token = self._html_search_regex(
        r"name='csrfmiddlewaretoken'\s+value='([^']+)'", form_page, "csrf token"
    )

    credentials = {
        "csrfmiddlewaretoken": csrf_token,
        "email": username,
        "password1": password,
        "login": "Sign In",
        "next": "",
    }
    submit_request = sanitized_Request(
        self._LOGIN_URL, urlencode_postdata(credentials), headers=request_headers
    )
    result_page = self._download_webpage(
        submit_request, None, "Logging in as %s" % username
    )

    if not re.search(self._SUCCESSFUL_LOGIN_REGEX, result_page):
        raise ExtractorError(
            "Login failed; make sure your credentials are correct and try again.",
            expected=True,
        )
    self.to_screen("Login successful")
|
def _login(self):
    """Authenticate against safaribooksonline.com with the configured account.

    Fixes two defects in the previous version: ``headers`` was bound
    directly to the shared ``std_headers`` dict, so inserting the Referer
    mutated the global defaults for every subsequent request; and the
    login-form download did not send those headers at all, so the Referer
    never reached the server.
    """
    (username, password) = self._get_login_info()
    if username is None:
        self.raise_login_required("safaribooksonline.com account is required")
    # Copy so the module-wide std_headers dict is never mutated.
    headers = std_headers.copy()
    if "Referer" not in headers:
        headers["Referer"] = self._LOGIN_URL
    # Fetch the form with the same headers used for the credential POST.
    login_page_request = sanitized_Request(self._LOGIN_URL, headers=headers)
    login_page = self._download_webpage(
        login_page_request, None, "Downloading login form"
    )
    # CSRF token must accompany the POSTed credentials.
    csrf = self._html_search_regex(
        r"name='csrfmiddlewaretoken'\s+value='([^']+)'", login_page, "csrf token"
    )
    login_form = {
        "csrfmiddlewaretoken": csrf,
        "email": username,
        "password1": password,
        "login": "Sign In",
        "next": "",
    }
    request = sanitized_Request(
        self._LOGIN_URL, urlencode_postdata(login_form), headers=headers
    )
    login_page = self._download_webpage(request, None, "Logging in as %s" % username)
    if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None:
        raise ExtractorError(
            "Login failed; make sure your credentials are correct and try again.",
            expected=True,
        )
    self.to_screen("Login successful")
|
https://github.com/ytdl-org/youtube-dl/issues/8778
|
[vimeo] 149274392: Downloading webpage
[vimeo] 149274392: Extracting information
[vimeo] 149274392: Downloading webpage
[vimeo] 149274392: Downloading JSON metadata
[vimeo] 149274392: Downloading m3u8 information
[tudou] ayXy8TTcG0M: Downloading JSON metadata
[tudou] ayXy8TTcG0M: found 26 parts
[tudou] 400753010: Opening the info XML page
Traceback (most recent call last):
File "C:\Python35\lib\site-packages\django\core\handlers\base.py", line 149, in get_response
response = self.process_exception_by_middleware(e, request)
File "C:\Python35\lib\site-packages\django\core\handlers\base.py", line 147, in get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "D:/Python projects/getvideo/getvideo\getvideo\views.py", line 175, in search
tudou_info = downloader.extract_info('http://www.tudou.com/programs/view/ayXy8TTcG0M/', download=False)
File "C:\Python35\lib\site-packages\youtube_dl\YoutubeDL.py", line 666, in extract_info
ie_result = ie.extract(url)
File "C:\Python35\lib\site-packages\youtube_dl\extractor\common.py", line 316, in extract
return self._real_extract(url)
File "C:\Python35\lib\site-packages\youtube_dl\extractor\tudou.py", line 85, in _real_extract
ext = (final_url.split('?')[0]).split('.')[-1]
AttributeError: 'NoneType' object has no attribute 'split'
|
AttributeError
|
def run(self, info):
    """Embed available metadata (title, date, artist, ...) into the file
    with a stream-copy ffmpeg pass.

    Returns ``([], info)`` per the post-processor contract (no files to
    delete). Improvement over the previous version: the aac_adtstoasc
    bitstream filter is also applied when the user forced native HLS via
    the ``hls_prefer_native`` option, matching the handling elsewhere in
    this file.
    """
    metadata = {}
    if info.get("title") is not None:
        metadata["title"] = info["title"]
    if info.get("upload_date") is not None:
        metadata["date"] = info["upload_date"]
    # Prefer an explicit artist, then uploader name, then uploader id.
    if info.get("artist") is not None:
        metadata["artist"] = info["artist"]
    elif info.get("uploader") is not None:
        metadata["artist"] = info["uploader"]
    elif info.get("uploader_id") is not None:
        metadata["artist"] = info["uploader_id"]
    if info.get("description") is not None:
        metadata["description"] = info["description"]
        metadata["comment"] = info["description"]
    if info.get("webpage_url") is not None:
        metadata["purl"] = info["webpage_url"]
    if info.get("album") is not None:
        metadata["album"] = info["album"]
    if not metadata:
        self._downloader.to_screen("[ffmpeg] There isn't any metadata to add")
        return [], info
    filename = info["filepath"]
    temp_filename = prepend_extension(filename, "temp")
    # m4a needs audio-only stream copy; everything else copies all streams.
    if info["ext"] == "m4a":
        options = ["-vn", "-acodec", "copy"]
    else:
        options = ["-c", "copy"]
    for name, value in metadata.items():
        options.extend(["-metadata", "%s=%s" % (name, value)])
    # ADTS AAC from HLS must be repackaged for MP4 containers, see
    # https://github.com/rg3/youtube-dl/issues/8350
    if info["protocol"] == "m3u8_native" or self._downloader.params.get(
        "hls_prefer_native", False
    ):
        options.extend(["-bsf:a", "aac_adtstoasc"])
    self._downloader.to_screen("[ffmpeg] Adding metadata to '%s'" % filename)
    self.run_ffmpeg(filename, temp_filename, options)
    # Replace the original atomically-ish: remove then rename the temp.
    os.remove(encodeFilename(filename))
    os.rename(encodeFilename(temp_filename), encodeFilename(filename))
    return [], info
|
def run(self, info):
    """Embed available metadata (title, date, artist, ...) into the file
    with a stream-copy ffmpeg pass.

    Returns ``([], info)`` per the post-processor contract (no files to
    delete). Fix: ADTS AAC audio downloaded via the native HLS downloader
    cannot be stream-copied into an MP4 container without the
    aac_adtstoasc bitstream filter; ffmpeg aborted with "Conversion
    failed!" (https://github.com/rg3/youtube-dl/issues/8350).
    """
    metadata = {}
    if info.get("title") is not None:
        metadata["title"] = info["title"]
    if info.get("upload_date") is not None:
        metadata["date"] = info["upload_date"]
    # Prefer an explicit artist, then uploader name, then uploader id.
    if info.get("artist") is not None:
        metadata["artist"] = info["artist"]
    elif info.get("uploader") is not None:
        metadata["artist"] = info["uploader"]
    elif info.get("uploader_id") is not None:
        metadata["artist"] = info["uploader_id"]
    if info.get("description") is not None:
        metadata["description"] = info["description"]
        metadata["comment"] = info["description"]
    if info.get("webpage_url") is not None:
        metadata["purl"] = info["webpage_url"]
    if info.get("album") is not None:
        metadata["album"] = info["album"]
    if not metadata:
        self._downloader.to_screen("[ffmpeg] There isn't any metadata to add")
        return [], info
    filename = info["filepath"]
    temp_filename = prepend_extension(filename, "temp")
    # m4a needs audio-only stream copy; everything else copies all streams.
    if info["ext"] == "m4a":
        options = ["-vn", "-acodec", "copy"]
    else:
        options = ["-c", "copy"]
    for name, value in metadata.items():
        options.extend(["-metadata", "%s=%s" % (name, value)])
    # https://github.com/rg3/youtube-dl/issues/8350
    if info["protocol"] == "m3u8_native" or self._downloader.params.get(
        "hls_prefer_native", False
    ):
        options.extend(["-bsf:a", "aac_adtstoasc"])
    self._downloader.to_screen("[ffmpeg] Adding metadata to '%s'" % filename)
    self.run_ffmpeg(filename, temp_filename, options)
    # Replace the original: remove it, then rename the temp file over it.
    os.remove(encodeFilename(filename))
    os.rename(encodeFilename(temp_filename), encodeFilename(filename))
    return [], info
|
https://github.com/ytdl-org/youtube-dl/issues/8350
|
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'--ignore-config', u'--verbose', u'--download-archive', u'/home/vxbinaca/.ytdlarchive', u'--no-overwrites', u'--call-home', u'--continue', u'--write-info-json', u'--write-description', u'--write-thumbnail', u'--merge-output-format', u'mkv--all-subs', u'--sub-format', u'srt', u'--convert-subs', u'srt', u'--write-sub', u'--add-metadata', u'https://vimeo.com/70668043', u'https://vimeo.com/70666333']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2016.01.27
[debug] Python version 2.7.10 - Linux-4.2.0-25-generic-x86_64-with-Ubuntu-15.10-wily
[debug] exe versions: ffmpeg 2.7.5-0ubuntu0.15.10.1, ffprobe 2.7.5-0ubuntu0.15.10.1, rtmpdump 2.4
[debug] Proxy map: {}
[debug] Public IP address: 76.101.221.232
[vimeo] 70668043: Downloading webpage
[vimeo] 70668043: Extracting information
[vimeo] 70668043: Downloading webpage
[vimeo] 70668043: Downloading JSON metadata
[vimeo] 70668043: Downloading m3u8 information
[info] Video description is already present
[info] Video description metadata is already present
[vimeo] 70668043: Thumbnail is already present
[debug] Invoking downloader on u'https://10-lvl3-hls.vimeocdn.com/1453990861-28223b02a7d6053983227f4b64333f85d0240957/01/4133/2/70668043/178317076.mp4.m3u8'
[download] Ask Ash No. 1-70668043.mp4 has already been downloaded
[download] 100% of 9.83MiB
[ffmpeg] Adding metadata to 'Ask Ash No. 1-70668043.mp4'
[debug] ffmpeg command line: ffmpeg -y -i 'file:Ask Ash No. 1-70668043.mp4' -c copy -metadata 'comment=More | junnnktank.com/thenakedissue/faq
f. Ash twitter.com/ashvandeesch
This is Ash. She'"'"'s from Holland. She'"'"'s a regular {and fucking awesome} contributor to The Naked Issue. You ask her questions, she makes a video and answers them {while looking pretty damn cute}.
Ask Ash | thenakedissue@junnnktank.com' -metadata 'description=More | junnnktank.com/thenakedissue/faq
f. Ash twitter.com/ashvandeesch
This is Ash. She'"'"'s from Holland. She'"'"'s a regular {and fucking awesome} contributor to The Naked Issue. You ask her questions, she makes a video and answers them {while looking pretty damn cute}.
Ask Ash | thenakedissue@junnnktank.com' -metadata artist=JUNNNKTANK -metadata 'title=Ask Ash No. 1' -metadata date=20130719 -metadata purl=https://vimeo.com/70668043 'file:Ask Ash No. 1-70668043.temp.mp4'
ERROR: Conversion failed!
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/youtube_dl/YoutubeDL.py", line 1737, in post_process
files_to_delete, info = pp.run(info)
File "/usr/local/lib/python2.7/dist-packages/youtube_dl/postprocessor/ffmpeg.py", line 395, in run
self.run_ffmpeg(filename, temp_filename, options)
File "/usr/local/lib/python2.7/dist-packages/youtube_dl/postprocessor/ffmpeg.py", line 159, in run_ffmpeg
self.run_ffmpeg_multiple_files([path], out_path, opts)
File "/usr/local/lib/python2.7/dist-packages/youtube_dl/postprocessor/ffmpeg.py", line 155, in run_ffmpeg_multiple_files
raise FFmpegPostProcessorError(msg)
FFmpegPostProcessorError
|
FFmpegPostProcessorError
|
def run(self, info):
    """Write the available metadata tags into the media file via an
    ffmpeg stream-copy pass and return ``([], info)``."""
    tags = {}
    if info.get("title") is not None:
        tags["title"] = info["title"]
    if info.get("upload_date") is not None:
        tags["date"] = info["upload_date"]
    # First non-empty of artist / uploader / uploader_id wins.
    for artist_source in ("artist", "uploader", "uploader_id"):
        if info.get(artist_source) is not None:
            tags["artist"] = info[artist_source]
            break
    if info.get("description") is not None:
        tags["description"] = info["description"]
        tags["comment"] = info["description"]
    if info.get("webpage_url") is not None:
        tags["purl"] = info["webpage_url"]
    if info.get("album") is not None:
        tags["album"] = info["album"]

    if not tags:
        self._downloader.to_screen("[ffmpeg] There isn't any metadata to add")
        return [], info

    in_path = info["filepath"]
    out_path = prepend_extension(in_path, "temp")

    # m4a: copy audio stream only; otherwise copy every stream.
    options = ["-vn", "-acodec", "copy"] if info["ext"] == "m4a" else ["-c", "copy"]
    for tag_name, tag_value in tags.items():
        options += ["-metadata", "%s=%s" % (tag_name, tag_value)]
    # https://github.com/rg3/youtube-dl/issues/8350
    if info["protocol"] == "m3u8_native" or self._downloader.params.get(
        "hls_prefer_native", False
    ):
        options += ["-bsf:a", "aac_adtstoasc"]

    self._downloader.to_screen("[ffmpeg] Adding metadata to '%s'" % in_path)
    self.run_ffmpeg(in_path, out_path, options)
    os.remove(encodeFilename(in_path))
    os.rename(encodeFilename(out_path), encodeFilename(in_path))
    return [], info
|
def run(self, info):
    """Embed available metadata (title, date, artist, ...) into the file
    with a stream-copy ffmpeg pass.

    Returns ``([], info)`` per the post-processor contract. Improvement:
    the aac_adtstoasc bitstream filter is also applied when the user
    forced native HLS via ``hls_prefer_native``, matching the sibling
    implementation in this file; the protocol-only check missed that case.
    """
    metadata = {}
    if info.get("title") is not None:
        metadata["title"] = info["title"]
    if info.get("upload_date") is not None:
        metadata["date"] = info["upload_date"]
    # Prefer an explicit artist, then uploader name, then uploader id.
    if info.get("artist") is not None:
        metadata["artist"] = info["artist"]
    elif info.get("uploader") is not None:
        metadata["artist"] = info["uploader"]
    elif info.get("uploader_id") is not None:
        metadata["artist"] = info["uploader_id"]
    if info.get("description") is not None:
        metadata["description"] = info["description"]
        metadata["comment"] = info["description"]
    if info.get("webpage_url") is not None:
        metadata["purl"] = info["webpage_url"]
    if info.get("album") is not None:
        metadata["album"] = info["album"]
    if not metadata:
        self._downloader.to_screen("[ffmpeg] There isn't any metadata to add")
        return [], info
    filename = info["filepath"]
    temp_filename = prepend_extension(filename, "temp")
    # m4a needs audio-only stream copy; everything else copies all streams.
    if info["ext"] == "m4a":
        options = ["-vn", "-acodec", "copy"]
    else:
        options = ["-c", "copy"]
    for name, value in metadata.items():
        options.extend(["-metadata", "%s=%s" % (name, value)])
    # https://github.com/rg3/youtube-dl/issues/8350
    if info["protocol"] == "m3u8_native" or self._downloader.params.get(
        "hls_prefer_native", False
    ):
        options.extend(["-bsf:a", "aac_adtstoasc"])
    self._downloader.to_screen("[ffmpeg] Adding metadata to '%s'" % filename)
    self.run_ffmpeg(filename, temp_filename, options)
    # Replace the original: remove it, then rename the temp file over it.
    os.remove(encodeFilename(filename))
    os.rename(encodeFilename(temp_filename), encodeFilename(filename))
    return [], info
|
https://github.com/ytdl-org/youtube-dl/issues/8350
|
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'--ignore-config', u'--verbose', u'--download-archive', u'/home/vxbinaca/.ytdlarchive', u'--no-overwrites', u'--call-home', u'--continue', u'--write-info-json', u'--write-description', u'--write-thumbnail', u'--merge-output-format', u'mkv--all-subs', u'--sub-format', u'srt', u'--convert-subs', u'srt', u'--write-sub', u'--add-metadata', u'https://vimeo.com/70668043', u'https://vimeo.com/70666333']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2016.01.27
[debug] Python version 2.7.10 - Linux-4.2.0-25-generic-x86_64-with-Ubuntu-15.10-wily
[debug] exe versions: ffmpeg 2.7.5-0ubuntu0.15.10.1, ffprobe 2.7.5-0ubuntu0.15.10.1, rtmpdump 2.4
[debug] Proxy map: {}
[debug] Public IP address: 76.101.221.232
[vimeo] 70668043: Downloading webpage
[vimeo] 70668043: Extracting information
[vimeo] 70668043: Downloading webpage
[vimeo] 70668043: Downloading JSON metadata
[vimeo] 70668043: Downloading m3u8 information
[info] Video description is already present
[info] Video description metadata is already present
[vimeo] 70668043: Thumbnail is already present
[debug] Invoking downloader on u'https://10-lvl3-hls.vimeocdn.com/1453990861-28223b02a7d6053983227f4b64333f85d0240957/01/4133/2/70668043/178317076.mp4.m3u8'
[download] Ask Ash No. 1-70668043.mp4 has already been downloaded
[download] 100% of 9.83MiB
[ffmpeg] Adding metadata to 'Ask Ash No. 1-70668043.mp4'
[debug] ffmpeg command line: ffmpeg -y -i 'file:Ask Ash No. 1-70668043.mp4' -c copy -metadata 'comment=More | junnnktank.com/thenakedissue/faq
f. Ash twitter.com/ashvandeesch
This is Ash. She'"'"'s from Holland. She'"'"'s a regular {and fucking awesome} contributor to The Naked Issue. You ask her questions, she makes a video and answers them {while looking pretty damn cute}.
Ask Ash | thenakedissue@junnnktank.com' -metadata 'description=More | junnnktank.com/thenakedissue/faq
f. Ash twitter.com/ashvandeesch
This is Ash. She'"'"'s from Holland. She'"'"'s a regular {and fucking awesome} contributor to The Naked Issue. You ask her questions, she makes a video and answers them {while looking pretty damn cute}.
Ask Ash | thenakedissue@junnnktank.com' -metadata artist=JUNNNKTANK -metadata 'title=Ask Ash No. 1' -metadata date=20130719 -metadata purl=https://vimeo.com/70668043 'file:Ask Ash No. 1-70668043.temp.mp4'
ERROR: Conversion failed!
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/youtube_dl/YoutubeDL.py", line 1737, in post_process
files_to_delete, info = pp.run(info)
File "/usr/local/lib/python2.7/dist-packages/youtube_dl/postprocessor/ffmpeg.py", line 395, in run
self.run_ffmpeg(filename, temp_filename, options)
File "/usr/local/lib/python2.7/dist-packages/youtube_dl/postprocessor/ffmpeg.py", line 159, in run_ffmpeg
self.run_ffmpeg_multiple_files([path], out_path, opts)
File "/usr/local/lib/python2.7/dist-packages/youtube_dl/postprocessor/ffmpeg.py", line 155, in run_ffmpeg_multiple_files
raise FFmpegPostProcessorError(msg)
FFmpegPostProcessorError
|
FFmpegPostProcessorError
|
def _real_extract(self, url):
    """Extract a C-SPAN clip or program.

    Resolves the page to a (video_type, video_id) pair, then pulls format
    data from the ajax-player endpoint and title/poster from the flash XML
    service. Multi-part programs are returned as a playlist; single parts
    as a plain info dict. Senate ISVP embeds are delegated to SenateISVPIE.
    """
    video_id = self._match_id(url)
    video_type = None
    webpage = self._download_webpage(url, video_id)
    # We first look for clipid, because clipprog always appears before
    patterns = [r"id=\'clip(%s)\'\s*value=\'([0-9]+)\'" % t for t in ("id", "prog")]
    results = list(filter(None, (re.search(p, webpage) for p in patterns)))
    if results:
        matches = results[0]
        video_type, video_id = matches.groups()
        video_type = "clip" if video_type == "id" else "program"
    else:
        # No clip/program markers: maybe a Senate ISVP iframe embed.
        senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
        if senate_isvp_url:
            title = self._og_search_title(webpage)
            surl = smuggle_url(senate_isvp_url, {"force_title": title})
            return self.url_result(surl, "SenateISVP", video_id, title)
    if video_type is None or video_id is None:
        raise ExtractorError("unable to find video id and type")
    # Helper: the ajax-player JSON wraps scalars as {"#text": ...}.
    def get_text_attr(d, attr):
        return d.get(attr, {}).get("#text")
    data = self._download_json(
        "http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s"
        % (video_type, video_id),
        video_id,
    )["video"]
    if data["@status"] != "Success":
        raise ExtractorError(
            "%s said: %s" % (self.IE_NAME, get_text_attr(data, "error")), expected=True
        )
    doc = self._download_xml(
        "http://www.c-span.org/common/services/flashXml.php?%sid=%s"
        % (video_type, video_id),
        video_id,
    )
    description = self._html_search_meta("description", webpage)
    title = find_xpath_attr(doc, ".//string", "name", "title").text
    thumbnail = find_xpath_attr(doc, ".//string", "name", "poster").text
    files = data["files"]
    capfile = get_text_attr(data, "capfile")
    entries = []
    # One entry per file part; each part carries its own quality variants.
    for partnum, f in enumerate(files):
        formats = []
        for quality in f["qualities"]:
            formats.append(
                {
                    "format_id": "%s-%sp"
                    % (
                        get_text_attr(quality, "bitrate"),
                        get_text_attr(quality, "height"),
                    ),
                    "url": unescapeHTML(get_text_attr(quality, "file")),
                    "height": int_or_none(get_text_attr(quality, "height")),
                    "tbr": int_or_none(get_text_attr(quality, "bitrate")),
                }
            )
        self._sort_formats(formats)
        entries.append(
            {
                "id": "%s_%d" % (video_id, partnum + 1),
                "title": (
                    title if len(files) == 1 else "%s part %d" % (title, partnum + 1)
                ),
                "formats": formats,
                "description": description,
                "thumbnail": thumbnail,
                "duration": int_or_none(get_text_attr(f, "length")),
                "subtitles": {
                    "en": [{"url": capfile, "ext": determine_ext(capfile, "dfxp")}],
                }
                if capfile
                else None,
            }
        )
    # Clips are prefixed with "c" to keep their ids distinct from programs.
    if len(entries) == 1:
        entry = dict(entries[0])
        entry["id"] = "c" + video_id if video_type == "clip" else video_id
        return entry
    else:
        return {
            "_type": "playlist",
            "entries": entries,
            "title": title,
            "id": "c" + video_id if video_type == "clip" else video_id,
        }
|
def _real_extract(self, url):
    """Extract a C-SPAN clip or program.

    Improvement: the ``data-(prog|clip)id`` attribute regex was replaced
    with the hidden-input ``id='clipid'``/``id='clipprog'`` pattern search
    used by the updated sibling implementation in this file; clipid is
    probed first because clipprog always appears before it in the page.
    """
    video_id = self._match_id(url)
    video_type = None
    webpage = self._download_webpage(url, video_id)
    # We first look for clipid, because clipprog always appears before
    patterns = [r"id=\'clip(%s)\'\s*value=\'([0-9]+)\'" % t for t in ("id", "prog")]
    results = list(filter(None, (re.search(p, webpage) for p in patterns)))
    if results:
        matches = results[0]
        video_type, video_id = matches.groups()
        video_type = "clip" if video_type == "id" else "program"
    else:
        # No clip/program markers: maybe a Senate ISVP iframe embed.
        senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
        if senate_isvp_url:
            title = self._og_search_title(webpage)
            surl = smuggle_url(senate_isvp_url, {"force_title": title})
            return self.url_result(surl, "SenateISVP", video_id, title)
    if video_type is None or video_id is None:
        raise ExtractorError("unable to find video id and type")
    # Helper: the ajax-player JSON wraps scalars as {"#text": ...}.
    def get_text_attr(d, attr):
        return d.get(attr, {}).get("#text")
    data = self._download_json(
        "http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s"
        % (video_type, video_id),
        video_id,
    )["video"]
    if data["@status"] != "Success":
        raise ExtractorError(
            "%s said: %s" % (self.IE_NAME, get_text_attr(data, "error")), expected=True
        )
    doc = self._download_xml(
        "http://www.c-span.org/common/services/flashXml.php?%sid=%s"
        % (video_type, video_id),
        video_id,
    )
    description = self._html_search_meta("description", webpage)
    title = find_xpath_attr(doc, ".//string", "name", "title").text
    thumbnail = find_xpath_attr(doc, ".//string", "name", "poster").text
    files = data["files"]
    capfile = get_text_attr(data, "capfile")
    entries = []
    # One entry per file part; each part carries its own quality variants.
    for partnum, f in enumerate(files):
        formats = []
        for quality in f["qualities"]:
            formats.append(
                {
                    "format_id": "%s-%sp"
                    % (
                        get_text_attr(quality, "bitrate"),
                        get_text_attr(quality, "height"),
                    ),
                    "url": unescapeHTML(get_text_attr(quality, "file")),
                    "height": int_or_none(get_text_attr(quality, "height")),
                    "tbr": int_or_none(get_text_attr(quality, "bitrate")),
                }
            )
        self._sort_formats(formats)
        entries.append(
            {
                "id": "%s_%d" % (video_id, partnum + 1),
                "title": (
                    title if len(files) == 1 else "%s part %d" % (title, partnum + 1)
                ),
                "formats": formats,
                "description": description,
                "thumbnail": thumbnail,
                "duration": int_or_none(get_text_attr(f, "length")),
                "subtitles": {
                    "en": [{"url": capfile, "ext": determine_ext(capfile, "dfxp")}],
                }
                if capfile
                else None,
            }
        )
    # Clips are prefixed with "c" to keep their ids distinct from programs.
    if len(entries) == 1:
        entry = dict(entries[0])
        entry["id"] = "c" + video_id if video_type == "clip" else video_id
        return entry
    else:
        return {
            "_type": "playlist",
            "entries": entries,
            "title": title,
            "id": "c" + video_id if video_type == "clip" else video_id,
        }
|
https://github.com/ytdl-org/youtube-dl/issues/8032
|
~ $ youtube-dl -v http://www.c-span.org/video/?319979-1/european-security-ukraine
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['-v', 'http://www.c-span.org/video/?319979-1/european-security-ukraine']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2015.12.23
[debug] Python version 3.2.3 - Linux-3.13.0-68-generic-i686-with-LinuxMint-13-maya
[debug] exe versions: ffmpeg 2.8.3-dpkg, ffprobe 2.8.3-dpkg, rtmpdump 2.4
[debug] Proxy map: {}
[CSpan] 319979: Downloading webpage
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/__init__.py", line 410, in main
_real_main(argv)
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/__init__.py", line 400, in _real_main
retcode = ydl.download(all_urls)
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/YoutubeDL.py", line 1677, in download
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/YoutubeDL.py", line 665, in extract_info
ie_result = ie.extract(url)
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/extractor/common.py", line 291, in extract
return self._real_extract(url)
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/extractor/cspan.py", line 78, in _real_extract
'http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s' % (video_type, video_id),
UnboundLocalError: local variable 'video_type' referenced before assignment
|
UnboundLocalError
|
def _real_extract(self, url):
    """Extract a C-SPAN clip or program.

    Improvement: the ``data-(prog|clip)id`` attribute regex was replaced
    with the hidden-input ``id='clipid'``/``id='clipprog'`` pattern search
    used by the updated sibling implementation in this file; clipid is
    probed first because clipprog always appears before it in the page.
    """
    video_id = self._match_id(url)
    video_type = None
    webpage = self._download_webpage(url, video_id)
    # We first look for clipid, because clipprog always appears before
    patterns = [r"id=\'clip(%s)\'\s*value=\'([0-9]+)\'" % t for t in ("id", "prog")]
    results = list(filter(None, (re.search(p, webpage) for p in patterns)))
    if results:
        matches = results[0]
        video_type, video_id = matches.groups()
        video_type = "clip" if video_type == "id" else "program"
    else:
        # No clip/program markers: maybe a Senate ISVP iframe embed.
        senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
        if senate_isvp_url:
            title = self._og_search_title(webpage)
            surl = smuggle_url(senate_isvp_url, {"force_title": title})
            return self.url_result(surl, "SenateISVP", video_id, title)
    if video_type is None or video_id is None:
        raise ExtractorError("unable to find video id and type")
    # Helper: the ajax-player JSON wraps scalars as {"#text": ...}.
    def get_text_attr(d, attr):
        return d.get(attr, {}).get("#text")
    data = self._download_json(
        "http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s"
        % (video_type, video_id),
        video_id,
    )["video"]
    if data["@status"] != "Success":
        raise ExtractorError(
            "%s said: %s" % (self.IE_NAME, get_text_attr(data, "error")), expected=True
        )
    doc = self._download_xml(
        "http://www.c-span.org/common/services/flashXml.php?%sid=%s"
        % (video_type, video_id),
        video_id,
    )
    description = self._html_search_meta("description", webpage)
    title = find_xpath_attr(doc, ".//string", "name", "title").text
    thumbnail = find_xpath_attr(doc, ".//string", "name", "poster").text
    files = data["files"]
    capfile = get_text_attr(data, "capfile")
    entries = []
    # One entry per file part; each part carries its own quality variants.
    for partnum, f in enumerate(files):
        formats = []
        for quality in f["qualities"]:
            formats.append(
                {
                    "format_id": "%s-%sp"
                    % (
                        get_text_attr(quality, "bitrate"),
                        get_text_attr(quality, "height"),
                    ),
                    "url": unescapeHTML(get_text_attr(quality, "file")),
                    "height": int_or_none(get_text_attr(quality, "height")),
                    "tbr": int_or_none(get_text_attr(quality, "bitrate")),
                }
            )
        self._sort_formats(formats)
        entries.append(
            {
                "id": "%s_%d" % (video_id, partnum + 1),
                "title": (
                    title if len(files) == 1 else "%s part %d" % (title, partnum + 1)
                ),
                "formats": formats,
                "description": description,
                "thumbnail": thumbnail,
                "duration": int_or_none(get_text_attr(f, "length")),
                "subtitles": {
                    "en": [{"url": capfile, "ext": determine_ext(capfile, "dfxp")}],
                }
                if capfile
                else None,
            }
        )
    # Clips are prefixed with "c" to keep their ids distinct from programs.
    if len(entries) == 1:
        entry = dict(entries[0])
        entry["id"] = "c" + video_id if video_type == "clip" else video_id
        return entry
    else:
        return {
            "_type": "playlist",
            "entries": entries,
            "title": title,
            "id": "c" + video_id if video_type == "clip" else video_id,
        }
|
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
matches = re.search(r"data-(prog|clip)id=\'([0-9]+)\'", webpage)
if matches:
video_type, video_id = matches.groups()
if video_type == "prog":
video_type = "program"
else:
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
title = self._og_search_title(webpage)
surl = smuggle_url(senate_isvp_url, {"force_title": title})
return self.url_result(surl, "SenateISVP", video_id, title)
def get_text_attr(d, attr):
return d.get(attr, {}).get("#text")
data = self._download_json(
"http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s"
% (video_type, video_id),
video_id,
)["video"]
if data["@status"] != "Success":
raise ExtractorError(
"%s said: %s" % (self.IE_NAME, get_text_attr(data, "error")), expected=True
)
doc = self._download_xml(
"http://www.c-span.org/common/services/flashXml.php?%sid=%s"
% (video_type, video_id),
video_id,
)
description = self._html_search_meta("description", webpage)
title = find_xpath_attr(doc, ".//string", "name", "title").text
thumbnail = find_xpath_attr(doc, ".//string", "name", "poster").text
files = data["files"]
capfile = get_text_attr(data, "capfile")
entries = []
for partnum, f in enumerate(files):
formats = []
for quality in f["qualities"]:
formats.append(
{
"format_id": "%s-%sp"
% (
get_text_attr(quality, "bitrate"),
get_text_attr(quality, "height"),
),
"url": unescapeHTML(get_text_attr(quality, "file")),
"height": int_or_none(get_text_attr(quality, "height")),
"tbr": int_or_none(get_text_attr(quality, "bitrate")),
}
)
self._sort_formats(formats)
entries.append(
{
"id": "%s_%d" % (video_id, partnum + 1),
"title": (
title if len(files) == 1 else "%s part %d" % (title, partnum + 1)
),
"formats": formats,
"description": description,
"thumbnail": thumbnail,
"duration": int_or_none(get_text_attr(f, "length")),
"subtitles": {
"en": [{"url": capfile, "ext": determine_ext(capfile, "dfxp")}],
}
if capfile
else None,
}
)
if len(entries) == 1:
entry = dict(entries[0])
entry["id"] = "c" + video_id if video_type == "clip" else video_id
return entry
else:
return {
"_type": "playlist",
"entries": entries,
"title": title,
"id": "c" + video_id if video_type == "clip" else video_id,
}
|
https://github.com/ytdl-org/youtube-dl/issues/8032
|
~ $ youtube-dl -v http://www.c-span.org/video/?319979-1/european-security-ukraine
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['-v', 'http://www.c-span.org/video/?319979-1/european-security-ukraine']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2015.12.23
[debug] Python version 3.2.3 - Linux-3.13.0-68-generic-i686-with-LinuxMint-13-maya
[debug] exe versions: ffmpeg 2.8.3-dpkg, ffprobe 2.8.3-dpkg, rtmpdump 2.4
[debug] Proxy map: {}
[CSpan] 319979: Downloading webpage
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/__init__.py", line 410, in main
_real_main(argv)
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/__init__.py", line 400, in _real_main
retcode = ydl.download(all_urls)
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/YoutubeDL.py", line 1677, in download
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/YoutubeDL.py", line 665, in extract_info
ie_result = ie.extract(url)
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/extractor/common.py", line 291, in extract
return self._real_extract(url)
File "/usr/local/lib/python3.2/dist-packages/youtube_dl/extractor/cspan.py", line 78, in _real_extract
'http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s' % (video_type, video_id),
UnboundLocalError: local variable 'video_type' referenced before assignment
|
UnboundLocalError
|
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = "http" if self._downloader.params.get("prefer_insecure", False) else "https"
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and "t" in query:
start_time = parse_duration(query["t"][0])
if start_time is None and "start" in query:
start_time = parse_duration(query["start"][0])
if end_time is None and "end" in query:
end_time = parse_duration(query["end"][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = (
proto
+ "://www.youtube.com/"
+ compat_urllib_parse_unquote(mobj.group(1)).lstrip("/")
)
video_id = self.extract_id(url)
# Get video webpage
url = (
proto
+ "://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999"
% video_id
)
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(
r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage
)
if mobj is not None:
player_url = re.sub(r"\\(.)", r"\1", mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get("dashmpd")
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
# Get video info
embed_webpage = None
is_live = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
url = proto + "://www.youtube.com/embed/%s" % video_id
embed_webpage = self._download_webpage(
url, video_id, "Downloading embed webpage"
)
data = compat_urllib_parse.urlencode(
{
"video_id": video_id,
"eurl": "https://youtube.googleapis.com/v/" + video_id,
"sts": self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, "sts", default=""
),
}
)
video_info_url = proto + "://www.youtube.com/get_video_info?" + data
video_info_webpage = self._download_webpage(
video_info_url,
video_id,
note="Refetching age-gated info webpage",
errnote="unable to download video info webpage",
)
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
# Try looking directly into the video webpage
mobj = re.search(r";ytplayer\.config\s*=\s*({.*?});ytplayer", video_webpage)
if mobj:
json_code = uppercase_escape(mobj.group(1))
ytplayer_config = json.loads(json_code)
args = ytplayer_config["args"]
if args.get("url_encoded_fmt_stream_map"):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
if args.get("livestream") == "1" or args.get("live_playback") == 1:
is_live = True
if not video_info or self._downloader.params.get(
"youtube_include_dash_manifest", True
):
# We also try looking in get_video_info since it may contain different dashmpd
# URL that points to a DASH manifest with possibly different itag set (some itags
# are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
# manifest pointed by get_video_info's dashmpd).
# The general idea is to take a union of itags of both DASH manifests (for example
# video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el_type in [
"&el=info",
"&el=embedded",
"&el=detailpage",
"&el=vevo",
"",
]:
video_info_url = (
"%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en"
% (proto, video_id, el_type)
)
video_info_webpage = self._download_webpage(
video_info_url,
video_id,
note=False,
errnote="unable to download video info webpage",
)
get_video_info = compat_parse_qs(video_info_webpage)
if get_video_info.get("use_cipher_signature") != ["True"]:
add_dash_mpd(get_video_info)
if not video_info:
video_info = get_video_info
if "token" in get_video_info:
# Different get_video_info requests may report different results, e.g.
# some may report video unavailability, but some may serve it without
# any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
# the original webpage as well as el=info and el=embedded get_video_info
# requests report video unavailability due to geo restriction while
# el=detailpage succeeds and returns valid data). This is probably
# due to YouTube measures against IP ranges of hosting providers.
# Working around by preferring the first succeeded video_info containing
# the token if no such video_info yet was found.
if "token" not in video_info:
video_info = get_video_info
break
if "token" not in video_info:
if "reason" in video_info:
if (
"The uploader has not made this video available in your country."
in video_info["reason"]
):
regions_allowed = self._html_search_meta(
"regionsAllowed", video_webpage, default=None
)
if regions_allowed:
raise ExtractorError(
"YouTube said: This video is available in %s only"
% (
", ".join(
map(ISO3166Utils.short2full, regions_allowed.split(","))
)
),
expected=True,
)
raise ExtractorError(
"YouTube said: %s" % video_info["reason"][0],
expected=True,
video_id=video_id,
)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id,
)
# title
if "title" in video_info:
video_title = video_info["title"][0]
else:
self._downloader.report_warning("Unable to extract video title")
video_title = "_"
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = re.sub(
r"""(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
title="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
class="yt-uix-redirect-link"\s*>
[^<]+
</a>
""",
r"\1",
video_description,
)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(
r'<meta name="description" content="([^"]+)"', video_webpage
)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ""
if "multifeed_metadata_list" in video_info and not smuggled_data.get(
"force_singlefeed", False
):
if not self._downloader.params.get("noplaylist"):
entries = []
feed_ids = []
multifeed_metadata_list = compat_urllib_parse_unquote_plus(
video_info["multifeed_metadata_list"][0]
)
for feed in multifeed_metadata_list.split(","):
feed_data = compat_parse_qs(feed)
entries.append(
{
"_type": "url_transparent",
"ie_key": "Youtube",
"url": smuggle_url(
"%s://www.youtube.com/watch?v=%s"
% (proto, feed_data["id"][0]),
{"force_singlefeed": True},
),
"title": "%s (%s)" % (video_title, feed_data["title"][0]),
}
)
feed_ids.append(feed_data["id"][0])
self.to_screen(
"Downloading multifeed video (%s) - add --no-playlist to just download video %s"
% (", ".join(feed_ids), video_id)
)
return self.playlist_result(
entries, video_id, video_title, video_description
)
self.to_screen("Downloading just video %s because of --no-playlist" % video_id)
if "view_count" in video_info:
view_count = int(video_info["view_count"][0])
else:
view_count = None
# Check for "rental" videos
if "ypc_video_rental_bar_text" in video_info and "author" not in video_info:
raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if "author" not in video_info:
raise ExtractorError("Unable to extract uploader name")
video_uploader = compat_urllib_parse_unquote_plus(video_info["author"][0])
# uploader_id
video_uploader_id = None
mobj = re.search(
r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">',
video_webpage,
)
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
self._downloader.report_warning("unable to extract uploader nickname")
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(
r'<span itemprop="thumbnail".*?href="(.*?)">', video_webpage, re.DOTALL
)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif "thumbnail_url" not in video_info:
self._downloader.report_warning("unable to extract video thumbnail")
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(
video_info["thumbnail_url"][0]
)
# upload date
upload_date = self._html_search_meta(
"datePublished", video_webpage, "upload date", default=None
)
if not upload_date:
upload_date = self._search_regex(
[
r'(?s)id="eow-date.*?>(.*?)</span>',
r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>',
],
video_webpage,
"upload date",
default=None,
)
if upload_date:
upload_date = " ".join(re.sub(r"[/,-]", r" ", mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
m_cat_container = self._search_regex(
r"(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>",
video_webpage,
"categories",
default=None,
)
if m_cat_container:
category = self._html_search_regex(
r"(?s)<a[^<]+>(.*?)</a>", m_cat_container, "category", default=None
)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group("content"))
for m in re.finditer(self._meta_regex("og:video:tag"), video_webpage)
]
def _extract_count(count_name):
return str_to_int(
self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage,
count_name,
default=None,
)
)
like_count = _extract_count("like")
dislike_count = _extract_count("dislike")
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
if "length_seconds" not in video_info:
self._downloader.report_warning("unable to extract video duration")
video_duration = None
else:
video_duration = int(
compat_urllib_parse_unquote_plus(video_info["length_seconds"][0])
)
# annotations
video_annotations = None
if self._downloader.params.get("writeannotations", False):
video_annotations = self._extract_annotations(video_id)
def _map_to_format_list(urlmap):
formats = []
for itag, video_real_url in urlmap.items():
dct = {
"format_id": itag,
"url": video_real_url,
"player_url": player_url,
}
if itag in self._formats:
dct.update(self._formats[itag])
formats.append(dct)
return formats
if "conn" in video_info and video_info["conn"][0].startswith("rtmp"):
self.report_rtmp_download()
formats = [
{
"format_id": "_rtmp",
"protocol": "rtmp",
"url": video_info["conn"][0],
"player_url": player_url,
}
]
elif (
len(video_info.get("url_encoded_fmt_stream_map", [""])[0]) >= 1
or len(video_info.get("adaptive_fmts", [""])[0]) >= 1
):
encoded_url_map = (
video_info.get("url_encoded_fmt_stream_map", [""])[0]
+ ","
+ video_info.get("adaptive_fmts", [""])[0]
)
if "rtmpe%3Dyes" in encoded_url_map:
raise ExtractorError(
"rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.",
expected=True,
)
formats = []
for url_data_str in encoded_url_map.split(","):
url_data = compat_parse_qs(url_data_str)
if "itag" not in url_data or "url" not in url_data:
continue
format_id = url_data["itag"][0]
url = url_data["url"][0]
if "sig" in url_data:
url += "&signature=" + url_data["sig"][0]
elif "s" in url_data:
encrypted_sig = url_data["s"][0]
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
"JS player URL (1)",
default=None,
)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + "://www.youtube.com/embed/%s" % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, "Downloading embed webpage"
)
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, "JS player URL"
)
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage,
"age gate player URL",
)
player_url = json.loads(player_url_json)
if self._downloader.params.get("verbose"):
if player_url is None:
player_version = "unknown"
player_desc = "unknown"
else:
if player_url.endswith("swf"):
player_version = self._search_regex(
r"-(.+?)(?:/watch_as3)?\.swf$",
player_url,
"flash player",
fatal=False,
)
player_desc = "flash player %s" % player_version
else:
player_version = self._search_regex(
[
r"html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js",
r"(?:www|player)-([^/]+)/base\.js",
],
player_url,
"html5 player",
fatal=False,
)
player_desc = "html5 player %s" % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen(
"{%s} signature length %s, %s"
% (format_id, parts_sizes, player_desc)
)
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate
)
url += "&signature=" + signature
if "ratebypass" not in url:
url += "&ratebypass=yes"
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(
r"^(?P<width>\d+)[xX](?P<height>\d+)$", url_data.get("size", [""])[0]
)
width, height = (
(int(mobj.group("width")), int(mobj.group("height")))
if mobj
else (None, None)
)
dct = {
"format_id": format_id,
"url": url,
"player_url": player_url,
"filesize": int_or_none(url_data.get("clen", [None])[0]),
"tbr": float_or_none(url_data.get("bitrate", [None])[0], 1000),
"width": width,
"height": height,
"fps": int_or_none(url_data.get("fps", [None])[0]),
"format_note": url_data.get("quality_label", [None])[0]
or url_data.get("quality", [None])[0],
}
type_ = url_data.get("type", [None])[0]
if type_:
type_split = type_.split(";")
kind_ext = type_split[0].split("/")
if len(kind_ext) == 2:
kind, ext = kind_ext
dct["ext"] = ext
if kind in ("audio", "video"):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)',
type_,
):
if mobj.group("key") == "codecs":
codecs = mobj.group("val")
break
if codecs:
codecs = codecs.split(",")
if len(codecs) == 2:
acodec, vcodec = codecs[0], codecs[1]
else:
acodec, vcodec = (
(codecs[0], "none")
if kind == "audio"
else ("none", codecs[0])
)
dct.update(
{
"acodec": acodec,
"vcodec": vcodec,
}
)
if format_id in self._formats:
dct.update(self._formats[format_id])
formats.append(dct)
elif video_info.get("hlsvp"):
manifest_url = video_info["hlsvp"][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
else:
raise ExtractorError(
"no conn, hlsvp or url_encoded_fmt_stream_map information found in video info"
)
# Look for the DASH manifest
if self._downloader.params.get("youtube_include_dash_manifest", True):
dash_mpd_fatal = True
for dash_manifest_url in dash_mpds:
dash_formats = {}
try:
for df in self._parse_dash_manifest(
video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal
):
# Do not overwrite DASH format found in some previous DASH manifest
if df["format_id"] not in dash_formats:
dash_formats[df["format_id"]] = df
# Additional DASH manifests may end up in HTTP Error 403 therefore
# allow them to fail without bug report message if we already have
# some DASH manifest succeeded. This is temporary workaround to reduce
# burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning("Skipping DASH manifest: %r" % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/rg3/youtube-dl/issues/5774 for an
# example.
formats = [
f for f in formats if f["format_id"] not in dash_formats.keys()
]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage,
)
if stretched_m:
ratio = float(stretched_m.group("w")) / float(stretched_m.group("h"))
for f in formats:
if f.get("vcodec") != "none":
f["stretched_ratio"] = ratio
self._sort_formats(formats)
return {
"id": video_id,
"uploader": video_uploader,
"uploader_id": video_uploader_id,
"upload_date": upload_date,
"title": video_title,
"thumbnail": video_thumbnail,
"description": video_description,
"categories": video_categories,
"tags": video_tags,
"subtitles": video_subtitles,
"automatic_captions": automatic_captions,
"duration": video_duration,
"age_limit": 18 if age_gate else 0,
"annotations": video_annotations,
"webpage_url": proto + "://www.youtube.com/watch?v=%s" % video_id,
"view_count": view_count,
"like_count": like_count,
"dislike_count": dislike_count,
"average_rating": float_or_none(video_info.get("avg_rating", [None])[0]),
"formats": formats,
"is_live": is_live,
"start_time": start_time,
"end_time": end_time,
}
|
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = "http" if self._downloader.params.get("prefer_insecure", False) else "https"
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and "t" in query:
start_time = parse_duration(query["t"][0])
if start_time is None and "start" in query:
start_time = parse_duration(query["start"][0])
if end_time is None and "end" in query:
end_time = parse_duration(query["end"][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = (
proto
+ "://www.youtube.com/"
+ compat_urllib_parse_unquote(mobj.group(1)).lstrip("/")
)
video_id = self.extract_id(url)
# Get video webpage
url = (
proto
+ "://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999"
% video_id
)
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(
r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage
)
if mobj is not None:
player_url = re.sub(r"\\(.)", r"\1", mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get("dashmpd")
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
# Get video info
embed_webpage = None
is_live = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
url = proto + "://www.youtube.com/embed/%s" % video_id
embed_webpage = self._download_webpage(
url, video_id, "Downloading embed webpage"
)
data = compat_urllib_parse.urlencode(
{
"video_id": video_id,
"eurl": "https://youtube.googleapis.com/v/" + video_id,
"sts": self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, "sts", default=""
),
}
)
video_info_url = proto + "://www.youtube.com/get_video_info?" + data
video_info_webpage = self._download_webpage(
video_info_url,
video_id,
note="Refetching age-gated info webpage",
errnote="unable to download video info webpage",
)
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
# Try looking directly into the video webpage
mobj = re.search(r";ytplayer\.config\s*=\s*({.*?});", video_webpage)
if mobj:
json_code = uppercase_escape(mobj.group(1))
ytplayer_config = json.loads(json_code)
args = ytplayer_config["args"]
if args.get("url_encoded_fmt_stream_map"):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
if args.get("livestream") == "1" or args.get("live_playback") == 1:
is_live = True
if not video_info or self._downloader.params.get(
"youtube_include_dash_manifest", True
):
# We also try looking in get_video_info since it may contain different dashmpd
# URL that points to a DASH manifest with possibly different itag set (some itags
# are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
# manifest pointed by get_video_info's dashmpd).
# The general idea is to take a union of itags of both DASH manifests (for example
# video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el_type in [
"&el=info",
"&el=embedded",
"&el=detailpage",
"&el=vevo",
"",
]:
video_info_url = (
"%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en"
% (proto, video_id, el_type)
)
video_info_webpage = self._download_webpage(
video_info_url,
video_id,
note=False,
errnote="unable to download video info webpage",
)
get_video_info = compat_parse_qs(video_info_webpage)
if get_video_info.get("use_cipher_signature") != ["True"]:
add_dash_mpd(get_video_info)
if not video_info:
video_info = get_video_info
if "token" in get_video_info:
# Different get_video_info requests may report different results, e.g.
# some may report video unavailability, but some may serve it without
# any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
# the original webpage as well as el=info and el=embedded get_video_info
# requests report video unavailability due to geo restriction while
# el=detailpage succeeds and returns valid data). This is probably
# due to YouTube measures against IP ranges of hosting providers.
# Working around by preferring the first succeeded video_info containing
# the token if no such video_info yet was found.
if "token" not in video_info:
video_info = get_video_info
break
if "token" not in video_info:
if "reason" in video_info:
if (
"The uploader has not made this video available in your country."
in video_info["reason"]
):
regions_allowed = self._html_search_meta(
"regionsAllowed", video_webpage, default=None
)
if regions_allowed:
raise ExtractorError(
"YouTube said: This video is available in %s only"
% (
", ".join(
map(ISO3166Utils.short2full, regions_allowed.split(","))
)
),
expected=True,
)
raise ExtractorError(
"YouTube said: %s" % video_info["reason"][0],
expected=True,
video_id=video_id,
)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id,
)
# title
if "title" in video_info:
video_title = video_info["title"][0]
else:
self._downloader.report_warning("Unable to extract video title")
video_title = "_"
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = re.sub(
r"""(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
title="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
class="yt-uix-redirect-link"\s*>
[^<]+
</a>
""",
r"\1",
video_description,
)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(
r'<meta name="description" content="([^"]+)"', video_webpage
)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ""
if "multifeed_metadata_list" in video_info and not smuggled_data.get(
"force_singlefeed", False
):
if not self._downloader.params.get("noplaylist"):
entries = []
feed_ids = []
multifeed_metadata_list = compat_urllib_parse_unquote_plus(
video_info["multifeed_metadata_list"][0]
)
for feed in multifeed_metadata_list.split(","):
feed_data = compat_parse_qs(feed)
entries.append(
{
"_type": "url_transparent",
"ie_key": "Youtube",
"url": smuggle_url(
"%s://www.youtube.com/watch?v=%s"
% (proto, feed_data["id"][0]),
{"force_singlefeed": True},
),
"title": "%s (%s)" % (video_title, feed_data["title"][0]),
}
)
feed_ids.append(feed_data["id"][0])
self.to_screen(
"Downloading multifeed video (%s) - add --no-playlist to just download video %s"
% (", ".join(feed_ids), video_id)
)
return self.playlist_result(
entries, video_id, video_title, video_description
)
self.to_screen("Downloading just video %s because of --no-playlist" % video_id)
if "view_count" in video_info:
view_count = int(video_info["view_count"][0])
else:
view_count = None
# Check for "rental" videos
if "ypc_video_rental_bar_text" in video_info and "author" not in video_info:
raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if "author" not in video_info:
raise ExtractorError("Unable to extract uploader name")
video_uploader = compat_urllib_parse_unquote_plus(video_info["author"][0])
# uploader_id
video_uploader_id = None
mobj = re.search(
r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">',
video_webpage,
)
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
self._downloader.report_warning("unable to extract uploader nickname")
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(
r'<span itemprop="thumbnail".*?href="(.*?)">', video_webpage, re.DOTALL
)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif "thumbnail_url" not in video_info:
self._downloader.report_warning("unable to extract video thumbnail")
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(
video_info["thumbnail_url"][0]
)
# upload date
upload_date = self._html_search_meta(
"datePublished", video_webpage, "upload date", default=None
)
if not upload_date:
upload_date = self._search_regex(
[
r'(?s)id="eow-date.*?>(.*?)</span>',
r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>',
],
video_webpage,
"upload date",
default=None,
)
if upload_date:
upload_date = " ".join(re.sub(r"[/,-]", r" ", mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
m_cat_container = self._search_regex(
r"(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>",
video_webpage,
"categories",
default=None,
)
if m_cat_container:
category = self._html_search_regex(
r"(?s)<a[^<]+>(.*?)</a>", m_cat_container, "category", default=None
)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group("content"))
for m in re.finditer(self._meta_regex("og:video:tag"), video_webpage)
]
def _extract_count(count_name):
return str_to_int(
self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage,
count_name,
default=None,
)
)
like_count = _extract_count("like")
dislike_count = _extract_count("dislike")
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
if "length_seconds" not in video_info:
self._downloader.report_warning("unable to extract video duration")
video_duration = None
else:
video_duration = int(
compat_urllib_parse_unquote_plus(video_info["length_seconds"][0])
)
# annotations
video_annotations = None
if self._downloader.params.get("writeannotations", False):
video_annotations = self._extract_annotations(video_id)
def _map_to_format_list(urlmap):
formats = []
for itag, video_real_url in urlmap.items():
dct = {
"format_id": itag,
"url": video_real_url,
"player_url": player_url,
}
if itag in self._formats:
dct.update(self._formats[itag])
formats.append(dct)
return formats
if "conn" in video_info and video_info["conn"][0].startswith("rtmp"):
self.report_rtmp_download()
formats = [
{
"format_id": "_rtmp",
"protocol": "rtmp",
"url": video_info["conn"][0],
"player_url": player_url,
}
]
elif (
len(video_info.get("url_encoded_fmt_stream_map", [""])[0]) >= 1
or len(video_info.get("adaptive_fmts", [""])[0]) >= 1
):
encoded_url_map = (
video_info.get("url_encoded_fmt_stream_map", [""])[0]
+ ","
+ video_info.get("adaptive_fmts", [""])[0]
)
if "rtmpe%3Dyes" in encoded_url_map:
raise ExtractorError(
"rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.",
expected=True,
)
formats = []
for url_data_str in encoded_url_map.split(","):
url_data = compat_parse_qs(url_data_str)
if "itag" not in url_data or "url" not in url_data:
continue
format_id = url_data["itag"][0]
url = url_data["url"][0]
if "sig" in url_data:
url += "&signature=" + url_data["sig"][0]
elif "s" in url_data:
encrypted_sig = url_data["s"][0]
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
"JS player URL (1)",
default=None,
)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + "://www.youtube.com/embed/%s" % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, "Downloading embed webpage"
)
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, "JS player URL"
)
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage,
"age gate player URL",
)
player_url = json.loads(player_url_json)
if self._downloader.params.get("verbose"):
if player_url is None:
player_version = "unknown"
player_desc = "unknown"
else:
if player_url.endswith("swf"):
player_version = self._search_regex(
r"-(.+?)(?:/watch_as3)?\.swf$",
player_url,
"flash player",
fatal=False,
)
player_desc = "flash player %s" % player_version
else:
player_version = self._search_regex(
[
r"html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js",
r"(?:www|player)-([^/]+)/base\.js",
],
player_url,
"html5 player",
fatal=False,
)
player_desc = "html5 player %s" % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen(
"{%s} signature length %s, %s"
% (format_id, parts_sizes, player_desc)
)
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate
)
url += "&signature=" + signature
if "ratebypass" not in url:
url += "&ratebypass=yes"
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(
r"^(?P<width>\d+)[xX](?P<height>\d+)$", url_data.get("size", [""])[0]
)
width, height = (
(int(mobj.group("width")), int(mobj.group("height")))
if mobj
else (None, None)
)
dct = {
"format_id": format_id,
"url": url,
"player_url": player_url,
"filesize": int_or_none(url_data.get("clen", [None])[0]),
"tbr": float_or_none(url_data.get("bitrate", [None])[0], 1000),
"width": width,
"height": height,
"fps": int_or_none(url_data.get("fps", [None])[0]),
"format_note": url_data.get("quality_label", [None])[0]
or url_data.get("quality", [None])[0],
}
type_ = url_data.get("type", [None])[0]
if type_:
type_split = type_.split(";")
kind_ext = type_split[0].split("/")
if len(kind_ext) == 2:
kind, ext = kind_ext
dct["ext"] = ext
if kind in ("audio", "video"):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)',
type_,
):
if mobj.group("key") == "codecs":
codecs = mobj.group("val")
break
if codecs:
codecs = codecs.split(",")
if len(codecs) == 2:
acodec, vcodec = codecs[0], codecs[1]
else:
acodec, vcodec = (
(codecs[0], "none")
if kind == "audio"
else ("none", codecs[0])
)
dct.update(
{
"acodec": acodec,
"vcodec": vcodec,
}
)
if format_id in self._formats:
dct.update(self._formats[format_id])
formats.append(dct)
elif video_info.get("hlsvp"):
manifest_url = video_info["hlsvp"][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
else:
raise ExtractorError(
"no conn, hlsvp or url_encoded_fmt_stream_map information found in video info"
)
# Look for the DASH manifest
if self._downloader.params.get("youtube_include_dash_manifest", True):
dash_mpd_fatal = True
for dash_manifest_url in dash_mpds:
dash_formats = {}
try:
for df in self._parse_dash_manifest(
video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal
):
# Do not overwrite DASH format found in some previous DASH manifest
if df["format_id"] not in dash_formats:
dash_formats[df["format_id"]] = df
# Additional DASH manifests may end up in HTTP Error 403 therefore
# allow them to fail without bug report message if we already have
# some DASH manifest succeeded. This is temporary workaround to reduce
# burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning("Skipping DASH manifest: %r" % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/rg3/youtube-dl/issues/5774 for an
# example.
formats = [
f for f in formats if f["format_id"] not in dash_formats.keys()
]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage,
)
if stretched_m:
ratio = float(stretched_m.group("w")) / float(stretched_m.group("h"))
for f in formats:
if f.get("vcodec") != "none":
f["stretched_ratio"] = ratio
self._sort_formats(formats)
return {
"id": video_id,
"uploader": video_uploader,
"uploader_id": video_uploader_id,
"upload_date": upload_date,
"title": video_title,
"thumbnail": video_thumbnail,
"description": video_description,
"categories": video_categories,
"tags": video_tags,
"subtitles": video_subtitles,
"automatic_captions": automatic_captions,
"duration": video_duration,
"age_limit": 18 if age_gate else 0,
"annotations": video_annotations,
"webpage_url": proto + "://www.youtube.com/watch?v=%s" % video_id,
"view_count": view_count,
"like_count": like_count,
"dislike_count": dislike_count,
"average_rating": float_or_none(video_info.get("avg_rating", [None])[0]),
"formats": formats,
"is_live": is_live,
"start_time": start_time,
"end_time": end_time,
}
|
https://github.com/ytdl-org/youtube-dl/issues/7468
|
$ PYTHONPATH=`pwd` ./bin/youtube-dl --verbose --list-formats 'https://www.youtube.com/watch?v=Ms7iBXnlUO8'
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'--verbose', u'--list-formats', u'https://www.youtube.com/watch?v=Ms7iBXnlUO8']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2015.11.10
[debug] Git HEAD: fcd817a
[debug] Python version 2.7.9 - Linux-3.19.0-33-generic-x86_64-with-Ubuntu-15.04-vivid
[debug] exe versions: ffmpeg 2.5.8-0ubuntu0.15.04.1, ffprobe 2.5.8-0ubuntu0.15.04.1, rtmpdump 2.4
[debug] Proxy map: {}
[youtube] Ms7iBXnlUO8: Downloading webpage
Traceback (most recent call last):
File "./bin/youtube-dl", line 6, in <module>
youtube_dl.main()
File "/home/lukas/work/youtube-dl/youtube_dl/__init__.py", line 410, in main
_real_main(argv)
File "/home/lukas/work/youtube-dl/youtube_dl/__init__.py", line 400, in _real_main
retcode = ydl.download(all_urls)
File "/home/lukas/work/youtube-dl/youtube_dl/YoutubeDL.py", line 1666, in download
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
File "/home/lukas/work/youtube-dl/youtube_dl/YoutubeDL.py", line 661, in extract_info
ie_result = ie.extract(url)
File "/home/lukas/work/youtube-dl/youtube_dl/extractor/common.py", line 290, in extract
return self._real_extract(url)
File "/home/lukas/work/youtube-dl/youtube_dl/extractor/youtube.py", line 1080, in _real_extract
ytplayer_config = json.loads(json_code)
File "/usr/lib/python2.7/json/__init__.py", line 338, in loads
return _default_decoder.decode(s)
File "/usr/lib/python2.7/json/decoder.py", line 366, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python2.7/json/decoder.py", line 382, in raw_decode
obj, end = self.scan_once(s, idx)
ValueError: Unterminated string starting at: line 1 column 6498 (char 6497)
|
ValueError
|
def _get_automatic_captions(self, video_id, webpage):
    """Collect the automatic-caption (ASR) track URLs for a video.

    The already-downloaded webpage is passed in so the player config
    can be read without an extra request.  Returns a mapping of
    language code to a list of subtitle-format dicts, or an empty
    dict when no automatic captions can be found.
    """
    self.to_screen("%s: Looking for automatic captions" % video_id)
    player_config = self._get_ytplayer_config(webpage)
    err_msg = "Couldn't find automatic captions for %s" % video_id
    if player_config is None:
        self._downloader.report_warning(err_msg)
        return {}
    try:
        player_args = player_config["args"]
        caption_url = player_args["ttsurl"]
        timestamp = player_args["timestamp"]
        # First ask the caption server which tracks exist for this video.
        list_url = "%s&%s" % (
            caption_url,
            compat_urllib_parse.urlencode(
                {
                    "type": "list",
                    "tlangs": 1,
                    "asrs": 1,
                }
            ),
        )
        caption_list = self._download_xml(list_url, video_id)
        original_lang_node = caption_list.find("track")
        if original_lang_node is None:
            self._downloader.report_warning("Video doesn't have automatic captions")
            return {}
        original_lang = original_lang_node.attrib["lang_code"]
        caption_kind = original_lang_node.attrib.get("kind", "")
        # Build one caption URL per (target language, format) pair.
        sub_lang_list = {}
        for target_node in caption_list.findall("target"):
            target_lang = target_node.attrib["lang_code"]
            sub_lang_list[target_lang] = [
                {
                    "url": "%s&%s" % (
                        caption_url,
                        compat_urllib_parse.urlencode(
                            {
                                "lang": original_lang,
                                "tlang": target_lang,
                                "fmt": fmt,
                                "ts": timestamp,
                                "kind": caption_kind,
                            }
                        ),
                    ),
                    "ext": fmt,
                }
                for fmt in ["sbv", "vtt", "srt"]
            ]
        return sub_lang_list
    # An extractor error can be raise by the download process if there are
    # no automatic captions but there are subtitles
    except (KeyError, ExtractorError):
        self._downloader.report_warning(err_msg)
        return {}
|
def _get_automatic_captions(self, video_id, webpage):
    """We need the webpage for getting the captions url, pass it as an
    argument to speed up the process.

    Returns a dict mapping language code to a list of subtitle-format
    dicts, or an empty dict when no automatic captions can be found.
    """
    self.to_screen("%s: Looking for automatic captions" % video_id)
    # Use the shared ytplayer-config helper instead of an ad-hoc
    # non-greedy regex + json.loads: the old pattern
    # r";ytplayer.config = ({.*?});" could cut the JSON short at an
    # embedded "};" and crash with "ValueError: Unterminated string"
    # (https://github.com/ytdl-org/youtube-dl/issues/7468).
    player_config = self._get_ytplayer_config(webpage)
    err_msg = "Couldn't find automatic captions for %s" % video_id
    if player_config is None:
        # Warn and degrade gracefully instead of raising when no player
        # config is extractable from the page.
        self._downloader.report_warning(err_msg)
        return {}
    try:
        args = player_config["args"]
        caption_url = args["ttsurl"]
        timestamp = args["timestamp"]
        # We get the available subtitles
        list_params = compat_urllib_parse.urlencode(
            {
                "type": "list",
                "tlangs": 1,
                "asrs": 1,
            }
        )
        list_url = caption_url + "&" + list_params
        caption_list = self._download_xml(list_url, video_id)
        original_lang_node = caption_list.find("track")
        if original_lang_node is None:
            self._downloader.report_warning("Video doesn't have automatic captions")
            return {}
        original_lang = original_lang_node.attrib["lang_code"]
        caption_kind = original_lang_node.attrib.get("kind", "")
        sub_lang_list = {}
        for lang_node in caption_list.findall("target"):
            sub_lang = lang_node.attrib["lang_code"]
            sub_formats = []
            for ext in ["sbv", "vtt", "srt"]:
                params = compat_urllib_parse.urlencode(
                    {
                        "lang": original_lang,
                        "tlang": sub_lang,
                        "fmt": ext,
                        "ts": timestamp,
                        "kind": caption_kind,
                    }
                )
                sub_formats.append(
                    {
                        "url": caption_url + "&" + params,
                        "ext": ext,
                    }
                )
            sub_lang_list[sub_lang] = sub_formats
        return sub_lang_list
    # An extractor error can be raise by the download process if there are
    # no automatic captions but there are subtitles
    except (KeyError, ExtractorError):
        self._downloader.report_warning(err_msg)
        return {}
|
https://github.com/ytdl-org/youtube-dl/issues/7468
|
$ PYTHONPATH=`pwd` ./bin/youtube-dl --verbose --list-formats 'https://www.youtube.com/watch?v=Ms7iBXnlUO8'
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'--verbose', u'--list-formats', u'https://www.youtube.com/watch?v=Ms7iBXnlUO8']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2015.11.10
[debug] Git HEAD: fcd817a
[debug] Python version 2.7.9 - Linux-3.19.0-33-generic-x86_64-with-Ubuntu-15.04-vivid
[debug] exe versions: ffmpeg 2.5.8-0ubuntu0.15.04.1, ffprobe 2.5.8-0ubuntu0.15.04.1, rtmpdump 2.4
[debug] Proxy map: {}
[youtube] Ms7iBXnlUO8: Downloading webpage
Traceback (most recent call last):
File "./bin/youtube-dl", line 6, in <module>
youtube_dl.main()
File "/home/lukas/work/youtube-dl/youtube_dl/__init__.py", line 410, in main
_real_main(argv)
File "/home/lukas/work/youtube-dl/youtube_dl/__init__.py", line 400, in _real_main
retcode = ydl.download(all_urls)
File "/home/lukas/work/youtube-dl/youtube_dl/YoutubeDL.py", line 1666, in download
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
File "/home/lukas/work/youtube-dl/youtube_dl/YoutubeDL.py", line 661, in extract_info
ie_result = ie.extract(url)
File "/home/lukas/work/youtube-dl/youtube_dl/extractor/common.py", line 290, in extract
return self._real_extract(url)
File "/home/lukas/work/youtube-dl/youtube_dl/extractor/youtube.py", line 1080, in _real_extract
ytplayer_config = json.loads(json_code)
File "/usr/lib/python2.7/json/__init__.py", line 338, in loads
return _default_decoder.decode(s)
File "/usr/lib/python2.7/json/decoder.py", line 366, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python2.7/json/decoder.py", line 382, in raw_decode
obj, end = self.scan_once(s, idx)
ValueError: Unterminated string starting at: line 1 column 6498 (char 6497)
|
ValueError
|
def _real_extract(self, url):
    """Extract metadata and downloadable formats for a Vimeo video.

    Handles plain vimeo.com, pro and player.vimeo.com URLs,
    embed-restricted videos (headers smuggled into *url* by the
    embedding-page extractor) and password-protected videos
    (--video-password).  Returns the standard info dict.
    """
    # A Referer (and possibly other headers) may have been smuggled into
    # the url by the extractor of the embedding page.
    url, data = unsmuggle_url(url)
    headers = std_headers
    if data is not None:
        headers = headers.copy()
        headers.update(data)
    if "Referer" not in headers:
        headers["Referer"] = url
    # Extract ID from URL
    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group("id")
    orig_url = url
    if mobj.group("pro") or mobj.group("player"):
        url = "http://player.vimeo.com/video/" + video_id
    # Password-protected videos are unlocked with a "<id>_password" cookie
    # whose value is the md5 hexdigest of the password.
    password = self._downloader.params.get("videopassword", None)
    if password:
        headers["Cookie"] = "%s_password=%s" % (
            video_id,
            hashlib.md5(password).hexdigest(),
        )
    # Retrieve video webpage to extract further information
    request = compat_urllib_request.Request(url, None, headers)
    try:
        webpage = self._download_webpage(request, video_id)
    except ExtractorError as ee:
        # A 403 with this body means the video may only be watched from the
        # page that embeds it; tell the user to pass that page's URL.
        if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
            errmsg = ee.cause.read()
            if (
                b"Because of its privacy settings, this video cannot be played here"
                in errmsg
            ):
                raise ExtractorError(
                    "Cannot download embed-only video without embedding "
                    "URL. Please call youtube-dl with the URL of the page "
                    "that embeds this video.",
                    expected=True,
                )
        raise
    # Now we begin extracting as much information as we can from what we
    # retrieved. First we extract the information common to all extractors,
    # and latter we extract those that are Vimeo specific.
    self.report_extraction(video_id)
    # Extract the config JSON
    try:
        try:
            config_url = self._html_search_regex(
                r' data-config-url="(.+?)"', webpage, "config URL"
            )
            config_json = self._download_webpage(config_url, video_id)
            config = json.loads(config_json)
        except RegexNotFoundError:
            # For pro videos or player.vimeo.com urls
            # We try to find out to which variable is assigned the config dic
            m_variable_name = re.search("(\w)\.video\.id", webpage)
            if m_variable_name is not None:
                config_re = r"%s=({[^}].+?});" % re.escape(m_variable_name.group(1))
            else:
                config_re = [r" = {config:({.+?}),assets:", r"(?:[abc])=({.+?});"]
            config = self._search_regex(
                config_re, webpage, "info section", flags=re.DOTALL
            )
            config = json.loads(config)
    except Exception as e:
        if re.search(
            "The creator of this video has not given you permission to embed it on this domain.",
            webpage,
        ):
            raise ExtractorError(
                'The author has restricted the access to this video, try with the "--referer" option'
            )
        if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
            # The "_video_password_verified" marker stops an infinite
            # verify/retry recursion when the supplied password is wrong.
            if data and "_video_password_verified" in data:
                raise ExtractorError("video password verification failed!")
            self._verify_video_password(url, video_id, webpage)
            return self._real_extract(
                smuggle_url(url, {"_video_password_verified": "verified"})
            )
        else:
            raise ExtractorError("Unable to extract info section", cause=e)
    else:
        # "view" == 4 apparently marks player-side password protection —
        # TODO confirm against vimeo player config semantics.
        if config.get("view") == 4:
            config = self._verify_player_video_password(url, video_id)
    # Extract title
    video_title = config["video"]["title"]
    # Extract uploader and uploader_id
    video_uploader = config["video"]["owner"]["name"]
    video_uploader_id = (
        config["video"]["owner"]["url"].split("/")[-1]
        if config["video"]["owner"]["url"]
        else None
    )
    # Extract video thumbnail
    video_thumbnail = config["video"].get("thumbnail")
    if video_thumbnail is None:
        video_thumbs = config["video"].get("thumbs")
        if video_thumbs and isinstance(video_thumbs, dict):
            # Pick the widest thumbnail available.
            _, video_thumbnail = sorted(
                (int(width if width.isdigit() else 0), t_url)
                for (width, t_url) in video_thumbs.items()
            )[-1]
    # Extract video description
    video_description = self._html_search_regex(
        r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
        webpage,
        "description",
        default=None,
    )
    if not video_description:
        video_description = self._html_search_meta("description", webpage, default=None)
    if not video_description and mobj.group("pro"):
        orig_webpage = self._download_webpage(
            orig_url, video_id, note="Downloading webpage for description", fatal=False
        )
        if orig_webpage:
            video_description = self._html_search_meta(
                "description", orig_webpage, default=None
            )
    if not video_description and not mobj.group("player"):
        self._downloader.report_warning("Cannot find video description")
    # Extract video duration
    video_duration = int_or_none(config["video"].get("duration"))
    # Extract upload date
    video_upload_date = None
    # NOTE: mobj is rebound here; the _VALID_URL match is no longer used.
    mobj = re.search(
        r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage
    )
    if mobj is not None:
        video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
    try:
        view_count = int(self._search_regex(r"UserPlays:(\d+)", webpage, "view count"))
        like_count = int(self._search_regex(r"UserLikes:(\d+)", webpage, "like count"))
        comment_count = int(
            self._search_regex(r"UserComments:(\d+)", webpage, "comment count")
        )
    except RegexNotFoundError:
        # This info is only available in vimeo.com/{id} urls
        view_count = None
        like_count = None
        comment_count = None
    # Vimeo specific: extract request signature and timestamp
    sig = config["request"]["signature"]
    timestamp = config["request"]["timestamp"]
    # Vimeo specific: extract video codec and quality information
    # First consider quality, then codecs, then take everything
    codecs = [("vp6", "flv"), ("vp8", "flv"), ("h264", "mp4")]
    files = {"hd": [], "sd": [], "other": []}
    config_files = config["video"].get("files") or config["request"].get("files")
    for codec_name, codec_extension in codecs:
        for quality in config_files.get(codec_name, []):
            format_id = "-".join((codec_name, quality)).lower()
            key = quality if quality in files else "other"
            video_url = None
            if isinstance(config_files[codec_name], dict):
                file_info = config_files[codec_name][quality]
                video_url = file_info.get("url")
            else:
                file_info = {}
            if video_url is None:
                # Fall back to the signed play_redirect endpoint.
                video_url = (
                    "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location="
                    % (video_id, sig, timestamp, quality, codec_name.upper())
                )
            files[key].append(
                {
                    "ext": codec_extension,
                    "url": video_url,
                    "format_id": format_id,
                    "width": file_info.get("width"),
                    "height": file_info.get("height"),
                }
            )
    # Order formats from worst to best: other < sd < hd.
    formats = []
    for key in ("other", "sd", "hd"):
        formats += files[key]
    if len(formats) == 0:
        raise ExtractorError("No known codec found")
    subtitles = {}
    text_tracks = config["request"].get("text_tracks")
    if text_tracks:
        for tt in text_tracks:
            subtitles[tt["lang"]] = "http://vimeo.com" + tt["url"]
    video_subtitles = self.extract_subtitles(video_id, subtitles)
    if self._downloader.params.get("listsubtitles", False):
        self._list_available_subtitles(video_id, subtitles)
        return
    return {
        "id": video_id,
        "uploader": video_uploader,
        "uploader_id": video_uploader_id,
        "upload_date": video_upload_date,
        "title": video_title,
        "thumbnail": video_thumbnail,
        "description": video_description,
        "duration": video_duration,
        "formats": formats,
        "webpage_url": url,
        "view_count": view_count,
        "like_count": like_count,
        "comment_count": comment_count,
        "subtitles": video_subtitles,
    }
|
def _real_extract(self, url):
    """Extract metadata and downloadable formats for a Vimeo video.

    Handles plain vimeo.com, pro and player.vimeo.com URLs,
    embed-restricted videos (headers smuggled into *url* by the
    embedding-page extractor) and password-protected videos
    (--video-password).  Returns the standard info dict.
    """
    # A Referer (and possibly other headers) may have been smuggled into
    # the url by the extractor of the embedding page.
    url, data = unsmuggle_url(url)
    # Copy up front so we never mutate the shared std_headers dict
    # (the old code wrote "Referer"/"Cookie" straight into it when no
    # headers were smuggled).
    headers = std_headers.copy()
    if data is not None:
        headers.update(data)
    if "Referer" not in headers:
        headers["Referer"] = url
    # Extract ID from URL
    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group("id")
    orig_url = url
    if mobj.group("pro") or mobj.group("player"):
        url = "http://player.vimeo.com/video/" + video_id
    # Password-protected videos must send a "<id>_password" cookie whose
    # value is the md5 hexdigest of the password; without it the page has
    # no config to extract and verification loops until it fails
    # (https://github.com/ytdl-org/youtube-dl/issues/5001).
    password = self._downloader.params.get("videopassword", None)
    if password:
        import hashlib  # local import keeps this fix self-contained
        headers["Cookie"] = "%s_password=%s" % (
            video_id,
            hashlib.md5(password).hexdigest(),
        )
    # Retrieve video webpage to extract further information
    request = compat_urllib_request.Request(url, None, headers)
    try:
        webpage = self._download_webpage(request, video_id)
    except ExtractorError as ee:
        # A 403 with this body means the video may only be watched from the
        # page that embeds it; tell the user to pass that page's URL.
        if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
            errmsg = ee.cause.read()
            if (
                b"Because of its privacy settings, this video cannot be played here"
                in errmsg
            ):
                raise ExtractorError(
                    "Cannot download embed-only video without embedding "
                    "URL. Please call youtube-dl with the URL of the page "
                    "that embeds this video.",
                    expected=True,
                )
        raise
    # Now we begin extracting as much information as we can from what we
    # retrieved. First we extract the information common to all extractors,
    # and latter we extract those that are Vimeo specific.
    self.report_extraction(video_id)
    # Extract the config JSON
    try:
        try:
            config_url = self._html_search_regex(
                r' data-config-url="(.+?)"', webpage, "config URL"
            )
            config_json = self._download_webpage(config_url, video_id)
            config = json.loads(config_json)
        except RegexNotFoundError:
            # For pro videos or player.vimeo.com urls
            # We try to find out to which variable is assigned the config dic
            m_variable_name = re.search(r"(\w)\.video\.id", webpage)
            if m_variable_name is not None:
                config_re = r"%s=({[^}].+?});" % re.escape(m_variable_name.group(1))
            else:
                config_re = [r" = {config:({.+?}),assets:", r"(?:[abc])=({.+?});"]
            config = self._search_regex(
                config_re, webpage, "info section", flags=re.DOTALL
            )
            config = json.loads(config)
    except Exception as e:
        if re.search(
            "The creator of this video has not given you permission to embed it on this domain.",
            webpage,
        ):
            raise ExtractorError(
                'The author has restricted the access to this video, try with the "--referer" option'
            )
        if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
            # The "_video_password_verified" marker stops an infinite
            # verify/retry recursion when the supplied password is wrong.
            if data and "_video_password_verified" in data:
                raise ExtractorError("video password verification failed!")
            self._verify_video_password(url, video_id, webpage)
            return self._real_extract(
                smuggle_url(url, {"_video_password_verified": "verified"})
            )
        else:
            raise ExtractorError("Unable to extract info section", cause=e)
    else:
        if config.get("view") == 4:
            config = self._verify_player_video_password(url, video_id)
    # Extract title
    video_title = config["video"]["title"]
    # Extract uploader and uploader_id
    video_uploader = config["video"]["owner"]["name"]
    video_uploader_id = (
        config["video"]["owner"]["url"].split("/")[-1]
        if config["video"]["owner"]["url"]
        else None
    )
    # Extract video thumbnail
    video_thumbnail = config["video"].get("thumbnail")
    if video_thumbnail is None:
        video_thumbs = config["video"].get("thumbs")
        if video_thumbs and isinstance(video_thumbs, dict):
            # Pick the widest thumbnail available.
            _, video_thumbnail = sorted(
                (int(width if width.isdigit() else 0), t_url)
                for (width, t_url) in video_thumbs.items()
            )[-1]
    # Extract video description
    video_description = self._html_search_regex(
        r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
        webpage,
        "description",
        default=None,
    )
    if not video_description:
        video_description = self._html_search_meta("description", webpage, default=None)
    if not video_description and mobj.group("pro"):
        orig_webpage = self._download_webpage(
            orig_url, video_id, note="Downloading webpage for description", fatal=False
        )
        if orig_webpage:
            video_description = self._html_search_meta(
                "description", orig_webpage, default=None
            )
    if not video_description and not mobj.group("player"):
        self._downloader.report_warning("Cannot find video description")
    # Extract video duration
    video_duration = int_or_none(config["video"].get("duration"))
    # Extract upload date
    video_upload_date = None
    mobj = re.search(
        r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage
    )
    if mobj is not None:
        video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
    try:
        view_count = int(self._search_regex(r"UserPlays:(\d+)", webpage, "view count"))
        like_count = int(self._search_regex(r"UserLikes:(\d+)", webpage, "like count"))
        comment_count = int(
            self._search_regex(r"UserComments:(\d+)", webpage, "comment count")
        )
    except RegexNotFoundError:
        # This info is only available in vimeo.com/{id} urls
        view_count = None
        like_count = None
        comment_count = None
    # Vimeo specific: extract request signature and timestamp
    sig = config["request"]["signature"]
    timestamp = config["request"]["timestamp"]
    # Vimeo specific: extract video codec and quality information
    # First consider quality, then codecs, then take everything
    codecs = [("vp6", "flv"), ("vp8", "flv"), ("h264", "mp4")]
    files = {"hd": [], "sd": [], "other": []}
    config_files = config["video"].get("files") or config["request"].get("files")
    for codec_name, codec_extension in codecs:
        for quality in config_files.get(codec_name, []):
            format_id = "-".join((codec_name, quality)).lower()
            key = quality if quality in files else "other"
            video_url = None
            if isinstance(config_files[codec_name], dict):
                file_info = config_files[codec_name][quality]
                video_url = file_info.get("url")
            else:
                file_info = {}
            if video_url is None:
                # Fall back to the signed play_redirect endpoint.
                video_url = (
                    "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location="
                    % (video_id, sig, timestamp, quality, codec_name.upper())
                )
            files[key].append(
                {
                    "ext": codec_extension,
                    "url": video_url,
                    "format_id": format_id,
                    "width": file_info.get("width"),
                    "height": file_info.get("height"),
                }
            )
    # Order formats from worst to best: other < sd < hd.
    formats = []
    for key in ("other", "sd", "hd"):
        formats += files[key]
    if len(formats) == 0:
        raise ExtractorError("No known codec found")
    subtitles = {}
    text_tracks = config["request"].get("text_tracks")
    if text_tracks:
        for tt in text_tracks:
            subtitles[tt["lang"]] = "http://vimeo.com" + tt["url"]
    video_subtitles = self.extract_subtitles(video_id, subtitles)
    if self._downloader.params.get("listsubtitles", False):
        self._list_available_subtitles(video_id, subtitles)
        return
    return {
        "id": video_id,
        "uploader": video_uploader,
        "uploader_id": video_uploader_id,
        "upload_date": video_upload_date,
        "title": video_title,
        "thumbnail": video_thumbnail,
        "description": video_description,
        "duration": video_duration,
        "formats": formats,
        "webpage_url": url,
        "view_count": view_count,
        "like_count": like_count,
        "comment_count": comment_count,
        "subtitles": video_subtitles,
    }
|
https://github.com/ytdl-org/youtube-dl/issues/5001
|
benjaminmikiten@benjaminmikiten ~/Dropbox/printing_vids $ youtube-dl --batch-file batch.txt --video-password dcp2015 -v
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['--batch-file', 'batch.txt', '--video-password', u'PRIVATE', '-v']
[debug] Batch file urls: [u'https://vimeo.com/118076403', u'https://vimeo.com/118062456', u'https://vimeo.com/118072708', u'https://vimeo.com/118079046', u'https://vimeo.com/118080059', u'https://vimeo.com/118081777', u'https://vimeo.com/118085658', u'https://vimeo.com/118073801', u'https://vimeo.com/118086390', u'https://vimeo.com/118092186', u'https://vimeo.com/118092776']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2015.02.19.3
[debug] Python version 2.7.6 - Darwin-14.1.0-x86_64-i386-64bit
[debug] exe versions: ffmpeg 2.5.4, ffprobe 2.5.4
[debug] Proxy map: {}
[vimeo] 118076403: Downloading webpage
[vimeo] 118076403: Extracting information
[vimeo] 118076403: Verifying the password
[vimeo] 118076403: Downloading webpage
[vimeo] 118076403: Extracting information
ERROR: video password verification failed!; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/vimeo.py", line 264, in _real_extract
flags=re.DOTALL)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 531, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
RegexNotFoundError: Unable to extract info section; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 643, in extract_info
ie_result = ie.extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 269, in extract
return self._real_extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/vimeo.py", line 275, in _real_extract
smuggle_url(url, {'_video_password_verified': 'verified'}))
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/vimeo.py", line 272, in _real_extract
raise ExtractorError('video password verification failed!')
ExtractorError: video password verification failed!; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
|
RegexNotFoundError
|
def _verify_video_password(self, url, video_id, webpage):
    """POST the user-supplied --video-password to vimeo's /password
    endpoint and return the resulting webpage.

    Raises ExtractorError when no password was supplied on the
    command line.
    """
    video_password = self._downloader.params.get("videopassword", None)
    if video_password is None:
        raise ExtractorError(
            "This video is protected by a password, use the --video-password option",
            expected=True,
        )
    xsrf_token = self._search_regex(r"xsrft = \'(.*?)\'", webpage, "login token")
    post_data = urlencode_postdata(
        {
            "password": video_password,
            "token": xsrf_token,
        }
    )
    # vimeo only supports https now, but the user can give an http url
    if url.startswith("http://"):
        url = url.replace("http://", "https://")
    request = compat_urllib_request.Request(url + "/password", post_data)
    for header_name, header_value in (
        ("Content-Type", "application/x-www-form-urlencoded"),
        ("Cookie", "xsrft=%s" % xsrf_token),
    ):
        request.add_header(header_name, header_value)
    return self._download_webpage(
        request, video_id, "Verifying the password", "Wrong password"
    )
|
def _verify_video_password(self, url, video_id, webpage):
    """Submit the user-supplied video password to Vimeo and return the
    resulting webpage.

    Raises ExtractorError when no --video-password was supplied, or when
    the download layer reports "Wrong password".
    """
    password = self._downloader.params.get("videopassword", None)
    if password is None:
        raise ExtractorError(
            "This video is protected by a password, use the --video-password option",
            expected=True,
        )
    # Vimeo has used both "xsrft: '...'" and "xsrft = '...'" in the page
    # source over time; accept either separator so the login token is
    # still found (fixes the "Unable to extract login token" /
    # "video password verification failed!" loop, issue #5001).
    token = self._search_regex(r"xsrft\s*[=:]\s*\'(.*?)\'", webpage, "login token")
    data = compat_urllib_parse.urlencode(
        {
            "password": password,
            "token": token,
        }
    )
    # Vimeo only supports https now; posting the password over plain http
    # fails the verification, so upgrade instead of downgrading the scheme.
    if url.startswith("http://"):
        url = url.replace("http://", "https://")
    password_request = compat_urllib_request.Request(url + "/password", data)
    password_request.add_header("Content-Type", "application/x-www-form-urlencoded")
    password_request.add_header("Cookie", "xsrft=%s" % token)
    return self._download_webpage(
        password_request, video_id, "Verifying the password", "Wrong password"
    )
|
https://github.com/ytdl-org/youtube-dl/issues/5001
|
benjaminmikiten@benjaminmikiten ~/Dropbox/printing_vids $ youtube-dl --batch-file batch.txt --video-password dcp2015 -v
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['--batch-file', 'batch.txt', '--video-password', u'PRIVATE', '-v']
[debug] Batch file urls: [u'https://vimeo.com/118076403', u'https://vimeo.com/118062456', u'https://vimeo.com/118072708', u'https://vimeo.com/118079046', u'https://vimeo.com/118080059', u'https://vimeo.com/118081777', u'https://vimeo.com/118085658', u'https://vimeo.com/118073801', u'https://vimeo.com/118086390', u'https://vimeo.com/118092186', u'https://vimeo.com/118092776']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2015.02.19.3
[debug] Python version 2.7.6 - Darwin-14.1.0-x86_64-i386-64bit
[debug] exe versions: ffmpeg 2.5.4, ffprobe 2.5.4
[debug] Proxy map: {}
[vimeo] 118076403: Downloading webpage
[vimeo] 118076403: Extracting information
[vimeo] 118076403: Verifying the password
[vimeo] 118076403: Downloading webpage
[vimeo] 118076403: Extracting information
ERROR: video password verification failed!; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/vimeo.py", line 264, in _real_extract
flags=re.DOTALL)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 531, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
RegexNotFoundError: Unable to extract info section; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 643, in extract_info
ie_result = ie.extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 269, in extract
return self._real_extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/vimeo.py", line 275, in _real_extract
smuggle_url(url, {'_video_password_verified': 'verified'}))
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/vimeo.py", line 272, in _real_extract
raise ExtractorError('video password verification failed!')
ExtractorError: video password verification failed!; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
|
RegexNotFoundError
|
def _real_extract(self, url):
    """Extract metadata and downloadable formats for a Vimeo URL.

    The URL may be "smuggled" with extra data: request headers (e.g.
    Referer for embed-only videos) and a '_video_password_verified'
    marker used to break out of a failed password round-trip.
    """
    url, data = unsmuggle_url(url)
    headers = std_headers
    if data is not None:
        # Copy before mutating: std_headers is a shared module-level dict.
        headers = headers.copy()
        headers.update(data)
    if "Referer" not in headers:
        headers["Referer"] = url
    # Extract ID from URL
    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group("id")
    orig_url = url
    if mobj.group("pro") or mobj.group("player"):
        url = "http://player.vimeo.com/video/" + video_id
    # Retrieve video webpage to extract further information
    request = compat_urllib_request.Request(url, None, headers)
    try:
        webpage = self._download_webpage(request, video_id)
    except ExtractorError as ee:
        # A 403 with this marker means the video is embed-only and needs
        # a Referer header from the embedding page.
        if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
            errmsg = ee.cause.read()
            if (
                b"Because of its privacy settings, this video cannot be played here"
                in errmsg
            ):
                raise ExtractorError(
                    "Cannot download embed-only video without embedding "
                    "URL. Please call youtube-dl with the URL of the page "
                    "that embeds this video.",
                    expected=True,
                )
        raise
    # Now we begin extracting as much information as we can from what we
    # retrieved. First we extract the information common to all extractors,
    # and latter we extract those that are Vimeo specific.
    self.report_extraction(video_id)
    # Extract the config JSON
    try:
        try:
            config_url = self._html_search_regex(
                r' data-config-url="(.+?)"', webpage, "config URL"
            )
            config_json = self._download_webpage(config_url, video_id)
            config = json.loads(config_json)
        except RegexNotFoundError:
            # For pro videos or player.vimeo.com urls
            # We try to find out to which variable is assigned the config dic
            m_variable_name = re.search("(\w)\.video\.id", webpage)
            if m_variable_name is not None:
                config_re = r"%s=({[^}].+?});" % re.escape(m_variable_name.group(1))
            else:
                config_re = [r" = {config:({.+?}),assets:", r"(?:[abc])=({.+?});"]
            config = self._search_regex(
                config_re, webpage, "info section", flags=re.DOTALL
            )
            config = json.loads(config)
    except Exception as e:
        if re.search(
            "The creator of this video has not given you permission to embed it on this domain.",
            webpage,
        ):
            raise ExtractorError(
                'The author has restricted the access to this video, try with the "--referer" option'
            )
        if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
            # A password form means --video-password must be verified and
            # the extraction retried; the smuggled marker prevents an
            # infinite retry loop when verification did not take effect.
            if data and "_video_password_verified" in data:
                raise ExtractorError("video password verification failed!")
            self._verify_video_password(url, video_id, webpage)
            return self._real_extract(
                smuggle_url(url, {"_video_password_verified": "verified"})
            )
        else:
            raise ExtractorError("Unable to extract info section", cause=e)
    else:
        # NOTE(review): view == 4 appears to mark player-password-protected
        # configs — TODO confirm against Vimeo's player API.
        if config.get("view") == 4:
            config = self._verify_player_video_password(url, video_id)
    # Extract title
    video_title = config["video"]["title"]
    # Extract uploader and uploader_id
    video_uploader = config["video"]["owner"]["name"]
    video_uploader_id = (
        config["video"]["owner"]["url"].split("/")[-1]
        if config["video"]["owner"]["url"]
        else None
    )
    # Extract video thumbnail
    video_thumbnail = config["video"].get("thumbnail")
    if video_thumbnail is None:
        video_thumbs = config["video"].get("thumbs")
        if video_thumbs and isinstance(video_thumbs, dict):
            # Pick the thumb with the numerically largest width key.
            _, video_thumbnail = sorted(
                (int(width if width.isdigit() else 0), t_url)
                for (width, t_url) in video_thumbs.items()
            )[-1]
    # Extract video description
    video_description = self._html_search_regex(
        r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
        webpage,
        "description",
        default=None,
    )
    if not video_description:
        video_description = self._html_search_meta("description", webpage, default=None)
    if not video_description and mobj.group("pro"):
        # Pro/player pages lack the description; fall back to the page the
        # user actually gave us.
        orig_webpage = self._download_webpage(
            orig_url, video_id, note="Downloading webpage for description", fatal=False
        )
        if orig_webpage:
            video_description = self._html_search_meta(
                "description", orig_webpage, default=None
            )
    if not video_description and not mobj.group("player"):
        self._downloader.report_warning("Cannot find video description")
    # Extract video duration
    video_duration = int_or_none(config["video"].get("duration"))
    # Extract upload date
    video_upload_date = None
    # NOTE: mobj is rebound here; the _VALID_URL match is no longer usable.
    mobj = re.search(r'<time[^>]+datetime="([^"]+)"', webpage)
    if mobj is not None:
        video_upload_date = unified_strdate(mobj.group(1))
    try:
        view_count = int(self._search_regex(r"UserPlays:(\d+)", webpage, "view count"))
        like_count = int(self._search_regex(r"UserLikes:(\d+)", webpage, "like count"))
        comment_count = int(
            self._search_regex(r"UserComments:(\d+)", webpage, "comment count")
        )
    except RegexNotFoundError:
        # This info is only available in vimeo.com/{id} urls
        view_count = None
        like_count = None
        comment_count = None
    # Vimeo specific: extract request signature and timestamp
    sig = config["request"]["signature"]
    timestamp = config["request"]["timestamp"]
    # Vimeo specific: extract video codec and quality information
    # First consider quality, then codecs, then take everything
    codecs = [("vp6", "flv"), ("vp8", "flv"), ("h264", "mp4")]
    files = {"hd": [], "sd": [], "other": []}
    config_files = config["video"].get("files") or config["request"].get("files")
    for codec_name, codec_extension in codecs:
        for quality in config_files.get(codec_name, []):
            format_id = "-".join((codec_name, quality)).lower()
            key = quality if quality in files else "other"
            video_url = None
            if isinstance(config_files[codec_name], dict):
                file_info = config_files[codec_name][quality]
                video_url = file_info.get("url")
            else:
                file_info = {}
            if video_url is None:
                # No direct URL in the config: fall back to the signed
                # play_redirect endpoint.
                video_url = (
                    "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location="
                    % (video_id, sig, timestamp, quality, codec_name.upper())
                )
            files[key].append(
                {
                    "ext": codec_extension,
                    "url": video_url,
                    "format_id": format_id,
                    "width": file_info.get("width"),
                    "height": file_info.get("height"),
                }
            )
    # Worst-to-best ordering: other, then sd, then hd.
    formats = []
    for key in ("other", "sd", "hd"):
        formats += files[key]
    if len(formats) == 0:
        raise ExtractorError("No known codec found")
    subtitles = {}
    text_tracks = config["request"].get("text_tracks")
    if text_tracks:
        for tt in text_tracks:
            subtitles[tt["lang"]] = [
                {
                    "ext": "vtt",
                    "url": "http://vimeo.com" + tt["url"],
                }
            ]
    return {
        "id": video_id,
        "uploader": video_uploader,
        "uploader_id": video_uploader_id,
        "upload_date": video_upload_date,
        "title": video_title,
        "thumbnail": video_thumbnail,
        "description": video_description,
        "duration": video_duration,
        "formats": formats,
        "webpage_url": url,
        "view_count": view_count,
        "like_count": like_count,
        "comment_count": comment_count,
        "subtitles": subtitles,
    }
|
def _real_extract(self, url):
    """Extract metadata and downloadable formats for a Vimeo URL.

    The URL may be "smuggled" with extra data: request headers (e.g.
    Referer for embed-only videos) and a '_video_password_verified'
    marker used to break out of a failed password round-trip.
    """
    url, data = unsmuggle_url(url)
    headers = std_headers
    if data is not None:
        # Copy before mutating: std_headers is a shared module-level dict.
        headers = headers.copy()
        headers.update(data)
    if "Referer" not in headers:
        headers["Referer"] = url
    # Extract ID from URL
    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group("id")
    orig_url = url
    if mobj.group("pro") or mobj.group("player"):
        url = "http://player.vimeo.com/video/" + video_id
    password = self._downloader.params.get("videopassword", None)
    if password:
        # NOTE(review): presets a per-video password cookie (md5 of the
        # password) before the first request — presumably so a protected
        # page loads directly; TODO confirm against Vimeo behavior.
        headers["Cookie"] = "%s_password=%s" % (
            video_id,
            hashlib.md5(password.encode("utf-8")).hexdigest(),
        )
    # Retrieve video webpage to extract further information
    request = compat_urllib_request.Request(url, None, headers)
    try:
        webpage = self._download_webpage(request, video_id)
    except ExtractorError as ee:
        # A 403 with this marker means the video is embed-only and needs
        # a Referer header from the embedding page.
        if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
            errmsg = ee.cause.read()
            if (
                b"Because of its privacy settings, this video cannot be played here"
                in errmsg
            ):
                raise ExtractorError(
                    "Cannot download embed-only video without embedding "
                    "URL. Please call youtube-dl with the URL of the page "
                    "that embeds this video.",
                    expected=True,
                )
        raise
    # Now we begin extracting as much information as we can from what we
    # retrieved. First we extract the information common to all extractors,
    # and latter we extract those that are Vimeo specific.
    self.report_extraction(video_id)
    # Extract the config JSON
    try:
        try:
            config_url = self._html_search_regex(
                r' data-config-url="(.+?)"', webpage, "config URL"
            )
            config_json = self._download_webpage(config_url, video_id)
            config = json.loads(config_json)
        except RegexNotFoundError:
            # For pro videos or player.vimeo.com urls
            # We try to find out to which variable is assigned the config dic
            m_variable_name = re.search("(\w)\.video\.id", webpage)
            if m_variable_name is not None:
                config_re = r"%s=({[^}].+?});" % re.escape(m_variable_name.group(1))
            else:
                config_re = [r" = {config:({.+?}),assets:", r"(?:[abc])=({.+?});"]
            config = self._search_regex(
                config_re, webpage, "info section", flags=re.DOTALL
            )
            config = json.loads(config)
    except Exception as e:
        if re.search(
            "The creator of this video has not given you permission to embed it on this domain.",
            webpage,
        ):
            raise ExtractorError(
                'The author has restricted the access to this video, try with the "--referer" option'
            )
        if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
            # A password form means --video-password must be verified and
            # the extraction retried; the smuggled marker prevents an
            # infinite retry loop when verification did not take effect.
            if data and "_video_password_verified" in data:
                raise ExtractorError("video password verification failed!")
            self._verify_video_password(url, video_id, webpage)
            return self._real_extract(
                smuggle_url(url, {"_video_password_verified": "verified"})
            )
        else:
            raise ExtractorError("Unable to extract info section", cause=e)
    else:
        # NOTE(review): view == 4 appears to mark player-password-protected
        # configs — TODO confirm against Vimeo's player API.
        if config.get("view") == 4:
            config = self._verify_player_video_password(url, video_id)
    # Extract title
    video_title = config["video"]["title"]
    # Extract uploader and uploader_id
    video_uploader = config["video"]["owner"]["name"]
    video_uploader_id = (
        config["video"]["owner"]["url"].split("/")[-1]
        if config["video"]["owner"]["url"]
        else None
    )
    # Extract video thumbnail
    video_thumbnail = config["video"].get("thumbnail")
    if video_thumbnail is None:
        video_thumbs = config["video"].get("thumbs")
        if video_thumbs and isinstance(video_thumbs, dict):
            # Pick the thumb with the numerically largest width key.
            _, video_thumbnail = sorted(
                (int(width if width.isdigit() else 0), t_url)
                for (width, t_url) in video_thumbs.items()
            )[-1]
    # Extract video description
    video_description = self._html_search_regex(
        r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
        webpage,
        "description",
        default=None,
    )
    if not video_description:
        video_description = self._html_search_meta("description", webpage, default=None)
    if not video_description and mobj.group("pro"):
        # Pro/player pages lack the description; fall back to the page the
        # user actually gave us.
        orig_webpage = self._download_webpage(
            orig_url, video_id, note="Downloading webpage for description", fatal=False
        )
        if orig_webpage:
            video_description = self._html_search_meta(
                "description", orig_webpage, default=None
            )
    if not video_description and not mobj.group("player"):
        self._downloader.report_warning("Cannot find video description")
    # Extract video duration
    video_duration = int_or_none(config["video"].get("duration"))
    # Extract upload date
    video_upload_date = None
    # NOTE: mobj is rebound here; the _VALID_URL match is no longer usable.
    mobj = re.search(r'<time[^>]+datetime="([^"]+)"', webpage)
    if mobj is not None:
        video_upload_date = unified_strdate(mobj.group(1))
    try:
        view_count = int(self._search_regex(r"UserPlays:(\d+)", webpage, "view count"))
        like_count = int(self._search_regex(r"UserLikes:(\d+)", webpage, "like count"))
        comment_count = int(
            self._search_regex(r"UserComments:(\d+)", webpage, "comment count")
        )
    except RegexNotFoundError:
        # This info is only available in vimeo.com/{id} urls
        view_count = None
        like_count = None
        comment_count = None
    # Vimeo specific: extract request signature and timestamp
    sig = config["request"]["signature"]
    timestamp = config["request"]["timestamp"]
    # Vimeo specific: extract video codec and quality information
    # First consider quality, then codecs, then take everything
    codecs = [("vp6", "flv"), ("vp8", "flv"), ("h264", "mp4")]
    files = {"hd": [], "sd": [], "other": []}
    config_files = config["video"].get("files") or config["request"].get("files")
    for codec_name, codec_extension in codecs:
        for quality in config_files.get(codec_name, []):
            format_id = "-".join((codec_name, quality)).lower()
            key = quality if quality in files else "other"
            video_url = None
            if isinstance(config_files[codec_name], dict):
                file_info = config_files[codec_name][quality]
                video_url = file_info.get("url")
            else:
                file_info = {}
            if video_url is None:
                # No direct URL in the config: fall back to the signed
                # play_redirect endpoint.
                video_url = (
                    "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location="
                    % (video_id, sig, timestamp, quality, codec_name.upper())
                )
            files[key].append(
                {
                    "ext": codec_extension,
                    "url": video_url,
                    "format_id": format_id,
                    "width": file_info.get("width"),
                    "height": file_info.get("height"),
                }
            )
    # Worst-to-best ordering: other, then sd, then hd.
    formats = []
    for key in ("other", "sd", "hd"):
        formats += files[key]
    if len(formats) == 0:
        raise ExtractorError("No known codec found")
    subtitles = {}
    text_tracks = config["request"].get("text_tracks")
    if text_tracks:
        for tt in text_tracks:
            subtitles[tt["lang"]] = [
                {
                    "ext": "vtt",
                    "url": "http://vimeo.com" + tt["url"],
                }
            ]
    return {
        "id": video_id,
        "uploader": video_uploader,
        "uploader_id": video_uploader_id,
        "upload_date": video_upload_date,
        "title": video_title,
        "thumbnail": video_thumbnail,
        "description": video_description,
        "duration": video_duration,
        "formats": formats,
        "webpage_url": url,
        "view_count": view_count,
        "like_count": like_count,
        "comment_count": comment_count,
        "subtitles": subtitles,
    }
|
https://github.com/ytdl-org/youtube-dl/issues/5001
|
benjaminmikiten@benjaminmikiten ~/Dropbox/printing_vids $ youtube-dl --batch-file batch.txt --video-password dcp2015 -v
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['--batch-file', 'batch.txt', '--video-password', u'PRIVATE', '-v']
[debug] Batch file urls: [u'https://vimeo.com/118076403', u'https://vimeo.com/118062456', u'https://vimeo.com/118072708', u'https://vimeo.com/118079046', u'https://vimeo.com/118080059', u'https://vimeo.com/118081777', u'https://vimeo.com/118085658', u'https://vimeo.com/118073801', u'https://vimeo.com/118086390', u'https://vimeo.com/118092186', u'https://vimeo.com/118092776']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2015.02.19.3
[debug] Python version 2.7.6 - Darwin-14.1.0-x86_64-i386-64bit
[debug] exe versions: ffmpeg 2.5.4, ffprobe 2.5.4
[debug] Proxy map: {}
[vimeo] 118076403: Downloading webpage
[vimeo] 118076403: Extracting information
[vimeo] 118076403: Verifying the password
[vimeo] 118076403: Downloading webpage
[vimeo] 118076403: Extracting information
ERROR: video password verification failed!; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/vimeo.py", line 264, in _real_extract
flags=re.DOTALL)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 531, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
RegexNotFoundError: Unable to extract info section; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 643, in extract_info
ie_result = ie.extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 269, in extract
return self._real_extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/vimeo.py", line 275, in _real_extract
smuggle_url(url, {'_video_password_verified': 'verified'}))
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/vimeo.py", line 272, in _real_extract
raise ExtractorError('video password verification failed!')
ExtractorError: video password verification failed!; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
|
RegexNotFoundError
|
def prepare_filename(self, info_dict):
    """Generate the output filename."""
    try:
        fields = dict(info_dict)
        fields["epoch"] = int(time.time())
        size = self.params.get("autonumber_size")
        if size is None:
            size = 5
        fields["autonumber"] = ("%0" + str(size) + "d") % self._num_downloads
        if fields.get("playlist_index") is not None:
            # Zero-pad the index to the width of the playlist length.
            pad = len(str(fields["n_entries"]))
            fields["playlist_index"] = "%0*d" % (pad, fields["playlist_index"])
        if fields.get("resolution") is None:
            # Derive a resolution string from whatever dimensions we have.
            if fields.get("width") and fields.get("height"):
                fields["resolution"] = "%dx%d" % (fields["width"], fields["height"])
            elif fields.get("height"):
                fields["resolution"] = "%sp" % fields["height"]
            elif fields.get("width"):
                fields["resolution"] = "?x%d" % fields["width"]

        def _clean(key, value):
            # Sanitize every template value for filesystem use; only the
            # id field gets the more permissive is_id handling.
            return sanitize_filename(
                compat_str(value),
                restricted=self.params.get("restrictfilenames"),
                is_id=(key == "id"),
            )

        fields = dict(
            (k, _clean(k, v)) for k, v in fields.items() if v is not None
        )
        # Unknown template keys render as "NA" rather than raising KeyError.
        fields = collections.defaultdict(lambda: "NA", fields)
        tmpl = compat_expanduser(self.params.get("outtmpl", DEFAULT_OUTTMPL))
        filename = tmpl % fields
        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to workaround encoding issues with subprocess on python2 @ Windows
        if sys.version_info < (3, 0) and sys.platform == "win32":
            filename = encodeFilename(filename, True).decode(preferredencoding())
        return filename
    except ValueError as err:
        self.report_error(
            "Error in output template: "
            + str(err)
            + " (encoding: "
            + repr(preferredencoding())
            + ")"
        )
        return None
|
def prepare_filename(self, info_dict):
    """Generate the output filename.

    Expands the user's output template against a sanitized copy of
    info_dict; missing fields render as "NA". Returns None (after
    reporting an error) if the template itself is invalid.
    """
    try:
        template_dict = dict(info_dict)
        template_dict["epoch"] = int(time.time())
        autonumber_size = self.params.get("autonumber_size")
        if autonumber_size is None:
            autonumber_size = 5
        autonumber_templ = "%0" + str(autonumber_size) + "d"
        template_dict["autonumber"] = autonumber_templ % self._num_downloads
        if template_dict.get("playlist_index") is not None:
            # Zero-pad the index to the width of the playlist length.
            template_dict["playlist_index"] = "%0*d" % (
                len(str(template_dict["n_entries"])),
                template_dict["playlist_index"],
            )
        if template_dict.get("resolution") is None:
            # Derive a resolution string from whatever dimensions we have.
            if template_dict.get("width") and template_dict.get("height"):
                template_dict["resolution"] = "%dx%d" % (
                    template_dict["width"],
                    template_dict["height"],
                )
            elif template_dict.get("height"):
                template_dict["resolution"] = "%sp" % template_dict["height"]
            elif template_dict.get("width"):
                template_dict["resolution"] = "?x%d" % template_dict["width"]
        sanitize = lambda k, v: sanitize_filename(
            compat_str(v),
            restricted=self.params.get("restrictfilenames"),
            is_id=(k == "id"),
        )
        template_dict = dict(
            (k, sanitize(k, v)) for k, v in template_dict.items() if v is not None
        )
        template_dict = collections.defaultdict(lambda: "NA", template_dict)
        outtmpl = self.params.get("outtmpl", DEFAULT_OUTTMPL)
        tmpl = compat_expanduser(outtmpl)
        filename = tmpl % template_dict
        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to workaround encoding issues with subprocess on python2 @ Windows:
        # otherwise non-encodable characters are dropped when the name is
        # handed to ffmpeg, which then fails with "No such file or directory".
        if sys.version_info < (3, 0) and sys.platform == "win32":
            filename = encodeFilename(filename, True).decode(preferredencoding())
        return filename
    except ValueError as err:
        self.report_error(
            "Error in output template: "
            + str(err)
            + " (encoding: "
            + repr(preferredencoding())
            + ")"
        )
        return None
|
https://github.com/ytdl-org/youtube-dl/issues/4787
|
python -m youtube_dl -f 160+140 -v Y9o7ILCPSFU
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['-f', '160+140', '-v', 'Y9o7ILCPSFU']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2015.01.23.4
[debug] Git HEAD: 80a49d3
[debug] Python version 2.7.6 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-68756-g627f565, ffprobe N-68694-g7c210c4, rtmpdump 2.3
[debug] Proxy map: {}
[youtube] Y9o7ILCPSFU: Downloading webpage
[youtube] Y9o7ILCPSFU: Extracting video information
[youtube] Y9o7ILCPSFU: Downloading DASH manifest
[debug] Invoking downloader on 'https://r19---sn-oguesne6.googlevideo.com/videoplayback?id=63da3b20b08f4855&itag=160&source=youtube&requiressl=yes&pl=22&mv=m&ms=au&mm=31&nh=IgpwcjAyLm5ydDE5KgkxMjcuMC4wLjE&ratebypass=yes&mime=video/mp4&gir=yes&clen=1646278&lmt=1418493117455845&dur=272.105&mt=1422286131&upn=ST6SA4yKetw&signature=5982C88337984594B47DD0AE038E9E079D694B4F.09BBB44FE6C75594F04EF70F97E252516F31940D&key=dg_yt0&sver=3&fexp=900718,907263,922243,924638,927622,930016,9406
082,9406326,941004,943917,947225,948124,952302,952605,952901,955301,957103,95710
5,957201,959701,962715&ip=PRIVATE&ipbits=0&expire=1422307758&sparams=ip,ip
bits,expire,id,itag,source,requiressl,pl,mv,ms,mm,nh,ratebypass,mime,gir,clen,lmt,dur'
[download] Destination: Trentemøller - Miss You (Mahmut Orhan Remix)-Y9o7ILCPSFU.f160.mp4
[download] 100% of 1.57MiB in 00:33
[debug] Invoking downloader on 'https://r19---sn-oguesne6.googlevideo.com/videoplayback?id=63da3b20b08f4855&itag=140&source=youtube&requiressl=yes&pl=22&mv=m&ms=au&mm=31&nh=IgpwcjAyLm5ydDE5KgkxMjcuMC4wLjE&ratebypass=yes&mime=audio/mp4&gir=yes&clen=4371063&lmt=1418493113768940&dur=272.230&mt=1422286131&upn=ST6SA4yKetw&signature=23BBC9979391613C6CBE8C5FC1E54B3E01BEF5D7.8686958B23B344D9E7E24C1AF687379F4D291A47&key=dg_yt0&sver=3&fexp=900718,907263,922243,924638,927622,930016,9406082,9406326,941004,943917,947225,948124,952302,952605,952901,955301,957103,957105,957201,959701,962715&ip=PRIVATE&ipbits=0&expire=1422307758&sparams=ip,ipbits,expire,id,itag,source,requiressl,pl,mv,ms,mm,nh,ratebypass,mime,gir,clen,lm
t,dur'
[download] Destination: Trentemøller - Miss You (Mahmut Orhan Remix)-Y9o7ILCPSFU.f140.m4a
[download] 100% of 4.17MiB in 01:46
[ffmpeg] Merging formats into "Trentemøller - Miss You (Mahmut Orhan Remix)-Y9o7ILCPSFU.mp4"
[debug] ffmpeg command line: ffmpeg -y -i 'Trentemller - Miss You (Mahmut OrhanRemix)-Y9o7ILCPSFU.f160.mp4' -i 'Trentemller - Miss You (Mahmut Orhan Remix)-Y9o7ILCPSFU.f140.m4a' -c copy -map 0:v:0 -map 1:a:0 'Trentemller - Miss You (Mahmut Orhan Remix)-Y9o7ILCPSFU.mp4'
ERROR: Trentemller - Miss You (Mahmut Orhan Remix)-Y9o7ILCPSFU.f160.mp4: No such file or directory
Traceback (most recent call last):
File "youtube_dl\YoutubeDL.py", line 1384, in post_process
keep_video_wish, info = pp.run(info)
File "youtube_dl\postprocessor\ffmpeg.py", line 544, in run
self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args)
File "youtube_dl\postprocessor\ffmpeg.py", line 103, in run_ffmpeg_multiple_files
raise FFmpegPostProcessorError(msg)
FFmpegPostProcessorError
|
FFmpegPostProcessorError
|
def _real_extract(self, url):
proto = "http" if self._downloader.params.get("prefer_insecure", False) else "https"
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = (
proto
+ "://www.youtube.com/"
+ compat_urllib_parse.unquote(mobj.group(1)).lstrip("/")
)
video_id = self.extract_id(url)
# Get video webpage
url = (
proto
+ "://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999"
% video_id
)
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(
r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage
)
if mobj is not None:
player_url = re.sub(r"\\(.)", r"\1", mobj.group(1))
else:
player_url = None
# Get video info
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
data = compat_urllib_parse.urlencode(
{
"video_id": video_id,
"eurl": "https://youtube.googleapis.com/v/" + video_id,
"sts": self._search_regex(
r'"sts"\s*:\s*(\d+)', video_webpage, "sts", default=""
),
}
)
video_info_url = proto + "://www.youtube.com/get_video_info?" + data
video_info_webpage = self._download_webpage(
video_info_url,
video_id,
note="Refetching age-gated info webpage",
errnote="unable to download video info webpage",
)
video_info = compat_parse_qs(video_info_webpage)
else:
age_gate = False
try:
# Try looking directly into the video webpage
mobj = re.search(r";ytplayer\.config\s*=\s*({.*?});", video_webpage)
if not mobj:
raise ValueError("Could not find ytplayer.config") # caught below
json_code = uppercase_escape(mobj.group(1))
ytplayer_config = json.loads(json_code)
args = ytplayer_config["args"]
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
if "url_encoded_fmt_stream_map" not in args:
raise ValueError("No stream_map present") # caught below
except ValueError:
# We fallback to the get_video_info pages (used by the embed page)
self.report_video_info_webpage_download(video_id)
for el_type in ["&el=embedded", "&el=detailpage", "&el=vevo", ""]:
video_info_url = (
"%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en"
% (proto, video_id, el_type)
)
video_info_webpage = self._download_webpage(
video_info_url,
video_id,
note=False,
errnote="unable to download video info webpage",
)
video_info = compat_parse_qs(video_info_webpage)
if "token" in video_info:
break
if "token" not in video_info:
if "reason" in video_info:
raise ExtractorError(
"YouTube said: %s" % video_info["reason"][0],
expected=True,
video_id=video_id,
)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id,
)
if "view_count" in video_info:
view_count = int(video_info["view_count"][0])
else:
view_count = None
# Check for "rental" videos
if "ypc_video_rental_bar_text" in video_info and "author" not in video_info:
raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if "author" not in video_info:
raise ExtractorError("Unable to extract uploader name")
video_uploader = compat_urllib_parse.unquote_plus(video_info["author"][0])
# uploader_id
video_uploader_id = None
mobj = re.search(
r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">',
video_webpage,
)
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
self._downloader.report_warning("unable to extract uploader nickname")
# title
if "title" in video_info:
video_title = video_info["title"][0]
else:
self._downloader.report_warning("Unable to extract video title")
video_title = "_"
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(
r'<span itemprop="thumbnail".*?href="(.*?)">', video_webpage, re.DOTALL
)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif "thumbnail_url" not in video_info:
self._downloader.report_warning("unable to extract video thumbnail")
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse.unquote_plus(
video_info["thumbnail_url"][0]
)
# upload date
upload_date = None
mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
if mobj is None:
mobj = re.search(
r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
video_webpage,
)
if mobj is not None:
upload_date = " ".join(re.sub(r"[/,-]", r" ", mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
m_cat_container = self._search_regex(
r"(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>",
video_webpage,
"categories",
fatal=False,
)
if m_cat_container:
category = self._html_search_regex(
r"(?s)<a[^<]+>(.*?)</a>", m_cat_container, "category", default=None
)
video_categories = None if category is None else [category]
else:
video_categories = None
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = re.sub(
r"""(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
title="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
class="yt-uix-redirect-link"\s*>
[^<]+
</a>
""",
r"\1",
video_description,
)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(
r'<meta name="description" content="([^"]+)"', video_webpage
)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ""
def _extract_count(count_name):
count = self._search_regex(
r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
video_webpage,
count_name,
default=None,
)
if count is not None:
return int(count.replace(",", ""))
return None
like_count = _extract_count("like")
dislike_count = _extract_count("dislike")
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
if self._downloader.params.get("listsubtitles", False):
self._list_available_subtitles(video_id, video_webpage)
return
if "length_seconds" not in video_info:
self._downloader.report_warning("unable to extract video duration")
video_duration = None
else:
video_duration = int(
compat_urllib_parse.unquote_plus(video_info["length_seconds"][0])
)
# annotations
video_annotations = None
if self._downloader.params.get("writeannotations", False):
video_annotations = self._extract_annotations(video_id)
def _map_to_format_list(urlmap):
formats = []
for itag, video_real_url in urlmap.items():
dct = {
"format_id": itag,
"url": video_real_url,
"player_url": player_url,
}
if itag in self._formats:
dct.update(self._formats[itag])
formats.append(dct)
return formats
if "conn" in video_info and video_info["conn"][0].startswith("rtmp"):
self.report_rtmp_download()
formats = [
{
"format_id": "_rtmp",
"protocol": "rtmp",
"url": video_info["conn"][0],
"player_url": player_url,
}
]
elif (
len(video_info.get("url_encoded_fmt_stream_map", [""])[0]) >= 1
or len(video_info.get("adaptive_fmts", [""])[0]) >= 1
):
encoded_url_map = (
video_info.get("url_encoded_fmt_stream_map", [""])[0]
+ ","
+ video_info.get("adaptive_fmts", [""])[0]
)
if "rtmpe%3Dyes" in encoded_url_map:
raise ExtractorError(
"rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.",
expected=True,
)
url_map = {}
for url_data_str in encoded_url_map.split(","):
url_data = compat_parse_qs(url_data_str)
if "itag" not in url_data or "url" not in url_data:
continue
format_id = url_data["itag"][0]
url = url_data["url"][0]
if "sig" in url_data:
url += "&signature=" + url_data["sig"][0]
elif "s" in url_data:
encrypted_sig = url_data["s"][0]
if not age_gate:
jsplayer_url_json = self._search_regex(
r'"assets":.+?"js":\s*("[^"]+")', video_webpage, "JS player URL"
)
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage,
"age gate player URL",
)
player_url = json.loads(player_url_json)
if self._downloader.params.get("verbose"):
if player_url is None:
player_version = "unknown"
player_desc = "unknown"
else:
if player_url.endswith("swf"):
player_version = self._search_regex(
r"-(.+?)(?:/watch_as3)?\.swf$",
player_url,
"flash player",
fatal=False,
)
player_desc = "flash player %s" % player_version
else:
player_version = self._search_regex(
r"html5player-([^/]+?)(?:/html5player)?\.js",
player_url,
"html5 player",
fatal=False,
)
player_desc = "html5 player %s" % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen(
"{%s} signature length %s, %s"
% (format_id, parts_sizes, player_desc)
)
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate
)
url += "&signature=" + signature
if "ratebypass" not in url:
url += "&ratebypass=yes"
url_map[format_id] = url
formats = _map_to_format_list(url_map)
elif video_info.get("hlsvp"):
manifest_url = video_info["hlsvp"][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
else:
raise ExtractorError(
"no conn, hlsvp or url_encoded_fmt_stream_map information found in video info"
)
# Look for the DASH manifest
if self._downloader.params.get("youtube_include_dash_manifest", True):
dash_mpd = video_info.get("dashmpd")
if not dash_mpd:
self.report_warning("%s: DASH manifest missing" % video_id)
else:
dash_manifest_url = dash_mpd[0]
try:
dash_formats = self._parse_dash_manifest(
video_id, dash_manifest_url, player_url, age_gate
)
except (ExtractorError, KeyError) as e:
self.report_warning("Skipping DASH manifest: %r" % e, video_id)
else:
formats.extend(dash_formats)
self._sort_formats(formats)
return {
"id": video_id,
"uploader": video_uploader,
"uploader_id": video_uploader_id,
"upload_date": upload_date,
"title": video_title,
"thumbnail": video_thumbnail,
"description": video_description,
"categories": video_categories,
"subtitles": video_subtitles,
"duration": video_duration,
"age_limit": 18 if age_gate else 0,
"annotations": video_annotations,
"webpage_url": proto + "://www.youtube.com/watch?v=%s" % video_id,
"view_count": view_count,
"like_count": like_count,
"dislike_count": dislike_count,
"formats": formats,
}
|
def _real_extract(self, url):
    """Extract metadata and the format list for a single YouTube video.

    Handles redirected (age-verification) URLs, age-gated videos via the
    get_video_info endpoint, encrypted stream signatures, and RTMP / HTTP /
    HLS / DASH format discovery, returning the standard info dict.
    """
    proto = "http" if self._downloader.params.get("prefer_insecure", False) else "https"
    # Extract original video URL from URL with redirection, like age verification, using next_url parameter
    mobj = re.search(self._NEXT_URL_RE, url)
    if mobj:
        url = (
            proto
            + "://www.youtube.com/"
            + compat_urllib_parse.unquote(mobj.group(1)).lstrip("/")
        )
    video_id = self.extract_id(url)
    # Get video webpage
    url = (
        proto
        + "://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999"
        % video_id
    )
    video_webpage = self._download_webpage(url, video_id)
    # Attempt to extract SWF player URL
    mobj = re.search(
        r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage
    )
    if mobj is not None:
        player_url = re.sub(r"\\(.)", r"\1", mobj.group(1))
    else:
        player_url = None
    # Get video info
    if re.search(r'player-age-gate-content">', video_webpage) is not None:
        age_gate = True
        # We simulate the access to the video from www.youtube.com/v/{video_id}
        # this can be viewed without login into Youtube
        data = compat_urllib_parse.urlencode(
            {
                "video_id": video_id,
                "eurl": "https://youtube.googleapis.com/v/" + video_id,
                "sts": self._search_regex(
                    r'"sts"\s*:\s*(\d+)', video_webpage, "sts", default=""
                ),
            }
        )
        video_info_url = proto + "://www.youtube.com/get_video_info?" + data
        video_info_webpage = self._download_webpage(
            video_info_url,
            video_id,
            note="Refetching age-gated info webpage",
            errnote="unable to download video info webpage",
        )
        video_info = compat_parse_qs(video_info_webpage)
    else:
        age_gate = False
        try:
            # Try looking directly into the video webpage
            mobj = re.search(r";ytplayer\.config\s*=\s*({.*?});", video_webpage)
            if not mobj:
                raise ValueError("Could not find ytplayer.config")  # caught below
            json_code = uppercase_escape(mobj.group(1))
            ytplayer_config = json.loads(json_code)
            args = ytplayer_config["args"]
            # Convert to the same format returned by compat_parse_qs
            video_info = dict((k, [v]) for k, v in args.items())
            if "url_encoded_fmt_stream_map" not in args:
                raise ValueError("No stream_map present")  # caught below
        except ValueError:
            # We fallback to the get_video_info pages (used by the embed page)
            self.report_video_info_webpage_download(video_id)
            for el_type in ["&el=embedded", "&el=detailpage", "&el=vevo", ""]:
                video_info_url = (
                    "%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en"
                    % (proto, video_id, el_type)
                )
                video_info_webpage = self._download_webpage(
                    video_info_url,
                    video_id,
                    note=False,
                    errnote="unable to download video info webpage",
                )
                video_info = compat_parse_qs(video_info_webpage)
                if "token" in video_info:
                    break
    if "token" not in video_info:
        if "reason" in video_info:
            raise ExtractorError(
                "YouTube said: %s" % video_info["reason"][0],
                expected=True,
                video_id=video_id,
            )
        else:
            raise ExtractorError(
                '"token" parameter not in video info for unknown reason',
                video_id=video_id,
            )
    if "view_count" in video_info:
        view_count = int(video_info["view_count"][0])
    else:
        view_count = None
    # Check for "rental" videos
    if "ypc_video_rental_bar_text" in video_info and "author" not in video_info:
        raise ExtractorError('"rental" videos not supported')
    # Start extracting information
    self.report_information_extraction(video_id)
    # uploader
    if "author" not in video_info:
        raise ExtractorError("Unable to extract uploader name")
    video_uploader = compat_urllib_parse.unquote_plus(video_info["author"][0])
    # uploader_id
    video_uploader_id = None
    mobj = re.search(
        r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">',
        video_webpage,
    )
    if mobj is not None:
        video_uploader_id = mobj.group(1)
    else:
        self._downloader.report_warning("unable to extract uploader nickname")
    # title
    if "title" in video_info:
        video_title = video_info["title"][0]
    else:
        self._downloader.report_warning("Unable to extract video title")
        video_title = "_"
    # thumbnail image
    # We try first to get a high quality image:
    m_thumb = re.search(
        r'<span itemprop="thumbnail".*?href="(.*?)">', video_webpage, re.DOTALL
    )
    if m_thumb is not None:
        video_thumbnail = m_thumb.group(1)
    elif "thumbnail_url" not in video_info:
        self._downloader.report_warning("unable to extract video thumbnail")
        video_thumbnail = None
    else:  # don't panic if we can't find it
        video_thumbnail = compat_urllib_parse.unquote_plus(
            video_info["thumbnail_url"][0]
        )
    # upload date
    upload_date = None
    mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
    if mobj is None:
        mobj = re.search(
            r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
            video_webpage,
        )
    if mobj is not None:
        upload_date = " ".join(re.sub(r"[/,-]", r" ", mobj.group(1)).split())
        upload_date = unified_strdate(upload_date)
    m_cat_container = self._search_regex(
        r"(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>",
        video_webpage,
        "categories",
        fatal=False,
    )
    if m_cat_container:
        category = self._html_search_regex(
            r"(?s)<a[^<]+>(.*?)</a>", m_cat_container, "category", default=None
        )
        video_categories = None if category is None else [category]
    else:
        video_categories = None
    # description
    video_description = get_element_by_id("eow-description", video_webpage)
    if video_description:
        video_description = re.sub(
            r"""(?x)
            <a\s+
                (?:[a-zA-Z-]+="[^"]+"\s+)*?
                title="([^"]+)"\s+
                (?:[a-zA-Z-]+="[^"]+"\s+)*?
                class="yt-uix-redirect-link"\s*>
            [^<]+
            </a>
            """,
            r"\1",
            video_description,
        )
        video_description = clean_html(video_description)
    else:
        fd_mobj = re.search(
            r'<meta name="description" content="([^"]+)"', video_webpage
        )
        if fd_mobj:
            video_description = unescapeHTML(fd_mobj.group(1))
        else:
            video_description = ""

    def _extract_count(count_name):
        # Pull the like/dislike counter out of the watch page markup.
        count = self._search_regex(
            r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
            video_webpage,
            count_name,
            default=None,
        )
        if count is not None:
            return int(count.replace(",", ""))
        return None

    like_count = _extract_count("like")
    dislike_count = _extract_count("dislike")
    # subtitles
    video_subtitles = self.extract_subtitles(video_id, video_webpage)
    if self._downloader.params.get("listsubtitles", False):
        self._list_available_subtitles(video_id, video_webpage)
        return
    if "length_seconds" not in video_info:
        self._downloader.report_warning("unable to extract video duration")
        video_duration = None
    else:
        video_duration = int(
            compat_urllib_parse.unquote_plus(video_info["length_seconds"][0])
        )
    # annotations
    video_annotations = None
    if self._downloader.params.get("writeannotations", False):
        video_annotations = self._extract_annotations(video_id)

    def _map_to_format_list(urlmap):
        # Turn {itag: url} into youtube-dl format dicts, merging known itag metadata.
        formats = []
        for itag, video_real_url in urlmap.items():
            dct = {
                "format_id": itag,
                "url": video_real_url,
                "player_url": player_url,
            }
            if itag in self._formats:
                dct.update(self._formats[itag])
            formats.append(dct)
        return formats

    if "conn" in video_info and video_info["conn"][0].startswith("rtmp"):
        self.report_rtmp_download()
        formats = [
            {
                "format_id": "_rtmp",
                "protocol": "rtmp",
                "url": video_info["conn"][0],
                "player_url": player_url,
            }
        ]
    elif (
        # BUGFIX: test the length of the first (string) value, not of the
        # list returned by compat_parse_qs. get_video_info can return these
        # keys mapped to an empty string ([""]), in which case the old
        # `len(video_info.get(key, [])) >= 1` check was truthy and this
        # branch ran with no usable stream data, crashing later with a
        # TypeError instead of falling through to hlsvp / the error message.
        len(video_info.get("url_encoded_fmt_stream_map", [""])[0]) >= 1
        or len(video_info.get("adaptive_fmts", [""])[0]) >= 1
    ):
        encoded_url_map = (
            video_info.get("url_encoded_fmt_stream_map", [""])[0]
            + ","
            + video_info.get("adaptive_fmts", [""])[0]
        )
        if "rtmpe%3Dyes" in encoded_url_map:
            raise ExtractorError(
                "rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.",
                expected=True,
            )
        url_map = {}
        for url_data_str in encoded_url_map.split(","):
            url_data = compat_parse_qs(url_data_str)
            if "itag" not in url_data or "url" not in url_data:
                continue
            format_id = url_data["itag"][0]
            url = url_data["url"][0]
            if "sig" in url_data:
                url += "&signature=" + url_data["sig"][0]
            elif "s" in url_data:
                # Encrypted signature: needs the JS/SWF player to decrypt.
                encrypted_sig = url_data["s"][0]
                if not age_gate:
                    jsplayer_url_json = self._search_regex(
                        r'"assets":.+?"js":\s*("[^"]+")', video_webpage, "JS player URL"
                    )
                    player_url = json.loads(jsplayer_url_json)
                    if player_url is None:
                        player_url_json = self._search_regex(
                            r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
                            video_webpage,
                            "age gate player URL",
                        )
                        player_url = json.loads(player_url_json)
                if self._downloader.params.get("verbose"):
                    if player_url is None:
                        player_version = "unknown"
                        player_desc = "unknown"
                    else:
                        if player_url.endswith("swf"):
                            player_version = self._search_regex(
                                r"-(.+?)(?:/watch_as3)?\.swf$",
                                player_url,
                                "flash player",
                                fatal=False,
                            )
                            player_desc = "flash player %s" % player_version
                        else:
                            player_version = self._search_regex(
                                r"html5player-([^/]+?)(?:/html5player)?\.js",
                                player_url,
                                "html5 player",
                                fatal=False,
                            )
                            player_desc = "html5 player %s" % player_version
                    parts_sizes = self._signature_cache_id(encrypted_sig)
                    self.to_screen(
                        "{%s} signature length %s, %s"
                        % (format_id, parts_sizes, player_desc)
                    )
                signature = self._decrypt_signature(
                    encrypted_sig, video_id, player_url, age_gate
                )
                url += "&signature=" + signature
            if "ratebypass" not in url:
                url += "&ratebypass=yes"
            url_map[format_id] = url
        formats = _map_to_format_list(url_map)
    elif video_info.get("hlsvp"):
        manifest_url = video_info["hlsvp"][0]
        url_map = self._extract_from_m3u8(manifest_url, video_id)
        formats = _map_to_format_list(url_map)
    else:
        raise ExtractorError(
            "no conn, hlsvp or url_encoded_fmt_stream_map information found in video info"
        )
    # Look for the DASH manifest
    if self._downloader.params.get("youtube_include_dash_manifest", True):
        dash_mpd = video_info.get("dashmpd")
        if not dash_mpd:
            self.report_warning("%s: DASH manifest missing" % video_id)
        else:
            dash_manifest_url = dash_mpd[0]
            try:
                dash_formats = self._parse_dash_manifest(
                    video_id, dash_manifest_url, player_url, age_gate
                )
            except (ExtractorError, KeyError) as e:
                # DASH is optional; a broken manifest must not abort extraction.
                self.report_warning("Skipping DASH manifest: %r" % e, video_id)
            else:
                formats.extend(dash_formats)
    self._sort_formats(formats)
    return {
        "id": video_id,
        "uploader": video_uploader,
        "uploader_id": video_uploader_id,
        "upload_date": upload_date,
        "title": video_title,
        "thumbnail": video_thumbnail,
        "description": video_description,
        "categories": video_categories,
        "subtitles": video_subtitles,
        "duration": video_duration,
        "age_limit": 18 if age_gate else 0,
        "annotations": video_annotations,
        "webpage_url": proto + "://www.youtube.com/watch?v=%s" % video_id,
        "view_count": view_count,
        "like_count": like_count,
        "dislike_count": dislike_count,
        "formats": formats,
    }
|
https://github.com/ytdl-org/youtube-dl/issues/4431
|
youtube-dl -f bestvideo+bestaudio/best --verbose lqQg6PlCWgI
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['-f', 'bestvideo+bestaudio/best', '--verbose', 'lqQg6PlCWgI']
[debug] Encodings: locale cp1252, fs mbcs, out cp850, pref cp1252
[debug] youtube-dl version 2014.12.01
[debug] Python version 2.7.8 - Windows-8-6.2.9200
[debug] exe versions: ffmpeg N-56165-, ffprobe N-56165-, rtmpdump 2.4
[debug] Proxy map: {}
[youtube] lqQg6PlCWgI: Downloading webpage
[youtube] lqQg6PlCWgI: Extracting video information
Traceback (most recent call last):
File "__main__.py", line 19, in <module>
File "youtube_dl\__init__.pyo", line 355, in main
File "youtube_dl\__init__.pyo", line 345, in _real_main
File "youtube_dl\YoutubeDL.pyo", line 1117, in download
File "youtube_dl\YoutubeDL.pyo", line 553, in extract_info
File "youtube_dl\extractor\common.pyo", line 241, in extract
File "youtube_dl\extractor\youtube.pyo", line 949, in _real_extract
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def process_info(self, info_dict):
    """Process a single resolved IE result.

    Enforces --max-downloads, emits the forced "--get-*" printings, then
    (unless simulating) writes the requested side files (description,
    annotations, subtitles, info JSON, thumbnail), downloads the media and
    runs post-processing, recording the result in the download archive.
    """
    assert info_dict.get("_type", "video") == "video"
    max_downloads = self.params.get("max_downloads")
    if max_downloads is not None:
        if self._num_downloads >= int(max_downloads):
            raise MaxDownloadsReached()
    # Keep the untruncated title for --get-title; cap the working title at 200 chars.
    info_dict["fulltitle"] = info_dict["title"]
    if len(info_dict["title"]) > 200:
        info_dict["title"] = info_dict["title"][:197] + "..."
    # Keep for backwards compatibility
    info_dict["stitle"] = info_dict["title"]
    if "format" not in info_dict:
        info_dict["format"] = info_dict["ext"]
    reason = self._match_entry(info_dict)
    if reason is not None:
        # Filtered out (matchtitle/rejecttitle/archive/etc.); nothing to do.
        self.to_screen("[download] " + reason)
        return
    self._num_downloads += 1
    filename = self.prepare_filename(info_dict)
    # Forced printings
    if self.params.get("forcetitle", False):
        self.to_stdout(info_dict["fulltitle"])
    if self.params.get("forceid", False):
        self.to_stdout(info_dict["id"])
    if self.params.get("forceurl", False):
        if info_dict.get("requested_formats") is not None:
            # Merged result (e.g. bestvideo+bestaudio) has no top-level
            # "url"; print one URL per requested part instead.
            for f in info_dict["requested_formats"]:
                self.to_stdout(f["url"] + f.get("play_path", ""))
        else:
            # For RTMP URLs, also include the playpath
            self.to_stdout(info_dict["url"] + info_dict.get("play_path", ""))
    if (
        self.params.get("forcethumbnail", False)
        and info_dict.get("thumbnail") is not None
    ):
        self.to_stdout(info_dict["thumbnail"])
    if (
        self.params.get("forcedescription", False)
        and info_dict.get("description") is not None
    ):
        self.to_stdout(info_dict["description"])
    if self.params.get("forcefilename", False) and filename is not None:
        self.to_stdout(filename)
    if (
        self.params.get("forceduration", False)
        and info_dict.get("duration") is not None
    ):
        self.to_stdout(formatSeconds(info_dict["duration"]))
    if self.params.get("forceformat", False):
        self.to_stdout(info_dict["format"])
    if self.params.get("forcejson", False):
        info_dict["_filename"] = filename
        self.to_stdout(json.dumps(info_dict))
    if self.params.get("dump_single_json", False):
        # NOTE(review): only the filename is recorded here; the JSON itself
        # is presumably dumped by the caller — confirm against the call site.
        info_dict["_filename"] = filename
    # Do nothing else if in simulate mode
    if self.params.get("simulate", False):
        return
    if filename is None:
        return
    try:
        dn = os.path.dirname(encodeFilename(filename))
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
    except (OSError, IOError) as err:
        self.report_error("unable to create directory " + compat_str(err))
        return
    if self.params.get("writedescription", False):
        descfn = filename + ".description"
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(descfn)
        ):
            self.to_screen("[info] Video description is already present")
        else:
            try:
                self.to_screen("[info] Writing video description to: " + descfn)
                with io.open(encodeFilename(descfn), "w", encoding="utf-8") as descfile:
                    descfile.write(info_dict["description"])
            except (KeyError, TypeError):
                # Missing/None description is not fatal; just warn.
                self.report_warning("There's no description to write.")
            except (OSError, IOError):
                self.report_error("Cannot write description file " + descfn)
                return
    if self.params.get("writeannotations", False):
        annofn = filename + ".annotations.xml"
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(annofn)
        ):
            self.to_screen("[info] Video annotations are already present")
        else:
            try:
                self.to_screen("[info] Writing video annotations to: " + annofn)
                with io.open(encodeFilename(annofn), "w", encoding="utf-8") as annofile:
                    annofile.write(info_dict["annotations"])
            except (KeyError, TypeError):
                self.report_warning("There are no annotations to write.")
            except (OSError, IOError):
                self.report_error("Cannot write annotations file: " + annofn)
                return
    subtitles_are_requested = any(
        [self.params.get("writesubtitles", False), self.params.get("writeautomaticsub")]
    )
    if subtitles_are_requested and "subtitles" in info_dict and info_dict["subtitles"]:
        # subtitles download errors are already managed as troubles in relevant IE
        # that way it will silently go on when used with unsupporting IE
        subtitles = info_dict["subtitles"]
        sub_format = self.params.get("subtitlesformat", "srt")
        for sub_lang in subtitles.keys():
            sub = subtitles[sub_lang]
            if sub is None:
                continue
            try:
                sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                if self.params.get("nooverwrites", False) and os.path.exists(
                    encodeFilename(sub_filename)
                ):
                    self.to_screen(
                        "[info] Video subtitle %s.%s is already_present"
                        % (sub_lang, sub_format)
                    )
                else:
                    self.to_screen("[info] Writing video subtitles to: " + sub_filename)
                    with io.open(
                        encodeFilename(sub_filename), "w", encoding="utf-8"
                    ) as subfile:
                        subfile.write(sub)
            except (OSError, IOError):
                self.report_error("Cannot write subtitles file " + sub_filename)
                return
    if self.params.get("writeinfojson", False):
        infofn = os.path.splitext(filename)[0] + ".info.json"
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(infofn)
        ):
            self.to_screen("[info] Video description metadata is already present")
        else:
            self.to_screen(
                "[info] Writing video description metadata as JSON to: " + infofn
            )
            try:
                write_json_file(info_dict, infofn)
            except (OSError, IOError):
                self.report_error("Cannot write metadata to JSON file " + infofn)
                return
    if self.params.get("writethumbnail", False):
        if info_dict.get("thumbnail") is not None:
            thumb_format = determine_ext(info_dict["thumbnail"], "jpg")
            thumb_filename = os.path.splitext(filename)[0] + "." + thumb_format
            if self.params.get("nooverwrites", False) and os.path.exists(
                encodeFilename(thumb_filename)
            ):
                self.to_screen(
                    "[%s] %s: Thumbnail is already present"
                    % (info_dict["extractor"], info_dict["id"])
                )
            else:
                self.to_screen(
                    "[%s] %s: Downloading thumbnail ..."
                    % (info_dict["extractor"], info_dict["id"])
                )
                try:
                    uf = self.urlopen(info_dict["thumbnail"])
                    with open(thumb_filename, "wb") as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen(
                        "[%s] %s: Writing thumbnail to: %s"
                        % (info_dict["extractor"], info_dict["id"], thumb_filename)
                    )
                except (
                    compat_urllib_error.URLError,
                    compat_http_client.HTTPException,
                    socket.error,
                ) as err:
                    # Thumbnail failures are non-fatal.
                    self.report_warning(
                        'Unable to download thumbnail "%s": %s'
                        % (info_dict["thumbnail"], compat_str(err))
                    )
    if not self.params.get("skip_download", False):
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(filename)
        ):
            success = True
        else:
            try:
                def dl(name, info):
                    # Delegate the actual transfer to the matching FileDownloader.
                    fd = get_suitable_downloader(info)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get("verbose"):
                        self.to_stdout(
                            "[debug] Invoking downloader on %r" % info.get("url")
                        )
                    return fd.download(name, info)
                if info_dict.get("requested_formats") is not None:
                    # Multiple formats requested: download each part, then
                    # merge with ffmpeg/avconv if available.
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self, not self.params.get("keepvideo"))
                    if not merger._executable:
                        postprocessors = []
                        self.report_warning(
                            "You have requested multiple "
                            "formats but ffmpeg or avconv are not installed."
                            " The formats won't be merged"
                        )
                    else:
                        postprocessors = [merger]
                    for f in info_dict["requested_formats"]:
                        new_info = dict(info_dict)
                        new_info.update(f)
                        fname = self.prepare_filename(new_info)
                        fname = prepend_extension(fname, "f%s" % f["format_id"])
                        downloaded.append(fname)
                        partial_success = dl(fname, new_info)
                        success = success and partial_success
                    info_dict["__postprocessors"] = postprocessors
                    info_dict["__files_to_merge"] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (
                compat_urllib_error.URLError,
                compat_http_client.HTTPException,
                socket.error,
            ) as err:
                self.report_error("unable to download video data: %s" % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError,) as err:
                self.report_error(
                    "content too short (expected %s bytes and served %s)"
                    % (err.expected, err.downloaded)
                )
                return
    if success:
        try:
            self.post_process(filename, info_dict)
        except PostProcessingError as err:
            self.report_error("postprocessing: %s" % str(err))
            return
        self.record_download_archive(info_dict)
|
def process_info(self, info_dict):
    """Process a single resolved IE result.

    Enforces --max-downloads, emits the forced "--get-*" printings, then
    (unless simulating) writes the requested side files (description,
    annotations, subtitles, info JSON, thumbnail), downloads the media and
    runs post-processing, recording the result in the download archive.
    """
    assert info_dict.get("_type", "video") == "video"
    max_downloads = self.params.get("max_downloads")
    if max_downloads is not None:
        if self._num_downloads >= int(max_downloads):
            raise MaxDownloadsReached()
    # Keep the untruncated title for --get-title; cap the working title at 200 chars.
    info_dict["fulltitle"] = info_dict["title"]
    if len(info_dict["title"]) > 200:
        info_dict["title"] = info_dict["title"][:197] + "..."
    # Keep for backwards compatibility
    info_dict["stitle"] = info_dict["title"]
    if "format" not in info_dict:
        info_dict["format"] = info_dict["ext"]
    reason = self._match_entry(info_dict)
    if reason is not None:
        self.to_screen("[download] " + reason)
        return
    self._num_downloads += 1
    filename = self.prepare_filename(info_dict)
    # Forced printings
    if self.params.get("forcetitle", False):
        self.to_stdout(info_dict["fulltitle"])
    if self.params.get("forceid", False):
        self.to_stdout(info_dict["id"])
    if self.params.get("forceurl", False):
        # BUGFIX: a merged result (e.g. --format bestvideo+bestaudio) carries
        # its parts in "requested_formats" and has no top-level "url", so the
        # unconditional info_dict["url"] lookup raised KeyError: 'url'.
        # Print one URL per requested part in that case.
        if info_dict.get("requested_formats") is not None:
            for f in info_dict["requested_formats"]:
                self.to_stdout(f["url"] + f.get("play_path", ""))
        else:
            # For RTMP URLs, also include the playpath
            self.to_stdout(info_dict["url"] + info_dict.get("play_path", ""))
    if (
        self.params.get("forcethumbnail", False)
        and info_dict.get("thumbnail") is not None
    ):
        self.to_stdout(info_dict["thumbnail"])
    if (
        self.params.get("forcedescription", False)
        and info_dict.get("description") is not None
    ):
        self.to_stdout(info_dict["description"])
    if self.params.get("forcefilename", False) and filename is not None:
        self.to_stdout(filename)
    if (
        self.params.get("forceduration", False)
        and info_dict.get("duration") is not None
    ):
        self.to_stdout(formatSeconds(info_dict["duration"]))
    if self.params.get("forceformat", False):
        self.to_stdout(info_dict["format"])
    if self.params.get("forcejson", False):
        info_dict["_filename"] = filename
        self.to_stdout(json.dumps(info_dict))
    if self.params.get("dump_single_json", False):
        info_dict["_filename"] = filename
    # Do nothing else if in simulate mode
    if self.params.get("simulate", False):
        return
    if filename is None:
        return
    try:
        dn = os.path.dirname(encodeFilename(filename))
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
    except (OSError, IOError) as err:
        self.report_error("unable to create directory " + compat_str(err))
        return
    if self.params.get("writedescription", False):
        descfn = filename + ".description"
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(descfn)
        ):
            self.to_screen("[info] Video description is already present")
        else:
            try:
                self.to_screen("[info] Writing video description to: " + descfn)
                with io.open(encodeFilename(descfn), "w", encoding="utf-8") as descfile:
                    descfile.write(info_dict["description"])
            except (KeyError, TypeError):
                # Missing/None description is not fatal; just warn.
                self.report_warning("There's no description to write.")
            except (OSError, IOError):
                self.report_error("Cannot write description file " + descfn)
                return
    if self.params.get("writeannotations", False):
        annofn = filename + ".annotations.xml"
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(annofn)
        ):
            self.to_screen("[info] Video annotations are already present")
        else:
            try:
                self.to_screen("[info] Writing video annotations to: " + annofn)
                with io.open(encodeFilename(annofn), "w", encoding="utf-8") as annofile:
                    annofile.write(info_dict["annotations"])
            except (KeyError, TypeError):
                self.report_warning("There are no annotations to write.")
            except (OSError, IOError):
                self.report_error("Cannot write annotations file: " + annofn)
                return
    subtitles_are_requested = any(
        [self.params.get("writesubtitles", False), self.params.get("writeautomaticsub")]
    )
    if subtitles_are_requested and "subtitles" in info_dict and info_dict["subtitles"]:
        # subtitles download errors are already managed as troubles in relevant IE
        # that way it will silently go on when used with unsupporting IE
        subtitles = info_dict["subtitles"]
        sub_format = self.params.get("subtitlesformat", "srt")
        for sub_lang in subtitles.keys():
            sub = subtitles[sub_lang]
            if sub is None:
                continue
            try:
                sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                if self.params.get("nooverwrites", False) and os.path.exists(
                    encodeFilename(sub_filename)
                ):
                    self.to_screen(
                        "[info] Video subtitle %s.%s is already_present"
                        % (sub_lang, sub_format)
                    )
                else:
                    self.to_screen("[info] Writing video subtitles to: " + sub_filename)
                    with io.open(
                        encodeFilename(sub_filename), "w", encoding="utf-8"
                    ) as subfile:
                        subfile.write(sub)
            except (OSError, IOError):
                self.report_error("Cannot write subtitles file " + sub_filename)
                return
    if self.params.get("writeinfojson", False):
        infofn = os.path.splitext(filename)[0] + ".info.json"
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(infofn)
        ):
            self.to_screen("[info] Video description metadata is already present")
        else:
            self.to_screen(
                "[info] Writing video description metadata as JSON to: " + infofn
            )
            try:
                write_json_file(info_dict, infofn)
            except (OSError, IOError):
                self.report_error("Cannot write metadata to JSON file " + infofn)
                return
    if self.params.get("writethumbnail", False):
        if info_dict.get("thumbnail") is not None:
            thumb_format = determine_ext(info_dict["thumbnail"], "jpg")
            thumb_filename = os.path.splitext(filename)[0] + "." + thumb_format
            if self.params.get("nooverwrites", False) and os.path.exists(
                encodeFilename(thumb_filename)
            ):
                self.to_screen(
                    "[%s] %s: Thumbnail is already present"
                    % (info_dict["extractor"], info_dict["id"])
                )
            else:
                self.to_screen(
                    "[%s] %s: Downloading thumbnail ..."
                    % (info_dict["extractor"], info_dict["id"])
                )
                try:
                    uf = self.urlopen(info_dict["thumbnail"])
                    with open(thumb_filename, "wb") as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen(
                        "[%s] %s: Writing thumbnail to: %s"
                        % (info_dict["extractor"], info_dict["id"], thumb_filename)
                    )
                except (
                    compat_urllib_error.URLError,
                    compat_http_client.HTTPException,
                    socket.error,
                ) as err:
                    # Thumbnail failures are non-fatal.
                    self.report_warning(
                        'Unable to download thumbnail "%s": %s'
                        % (info_dict["thumbnail"], compat_str(err))
                    )
    if not self.params.get("skip_download", False):
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(filename)
        ):
            success = True
        else:
            try:
                def dl(name, info):
                    # Delegate the actual transfer to the matching FileDownloader.
                    fd = get_suitable_downloader(info)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get("verbose"):
                        self.to_stdout(
                            "[debug] Invoking downloader on %r" % info.get("url")
                        )
                    return fd.download(name, info)
                if info_dict.get("requested_formats") is not None:
                    # Multiple formats requested: download each part, then
                    # merge with ffmpeg/avconv if available.
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self, not self.params.get("keepvideo"))
                    if not merger._executable:
                        postprocessors = []
                        self.report_warning(
                            "You have requested multiple "
                            "formats but ffmpeg or avconv are not installed."
                            " The formats won't be merged"
                        )
                    else:
                        postprocessors = [merger]
                    for f in info_dict["requested_formats"]:
                        new_info = dict(info_dict)
                        new_info.update(f)
                        fname = self.prepare_filename(new_info)
                        fname = prepend_extension(fname, "f%s" % f["format_id"])
                        downloaded.append(fname)
                        partial_success = dl(fname, new_info)
                        success = success and partial_success
                    info_dict["__postprocessors"] = postprocessors
                    info_dict["__files_to_merge"] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (
                compat_urllib_error.URLError,
                compat_http_client.HTTPException,
                socket.error,
            ) as err:
                self.report_error("unable to download video data: %s" % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError,) as err:
                self.report_error(
                    "content too short (expected %s bytes and served %s)"
                    % (err.expected, err.downloaded)
                )
                return
    if success:
        try:
            self.post_process(filename, info_dict)
        except PostProcessingError as err:
            self.report_error("postprocessing: %s" % str(err))
            return
        self.record_download_archive(info_dict)
|
https://github.com/ytdl-org/youtube-dl/issues/2883
|
$ youtube-dl --format='bestvideo+bestaudio' --get-url https://www.youtube.com/watch?v=MjQG1s3Isgg
Traceback (most recent call last):
File "/usr/bin/youtube-dl", line 6, in <module>
youtube_dl.main()
File "/usr/lib64/python2.7/site-packages/youtube_dl/__init__.py", line 847, in main
_real_main(argv)
File "/usr/lib64/python2.7/site-packages/youtube_dl/__init__.py", line 837, in _real_main
retcode = ydl.download(all_urls)
File "/usr/lib64/python2.7/site-packages/youtube_dl/YoutubeDL.py", line 1039, in download
self.extract_info(url)
File "/usr/lib64/python2.7/site-packages/youtube_dl/YoutubeDL.py", line 527, in extract_info
return self.process_ie_result(ie_result, download, extra_info)
File "/usr/lib64/python2.7/site-packages/youtube_dl/YoutubeDL.py", line 564, in process_ie_result
return self.process_video_result(ie_result, download=download)
File "/usr/lib64/python2.7/site-packages/youtube_dl/YoutubeDL.py", line 819, in process_video_result
self.process_info(new_info)
File "/usr/lib64/python2.7/site-packages/youtube_dl/YoutubeDL.py", line 860, in process_info
self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
KeyError: u'url'
|
KeyError
|
def process_info(self, info_dict):
    """Process a single resolved IE result.

    Prints any forced metadata, writes the optional side files
    (description, subtitles, info JSON, thumbnail) and finally performs
    the media download and post-processing.  Returns None early whenever
    a step fails, a filter rejects the entry, or the run is
    simulate-only.
    """
    assert info_dict.get("_type", "video") == "video"
    # Increment the download count here to match the previous behaviour.
    self.increment_downloads()
    info_dict["fulltitle"] = info_dict["title"]
    if len(info_dict["title"]) > 200:
        info_dict["title"] = info_dict["title"][:197] + "..."
    # Keep for backwards compatibility
    info_dict["stitle"] = info_dict["title"]
    if not "format" in info_dict:
        info_dict["format"] = info_dict["ext"]
    # _match_entry returns a human-readable skip reason, or None to proceed.
    reason = self._match_entry(info_dict)
    if reason is not None:
        self.to_screen("[download] " + reason)
        return
    max_downloads = self.params.get("max_downloads")
    if max_downloads is not None:
        if self._num_downloads > int(max_downloads):
            raise MaxDownloadsReached()
    filename = self.prepare_filename(info_dict)
    # Forced printings
    if self.params.get("forcetitle", False):
        compat_print(info_dict["title"])
    if self.params.get("forceid", False):
        compat_print(info_dict["id"])
    if self.params.get("forceurl", False):
        # For RTMP URLs, also include the playpath
        compat_print(info_dict["url"] + info_dict.get("play_path", ""))
    if self.params.get("forcethumbnail", False) and "thumbnail" in info_dict:
        compat_print(info_dict["thumbnail"])
    if self.params.get("forcedescription", False) and "description" in info_dict:
        compat_print(info_dict["description"])
    if self.params.get("forcefilename", False) and filename is not None:
        compat_print(filename)
    if self.params.get("forceformat", False):
        compat_print(info_dict["format"])
    # Do nothing else if in simulate mode
    if self.params.get("simulate", False):
        return
    if filename is None:
        return
    try:
        dn = os.path.dirname(encodeFilename(filename))
        if dn != "" and not os.path.exists(dn):
            os.makedirs(dn)
    except (OSError, IOError) as err:
        self.report_error("unable to create directory " + compat_str(err))
        return
    if self.params.get("writedescription", False):
        try:
            descfn = filename + ".description"
            self.report_writedescription(descfn)
            with io.open(encodeFilename(descfn), "w", encoding="utf-8") as descfile:
                descfile.write(info_dict["description"])
        except (KeyError, TypeError):
            # The extractor supplied no (or a non-string) description.
            self.report_warning("There's no description to write.")
        except (OSError, IOError):
            self.report_error("Cannot write description file " + descfn)
            return
    subtitles_are_requested = any(
        [
            self.params.get("writesubtitles", False),
            self.params.get("writeautomaticsub"),
            self.params.get("allsubtitles", False),
        ]
    )
    if subtitles_are_requested and "subtitles" in info_dict and info_dict["subtitles"]:
        # subtitles download errors are already managed as troubles in relevant IE
        # that way it will silently go on when used with unsupporting IE
        subtitles = info_dict["subtitles"]
        sub_format = self.params.get("subtitlesformat")
        for sub_lang in subtitles.keys():
            sub = subtitles[sub_lang]
            if sub is None:
                continue
            try:
                sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                self.report_writesubtitles(sub_filename)
                with io.open(
                    encodeFilename(sub_filename), "w", encoding="utf-8"
                ) as subfile:
                    subfile.write(sub)
            except (OSError, IOError):
                # BUGFIX: report the subtitle filename, not the description
                # filename (`descfn` may even be unbound here when
                # --write-description was not requested).
                self.report_error("Cannot write subtitles file " + sub_filename)
                return
    if self.params.get("writeinfojson", False):
        infofn = filename + ".info.json"
        self.report_writeinfojson(infofn)
        try:
            # "urlhandle" is not JSON-serializable, so it is stripped.
            json_info_dict = dict(
                (k, v) for k, v in info_dict.items() if not k in ["urlhandle"]
            )
            write_json_file(json_info_dict, encodeFilename(infofn))
        except (OSError, IOError):
            self.report_error("Cannot write metadata to JSON file " + infofn)
            return
    if self.params.get("writethumbnail", False):
        if info_dict.get("thumbnail") is not None:
            thumb_format = determine_ext(info_dict["thumbnail"], "jpg")
            thumb_filename = filename.rpartition(".")[0] + "." + thumb_format
            self.to_screen(
                "[%s] %s: Downloading thumbnail ..."
                % (info_dict["extractor"], info_dict["id"])
            )
            uf = compat_urllib_request.urlopen(info_dict["thumbnail"])
            with open(thumb_filename, "wb") as thumbf:
                shutil.copyfileobj(uf, thumbf)
            self.to_screen(
                "[%s] %s: Writing thumbnail to: %s"
                % (info_dict["extractor"], info_dict["id"], thumb_filename)
            )
    if not self.params.get("skip_download", False):
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(filename)
        ):
            success = True
        else:
            try:
                success = self.fd._do_download(filename, info_dict)
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (
                compat_urllib_error.URLError,
                compat_http_client.HTTPException,
                socket.error,
            ) as err:
                self.report_error("unable to download video data: %s" % str(err))
                return
            except (ContentTooShortError,) as err:
                self.report_error(
                    "content too short (expected %s bytes and served %s)"
                    % (err.expected, err.downloaded)
                )
                return
        if success:
            try:
                self.post_process(filename, info_dict)
            except PostProcessingError as err:
                self.report_error("postprocessing: %s" % str(err))
                return
|
def process_info(self, info_dict):
    """Process a single resolved IE result.

    Prints any forced metadata, writes the optional side files
    (description, subtitles, info JSON, thumbnail) and finally performs
    the media download and post-processing.  Returns None early whenever
    a step fails, a filter rejects the entry, or the run is
    simulate-only.
    """
    assert info_dict.get("_type", "video") == "video"
    # Increment the download count here to match the previous behaviour.
    self.increment_downloads()
    info_dict["fulltitle"] = info_dict["title"]
    if len(info_dict["title"]) > 200:
        info_dict["title"] = info_dict["title"][:197] + "..."
    # Keep for backwards compatibility
    info_dict["stitle"] = info_dict["title"]
    if not "format" in info_dict:
        info_dict["format"] = info_dict["ext"]
    # _match_entry returns a human-readable skip reason, or None to proceed.
    reason = self._match_entry(info_dict)
    if reason is not None:
        self.to_screen("[download] " + reason)
        return
    max_downloads = self.params.get("max_downloads")
    if max_downloads is not None:
        if self._num_downloads > int(max_downloads):
            raise MaxDownloadsReached()
    filename = self.prepare_filename(info_dict)
    # Forced printings
    if self.params.get("forcetitle", False):
        compat_print(info_dict["title"])
    if self.params.get("forceid", False):
        compat_print(info_dict["id"])
    if self.params.get("forceurl", False):
        # For RTMP URLs, also include the playpath
        compat_print(info_dict["url"] + info_dict.get("play_path", ""))
    if self.params.get("forcethumbnail", False) and "thumbnail" in info_dict:
        compat_print(info_dict["thumbnail"])
    if self.params.get("forcedescription", False) and "description" in info_dict:
        compat_print(info_dict["description"])
    if self.params.get("forcefilename", False) and filename is not None:
        compat_print(filename)
    if self.params.get("forceformat", False):
        compat_print(info_dict["format"])
    # Do nothing else if in simulate mode
    if self.params.get("simulate", False):
        return
    if filename is None:
        return
    try:
        dn = os.path.dirname(encodeFilename(filename))
        if dn != "" and not os.path.exists(dn):
            os.makedirs(dn)
    except (OSError, IOError) as err:
        self.report_error("unable to create directory " + compat_str(err))
        return
    if self.params.get("writedescription", False):
        try:
            descfn = filename + ".description"
            self.report_writedescription(descfn)
            with io.open(encodeFilename(descfn), "w", encoding="utf-8") as descfile:
                descfile.write(info_dict["description"])
        except (KeyError, TypeError):
            # BUGFIX: extractors may provide no description at all;
            # previously this crashed with KeyError: 'description'
            # when --write-description was used.  Warn instead.
            self.report_warning("There's no description to write.")
        except (OSError, IOError):
            self.report_error("Cannot write description file " + descfn)
            return
    subtitles_are_requested = any(
        [
            self.params.get("writesubtitles", False),
            self.params.get("writeautomaticsub"),
            self.params.get("allsubtitles", False),
        ]
    )
    if subtitles_are_requested and "subtitles" in info_dict and info_dict["subtitles"]:
        # subtitles download errors are already managed as troubles in relevant IE
        # that way it will silently go on when used with unsupporting IE
        subtitles = info_dict["subtitles"]
        sub_format = self.params.get("subtitlesformat")
        for sub_lang in subtitles.keys():
            sub = subtitles[sub_lang]
            if sub is None:
                continue
            try:
                sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                self.report_writesubtitles(sub_filename)
                with io.open(
                    encodeFilename(sub_filename), "w", encoding="utf-8"
                ) as subfile:
                    subfile.write(sub)
            except (OSError, IOError):
                # BUGFIX: report the subtitle filename, not the description
                # filename (`descfn` may even be unbound here when
                # --write-description was not requested).
                self.report_error("Cannot write subtitles file " + sub_filename)
                return
    if self.params.get("writeinfojson", False):
        infofn = filename + ".info.json"
        self.report_writeinfojson(infofn)
        try:
            # "urlhandle" is not JSON-serializable, so it is stripped.
            json_info_dict = dict(
                (k, v) for k, v in info_dict.items() if not k in ["urlhandle"]
            )
            write_json_file(json_info_dict, encodeFilename(infofn))
        except (OSError, IOError):
            self.report_error("Cannot write metadata to JSON file " + infofn)
            return
    if self.params.get("writethumbnail", False):
        if info_dict.get("thumbnail") is not None:
            thumb_format = determine_ext(info_dict["thumbnail"], "jpg")
            thumb_filename = filename.rpartition(".")[0] + "." + thumb_format
            self.to_screen(
                "[%s] %s: Downloading thumbnail ..."
                % (info_dict["extractor"], info_dict["id"])
            )
            uf = compat_urllib_request.urlopen(info_dict["thumbnail"])
            with open(thumb_filename, "wb") as thumbf:
                shutil.copyfileobj(uf, thumbf)
            self.to_screen(
                "[%s] %s: Writing thumbnail to: %s"
                % (info_dict["extractor"], info_dict["id"], thumb_filename)
            )
    if not self.params.get("skip_download", False):
        if self.params.get("nooverwrites", False) and os.path.exists(
            encodeFilename(filename)
        ):
            success = True
        else:
            try:
                success = self.fd._do_download(filename, info_dict)
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (
                compat_urllib_error.URLError,
                compat_http_client.HTTPException,
                socket.error,
            ) as err:
                self.report_error("unable to download video data: %s" % str(err))
                return
            except (ContentTooShortError,) as err:
                self.report_error(
                    "content too short (expected %s bytes and served %s)"
                    % (err.expected, err.downloaded)
                )
                return
        if success:
            try:
                self.post_process(filename, info_dict)
            except PostProcessingError as err:
                self.report_error("postprocessing: %s" % str(err))
                return
|
https://github.com/ytdl-org/youtube-dl/issues/1277
|
youtube-dl --write-description http://www.dailymotion.com/video/xp0tg8_hjernevask-brainwashing-in-norway-english-part-1-the-gender-equality-paradox_news
[dailymotion] xp0tg8: Downloading webpage
[dailymotion] xp0tg8: Extracting information
[dailymotion] xp0tg8: Downloading embed page
[dailymotion] Using stream_h264_hq_url
[info] Writing video description to: Hjernevask ('Brainwashing') - English - Part 1 - The Gender Equality Paradox-xp0tg8.mp4.description
Traceback (most recent call last):
File "/usr/lib/python2.7/runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/home/phihag/projects/youtube-dl/youtube_dl/__main__.py", line 18, in <module>
youtube_dl.main()
File "/home/phihag/projects/youtube-dl/youtube_dl/__init__.py", line 639, in main
_real_main(argv)
File "/home/phihag/projects/youtube-dl/youtube_dl/__init__.py", line 623, in _real_main
retcode = ydl.download(all_urls)
File "/home/phihag/projects/youtube-dl/youtube_dl/YoutubeDL.py", line 573, in download
videos = self.extract_info(url)
File "/home/phihag/projects/youtube-dl/youtube_dl/YoutubeDL.py", line 327, in extract_info
return self.process_ie_result(ie_result, download=download)
File "/home/phihag/projects/youtube-dl/youtube_dl/YoutubeDL.py", line 410, in process_ie_result
for r in ie_result['entries']
File "/home/phihag/projects/youtube-dl/youtube_dl/YoutubeDL.py", line 357, in process_ie_result
self.process_info(ie_result)
File "/home/phihag/projects/youtube-dl/youtube_dl/YoutubeDL.py", line 481, in process_info
descfile.write(info_dict['description'])
KeyError: 'description'
|
KeyError
|
async def sendfile(self, writer: asyncio.StreamWriter) -> int:
    """
    Stream this blob's file contents to *writer*.

    Returns the number of bytes sent, or -1 when the transfer fails.
    Raises OSError when the blob is not readable.
    """
    if not self.is_readable():
        raise OSError("blob files cannot be read")
    with self.reader_context() as file_handle:
        try:
            sent = await self.loop.sendfile(
                writer.transport, file_handle, count=self.get_length()
            )
        except (
            ConnectionError,
            BrokenPipeError,
            RuntimeError,
            OSError,
            AttributeError,
        ):
            # Any transport/read failure is reported to the caller as -1.
            return -1
        return sent
|
async def sendfile(self, writer: asyncio.StreamWriter) -> int:
    """
    Stream this blob's file contents to *writer*.

    Returns the number of bytes sent, or -1 when the transfer fails.
    Raises OSError when the blob is not readable.
    """
    if not self.is_readable():
        raise OSError("blob files cannot be read")
    with self.reader_context() as file_handle:
        try:
            sent = await self.loop.sendfile(
                writer.transport, file_handle, count=self.get_length()
            )
        except (
            ConnectionResetError,
            BrokenPipeError,
            RuntimeError,
            OSError,
            AttributeError,
        ):
            # Any transport/read failure is reported to the caller as -1.
            return -1
        return sent
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def handle_request(self, request: BlobRequest):
    """Serve a peer's blob-protocol request on this connection.

    Collects one response per sub-request present (payment address,
    availability, price, blob download).  For a download of a verified
    blob the queued responses are flushed first and the blob bytes are
    then streamed to the peer; otherwise all responses are sent at the
    end.
    """
    addr = self.transport.get_extra_info("peername")
    peer_address, peer_port = addr
    responses = []
    address_request = request.get_address_request()
    if address_request:
        responses.append(
            BlobPaymentAddressResponse(lbrycrd_address=self.lbrycrd_address)
        )
    availability_request = request.get_availability_request()
    if availability_request:
        # Only advertise blobs that are in the completed (fully
        # downloaded and verified) set.
        responses.append(
            BlobAvailabilityResponse(
                available_blobs=list(
                    set(
                        filter(
                            lambda blob_hash: blob_hash
                            in self.blob_manager.completed_blob_hashes,
                            availability_request.requested_blobs,
                        )
                    )
                )
            )
        )
    price_request = request.get_price_request()
    if price_request:
        # Every proposed data payment rate is accepted unconditionally.
        responses.append(BlobPriceResponse(blob_data_payment_rate="RATE_ACCEPTED"))
    download_request = request.get_blob_request()
    if download_request:
        blob = self.blob_manager.get_blob(download_request.requested_blob)
        if blob.get_is_verified():
            incoming_blob = {"blob_hash": blob.blob_hash, "length": blob.length}
            responses.append(BlobDownloadResponse(incoming_blob=incoming_blob))
            # Flush all queued responses before streaming the raw blob
            # bytes on the same transport.
            self.send_response(responses)
            blob_hash = blob.blob_hash[:8]
            log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
            self.started_transfer.set()
            try:
                sent = await asyncio.wait_for(
                    blob.sendfile(self), self.transfer_timeout, loop=self.loop
                )
                if sent and sent > 0:
                    self.blob_manager.connection_manager.sent_data(
                        self.peer_address_and_port, sent
                    )
                    log.info(
                        "sent %s (%i bytes) to %s:%i",
                        blob_hash,
                        sent,
                        peer_address,
                        peer_port,
                    )
                else:
                    # sendfile reported failure (-1) or nothing sent:
                    # drop the connection and stop handling this request.
                    self.close()
                    log.debug(
                        "stopped sending %s to %s:%i",
                        blob_hash,
                        peer_address,
                        peer_port,
                    )
                    return
            except (OSError, ValueError, asyncio.TimeoutError) as err:
                if isinstance(err, asyncio.TimeoutError):
                    log.debug(
                        "timed out sending blob %s to %s", blob_hash, peer_address
                    )
                else:
                    log.warning(
                        "could not read blob %s to send %s:%i",
                        blob_hash,
                        peer_address,
                        peer_port,
                    )
                self.close()
                return
            finally:
                # Always wake anything waiting on the transfer outcome,
                # on both the success and the early-return paths.
                self.transfer_finished.set()
        else:
            log.info(
                "don't have %s to send %s:%i",
                blob.blob_hash[:8],
                peer_address,
                peer_port,
            )
    # Responses not already flushed by the download path are sent here;
    # skipped if the transport is shutting down.
    if responses and not self.transport.is_closing():
        self.send_response(responses)
|
async def handle_request(self, request: BlobRequest):
    """Serve a peer's blob-protocol request on this connection.

    Collects one response per sub-request present (payment address,
    availability, price, blob download).  For a download of a verified
    blob the queued responses are flushed first and the blob bytes are
    then streamed to the peer.
    """
    addr = self.transport.get_extra_info("peername")
    peer_address, peer_port = addr
    responses = []
    address_request = request.get_address_request()
    if address_request:
        responses.append(
            BlobPaymentAddressResponse(lbrycrd_address=self.lbrycrd_address)
        )
    availability_request = request.get_availability_request()
    if availability_request:
        # Only advertise blobs that are in the completed (fully
        # downloaded and verified) set.
        responses.append(
            BlobAvailabilityResponse(
                available_blobs=list(
                    set(
                        filter(
                            lambda blob_hash: blob_hash
                            in self.blob_manager.completed_blob_hashes,
                            availability_request.requested_blobs,
                        )
                    )
                )
            )
        )
    price_request = request.get_price_request()
    if price_request:
        # Every proposed data payment rate is accepted unconditionally.
        responses.append(BlobPriceResponse(blob_data_payment_rate="RATE_ACCEPTED"))
    download_request = request.get_blob_request()
    if download_request:
        blob = self.blob_manager.get_blob(download_request.requested_blob)
        if blob.get_is_verified():
            incoming_blob = {"blob_hash": blob.blob_hash, "length": blob.length}
            responses.append(BlobDownloadResponse(incoming_blob=incoming_blob))
            # Flush all queued responses before streaming the raw blob
            # bytes on the same transport.
            self.send_response(responses)
            blob_hash = blob.blob_hash[:8]
            log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
            self.started_transfer.set()
            try:
                sent = await asyncio.wait_for(
                    blob.sendfile(self), self.transfer_timeout, loop=self.loop
                )
                if sent and sent > 0:
                    self.blob_manager.connection_manager.sent_data(
                        self.peer_address_and_port, sent
                    )
                    log.info(
                        "sent %s (%i bytes) to %s:%i",
                        blob_hash,
                        sent,
                        peer_address,
                        peer_port,
                    )
                else:
                    log.debug(
                        "stopped sending %s to %s:%i",
                        blob_hash,
                        peer_address,
                        peer_port,
                    )
            except (OSError, asyncio.TimeoutError) as err:
                if isinstance(err, asyncio.TimeoutError):
                    log.debug(
                        "timed out sending blob %s to %s", blob_hash, peer_address
                    )
                else:
                    log.warning(
                        "could not read blob %s to send %s:%i",
                        blob_hash,
                        peer_address,
                        peer_port,
                    )
                self.close()
            finally:
                # Always wake anything waiting on the transfer outcome.
                self.transfer_finished.set()
        else:
            log.info(
                "don't have %s to send %s:%i",
                blob.blob_hash[:8],
                peer_address,
                peer_port,
            )
    # NOTE(review): after a completed download the responses were already
    # flushed above, yet `responses` is still non-empty here, so they
    # appear to be sent a second time; verify whether this duplicate
    # send is intentional.
    if responses:
        self.send_response(responses)
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_wallet_send(
    self,
    amount,
    addresses,
    wallet_id=None,
    change_account_id=None,
    funding_account_ids=None,
    preview=False,
):
    """
    Send the same number of credits to multiple addresses using all accounts in wallet to
    fund the transaction and the default account to receive any change.
    Usage:
        wallet_send <amount> <addresses>... [--wallet_id=<wallet_id>] [--preview]
                    [--change_account_id=None] [--funding_account_ids=<funding_account_ids>...]
    Options:
        --wallet_id=<wallet_id>                   : (str) restrict operation to specific wallet
        --change_account_id=<wallet_id>           : (str) account where change will go
        --funding_account_ids=<funding_account_ids> : (str) accounts to fund the transaction
        --preview                                 : (bool) do not broadcast the transaction
    Returns: {Transaction}
    """
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    account = wallet.get_account_or_default(change_account_id)
    accounts = wallet.get_accounts_or_all(funding_account_ids)
    # Convert the human-readable amount into dewies, raising on bad input.
    amount = self.get_dewies_or_error("amount", amount)
    # A single address may arrive as a bare string; normalise to a list.
    if addresses and not isinstance(addresses, list):
        addresses = [addresses]
    outputs = []
    for address in addresses:
        # Raises if the address is not valid for this ledger.
        self.valid_address_or_error(address)
        outputs.append(
            Output.pay_pubkey_hash(amount, self.ledger.address_to_hash160(address))
        )
    # Every listed address receives `amount`; inputs are drawn from
    # `accounts` and change is returned to `account`.
    tx = await Transaction.create([], outputs, accounts, account)
    if not preview:
        await self.ledger.broadcast(tx)
        # Analytics is fire-and-forget so it cannot delay or break the
        # RPC response.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_credits_sent()
        )
    else:
        # Preview only: release the reserved outputs instead of broadcasting.
        await self.ledger.release_tx(tx)
    return tx
|
async def jsonrpc_wallet_send(
    self,
    amount,
    addresses,
    wallet_id=None,
    change_account_id=None,
    funding_account_ids=None,
    preview=False,
):
    """
    Send the same number of credits to multiple addresses using all accounts in wallet to
    fund the transaction and the default account to receive any change.
    Usage:
        wallet_send <amount> <addresses>... [--wallet_id=<wallet_id>] [--preview]
                    [--change_account_id=None] [--funding_account_ids=<funding_account_ids>...]
    Options:
        --wallet_id=<wallet_id>                   : (str) restrict operation to specific wallet
        --change_account_id=<wallet_id>           : (str) account where change will go
        --funding_account_ids=<funding_account_ids> : (str) accounts to fund the transaction
        --preview                                 : (bool) do not broadcast the transaction
    Returns: {Transaction}
    """
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    account = wallet.get_account_or_default(change_account_id)
    accounts = wallet.get_accounts_or_all(funding_account_ids)
    # Convert the human-readable amount into dewies, raising on bad input.
    amount = self.get_dewies_or_error("amount", amount)
    # A single address may arrive as a bare string; normalise to a list.
    if addresses and not isinstance(addresses, list):
        addresses = [addresses]
    outputs = []
    for address in addresses:
        # Raises if the address is not valid for this ledger.
        self.valid_address_or_error(address)
        outputs.append(
            Output.pay_pubkey_hash(amount, self.ledger.address_to_hash160(address))
        )
    # Every listed address receives `amount`; inputs are drawn from
    # `accounts` and change is returned to `account`.
    tx = await Transaction.create([], outputs, accounts, account)
    if not preview:
        await self.ledger.broadcast(tx)
        # BUGFIX: fire analytics in the background instead of awaiting it;
        # awaiting delayed the RPC response and a failure in the
        # non-essential analytics call could break the whole send.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_credits_sent()
        )
    else:
        # Preview only: release the reserved outputs instead of broadcasting.
        await self.ledger.release_tx(tx)
    return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_channel_create(
    self,
    name,
    bid,
    allow_duplicate_name=False,
    account_id=None,
    wallet_id=None,
    claim_address=None,
    funding_account_ids=None,
    preview=False,
    blocking=False,
    **kwargs,
):
    """
    Create a new channel by generating a channel private key and establishing an '@' prefixed claim.
    Usage:
        channel_create (<name> | --name=<name>) (<bid> | --bid=<bid>)
                       [--allow_duplicate_name=<allow_duplicate_name>]
                       [--title=<title>] [--description=<description>] [--email=<email>]
                       [--website_url=<website_url>] [--featured=<featured>...]
                       [--tags=<tags>...] [--languages=<languages>...] [--locations=<locations>...]
                       [--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
                       [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                       [--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
                       [--preview] [--blocking]
    Options:
        --name=<name>                  : (str) name of the channel prefixed with '@'
        --bid=<bid>                    : (decimal) amount to back the claim
    --allow_duplicate_name=<allow_duplicate_name> : (bool) create new channel even if one already exists with
                                          given name. default: false.
        --title=<title>                : (str) title of the publication
        --description=<description>    : (str) description of the publication
        --email=<email>                : (str) email of channel owner
        --website_url=<website_url>    : (str) website url
        --featured=<featured>          : (list) claim_ids of featured content in channel
        --tags=<tags>                  : (list) content tags
        --languages=<languages>        : (list) languages used by the channel,
                                                using RFC 5646 format, eg:
                                                for English `--languages=en`
                                                for Spanish (Spain) `--languages=es-ES`
                                                for Spanish (Mexican) `--languages=es-MX`
                                                for Chinese (Simplified) `--languages=zh-Hans`
                                                for Chinese (Traditional) `--languages=zh-Hant`
        --locations=<locations>        : (list) locations of the channel, consisting of 2 letter
                                                `country` code and a `state`, `city` and a postal
                                                `code` along with a `latitude` and `longitude`.
                                                for JSON RPC: pass a dictionary with aforementioned
                                                attributes as keys, eg:
                                                    ...
                                                    "locations": [{'country': 'US', 'state': 'NH'}]
                                                    ...
                                                for command line: pass a colon delimited list
                                                with values in the following order:
                                                    "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                                making sure to include colon for blank values, for
                                                example to provide only the city:
                                                    ... --locations="::Manchester"
                                                with all values set:
                                                    ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                                optionally, you can just pass the "LATITUDE:LONGITUDE":
                                                    ... --locations="42.990605:-71.460989"
                                                finally, you can also pass JSON string of dictionary
                                                on the command line as you would via JSON RPC
                                                    ... --locations="{'country': 'US', 'state': 'NH'}"
        --thumbnail_url=<thumbnail_url>: (str) thumbnail url
        --cover_url=<cover_url>        : (str) url of cover image
        --account_id=<account_id>      : (str) account to use for holding the transaction
        --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --claim_address=<claim_address>: (str) address where the channel is sent to, if not specified
                                               it will be determined automatically from the account
        --preview                      : (bool) do not broadcast the transaction
        --blocking                     : (bool) wait until transaction is in mempool
    Returns: {Transaction}
    """
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    account = wallet.get_account_or_default(account_id)
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    # Raises if the name is not a valid '@'-prefixed channel name.
    self.valid_channel_name_or_error(name)
    # Bid must be a strictly positive amount, converted to dewies.
    amount = self.get_dewies_or_error("bid", bid, positive_value=True)
    claim_address = await self.get_receiving_address(claim_address, account)
    existing_channels = await self.ledger.get_channels(
        accounts=wallet.accounts, claim_name=name
    )
    if len(existing_channels) > 0:
        if not allow_duplicate_name:
            raise Exception(
                f"You already have a channel under the name '{name}'. "
                f"Use --allow-duplicate-name flag to override."
            )
    # Remaining keyword arguments become channel metadata fields.
    claim = Claim()
    claim.channel.update(**kwargs)
    tx = await Transaction.claim_create(
        name, claim, amount, claim_address, funding_accounts, funding_accounts[0]
    )
    txo = tx.outputs[0]
    # Each new channel gets a freshly generated signing key.
    txo.generate_channel_private_key()
    await tx.sign(funding_accounts)
    if not preview:
        # The channel key is saved to the wallet before broadcasting
        # (presumably so it is not lost if broadcasting fails — verify).
        account.add_channel_private_key(txo.private_key)
        wallet.save()
        await self.broadcast_or_release(tx, blocking)
        await self.storage.save_claims(
            [
                self._old_get_temp_claim_info(
                    tx, txo, claim_address, claim, name, dewies_to_lbc(amount)
                )
            ]
        )
        # Analytics is fire-and-forget so it cannot delay the RPC response.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_new_channel()
        )
    else:
        # Preview only: release the reserved outputs instead of broadcasting.
        await account.ledger.release_tx(tx)
    return tx
|
async def jsonrpc_channel_create(
        self,
        name,
        bid,
        allow_duplicate_name=False,
        account_id=None,
        wallet_id=None,
        claim_address=None,
        funding_account_ids=None,
        preview=False,
        blocking=False,
        **kwargs,
    ):
        """
        Create a new channel by generating a channel private key and establishing an '@' prefixed claim.
        Usage:
            channel_create (<name> | --name=<name>) (<bid> | --bid=<bid>)
                           [--allow_duplicate_name=<allow_duplicate_name>]
                           [--title=<title>] [--description=<description>] [--email=<email>]
                           [--website_url=<website_url>] [--featured=<featured>...]
                           [--tags=<tags>...] [--languages=<languages>...] [--locations=<locations>...]
                           [--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
                           [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                           [--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
                           [--preview] [--blocking]
        Options:
            --name=<name>                  : (str) name of the channel prefixed with '@'
            --bid=<bid>                    : (decimal) amount to back the claim
            --allow_duplicate_name=<allow_duplicate_name> : (bool) create new channel even if one already exists with
                                              given name. default: false.
            --title=<title>                : (str) title of the publication
            --description=<description>    : (str) description of the publication
            --email=<email>                : (str) email of channel owner
            --website_url=<website_url>    : (str) website url
            --featured=<featured>          : (list) claim_ids of featured content in channel
            --tags=<tags>                  : (list) content tags
            --languages=<languages>        : (list) languages used by the channel,
                                              using RFC 5646 format, eg:
                                              for English `--languages=en`
                                              for Spanish (Spain) `--languages=es-ES`
                                              for Spanish (Mexican) `--languages=es-MX`
                                              for Chinese (Simplified) `--languages=zh-Hans`
                                              for Chinese (Traditional) `--languages=zh-Hant`
            --locations=<locations>        : (list) locations of the channel, consisting of 2 letter
                                              `country` code and a `state`, `city` and a postal
                                              `code` along with a `latitude` and `longitude`.
                                              for JSON RPC: pass a dictionary with aforementioned
                                              attributes as keys, eg:
                                                  ...
                                                  "locations": [{'country': 'US', 'state': 'NH'}]
                                                  ...
                                              for command line: pass a colon delimited list
                                              with values in the following order:
                                                    "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                              making sure to include colon for blank values, for
                                              example to provide only the city:
                                                    ... --locations="::Manchester"
                                              with all values set:
                                                    ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                              optionally, you can just pass the "LATITUDE:LONGITUDE":
                                                    ... --locations="42.990605:-71.460989"
                                              finally, you can also pass JSON string of dictionary
                                              on the command line as you would via JSON RPC
                                                    ... --locations="{'country': 'US', 'state': 'NH'}"
            --thumbnail_url=<thumbnail_url>: (str) thumbnail url
            --cover_url=<cover_url>        : (str) url of cover image
            --account_id=<account_id>      : (str) account to use for holding the transaction
            --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
            --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
            --claim_address=<claim_address>: (str) address where the channel is sent to, if not specified
                                              it will be determined automatically from the account
            --preview                      : (bool) do not broadcast the transaction
            --blocking                     : (bool) wait until transaction is in mempool
        Returns: {Transaction}
        """
        # Resolve the wallet/account context; funding accounts provide the tx inputs.
        wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
        assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
        account = wallet.get_account_or_default(account_id)
        funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
        # Channel names must be '@'-prefixed and well-formed; bid must be a positive amount.
        self.valid_channel_name_or_error(name)
        amount = self.get_dewies_or_error("bid", bid, positive_value=True)
        claim_address = await self.get_receiving_address(claim_address, account)
        # Refuse accidental duplicate channel names unless explicitly allowed.
        existing_channels = await self.ledger.get_channels(
            accounts=wallet.accounts, claim_name=name
        )
        if len(existing_channels) > 0:
            if not allow_duplicate_name:
                raise Exception(
                    f"You already have a channel under the name '{name}'. "
                    f"Use --allow-duplicate-name flag to override."
                )
        claim = Claim()
        claim.channel.update(**kwargs)
        tx = await Transaction.claim_create(
            name, claim, amount, claim_address, funding_accounts, funding_accounts[0]
        )
        txo = tx.outputs[0]
        # Every channel gets a fresh signing key; it is stored in the holding account below.
        txo.generate_channel_private_key()
        await tx.sign(funding_accounts)
        if not preview:
            # Persist the signing key BEFORE broadcasting so a crash can't orphan the channel.
            account.add_channel_private_key(txo.private_key)
            wallet.save()
            await self.broadcast_or_release(tx, blocking)
            await self.storage.save_claims(
                [
                    self._old_get_temp_claim_info(
                        tx, txo, claim_address, claim, name, dewies_to_lbc(amount)
                    )
                ]
            )
            # Fire-and-forget: analytics must never block or fail the RPC response
            # (matches the pattern used by the other claim RPCs in this module).
            self.component_manager.loop.create_task(
                self.analytics_manager.send_new_channel()
            )
        else:
            # Preview: release the reserved inputs so they can be spent elsewhere.
            await account.ledger.release_tx(tx)
        return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_channel_update(
        self,
        claim_id,
        bid=None,
        account_id=None,
        wallet_id=None,
        claim_address=None,
        funding_account_ids=None,
        new_signing_key=False,
        preview=False,
        blocking=False,
        replace=False,
        **kwargs,
    ):
        """
        Update an existing channel claim.
        Usage:
            channel_update (<claim_id> | --claim_id=<claim_id>) [<bid> | --bid=<bid>]
                           [--title=<title>] [--description=<description>] [--email=<email>]
                           [--website_url=<website_url>]
                           [--featured=<featured>...] [--clear_featured]
                           [--tags=<tags>...] [--clear_tags]
                           [--languages=<languages>...] [--clear_languages]
                           [--locations=<locations>...] [--clear_locations]
                           [--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
                           [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                           [--claim_address=<claim_address>] [--new_signing_key]
                           [--funding_account_ids=<funding_account_ids>...]
                           [--preview] [--blocking] [--replace]
        Options:
            --claim_id=<claim_id>          : (str) claim_id of the channel to update
            --bid=<bid>                    : (decimal) amount to back the claim
            --title=<title>                : (str) title of the publication
            --description=<description>    : (str) description of the publication
            --email=<email>                : (str) email of channel owner
            --website_url=<website_url>    : (str) website url
            --featured=<featured>          : (list) claim_ids of featured content in channel
            --clear_featured               : (bool) clear existing featured content (prior to adding new ones)
            --tags=<tags>                  : (list) add content tags
            --clear_tags                   : (bool) clear existing tags (prior to adding new ones)
            --languages=<languages>        : (list) languages used by the channel,
                                              using RFC 5646 format, eg:
                                              for English `--languages=en`
                                              for Spanish (Spain) `--languages=es-ES`
                                              for Spanish (Mexican) `--languages=es-MX`
                                              for Chinese (Simplified) `--languages=zh-Hans`
                                              for Chinese (Traditional) `--languages=zh-Hant`
            --clear_languages              : (bool) clear existing languages (prior to adding new ones)
            --locations=<locations>        : (list) locations of the channel, consisting of 2 letter
                                              `country` code and a `state`, `city` and a postal
                                              `code` along with a `latitude` and `longitude`.
                                              for JSON RPC: pass a dictionary with aforementioned
                                              attributes as keys, eg:
                                                  ...
                                                  "locations": [{'country': 'US', 'state': 'NH'}]
                                                  ...
                                              for command line: pass a colon delimited list
                                              with values in the following order:
                                                    "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                              making sure to include colon for blank values, for
                                              example to provide only the city:
                                                    ... --locations="::Manchester"
                                              with all values set:
                                                    ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                              optionally, you can just pass the "LATITUDE:LONGITUDE":
                                                    ... --locations="42.990605:-71.460989"
                                              finally, you can also pass JSON string of dictionary
                                              on the command line as you would via JSON RPC
                                                    ... --locations="{'country': 'US', 'state': 'NH'}"
            --clear_locations              : (bool) clear existing locations (prior to adding new ones)
            --thumbnail_url=<thumbnail_url>: (str) thumbnail url
            --cover_url=<cover_url>        : (str) url of cover image
            --account_id=<account_id>      : (str) account in which to look for channel (default: all)
            --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
            --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
            --claim_address=<claim_address>: (str) address where the channel is sent
            --new_signing_key              : (bool) generate a new signing key, will invalidate all previous publishes
            --preview                      : (bool) do not broadcast the transaction
            --blocking                     : (bool) wait until transaction is in mempool
            --replace                      : (bool) instead of modifying specific values on
                                              the channel, this will clear all existing values
                                              and only save passed in values, useful for form
                                              submissions where all values are always set
        Returns: {Transaction}
        """
        wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
        assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
        funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
        # If no account is given, search all accounts but default the holding account.
        if account_id:
            account = wallet.get_account_or_error(account_id)
            accounts = [account]
        else:
            account = wallet.default_account
            accounts = wallet.accounts
        # The claim being updated must exist exactly once in the searched accounts
        # and must actually be a channel claim.
        existing_channels = await self.ledger.get_claims(
            wallet=wallet, accounts=accounts, claim_id=claim_id
        )
        if len(existing_channels) != 1:
            account_ids = ", ".join(f"'{account.id}'" for account in accounts)
            raise Exception(
                f"Can't find the channel '{claim_id}' in account(s) {account_ids}."
            )
        old_txo = existing_channels[0]
        if not old_txo.claim.is_channel:
            raise Exception(
                f"A claim with id '{claim_id}' was found but it is not a channel."
            )
        # Unspecified bid/address fall back to the values on the existing claim.
        if bid is not None:
            amount = self.get_dewies_or_error("bid", bid, positive_value=True)
        else:
            amount = old_txo.amount
        if claim_address is not None:
            self.valid_address_or_error(claim_address)
        else:
            claim_address = old_txo.get_address(account.ledger)
        # --replace starts from a blank claim (keeping only the public key) instead
        # of merging the new fields into a copy of the existing claim metadata.
        if replace:
            claim = Claim()
            claim.channel.public_key_bytes = old_txo.claim.channel.public_key_bytes
        else:
            claim = Claim.from_bytes(old_txo.claim.to_bytes())
        claim.channel.update(**kwargs)
        tx = await Transaction.claim_update(
            old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0]
        )
        new_txo = tx.outputs[0]
        # Reuse the existing signing key unless the caller explicitly rotates it;
        # rotation invalidates signatures on all previously published content.
        if new_signing_key:
            new_txo.generate_channel_private_key()
        else:
            new_txo.private_key = old_txo.private_key
        new_txo.script.generate()
        await tx.sign(funding_accounts)
        if not preview:
            # Persist the signing key before broadcasting so it can't be lost.
            account.add_channel_private_key(new_txo.private_key)
            wallet.save()
            await self.broadcast_or_release(tx, blocking)
            await self.storage.save_claims(
                [
                    self._old_get_temp_claim_info(
                        tx,
                        new_txo,
                        claim_address,
                        new_txo.claim,
                        new_txo.claim_name,
                        dewies_to_lbc(amount),
                    )
                ]
            )
            # Fire-and-forget analytics so a slow/failed report can't block the RPC.
            self.component_manager.loop.create_task(
                self.analytics_manager.send_new_channel()
            )
        else:
            # Preview: release the reserved inputs instead of broadcasting.
            await account.ledger.release_tx(tx)
        return tx
|
async def jsonrpc_channel_update(
        self,
        claim_id,
        bid=None,
        account_id=None,
        wallet_id=None,
        claim_address=None,
        funding_account_ids=None,
        new_signing_key=False,
        preview=False,
        blocking=False,
        replace=False,
        **kwargs,
    ):
        """
        Update an existing channel claim.
        Usage:
            channel_update (<claim_id> | --claim_id=<claim_id>) [<bid> | --bid=<bid>]
                           [--title=<title>] [--description=<description>] [--email=<email>]
                           [--website_url=<website_url>]
                           [--featured=<featured>...] [--clear_featured]
                           [--tags=<tags>...] [--clear_tags]
                           [--languages=<languages>...] [--clear_languages]
                           [--locations=<locations>...] [--clear_locations]
                           [--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
                           [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                           [--claim_address=<claim_address>] [--new_signing_key]
                           [--funding_account_ids=<funding_account_ids>...]
                           [--preview] [--blocking] [--replace]
        Options:
            --claim_id=<claim_id>          : (str) claim_id of the channel to update
            --bid=<bid>                    : (decimal) amount to back the claim
            --title=<title>                : (str) title of the publication
            --description=<description>    : (str) description of the publication
            --email=<email>                : (str) email of channel owner
            --website_url=<website_url>    : (str) website url
            --featured=<featured>          : (list) claim_ids of featured content in channel
            --clear_featured               : (bool) clear existing featured content (prior to adding new ones)
            --tags=<tags>                  : (list) add content tags
            --clear_tags                   : (bool) clear existing tags (prior to adding new ones)
            --languages=<languages>        : (list) languages used by the channel,
                                              using RFC 5646 format, eg:
                                              for English `--languages=en`
                                              for Spanish (Spain) `--languages=es-ES`
                                              for Spanish (Mexican) `--languages=es-MX`
                                              for Chinese (Simplified) `--languages=zh-Hans`
                                              for Chinese (Traditional) `--languages=zh-Hant`
            --clear_languages              : (bool) clear existing languages (prior to adding new ones)
            --locations=<locations>        : (list) locations of the channel, consisting of 2 letter
                                              `country` code and a `state`, `city` and a postal
                                              `code` along with a `latitude` and `longitude`.
                                              for JSON RPC: pass a dictionary with aforementioned
                                              attributes as keys, eg:
                                                  ...
                                                  "locations": [{'country': 'US', 'state': 'NH'}]
                                                  ...
                                              for command line: pass a colon delimited list
                                              with values in the following order:
                                                    "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                              making sure to include colon for blank values, for
                                              example to provide only the city:
                                                    ... --locations="::Manchester"
                                              with all values set:
                                                    ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                              optionally, you can just pass the "LATITUDE:LONGITUDE":
                                                    ... --locations="42.990605:-71.460989"
                                              finally, you can also pass JSON string of dictionary
                                              on the command line as you would via JSON RPC
                                                    ... --locations="{'country': 'US', 'state': 'NH'}"
            --clear_locations              : (bool) clear existing locations (prior to adding new ones)
            --thumbnail_url=<thumbnail_url>: (str) thumbnail url
            --cover_url=<cover_url>        : (str) url of cover image
            --account_id=<account_id>      : (str) account in which to look for channel (default: all)
            --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
            --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
            --claim_address=<claim_address>: (str) address where the channel is sent
            --new_signing_key              : (bool) generate a new signing key, will invalidate all previous publishes
            --preview                      : (bool) do not broadcast the transaction
            --blocking                     : (bool) wait until transaction is in mempool
            --replace                      : (bool) instead of modifying specific values on
                                              the channel, this will clear all existing values
                                              and only save passed in values, useful for form
                                              submissions where all values are always set
        Returns: {Transaction}
        """
        wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
        assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
        funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
        # If no account is given, search all accounts but default the holding account.
        if account_id:
            account = wallet.get_account_or_error(account_id)
            accounts = [account]
        else:
            account = wallet.default_account
            accounts = wallet.accounts
        # The claim being updated must exist exactly once and must be a channel.
        existing_channels = await self.ledger.get_claims(
            wallet=wallet, accounts=accounts, claim_id=claim_id
        )
        if len(existing_channels) != 1:
            account_ids = ", ".join(f"'{account.id}'" for account in accounts)
            raise Exception(
                f"Can't find the channel '{claim_id}' in account(s) {account_ids}."
            )
        old_txo = existing_channels[0]
        if not old_txo.claim.is_channel:
            raise Exception(
                f"A claim with id '{claim_id}' was found but it is not a channel."
            )
        # Unspecified bid/address fall back to the values on the existing claim.
        if bid is not None:
            amount = self.get_dewies_or_error("bid", bid, positive_value=True)
        else:
            amount = old_txo.amount
        if claim_address is not None:
            self.valid_address_or_error(claim_address)
        else:
            claim_address = old_txo.get_address(account.ledger)
        # --replace starts from a blank claim (keeping only the public key) instead
        # of merging the new fields into a copy of the existing claim metadata.
        if replace:
            claim = Claim()
            claim.channel.public_key_bytes = old_txo.claim.channel.public_key_bytes
        else:
            claim = Claim.from_bytes(old_txo.claim.to_bytes())
        claim.channel.update(**kwargs)
        tx = await Transaction.claim_update(
            old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0]
        )
        new_txo = tx.outputs[0]
        # Reuse the existing signing key unless the caller explicitly rotates it;
        # rotation invalidates signatures on all previously published content.
        if new_signing_key:
            new_txo.generate_channel_private_key()
        else:
            new_txo.private_key = old_txo.private_key
        new_txo.script.generate()
        await tx.sign(funding_accounts)
        if not preview:
            # Persist the signing key before broadcasting so it can't be lost.
            account.add_channel_private_key(new_txo.private_key)
            wallet.save()
            await self.broadcast_or_release(tx, blocking)
            await self.storage.save_claims(
                [
                    self._old_get_temp_claim_info(
                        tx,
                        new_txo,
                        claim_address,
                        new_txo.claim,
                        new_txo.claim_name,
                        dewies_to_lbc(amount),
                    )
                ]
            )
            # Fire-and-forget: analytics must never block or fail the RPC response
            # (matches the pattern used by the other claim RPCs in this module).
            self.component_manager.loop.create_task(
                self.analytics_manager.send_new_channel()
            )
        else:
            # Preview: release the reserved inputs instead of broadcasting.
            await account.ledger.release_tx(tx)
        return tx
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_channel_abandon(
        self,
        claim_id=None,
        txid=None,
        nout=None,
        account_id=None,
        wallet_id=None,
        preview=False,
        blocking=True,
    ):
        """
        Abandon one of my channel claims.
        Usage:
            channel_abandon [<claim_id> | --claim_id=<claim_id>]
                            [<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
                            [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                            [--preview] [--blocking]
        Options:
            --claim_id=<claim_id>     : (str) claim_id of the claim to abandon
            --txid=<txid>             : (str) txid of the claim to abandon
            --nout=<nout>             : (int) nout of the claim to abandon
            --account_id=<account_id> : (str) id of the account to use
            --wallet_id=<wallet_id>   : (str) restrict operation to specific wallet
            --preview                 : (bool) do not broadcast the transaction
            --blocking                : (bool) wait until abandon is in mempool
        Returns: {Transaction}
        """
        wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
        assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
        # If no account is given, search all accounts but use the default for change.
        if account_id:
            account = wallet.get_account_or_error(account_id)
            accounts = [account]
        else:
            account = wallet.default_account
            accounts = wallet.accounts
        # The claim may be addressed either by claim_id or by its txid:nout outpoint.
        if txid is not None and nout is not None:
            claims = await self.ledger.get_claims(
                wallet=wallet, accounts=accounts, **{"txo.txid": txid, "txo.position": nout}
            )
        elif claim_id is not None:
            claims = await self.ledger.get_claims(
                wallet=wallet, accounts=accounts, claim_id=claim_id
            )
        else:
            raise Exception("Must specify claim_id, or txid and nout")
        if not claims:
            raise Exception("No claim found for the specified claim_id or txid:nout")
        # Abandoning = spending the claim outputs back to the account with no new claim.
        tx = await Transaction.create(
            [Input.spend(txo) for txo in claims], [], [account], account
        )
        if not preview:
            await self.broadcast_or_release(tx, blocking)
            # Fire-and-forget analytics so a slow/failed report can't block the RPC.
            self.component_manager.loop.create_task(
                self.analytics_manager.send_claim_action("abandon")
            )
        else:
            # Preview: release the reserved inputs instead of broadcasting.
            await account.ledger.release_tx(tx)
        return tx
|
async def jsonrpc_channel_abandon(
        self,
        claim_id=None,
        txid=None,
        nout=None,
        account_id=None,
        wallet_id=None,
        preview=False,
        blocking=True,
    ):
        """
        Abandon one of my channel claims.
        Usage:
            channel_abandon [<claim_id> | --claim_id=<claim_id>]
                            [<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
                            [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                            [--preview] [--blocking]
        Options:
            --claim_id=<claim_id>     : (str) claim_id of the claim to abandon
            --txid=<txid>             : (str) txid of the claim to abandon
            --nout=<nout>             : (int) nout of the claim to abandon
            --account_id=<account_id> : (str) id of the account to use
            --wallet_id=<wallet_id>   : (str) restrict operation to specific wallet
            --preview                 : (bool) do not broadcast the transaction
            --blocking                : (bool) wait until abandon is in mempool
        Returns: {Transaction}
        """
        wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
        assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
        # If no account is given, search all accounts but use the default for change.
        if account_id:
            account = wallet.get_account_or_error(account_id)
            accounts = [account]
        else:
            account = wallet.default_account
            accounts = wallet.accounts
        # The claim may be addressed either by claim_id or by its txid:nout outpoint.
        if txid is not None and nout is not None:
            claims = await self.ledger.get_claims(
                wallet=wallet, accounts=accounts, **{"txo.txid": txid, "txo.position": nout}
            )
        elif claim_id is not None:
            claims = await self.ledger.get_claims(
                wallet=wallet, accounts=accounts, claim_id=claim_id
            )
        else:
            raise Exception("Must specify claim_id, or txid and nout")
        if not claims:
            raise Exception("No claim found for the specified claim_id or txid:nout")
        # Abandoning = spending the claim outputs back to the account with no new claim.
        tx = await Transaction.create(
            [Input.spend(txo) for txo in claims], [], [account], account
        )
        if not preview:
            await self.broadcast_or_release(tx, blocking)
            # Fire-and-forget: analytics must never block or fail the RPC response
            # (matches the pattern used by the other claim RPCs in this module).
            self.component_manager.loop.create_task(
                self.analytics_manager.send_claim_action("abandon")
            )
        else:
            # Preview: release the reserved inputs instead of broadcasting.
            await account.ledger.release_tx(tx)
        return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_stream_repost(
        self,
        name,
        bid,
        claim_id,
        allow_duplicate_name=False,
        channel_id=None,
        channel_name=None,
        channel_account_id=None,
        account_id=None,
        wallet_id=None,
        claim_address=None,
        funding_account_ids=None,
        preview=False,
        blocking=False,
    ):
        """
        Creates a claim that references an existing stream by its claim id.
        Usage:
            stream_repost (<name> | --name=<name>) (<bid> | --bid=<bid>) (<claim_id> | --claim_id=<claim_id>)
                    [--allow_duplicate_name=<allow_duplicate_name>]
                    [--channel_id=<channel_id> | --channel_name=<channel_name>]
                    [--channel_account_id=<channel_account_id>...]
                    [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                    [--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
                    [--preview] [--blocking]
        Options:
            --name=<name>                  : (str) name of the content (can only consist of a-z A-Z 0-9 and -(dash))
            --bid=<bid>                    : (decimal) amount to back the claim
            --claim_id=<claim_id>          : (str) id of the claim being reposted
            --allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with
                                              given name. default: false.
            --channel_id=<channel_id>      : (str) claim id of the publisher channel
            --channel_name=<channel_name>  : (str) name of the publisher channel
            --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
                                                   for channel certificates, defaults to all accounts.
            --account_id=<account_id>      : (str) account to use for holding the transaction
            --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
            --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
            --claim_address=<claim_address>: (str) address where the claim is sent to, if not specified
                                              it will be determined automatically from the account
            --preview                      : (bool) do not broadcast the transaction
            --blocking                     : (bool) wait until transaction is in mempool
        Returns: {Transaction}
        """
        wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
        self.valid_stream_name_or_error(name)
        account = wallet.get_account_or_default(account_id)
        funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
        # Optional publisher channel; when present the repost is signed by it.
        channel = await self.get_channel_or_none(
            wallet, channel_account_id, channel_id, channel_name, for_signing=True
        )
        amount = self.get_dewies_or_error("bid", bid, positive_value=True)
        claim_address = await self.get_receiving_address(claim_address, account)
        # Refuse accidental duplicate claim names unless explicitly allowed.
        claims = await account.get_claims(claim_name=name)
        if len(claims) > 0:
            if not allow_duplicate_name:
                raise Exception(
                    f"You already have a stream claim published under the name '{name}'. "
                    f"Use --allow-duplicate-name flag to override."
                )
        # The reposted claim id is embedded as-is, so validate its format up front.
        if not VALID_FULL_CLAIM_ID.fullmatch(claim_id):
            raise Exception(
                "Invalid claim id. It is expected to be a 40 characters long hexadecimal string."
            )
        claim = Claim()
        claim.repost.reference.claim_id = claim_id
        tx = await Transaction.claim_create(
            name,
            claim,
            amount,
            claim_address,
            funding_accounts,
            funding_accounts[0],
            channel,
        )
        new_txo = tx.outputs[0]
        if channel:
            new_txo.sign(channel)
        await tx.sign(funding_accounts)
        if not preview:
            await self.broadcast_or_release(tx, blocking)
            # Fire-and-forget analytics so a slow/failed report can't block the RPC.
            self.component_manager.loop.create_task(
                self.analytics_manager.send_claim_action("publish")
            )
        else:
            # Preview: release the reserved inputs instead of broadcasting.
            await account.ledger.release_tx(tx)
        return tx
|
async def jsonrpc_stream_repost(
        self,
        name,
        bid,
        claim_id,
        allow_duplicate_name=False,
        channel_id=None,
        channel_name=None,
        channel_account_id=None,
        account_id=None,
        wallet_id=None,
        claim_address=None,
        funding_account_ids=None,
        preview=False,
        blocking=False,
    ):
        """
        Creates a claim that references an existing stream by its claim id.
        Usage:
            stream_repost (<name> | --name=<name>) (<bid> | --bid=<bid>) (<claim_id> | --claim_id=<claim_id>)
                    [--allow_duplicate_name=<allow_duplicate_name>]
                    [--channel_id=<channel_id> | --channel_name=<channel_name>]
                    [--channel_account_id=<channel_account_id>...]
                    [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                    [--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
                    [--preview] [--blocking]
        Options:
            --name=<name>                  : (str) name of the content (can only consist of a-z A-Z 0-9 and -(dash))
            --bid=<bid>                    : (decimal) amount to back the claim
            --claim_id=<claim_id>          : (str) id of the claim being reposted
            --allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with
                                              given name. default: false.
            --channel_id=<channel_id>      : (str) claim id of the publisher channel
            --channel_name=<channel_name>  : (str) name of the publisher channel
            --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
                                                   for channel certificates, defaults to all accounts.
            --account_id=<account_id>      : (str) account to use for holding the transaction
            --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
            --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
            --claim_address=<claim_address>: (str) address where the claim is sent to, if not specified
                                              it will be determined automatically from the account
            --preview                      : (bool) do not broadcast the transaction
            --blocking                     : (bool) wait until transaction is in mempool
        Returns: {Transaction}
        """
        wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
        self.valid_stream_name_or_error(name)
        account = wallet.get_account_or_default(account_id)
        funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
        # Optional publisher channel; when present the repost is signed by it.
        channel = await self.get_channel_or_none(
            wallet, channel_account_id, channel_id, channel_name, for_signing=True
        )
        amount = self.get_dewies_or_error("bid", bid, positive_value=True)
        claim_address = await self.get_receiving_address(claim_address, account)
        # Refuse accidental duplicate claim names unless explicitly allowed.
        claims = await account.get_claims(claim_name=name)
        if len(claims) > 0:
            if not allow_duplicate_name:
                raise Exception(
                    f"You already have a stream claim published under the name '{name}'. "
                    f"Use --allow-duplicate-name flag to override."
                )
        # The reposted claim id is embedded as-is, so validate its format up front.
        if not VALID_FULL_CLAIM_ID.fullmatch(claim_id):
            raise Exception(
                "Invalid claim id. It is expected to be a 40 characters long hexadecimal string."
            )
        claim = Claim()
        claim.repost.reference.claim_id = claim_id
        tx = await Transaction.claim_create(
            name,
            claim,
            amount,
            claim_address,
            funding_accounts,
            funding_accounts[0],
            channel,
        )
        new_txo = tx.outputs[0]
        if channel:
            new_txo.sign(channel)
        await tx.sign(funding_accounts)
        if not preview:
            await self.broadcast_or_release(tx, blocking)
            # Fire-and-forget: analytics must never block or fail the RPC response
            # (matches the pattern used by the other claim RPCs in this module).
            self.component_manager.loop.create_task(
                self.analytics_manager.send_claim_action("publish")
            )
        else:
            # Preview: release the reserved inputs instead of broadcasting.
            await account.ledger.release_tx(tx)
        return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_stream_create(
self,
name,
bid,
file_path,
allow_duplicate_name=False,
channel_id=None,
channel_name=None,
channel_account_id=None,
account_id=None,
wallet_id=None,
claim_address=None,
funding_account_ids=None,
preview=False,
blocking=False,
validate_file=False,
optimize_file=False,
**kwargs,
):
"""
Make a new stream claim and announce the associated file to lbrynet.
Usage:
stream_create (<name> | --name=<name>) (<bid> | --bid=<bid>) (<file_path> | --file_path=<file_path>)
[--validate_file] [--optimize_file]
[--allow_duplicate_name=<allow_duplicate_name>]
[--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>] [--fee_address=<fee_address>]
[--title=<title>] [--description=<description>] [--author=<author>]
[--tags=<tags>...] [--languages=<languages>...] [--locations=<locations>...]
[--license=<license>] [--license_url=<license_url>] [--thumbnail_url=<thumbnail_url>]
[--release_time=<release_time>] [--width=<width>] [--height=<height>] [--duration=<duration>]
[--channel_id=<channel_id> | --channel_name=<channel_name>]
[--channel_account_id=<channel_account_id>...]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
[--preview] [--blocking]
Options:
--name=<name> : (str) name of the content (can only consist of a-z A-Z 0-9 and -(dash))
--bid=<bid> : (decimal) amount to back the claim
--file_path=<file_path> : (str) path to file to be associated with name.
--validate_file : (bool) validate that the video container and encodings match
common web browser support or that optimization succeeds if specified.
FFmpeg is required
--optimize_file : (bool) transcode the video & audio if necessary to ensure
common web browser support. FFmpeg is required
--allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with
given name. default: false.
--fee_currency=<fee_currency> : (string) specify fee currency
--fee_amount=<fee_amount> : (decimal) content download fee
--fee_address=<fee_address> : (str) address where to send fee payments, will use
value from --claim_address if not provided
--title=<title> : (str) title of the publication
--description=<description> : (str) description of the publication
--author=<author> : (str) author of the publication. The usage for this field is not
the same as for channels. The author field is used to credit an author
who is not the publisher and is not represented by the channel. For
example, a pdf file of 'The Odyssey' has an author of 'Homer' but may
by published to a channel such as '@classics', or to no channel at all
--tags=<tags> : (list) add content tags
--languages=<languages> : (list) languages used by the channel,
using RFC 5646 format, eg:
for English `--languages=en`
for Spanish (Spain) `--languages=es-ES`
for Spanish (Mexican) `--languages=es-MX`
for Chinese (Simplified) `--languages=zh-Hans`
for Chinese (Traditional) `--languages=zh-Hant`
--locations=<locations> : (list) locations relevant to the stream, consisting of 2 letter
`country` code and a `state`, `city` and a postal
`code` along with a `latitude` and `longitude`.
for JSON RPC: pass a dictionary with aforementioned
attributes as keys, eg:
...
"locations": [{'country': 'US', 'state': 'NH'}]
...
for command line: pass a colon delimited list
with values in the following order:
"COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
making sure to include colon for blank values, for
example to provide only the city:
... --locations="::Manchester"
with all values set:
... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
optionally, you can just pass the "LATITUDE:LONGITUDE":
... --locations="42.990605:-71.460989"
finally, you can also pass JSON string of dictionary
on the command line as you would via JSON RPC
... --locations="{'country': 'US', 'state': 'NH'}"
--license=<license> : (str) publication license
--license_url=<license_url> : (str) publication license url
--thumbnail_url=<thumbnail_url>: (str) thumbnail url
--release_time=<release_time> : (int) original public release of content, seconds since UNIX epoch
--width=<width> : (int) image/video width, automatically calculated from media file
--height=<height> : (int) image/video height, automatically calculated from media file
--duration=<duration> : (int) audio/video duration in seconds, automatically calculated
--channel_id=<channel_id> : (str) claim id of the publisher channel
--channel_name=<channel_name> : (str) name of the publisher channel
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts.
--account_id=<account_id> : (str) account to use for holding the transaction
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
--claim_address=<claim_address>: (str) address where the claim is sent to, if not specified
it will be determined automatically from the account
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until transaction is in mempool
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
self.valid_stream_name_or_error(name)
account = wallet.get_account_or_default(account_id)
funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
channel = await self.get_channel_or_none(
wallet, channel_account_id, channel_id, channel_name, for_signing=True
)
amount = self.get_dewies_or_error("bid", bid, positive_value=True)
claim_address = await self.get_receiving_address(claim_address, account)
kwargs["fee_address"] = self.get_fee_address(kwargs, claim_address)
claims = await account.get_claims(claim_name=name)
if len(claims) > 0:
if not allow_duplicate_name:
raise Exception(
f"You already have a stream claim published under the name '{name}'. "
f"Use --allow-duplicate-name flag to override."
)
file_path = await self._video_file_analyzer.verify_or_repair(
validate_file, optimize_file, file_path
)
claim = Claim()
claim.stream.update(file_path=file_path, sd_hash="0" * 96, **kwargs)
tx = await Transaction.claim_create(
name,
claim,
amount,
claim_address,
funding_accounts,
funding_accounts[0],
channel,
)
new_txo = tx.outputs[0]
file_stream = None
if not preview:
file_stream = await self.stream_manager.create_stream(file_path)
claim.stream.source.sd_hash = file_stream.sd_hash
new_txo.script.generate()
if channel:
new_txo.sign(channel)
await tx.sign(funding_accounts)
if not preview:
await self.broadcast_or_release(tx, blocking)
await self.storage.save_claims(
[
self._old_get_temp_claim_info(
tx, new_txo, claim_address, claim, name, dewies_to_lbc(amount)
)
]
)
await self.storage.save_content_claim(file_stream.stream_hash, new_txo.id)
self.component_manager.loop.create_task(
self.analytics_manager.send_claim_action("publish")
)
else:
await account.ledger.release_tx(tx)
return tx
|
async def jsonrpc_stream_create(
        self, name, bid, file_path, allow_duplicate_name=False,
        channel_id=None, channel_name=None, channel_account_id=None,
        account_id=None, wallet_id=None, claim_address=None, funding_account_ids=None,
        preview=False, blocking=False, validate_file=False, optimize_file=False, **kwargs):
    """
    Make a new stream claim and announce the associated file to lbrynet.

    Usage:
        stream_create (<name> | --name=<name>) (<bid> | --bid=<bid>) (<file_path> | --file_path=<file_path>)
                [--validate_file] [--optimize_file]
                [--allow_duplicate_name=<allow_duplicate_name>]
                [--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>] [--fee_address=<fee_address>]
                [--title=<title>] [--description=<description>] [--author=<author>]
                [--tags=<tags>...] [--languages=<languages>...] [--locations=<locations>...]
                [--license=<license>] [--license_url=<license_url>] [--thumbnail_url=<thumbnail_url>]
                [--release_time=<release_time>] [--width=<width>] [--height=<height>] [--duration=<duration>]
                [--channel_id=<channel_id> | --channel_name=<channel_name>]
                [--channel_account_id=<channel_account_id>...]
                [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                [--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
                [--preview] [--blocking]

    Options:
        --name=<name>                  : (str) name of the content (can only consist of a-z A-Z 0-9 and -(dash))
        --bid=<bid>                    : (decimal) amount to back the claim
        --file_path=<file_path>        : (str) path to file to be associated with name.
        --validate_file                : (bool) validate that the video container and encodings match
                                         common web browser support or that optimization succeeds if specified.
                                         FFmpeg is required
        --optimize_file                : (bool) transcode the video & audio if necessary to ensure
                                         common web browser support. FFmpeg is required
        --allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with
                                                        given name. default: false.
        --fee_currency=<fee_currency>  : (string) specify fee currency
        --fee_amount=<fee_amount>      : (decimal) content download fee
        --fee_address=<fee_address>    : (str) address where to send fee payments, will use
                                         value from --claim_address if not provided
        --title=<title>                : (str) title of the publication
        --description=<description>    : (str) description of the publication
        --author=<author>              : (str) author of the publication. The usage for this field is not
                                         the same as for channels. The author field is used to credit an author
                                         who is not the publisher and is not represented by the channel. For
                                         example, a pdf file of 'The Odyssey' has an author of 'Homer' but may
                                         by published to a channel such as '@classics', or to no channel at all
        --tags=<tags>                  : (list) add content tags
        --languages=<languages>        : (list) languages used by the channel,
                                         using RFC 5646 format, eg:
                                         for English `--languages=en`
                                         for Spanish (Spain) `--languages=es-ES`
                                         for Spanish (Mexican) `--languages=es-MX`
                                         for Chinese (Simplified) `--languages=zh-Hans`
                                         for Chinese (Traditional) `--languages=zh-Hant`
        --locations=<locations>        : (list) locations relevant to the stream, consisting of 2 letter
                                         `country` code and a `state`, `city` and a postal
                                         `code` along with a `latitude` and `longitude`.
                                         for JSON RPC: pass a dictionary with aforementioned
                                         attributes as keys, eg:
                                         ...
                                         "locations": [{'country': 'US', 'state': 'NH'}]
                                         ...
                                         for command line: pass a colon delimited list
                                         with values in the following order:
                                         "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                         making sure to include colon for blank values, for
                                         example to provide only the city:
                                         ... --locations="::Manchester"
                                         with all values set:
                                         ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                         optionally, you can just pass the "LATITUDE:LONGITUDE":
                                         ... --locations="42.990605:-71.460989"
                                         finally, you can also pass JSON string of dictionary
                                         on the command line as you would via JSON RPC
                                         ... --locations="{'country': 'US', 'state': 'NH'}"
        --license=<license>            : (str) publication license
        --license_url=<license_url>    : (str) publication license url
        --thumbnail_url=<thumbnail_url>: (str) thumbnail url
        --release_time=<release_time>  : (int) original public release of content, seconds since UNIX epoch
        --width=<width>                : (int) image/video width, automatically calculated from media file
        --height=<height>              : (int) image/video height, automatically calculated from media file
        --duration=<duration>          : (int) audio/video duration in seconds, automatically calculated
        --channel_id=<channel_id>      : (str) claim id of the publisher channel
        --channel_name=<channel_name>  : (str) name of the publisher channel
        --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
                                                   for channel certificates, defaults to all accounts.
        --account_id=<account_id>      : (str) account to use for holding the transaction
        --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --claim_address=<claim_address>: (str) address where the claim is sent to, if not specified
                                         it will be determined automatically from the account
        --preview                      : (bool) do not broadcast the transaction
        --blocking                     : (bool) wait until transaction is in mempool

    Returns: {Transaction}
    """
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    self.valid_stream_name_or_error(name)
    account = wallet.get_account_or_default(account_id)
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    channel = await self.get_channel_or_none(
        wallet, channel_account_id, channel_id, channel_name, for_signing=True
    )
    amount = self.get_dewies_or_error("bid", bid, positive_value=True)
    claim_address = await self.get_receiving_address(claim_address, account)
    kwargs["fee_address"] = self.get_fee_address(kwargs, claim_address)
    claims = await account.get_claims(claim_name=name)
    if len(claims) > 0:
        if not allow_duplicate_name:
            raise Exception(
                f"You already have a stream claim published under the name '{name}'. "
                f"Use --allow-duplicate-name flag to override."
            )
    # Optionally validate/transcode the media before publishing (FFmpeg required
    # when either flag is set); returns the (possibly replaced) file path.
    file_path = await self._video_file_analyzer.verify_or_repair(
        validate_file, optimize_file, file_path
    )
    claim = Claim()
    # The real sd_hash is unknown until the stream is created below; a
    # placeholder of the correct length (96 hex chars) keeps the serialized
    # transaction the right size so fees/signing are computed correctly.
    claim.stream.update(file_path=file_path, sd_hash="0" * 96, **kwargs)
    tx = await Transaction.claim_create(
        name, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
    )
    new_txo = tx.outputs[0]

    file_stream = None
    if not preview:
        # Create the stream now so the placeholder sd_hash can be replaced
        # with the real one before signing.
        file_stream = await self.stream_manager.create_stream(file_path)
        claim.stream.source.sd_hash = file_stream.sd_hash
        new_txo.script.generate()

    if channel:
        new_txo.sign(channel)
    await tx.sign(funding_accounts)

    if not preview:
        await self.broadcast_or_release(tx, blocking)
        await self.storage.save_claims([self._old_get_temp_claim_info(
            tx, new_txo, claim_address, claim, name, dewies_to_lbc(amount)
        )])
        await self.storage.save_content_claim(file_stream.stream_hash, new_txo.id)
        # Fire-and-forget: analytics reporting must not delay (or fail) the
        # RPC response if the analytics endpoint is slow or unreachable.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action('publish')
        )
    else:
        await account.ledger.release_tx(tx)

    return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_stream_update(
        self, claim_id, bid=None, file_path=None,
        channel_id=None, channel_name=None, channel_account_id=None, clear_channel=False,
        account_id=None, wallet_id=None, claim_address=None, funding_account_ids=None,
        preview=False, blocking=False, replace=False, **kwargs):
    """
    Update an existing stream claim and if a new file is provided announce it to lbrynet.

    Usage:
        stream_update (<claim_id> | --claim_id=<claim_id>) [--bid=<bid>] [--file_path=<file_path>]
                [--file_name=<file_name>] [--file_size=<file_size>] [--file_hash=<file_hash>]
                [--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>]
                [--fee_address=<fee_address>] [--clear_fee]
                [--title=<title>] [--description=<description>] [--author=<author>]
                [--tags=<tags>...] [--clear_tags]
                [--languages=<languages>...] [--clear_languages]
                [--locations=<locations>...] [--clear_locations]
                [--license=<license>] [--license_url=<license_url>] [--thumbnail_url=<thumbnail_url>]
                [--release_time=<release_time>] [--width=<width>] [--height=<height>] [--duration=<duration>]
                [--channel_id=<channel_id> | --channel_name=<channel_name> | --clear_channel]
                [--channel_account_id=<channel_account_id>...]
                [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                [--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
                [--preview] [--blocking] [--replace]

    Options:
        --claim_id=<claim_id>          : (str) id of the stream claim to update
        --bid=<bid>                    : (decimal) amount to back the claim
        --file_path=<file_path>        : (str) path to file to be associated with name.
        --file_name=<file_name>        : (str) override file name, defaults to name from file_path.
        --file_size=<file_size>        : (str) override file size, otherwise automatically computed.
        --file_hash=<file_hash>        : (str) override file hash, otherwise automatically computed.
        --fee_currency=<fee_currency>  : (string) specify fee currency
        --fee_amount=<fee_amount>      : (decimal) content download fee
        --fee_address=<fee_address>    : (str) address where to send fee payments, will use
                                         value from --claim_address if not provided
        --clear_fee                    : (bool) clear previously set fee
        --title=<title>                : (str) title of the publication
        --description=<description>    : (str) description of the publication
        --author=<author>              : (str) author of the publication. The usage for this field is not
                                         the same as for channels. The author field is used to credit an author
                                         who is not the publisher and is not represented by the channel. For
                                         example, a pdf file of 'The Odyssey' has an author of 'Homer' but may
                                         by published to a channel such as '@classics', or to no channel at all
        --tags=<tags>                  : (list) add content tags
        --clear_tags                   : (bool) clear existing tags (prior to adding new ones)
        --languages=<languages>        : (list) languages used by the channel,
                                         using RFC 5646 format, eg:
                                         for English `--languages=en`
                                         for Spanish (Spain) `--languages=es-ES`
                                         for Spanish (Mexican) `--languages=es-MX`
                                         for Chinese (Simplified) `--languages=zh-Hans`
                                         for Chinese (Traditional) `--languages=zh-Hant`
        --clear_languages              : (bool) clear existing languages (prior to adding new ones)
        --locations=<locations>        : (list) locations relevant to the stream, consisting of 2 letter
                                         `country` code and a `state`, `city` and a postal
                                         `code` along with a `latitude` and `longitude`.
                                         for JSON RPC: pass a dictionary with aforementioned
                                         attributes as keys, eg:
                                         ...
                                         "locations": [{'country': 'US', 'state': 'NH'}]
                                         ...
                                         for command line: pass a colon delimited list
                                         with values in the following order:
                                         "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                         making sure to include colon for blank values, for
                                         example to provide only the city:
                                         ... --locations="::Manchester"
                                         with all values set:
                                         ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                         optionally, you can just pass the "LATITUDE:LONGITUDE":
                                         ... --locations="42.990605:-71.460989"
                                         finally, you can also pass JSON string of dictionary
                                         on the command line as you would via JSON RPC
                                         ... --locations="{'country': 'US', 'state': 'NH'}"
        --clear_locations              : (bool) clear existing locations (prior to adding new ones)
        --license=<license>            : (str) publication license
        --license_url=<license_url>    : (str) publication license url
        --thumbnail_url=<thumbnail_url>: (str) thumbnail url
        --release_time=<release_time>  : (int) original public release of content, seconds since UNIX epoch
        --width=<width>                : (int) image/video width, automatically calculated from media file
        --height=<height>              : (int) image/video height, automatically calculated from media file
        --duration=<duration>          : (int) audio/video duration in seconds, automatically calculated
        --channel_id=<channel_id>      : (str) claim id of the publisher channel
        --channel_name=<channel_name>  : (str) name of the publisher channel
        --clear_channel                : (bool) remove channel signature
        --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
                                                   for channel certificates, defaults to all accounts.
        --account_id=<account_id>      : (str) account in which to look for stream (default: all)
        --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --claim_address=<claim_address>: (str) address where the claim is sent to, if not specified
                                         it will be determined automatically from the account
        --preview                      : (bool) do not broadcast the transaction
        --blocking                     : (bool) wait until transaction is in mempool
        --replace                      : (bool) instead of modifying specific values on
                                         the stream, this will clear all existing values
                                         and only save passed in values, useful for form
                                         submissions where all values are always set

    Returns: {Transaction}
    """
    # Resolve wallet and the account scope used to locate the existing claim.
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    if account_id:
        account = wallet.get_account_or_error(account_id)
        accounts = [account]
    else:
        account = wallet.default_account
        accounts = wallet.accounts

    # The update must reference exactly one existing stream claim we control.
    existing_claims = await self.ledger.get_claims(
        wallet=wallet, accounts=accounts, claim_id=claim_id
    )
    if len(existing_claims) != 1:
        account_ids = ', '.join(f"'{account.id}'" for account in accounts)
        raise Exception(
            f"Can't find the stream '{claim_id}' in account(s) {account_ids}."
        )
    old_txo = existing_claims[0]
    if not old_txo.claim.is_stream:
        raise Exception(
            f"A claim with id '{claim_id}' was found but it is not a stream claim."
        )

    # Defaults carried over from the claim being updated: its bid amount and
    # its holding address, unless the caller overrides them.
    if bid is not None:
        amount = self.get_dewies_or_error('bid', bid, positive_value=True)
    else:
        amount = old_txo.amount

    if claim_address is not None:
        self.valid_address_or_error(claim_address)
    else:
        claim_address = old_txo.get_address(account.ledger)

    channel = None
    if channel_id or channel_name:
        channel = await self.get_channel_or_error(
            wallet, channel_account_id, channel_id, channel_name, for_signing=True)
    elif old_txo.claim.is_signed and not clear_channel and not replace:
        # Keep the existing channel signature unless explicitly cleared/replaced.
        channel = old_txo.channel

    fee_address = self.get_fee_address(kwargs, claim_address)
    if fee_address:
        kwargs['fee_address'] = fee_address

    if replace:
        # Start from a fresh claim, copying over only the source and the
        # typed stream sub-message (video/audio/image) from the old claim.
        claim = Claim()
        claim.stream.message.source.CopyFrom(
            old_txo.claim.stream.message.source
        )
        stream_type = old_txo.claim.stream.stream_type
        if stream_type:
            old_stream_type = getattr(old_txo.claim.stream.message, stream_type)
            new_stream_type = getattr(claim.stream.message, stream_type)
            new_stream_type.CopyFrom(old_stream_type)
        claim.stream.update(file_path=file_path, **kwargs)
    else:
        # Merge the new values into a copy of the existing claim.
        claim = Claim.from_bytes(old_txo.claim.to_bytes())
        claim.stream.update(file_path=file_path, **kwargs)
    tx = await Transaction.claim_update(
        old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
    )
    new_txo = tx.outputs[0]

    stream_hash = None
    if not preview:
        # Look up the locally managed stream by sd_hash; may be None if the
        # original file is no longer managed by this node.
        old_stream = self.stream_manager.streams.get(old_txo.claim.stream.source.sd_hash, None)
        if file_path is not None:
            # A new file replaces the old stream entirely (the old file on
            # disk is kept; only the managed stream record is deleted).
            if old_stream:
                await self.stream_manager.delete_stream(old_stream, delete_file=False)
            file_stream = await self.stream_manager.create_stream(file_path)
            new_txo.claim.stream.source.sd_hash = file_stream.sd_hash
            new_txo.script.generate()
            stream_hash = file_stream.stream_hash
        elif old_stream:
            stream_hash = old_stream.stream_hash

    if channel:
        new_txo.sign(channel)
    await tx.sign(funding_accounts)

    if not preview:
        await self.broadcast_or_release(tx, blocking)
        await self.storage.save_claims([self._old_get_temp_claim_info(
            tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
        )])
        if stream_hash:
            await self.storage.save_content_claim(stream_hash, new_txo.id)
        # Fire-and-forget analytics: must not delay the RPC response.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action('publish')
        )
    else:
        await account.ledger.release_tx(tx)

    return tx
|
async def jsonrpc_stream_update(
        self, claim_id, bid=None, file_path=None,
        channel_id=None, channel_name=None, channel_account_id=None, clear_channel=False,
        account_id=None, wallet_id=None, claim_address=None, funding_account_ids=None,
        preview=False, blocking=False, replace=False, **kwargs):
    """
    Update an existing stream claim and if a new file is provided announce it to lbrynet.

    Usage:
        stream_update (<claim_id> | --claim_id=<claim_id>) [--bid=<bid>] [--file_path=<file_path>]
                [--file_name=<file_name>] [--file_size=<file_size>] [--file_hash=<file_hash>]
                [--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>]
                [--fee_address=<fee_address>] [--clear_fee]
                [--title=<title>] [--description=<description>] [--author=<author>]
                [--tags=<tags>...] [--clear_tags]
                [--languages=<languages>...] [--clear_languages]
                [--locations=<locations>...] [--clear_locations]
                [--license=<license>] [--license_url=<license_url>] [--thumbnail_url=<thumbnail_url>]
                [--release_time=<release_time>] [--width=<width>] [--height=<height>] [--duration=<duration>]
                [--channel_id=<channel_id> | --channel_name=<channel_name> | --clear_channel]
                [--channel_account_id=<channel_account_id>...]
                [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                [--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
                [--preview] [--blocking] [--replace]

    Options:
        --claim_id=<claim_id>          : (str) id of the stream claim to update
        --bid=<bid>                    : (decimal) amount to back the claim
        --file_path=<file_path>        : (str) path to file to be associated with name.
        --file_name=<file_name>        : (str) override file name, defaults to name from file_path.
        --file_size=<file_size>        : (str) override file size, otherwise automatically computed.
        --file_hash=<file_hash>        : (str) override file hash, otherwise automatically computed.
        --fee_currency=<fee_currency>  : (string) specify fee currency
        --fee_amount=<fee_amount>      : (decimal) content download fee
        --fee_address=<fee_address>    : (str) address where to send fee payments, will use
                                         value from --claim_address if not provided
        --clear_fee                    : (bool) clear previously set fee
        --title=<title>                : (str) title of the publication
        --description=<description>    : (str) description of the publication
        --author=<author>              : (str) author of the publication. The usage for this field is not
                                         the same as for channels. The author field is used to credit an author
                                         who is not the publisher and is not represented by the channel. For
                                         example, a pdf file of 'The Odyssey' has an author of 'Homer' but may
                                         by published to a channel such as '@classics', or to no channel at all
        --tags=<tags>                  : (list) add content tags
        --clear_tags                   : (bool) clear existing tags (prior to adding new ones)
        --languages=<languages>        : (list) languages used by the channel,
                                         using RFC 5646 format, eg:
                                         for English `--languages=en`
                                         for Spanish (Spain) `--languages=es-ES`
                                         for Spanish (Mexican) `--languages=es-MX`
                                         for Chinese (Simplified) `--languages=zh-Hans`
                                         for Chinese (Traditional) `--languages=zh-Hant`
        --clear_languages              : (bool) clear existing languages (prior to adding new ones)
        --locations=<locations>        : (list) locations relevant to the stream, consisting of 2 letter
                                         `country` code and a `state`, `city` and a postal
                                         `code` along with a `latitude` and `longitude`.
                                         for JSON RPC: pass a dictionary with aforementioned
                                         attributes as keys, eg:
                                         ...
                                         "locations": [{'country': 'US', 'state': 'NH'}]
                                         ...
                                         for command line: pass a colon delimited list
                                         with values in the following order:
                                         "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                         making sure to include colon for blank values, for
                                         example to provide only the city:
                                         ... --locations="::Manchester"
                                         with all values set:
                                         ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                         optionally, you can just pass the "LATITUDE:LONGITUDE":
                                         ... --locations="42.990605:-71.460989"
                                         finally, you can also pass JSON string of dictionary
                                         on the command line as you would via JSON RPC
                                         ... --locations="{'country': 'US', 'state': 'NH'}"
        --clear_locations              : (bool) clear existing locations (prior to adding new ones)
        --license=<license>            : (str) publication license
        --license_url=<license_url>    : (str) publication license url
        --thumbnail_url=<thumbnail_url>: (str) thumbnail url
        --release_time=<release_time>  : (int) original public release of content, seconds since UNIX epoch
        --width=<width>                : (int) image/video width, automatically calculated from media file
        --height=<height>              : (int) image/video height, automatically calculated from media file
        --duration=<duration>          : (int) audio/video duration in seconds, automatically calculated
        --channel_id=<channel_id>      : (str) claim id of the publisher channel
        --channel_name=<channel_name>  : (str) name of the publisher channel
        --clear_channel                : (bool) remove channel signature
        --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
                                                   for channel certificates, defaults to all accounts.
        --account_id=<account_id>      : (str) account in which to look for stream (default: all)
        --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --claim_address=<claim_address>: (str) address where the claim is sent to, if not specified
                                         it will be determined automatically from the account
        --preview                      : (bool) do not broadcast the transaction
        --blocking                     : (bool) wait until transaction is in mempool
        --replace                      : (bool) instead of modifying specific values on
                                         the stream, this will clear all existing values
                                         and only save passed in values, useful for form
                                         submissions where all values are always set

    Returns: {Transaction}
    """
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    if account_id:
        account = wallet.get_account_or_error(account_id)
        accounts = [account]
    else:
        account = wallet.default_account
        accounts = wallet.accounts

    # The update must reference exactly one existing stream claim we control.
    existing_claims = await self.ledger.get_claims(
        wallet=wallet, accounts=accounts, claim_id=claim_id
    )
    if len(existing_claims) != 1:
        account_ids = ', '.join(f"'{account.id}'" for account in accounts)
        raise Exception(
            f"Can't find the stream '{claim_id}' in account(s) {account_ids}."
        )
    old_txo = existing_claims[0]
    if not old_txo.claim.is_stream:
        raise Exception(
            f"A claim with id '{claim_id}' was found but it is not a stream claim."
        )

    if bid is not None:
        amount = self.get_dewies_or_error('bid', bid, positive_value=True)
    else:
        amount = old_txo.amount

    if claim_address is not None:
        self.valid_address_or_error(claim_address)
    else:
        claim_address = old_txo.get_address(account.ledger)

    channel = None
    if channel_id or channel_name:
        channel = await self.get_channel_or_error(
            wallet, channel_account_id, channel_id, channel_name, for_signing=True)
    elif old_txo.claim.is_signed and not clear_channel and not replace:
        # Keep the existing channel signature unless explicitly cleared/replaced.
        channel = old_txo.channel

    fee_address = self.get_fee_address(kwargs, claim_address)
    if fee_address:
        kwargs['fee_address'] = fee_address

    if replace:
        # Start from a fresh claim, copying only the source and the typed
        # stream sub-message (video/audio/image) from the old claim.
        claim = Claim()
        claim.stream.message.source.CopyFrom(
            old_txo.claim.stream.message.source
        )
        stream_type = old_txo.claim.stream.stream_type
        if stream_type:
            old_stream_type = getattr(old_txo.claim.stream.message, stream_type)
            new_stream_type = getattr(claim.stream.message, stream_type)
            new_stream_type.CopyFrom(old_stream_type)
        claim.stream.update(file_path=file_path, **kwargs)
    else:
        claim = Claim.from_bytes(old_txo.claim.to_bytes())
        claim.stream.update(file_path=file_path, **kwargs)
    tx = await Transaction.claim_update(
        old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
    )
    new_txo = tx.outputs[0]

    stream_hash = None
    if not preview:
        # BUG FIX: the previous lookup went through storage and then
        # stream_manager.get_stream_by_stream_hash(), which returns None when
        # the stream is not currently managed, causing delete_stream(None) to
        # fail with AttributeError ('NoneType' object has no attribute
        # 'stop_tasks'). Look up the managed stream directly and guard on it.
        old_stream = self.stream_manager.streams.get(old_txo.claim.stream.source.sd_hash, None)
        if file_path is not None:
            if old_stream:
                # Only delete the managed stream record; keep the old file.
                await self.stream_manager.delete_stream(old_stream, delete_file=False)
            file_stream = await self.stream_manager.create_stream(file_path)
            new_txo.claim.stream.source.sd_hash = file_stream.sd_hash
            new_txo.script.generate()
            stream_hash = file_stream.stream_hash
        elif old_stream:
            stream_hash = old_stream.stream_hash

    if channel:
        new_txo.sign(channel)
    await tx.sign(funding_accounts)

    if not preview:
        await self.broadcast_or_release(tx, blocking)
        await self.storage.save_claims([self._old_get_temp_claim_info(
            tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
        )])
        if stream_hash:
            await self.storage.save_content_claim(stream_hash, new_txo.id)
        # Fire-and-forget analytics: must not delay or fail the RPC response.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action('publish')
        )
    else:
        await account.ledger.release_tx(tx)

    return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_stream_abandon(
    self,
    claim_id=None,
    txid=None,
    nout=None,
    account_id=None,
    wallet_id=None,
    preview=False,
    blocking=False,
):
    """
    Abandon one of my stream claims.
    Usage:
        stream_abandon [<claim_id> | --claim_id=<claim_id>]
                       [<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
                       [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                       [--preview] [--blocking]
    Options:
        --claim_id=<claim_id> : (str) claim_id of the claim to abandon
        --txid=<txid> : (str) txid of the claim to abandon
        --nout=<nout> : (int) nout of the claim to abandon
        --account_id=<account_id> : (str) id of the account to use
        --wallet_id=<wallet_id> : (str) restrict operation to specific wallet
        --preview : (bool) do not broadcast the transaction
        --blocking : (bool) wait until abandon is in mempool
    Returns: {Transaction}
    """
    # NOTE: the docstring above doubles as the CLI usage spec — presumably
    # parsed by the daemon's argument dispatcher; do not reformat it casually.
    # Resolve the wallet and refuse to proceed while locked: abandoning spends funds.
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    # Narrow the search to a single account when one is given; otherwise all
    # accounts in the wallet are searched and the default account receives change.
    if account_id:
        account = wallet.get_account_or_error(account_id)
        accounts = [account]
    else:
        account = wallet.default_account
        accounts = wallet.accounts
    # The claim may be identified either by its txid:nout outpoint or by claim_id.
    if txid is not None and nout is not None:
        claims = await self.ledger.get_claims(
            wallet=wallet, accounts=accounts, **{"txo.txid": txid, "txo.position": nout}
        )
    elif claim_id is not None:
        claims = await self.ledger.get_claims(
            wallet=wallet, accounts=accounts, claim_id=claim_id
        )
    else:
        raise Exception("Must specify claim_id, or txid and nout")
    if not claims:
        raise Exception("No claim found for the specified claim_id or txid:nout")
    # Spending the claim outputs without creating a replacement abandons the claim.
    tx = await Transaction.create(
        [Input.spend(txo) for txo in claims], [], accounts, account
    )
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # Fire-and-forget: the analytics call must not delay or fail the RPC response.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action("abandon")
        )
    else:
        # Preview only: release the outputs reserved for this transaction.
        await self.ledger.release_tx(tx)
    return tx
|
async def jsonrpc_stream_abandon(
    self,
    claim_id=None,
    txid=None,
    nout=None,
    account_id=None,
    wallet_id=None,
    preview=False,
    blocking=False,
):
    """
    Abandon one of my stream claims.
    Usage:
        stream_abandon [<claim_id> | --claim_id=<claim_id>]
                       [<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
                       [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                       [--preview] [--blocking]
    Options:
        --claim_id=<claim_id> : (str) claim_id of the claim to abandon
        --txid=<txid> : (str) txid of the claim to abandon
        --nout=<nout> : (int) nout of the claim to abandon
        --account_id=<account_id> : (str) id of the account to use
        --wallet_id=<wallet_id> : (str) restrict operation to specific wallet
        --preview : (bool) do not broadcast the transaction
        --blocking : (bool) wait until abandon is in mempool
    Returns: {Transaction}
    """
    # NOTE: the docstring above doubles as the CLI usage spec; keep its format.
    # Resolve the wallet and refuse to proceed while locked: abandoning spends funds.
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    # Narrow the search to a single account when one is given; otherwise all
    # accounts in the wallet are searched and the default account receives change.
    if account_id:
        account = wallet.get_account_or_error(account_id)
        accounts = [account]
    else:
        account = wallet.default_account
        accounts = wallet.accounts
    # The claim may be identified either by its txid:nout outpoint or by claim_id.
    if txid is not None and nout is not None:
        claims = await self.ledger.get_claims(
            wallet=wallet, accounts=accounts, **{"txo.txid": txid, "txo.position": nout}
        )
    elif claim_id is not None:
        claims = await self.ledger.get_claims(
            wallet=wallet, accounts=accounts, claim_id=claim_id
        )
    else:
        raise Exception("Must specify claim_id, or txid and nout")
    if not claims:
        raise Exception("No claim found for the specified claim_id or txid:nout")
    # Spending the claim outputs without creating a replacement abandons the claim.
    tx = await Transaction.create(
        [Input.spend(txo) for txo in claims], [], accounts, account
    )
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # Fix: dispatch analytics as a background task instead of awaiting it,
        # so a slow or failing analytics backend cannot delay or error the RPC
        # response (matches the other claim-action methods in this file).
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action("abandon")
        )
    else:
        # Preview only: release the outputs reserved for this transaction.
        await self.ledger.release_tx(tx)
    return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_collection_create(
    self,
    name,
    bid,
    claims,
    allow_duplicate_name=False,
    channel_id=None,
    channel_name=None,
    channel_account_id=None,
    account_id=None,
    wallet_id=None,
    claim_address=None,
    funding_account_ids=None,
    preview=False,
    blocking=False,
    **kwargs,
):
    """
    Create a new collection.
    Usage:
        collection_create (<name> | --name=<name>) (<bid> | --bid=<bid>)
                          (<claims>... | --claims=<claims>...)
                          [--allow_duplicate_name]
                          [--title=<title>] [--description=<description>]
                          [--tags=<tags>...] [--languages=<languages>...] [--locations=<locations>...]
                          [--thumbnail_url=<thumbnail_url>]
                          [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                          [--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
                          [--preview] [--blocking]
    Options:
        --name=<name> : (str) name of the collection
        --bid=<bid> : (decimal) amount to back the claim
        --claims=<claims> : (list) claim ids to be included in the collection
        --allow_duplicate_name : (bool) create new collection even if one already exists with
                                       given name. default: false.
        --title=<title> : (str) title of the collection
        --description=<description> : (str) description of the collection
        --clear_languages : (bool) clear existing languages (prior to adding new ones)
        --tags=<tags> : (list) content tags
        --clear_languages : (bool) clear existing languages (prior to adding new ones)
        --languages=<languages> : (list) languages used by the collection,
                                  using RFC 5646 format, eg:
                                  for English `--languages=en`
                                  for Spanish (Spain) `--languages=es-ES`
                                  for Spanish (Mexican) `--languages=es-MX`
                                  for Chinese (Simplified) `--languages=zh-Hans`
                                  for Chinese (Traditional) `--languages=zh-Hant`
        --locations=<locations> : (list) locations of the collection, consisting of 2 letter
                                  `country` code and a `state`, `city` and a postal
                                  `code` along with a `latitude` and `longitude`.
                                  for JSON RPC: pass a dictionary with aforementioned
                                  attributes as keys, eg:
                                  ...
                                  "locations": [{'country': 'US', 'state': 'NH'}]
                                  ...
                                  for command line: pass a colon delimited list
                                  with values in the following order:
                                  "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                  making sure to include colon for blank values, for
                                  example to provide only the city:
                                  ... --locations="::Manchester"
                                  with all values set:
                                  ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                  optionally, you can just pass the "LATITUDE:LONGITUDE":
                                  ... --locations="42.990605:-71.460989"
                                  finally, you can also pass JSON string of dictionary
                                  on the command line as you would via JSON RPC
                                  ... --locations="{'country': 'US', 'state': 'NH'}"
        --thumbnail_url=<thumbnail_url>: (str) thumbnail url
        --account_id=<account_id> : (str) account to use for holding the transaction
        --wallet_id=<wallet_id> : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --claim_address=<claim_address>: (str) address where the collection is sent to, if not specified
                                         it will be determined automatically from the account
        --preview : (bool) do not broadcast the transaction
        --blocking : (bool) wait until transaction is in mempool
    Returns: {Transaction}
    """
    # NOTE: the docstring above doubles as the CLI usage spec — presumably
    # parsed by the daemon's argument dispatcher; do not reformat it casually.
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    account = wallet.get_account_or_default(account_id)
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    # Raises if `name` is not a valid collection claim name.
    self.valid_collection_name_or_error(name)
    # Optional signing channel, resolved with signing capability so the
    # collection can be published under a channel identity.
    channel = await self.get_channel_or_none(
        wallet, channel_account_id, channel_id, channel_name, for_signing=True
    )
    # Convert the decimal bid into dewies; must be strictly positive.
    amount = self.get_dewies_or_error("bid", bid, positive_value=True)
    claim_address = await self.get_receiving_address(claim_address, account)
    # Guard against accidentally creating a second collection under the same
    # name unless the caller explicitly opted in with --allow-duplicate-name.
    existing_collections = await self.ledger.get_collections(
        accounts=wallet.accounts, claim_name=name
    )
    if len(existing_collections) > 0:
        if not allow_duplicate_name:
            raise Exception(
                f"You already have a collection under the name '{name}'. "
                f"Use --allow-duplicate-name flag to override."
            )
    # Build the collection claim metadata; remaining keyword args (title,
    # description, tags, languages, locations, ...) are applied by update().
    claim = Claim()
    claim.collection.update(claims=claims, **kwargs)
    tx = await Transaction.claim_create(
        name,
        claim,
        amount,
        claim_address,
        funding_accounts,
        funding_accounts[0],
        channel,
    )
    new_txo = tx.outputs[0]
    if channel:
        # Sign the new claim output with the channel key before funding-signing.
        new_txo.sign(channel)
    await tx.sign(funding_accounts)
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # Fire-and-forget: the analytics call must not delay or fail the RPC response.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action("publish")
        )
    else:
        # Preview only: release the outputs reserved for this transaction.
        await account.ledger.release_tx(tx)
    return tx
|
async def jsonrpc_collection_create(
    self,
    name,
    bid,
    claims,
    allow_duplicate_name=False,
    channel_id=None,
    channel_name=None,
    channel_account_id=None,
    account_id=None,
    wallet_id=None,
    claim_address=None,
    funding_account_ids=None,
    preview=False,
    blocking=False,
    **kwargs,
):
    """
    Create a new collection.
    Usage:
        collection_create (<name> | --name=<name>) (<bid> | --bid=<bid>)
                          (<claims>... | --claims=<claims>...)
                          [--allow_duplicate_name]
                          [--title=<title>] [--description=<description>]
                          [--tags=<tags>...] [--languages=<languages>...] [--locations=<locations>...]
                          [--thumbnail_url=<thumbnail_url>]
                          [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                          [--claim_address=<claim_address>] [--funding_account_ids=<funding_account_ids>...]
                          [--preview] [--blocking]
    Options:
        --name=<name> : (str) name of the collection
        --bid=<bid> : (decimal) amount to back the claim
        --claims=<claims> : (list) claim ids to be included in the collection
        --allow_duplicate_name : (bool) create new collection even if one already exists with
                                       given name. default: false.
        --title=<title> : (str) title of the collection
        --description=<description> : (str) description of the collection
        --clear_languages : (bool) clear existing languages (prior to adding new ones)
        --tags=<tags> : (list) content tags
        --clear_languages : (bool) clear existing languages (prior to adding new ones)
        --languages=<languages> : (list) languages used by the collection,
                                  using RFC 5646 format, eg:
                                  for English `--languages=en`
                                  for Spanish (Spain) `--languages=es-ES`
                                  for Spanish (Mexican) `--languages=es-MX`
                                  for Chinese (Simplified) `--languages=zh-Hans`
                                  for Chinese (Traditional) `--languages=zh-Hant`
        --locations=<locations> : (list) locations of the collection, consisting of 2 letter
                                  `country` code and a `state`, `city` and a postal
                                  `code` along with a `latitude` and `longitude`.
                                  for JSON RPC: pass a dictionary with aforementioned
                                  attributes as keys, eg:
                                  ...
                                  "locations": [{'country': 'US', 'state': 'NH'}]
                                  ...
                                  for command line: pass a colon delimited list
                                  with values in the following order:
                                  "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                  making sure to include colon for blank values, for
                                  example to provide only the city:
                                  ... --locations="::Manchester"
                                  with all values set:
                                  ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                  optionally, you can just pass the "LATITUDE:LONGITUDE":
                                  ... --locations="42.990605:-71.460989"
                                  finally, you can also pass JSON string of dictionary
                                  on the command line as you would via JSON RPC
                                  ... --locations="{'country': 'US', 'state': 'NH'}"
        --thumbnail_url=<thumbnail_url>: (str) thumbnail url
        --account_id=<account_id> : (str) account to use for holding the transaction
        --wallet_id=<wallet_id> : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --claim_address=<claim_address>: (str) address where the collection is sent to, if not specified
                                         it will be determined automatically from the account
        --preview : (bool) do not broadcast the transaction
        --blocking : (bool) wait until transaction is in mempool
    Returns: {Transaction}
    """
    # NOTE: the docstring above doubles as the CLI usage spec; keep its format.
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    account = wallet.get_account_or_default(account_id)
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    # Raises if `name` is not a valid collection claim name.
    self.valid_collection_name_or_error(name)
    # Optional signing channel, resolved with signing capability so the
    # collection can be published under a channel identity.
    channel = await self.get_channel_or_none(
        wallet, channel_account_id, channel_id, channel_name, for_signing=True
    )
    # Convert the decimal bid into dewies; must be strictly positive.
    amount = self.get_dewies_or_error("bid", bid, positive_value=True)
    claim_address = await self.get_receiving_address(claim_address, account)
    # Guard against accidentally creating a second collection under the same
    # name unless the caller explicitly opted in with --allow-duplicate-name.
    existing_collections = await self.ledger.get_collections(
        accounts=wallet.accounts, claim_name=name
    )
    if len(existing_collections) > 0:
        if not allow_duplicate_name:
            raise Exception(
                f"You already have a collection under the name '{name}'. "
                f"Use --allow-duplicate-name flag to override."
            )
    # Build the collection claim metadata; remaining keyword args (title,
    # description, tags, languages, locations, ...) are applied by update().
    claim = Claim()
    claim.collection.update(claims=claims, **kwargs)
    tx = await Transaction.claim_create(
        name,
        claim,
        amount,
        claim_address,
        funding_accounts,
        funding_accounts[0],
        channel,
    )
    new_txo = tx.outputs[0]
    if channel:
        # Sign the new claim output with the channel key before funding-signing.
        new_txo.sign(channel)
    await tx.sign(funding_accounts)
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # Fix: dispatch analytics as a background task instead of awaiting it,
        # so a slow or failing analytics backend cannot delay or error the RPC
        # response (matches the other claim-action methods in this file).
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action("publish")
        )
    else:
        # Preview only: release the outputs reserved for this transaction.
        await account.ledger.release_tx(tx)
    return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.