file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
astutils.py | """
Various bits of reusable code related to L{ast.AST} node processing.
"""
import inspect
import platform
import sys
from numbers import Number
from typing import Iterator, Optional, List, Iterable, Sequence, TYPE_CHECKING, Tuple, Union
from inspect import BoundArguments, Signature
import ast
from pydoctor import visitor
if TYPE_CHECKING:
from pydoctor import model
# AST visitors
def iter_values(node: ast.AST) -> Iterator[ast.AST]:
for _, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
yield item
elif isinstance(value, ast.AST):
yield value
class NodeVisitor(visitor.PartialVisitor[ast.AST]):
"""
Generic AST node visitor. This class does not work like L{ast.NodeVisitor},
it only visits statements directly within a C{B{body}}. Also, visitor methods can't return anything.
:See: L{visitor} for more informations.
"""
def generic_visit(self, node: ast.AST) -> None:
"""
Helper method to visit a node by calling C{visit()} on each child of the node.
This is useful because this vistitor only visits statements inside C{.body} attribute.
So if one wants to visit L{ast.Expr} children with their visitor, they should include::
def visit_Expr(self, node:ast.Expr):
self.generic_visit(node)
"""
for v in iter_values(node):
self.visit(v)
@classmethod
def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:
"""
Returns the nested nodes in the body of a node.
"""
body: Optional[Sequence[ast.AST]] = getattr(node, 'body', None)
if body is not None:
for child in body:
yield child
class NodeVisitorExt(visitor.VisitorExt[ast.AST]):
...
_AssingT = Union[ast.Assign, ast.AnnAssign]
def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:
"""
Utility function to iterate assignments targets.
Useful for all the following AST assignments:
>>> var:int=2
>>> self.var = target = node.astext()
>>> lol = ['extensions']
NOT Useful for the following AST assignments:
>>> x, y = [1,2]
Example:
>>> from pydoctor.astutils import iterassign
>>> from ast import parse
>>> node = parse('self.var = target = thing[0] = node.astext()').body[0]
>>> list(iterassign(node))
"""
for target in node.targets if isinstance(node, ast.Assign) else [node.target]:
dottedname = node2dottedname(target)
yield dottedname
def node2dottedname(node: Optional[ast.AST]) -> Optional[List[str]]:
"""
Resove expression composed by L{ast.Attribute} and L{ast.Name} nodes to a list of names.
"""
parts = []
while isinstance(node, ast.Attribute):
parts.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
parts.append(node.id)
else:
return None
parts.reverse()
return parts
def node2fullname(expr: Optional[ast.AST], ctx: 'model.Documentable') -> Optional[str]:
dottedname = node2dottedname(expr)
if dottedname is None:
return None
return ctx.expandName('.'.join(dottedname))
def bind_args(sig: Signature, call: ast.Call) -> BoundArguments:
"""
Binds the arguments of a function call to that function's signature.
@raise TypeError: If the arguments do not match the signature.
"""
kwargs = {
kw.arg: kw.value
for kw in call.keywords
# When keywords are passed using '**kwargs', the 'arg' field will
# be None. We don't currently support keywords passed that way.
if kw.arg is not None
}
return sig.bind(*call.args, **kwargs)
if sys.version_info[:2] >= (3, 8):
# Since Python 3.8 "foo" is parsed as ast.Constant.
def get_str_value(expr:ast.expr) -> Optional[str]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, str):
return expr.value
return None
def get_num_value(expr:ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, Number):
return expr.value
return None
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Constant) and expr.value == s
else:
# Before Python 3.8 "foo" was parsed as ast.Str.
def get_str_value(expr:ast.expr) -> Optional[str]:
if isinstance(expr, ast.Str):
return expr.s
return None
def get_num_value(expr:ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Num):
return expr.n
return None
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Str) and expr.s == s
def get_int_value(expr: ast.expr) -> Optional[int]:
num = get_num_value(expr)
if isinstance(num, int):
return num # type:ignore[unreachable]
return None
def is__name__equals__main__(cmp: ast.Compare) -> bool:
"""
Returns whether or not the given L{ast.Compare} is equal to C{__name__ == '__main__'}.
"""
return isinstance(cmp.left, ast.Name) \
and cmp.left.id == '__name__' \
and len(cmp.ops) == 1 \
and isinstance(cmp.ops[0], ast.Eq) \
and len(cmp.comparators) == 1 \
and _is_str_constant(cmp.comparators[0], '__main__')
def is_using_typing_final(expr: Optional[ast.AST],
ctx:'model.Documentable') -> bool:
return is_using_annotations(expr, ("typing.Final", "typing_extensions.Final"), ctx)
def is_using_typing_classvar(expr: Optional[ast.AST],
ctx:'model.Documentable') -> bool:
return is_using_annotations(expr, ('typing.ClassVar', "typing_extensions.ClassVar"), ctx)
def is_using_annotations(expr: Optional[ast.AST],
annotations:Sequence[str],
ctx:'model.Documentable') -> bool:
"""
Detect if this expr is firstly composed by one of the specified annotation(s)' full name.
"""
full_name = node2fullname(expr, ctx)
if full_name in annotations:
return True
if isinstance(expr, ast.Subscript):
# Final[...] or typing.Final[...] expressions
if isinstance(expr.value, (ast.Name, ast.Attribute)):
value = expr.value
full_name = node2fullname(value, ctx)
if full_name in annotations:
return True
return False
def is_none_literal(node: ast.expr) -> bool:
"""Does this AST node represent the literal constant None?"""
return isinstance(node, (ast.Constant, ast.NameConstant)) and node.value is None
def unstring_annotation(node: ast.expr, ctx:'model.Documentable', section:str='annotation') -> ast.expr:
"""Replace all strings in the given expression by parsed versions.
@return: The unstringed node. If parsing fails, an error is logged
and the original node is returned.
"""
try:
expr = _AnnotationStringParser().visit(node)
except SyntaxError as ex:
module = ctx.module
assert module is not None
module.report(f'syntax error in {section}: {ex}', lineno_offset=node.lineno, section=section)
return node
else:
assert isinstance(expr, ast.expr), expr
return expr
class _AnnotationStringParser(ast.NodeTransformer):
"""Implementation of L{unstring_annotation()}.
When given an expression, the node returned by L{ast.NodeVisitor.visit()}
will also be an expression.
If any string literal contained in the original expression is either
invalid Python or not a singular expression, L{SyntaxError} is raised.
"""
def _parse_string(self, value: str) -> ast.expr:
statements = ast.parse(value).body
if len(statements) != 1:
raise SyntaxError("expected expression, found multiple statements")
stmt, = statements
if isinstance(stmt, ast.Expr):
# Expression wrapped in an Expr statement.
expr = self.visit(stmt.value)
assert isinstance(expr, ast.expr), expr
return expr
else:
raise SyntaxError("expected expression, found statement")
def visit_Subscript(self, node: ast.Subscript) -> ast.Subscript:
value = self.visit(node.value)
if isinstance(value, ast.Name) and value.id == 'Literal':
# Literal[...] expression; don't unstring the arguments.
slice = node.slice
elif isinstance(value, ast.Attribute) and value.attr == 'Literal':
# typing.Literal[...] expression; don't unstring the arguments.
slice = node.slice
else:
# Other subscript; unstring the slice.
slice = self.visit(node.slice)
return ast.copy_location(ast.Subscript(value, slice, node.ctx), node)
# For Python >= 3.8:
def visit_Constant(self, node: ast.Constant) -> ast.expr:
value = node.value
if isinstance(value, str):
return ast.copy_location(self._parse_string(value), node)
else:
const = self.generic_visit(node)
assert isinstance(const, ast.Constant), const
return const
# For Python < 3.8:
def visit_Str(self, node: ast.Str) -> ast.expr:
return ast.copy_location(self._parse_string(node.s), node)
TYPING_ALIAS = (
"typing.Hashable",
"typing.Awaitable",
"typing.Coroutine",
"typing.AsyncIterable",
"typing.AsyncIterator",
"typing.Iterable",
"typing.Iterator",
"typing.Reversible",
"typing.Sized",
"typing.Container",
"typing.Collection",
"typing.Callable",
"typing.AbstractSet",
"typing.MutableSet",
"typing.Mapping",
"typing.MutableMapping",
"typing.Sequence",
"typing.MutableSequence",
"typing.ByteString",
"typing.Tuple",
"typing.List",
"typing.Deque",
"typing.Set",
"typing.FrozenSet",
"typing.MappingView",
"typing.KeysView",
"typing.ItemsView",
"typing.ValuesView",
"typing.ContextManager",
"typing.AsyncContextManager",
"typing.Dict",
"typing.DefaultDict",
"typing.OrderedDict",
"typing.Counter",
"typing.ChainMap",
"typing.Generator",
"typing.AsyncGenerator",
"typing.Type",
"typing.Pattern",
"typing.Match",
# Special forms
"typing.Union",
"typing.Literal",
"typing.Optional",
)
SUBSCRIPTABLE_CLASSES_PEP585 = (
"tuple",
"list",
"dict",
"set",
"frozenset",
"type",
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict",
"collections.Counter",
"collections.ChainMap",
"collections.abc.Awaitable",
"collections.abc.Coroutine",
"collections.abc.AsyncIterable",
"collections.abc.AsyncIterator",
"collections.abc.AsyncGenerator",
"collections.abc.Iterable",
"collections.abc.Iterator",
"collections.abc.Generator",
"collections.abc.Reversible",
"collections.abc.Container",
"collections.abc.Collection",
"collections.abc.Callable",
"collections.abc.Set",
"collections.abc.MutableSet",
"collections.abc.Mapping",
"collections.abc.MutableMapping",
"collections.abc.Sequence",
"collections.abc.MutableSequence",
"collections.abc.ByteString",
"collections.abc.MappingView",
"collections.abc.KeysView",
"collections.abc.ItemsView",
"collections.abc.ValuesView",
"contextlib.AbstractContextManager",
"contextlib.AbstractAsyncContextManager",
"re.Pattern",
"re.Match",
)
def is_typing_annotation(node: ast.AST, ctx: 'model.Documentable') -> bool:
"""
Whether this annotation node refers to a typing alias.
"""
return is_using_annotations(node, TYPING_ALIAS, ctx) or \
is_using_annotations(node, SUBSCRIPTABLE_CLASSES_PEP585, ctx)
_string_lineno_is_end = sys.version_info < (3,8) \
and platform.python_implementation() != 'PyPy'
"""True iff the 'lineno' attribute of an AST string node points to the last
line in the string, rather than the first line.
"""
def extract_docstring_linenum(node: ast.Str) -> int:
r"""
In older CPython versions, the AST only tells us the end line
number and we must approximate the start line number.
This approximation is correct if the docstring does not contain
explicit newlines ('\n') or joined lines ('\' at end of line).
Leading blank lines are stripped by cleandoc(), so we must
return the line number of the first non-blank line.
"""
doc = node.s
lineno = node.lineno
if _string_lineno_is_end:
# In older CPython versions, the AST only tells us the end line
# number and we must approximate the start line number.
# This approximation is correct if the docstring does not contain
# explicit newlines ('\n') or joined lines ('\' at end of line).
lineno -= doc.count('\n')
# Leading blank lines are stripped by cleandoc(), so we must
# return the line number of the first non-blank line.
for ch in doc:
if ch == '\n':
lineno += 1
elif not ch.isspace():
|
return lineno
def extract_docstring(node: ast.Str) -> Tuple[int, str]:
"""
Extract docstring information from an ast node that represents the docstring.
@returns:
- The line number of the first non-blank line of the docsring. See L{extract_docstring_linenum}.
- The docstring to be parsed, cleaned by L{inspect.cleandoc}.
"""
lineno = extract_docstring_linenum(node)
return lineno, inspect.cleandoc(node.s) | break | conditional_block |
astutils.py | """
Various bits of reusable code related to L{ast.AST} node processing.
"""
import inspect
import platform
import sys
from numbers import Number
from typing import Iterator, Optional, List, Iterable, Sequence, TYPE_CHECKING, Tuple, Union
from inspect import BoundArguments, Signature
import ast
from pydoctor import visitor
if TYPE_CHECKING:
from pydoctor import model
# AST visitors
def iter_values(node: ast.AST) -> Iterator[ast.AST]:
for _, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
yield item
elif isinstance(value, ast.AST):
yield value
class NodeVisitor(visitor.PartialVisitor[ast.AST]):
"""
Generic AST node visitor. This class does not work like L{ast.NodeVisitor},
it only visits statements directly within a C{B{body}}. Also, visitor methods can't return anything.
:See: L{visitor} for more informations.
"""
def generic_visit(self, node: ast.AST) -> None:
"""
Helper method to visit a node by calling C{visit()} on each child of the node.
This is useful because this vistitor only visits statements inside C{.body} attribute.
So if one wants to visit L{ast.Expr} children with their visitor, they should include::
def visit_Expr(self, node:ast.Expr):
self.generic_visit(node)
"""
for v in iter_values(node):
self.visit(v)
@classmethod
def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:
"""
Returns the nested nodes in the body of a node.
"""
body: Optional[Sequence[ast.AST]] = getattr(node, 'body', None)
if body is not None:
for child in body:
yield child
class NodeVisitorExt(visitor.VisitorExt[ast.AST]):
...
_AssingT = Union[ast.Assign, ast.AnnAssign]
def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:
"""
Utility function to iterate assignments targets.
Useful for all the following AST assignments:
>>> var:int=2
>>> self.var = target = node.astext()
>>> lol = ['extensions']
NOT Useful for the following AST assignments:
>>> x, y = [1,2]
Example:
>>> from pydoctor.astutils import iterassign
>>> from ast import parse
>>> node = parse('self.var = target = thing[0] = node.astext()').body[0]
>>> list(iterassign(node))
"""
for target in node.targets if isinstance(node, ast.Assign) else [node.target]:
dottedname = node2dottedname(target)
yield dottedname
def node2dottedname(node: Optional[ast.AST]) -> Optional[List[str]]:
"""
Resove expression composed by L{ast.Attribute} and L{ast.Name} nodes to a list of names.
"""
parts = []
while isinstance(node, ast.Attribute):
parts.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
parts.append(node.id)
else:
return None
parts.reverse()
return parts
def node2fullname(expr: Optional[ast.AST], ctx: 'model.Documentable') -> Optional[str]:
dottedname = node2dottedname(expr)
if dottedname is None:
return None
return ctx.expandName('.'.join(dottedname))
def | (sig: Signature, call: ast.Call) -> BoundArguments:
"""
Binds the arguments of a function call to that function's signature.
@raise TypeError: If the arguments do not match the signature.
"""
kwargs = {
kw.arg: kw.value
for kw in call.keywords
# When keywords are passed using '**kwargs', the 'arg' field will
# be None. We don't currently support keywords passed that way.
if kw.arg is not None
}
return sig.bind(*call.args, **kwargs)
if sys.version_info[:2] >= (3, 8):
# Since Python 3.8 "foo" is parsed as ast.Constant.
def get_str_value(expr:ast.expr) -> Optional[str]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, str):
return expr.value
return None
def get_num_value(expr:ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, Number):
return expr.value
return None
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Constant) and expr.value == s
else:
# Before Python 3.8 "foo" was parsed as ast.Str.
def get_str_value(expr:ast.expr) -> Optional[str]:
if isinstance(expr, ast.Str):
return expr.s
return None
def get_num_value(expr:ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Num):
return expr.n
return None
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Str) and expr.s == s
def get_int_value(expr: ast.expr) -> Optional[int]:
num = get_num_value(expr)
if isinstance(num, int):
return num # type:ignore[unreachable]
return None
def is__name__equals__main__(cmp: ast.Compare) -> bool:
"""
Returns whether or not the given L{ast.Compare} is equal to C{__name__ == '__main__'}.
"""
return isinstance(cmp.left, ast.Name) \
and cmp.left.id == '__name__' \
and len(cmp.ops) == 1 \
and isinstance(cmp.ops[0], ast.Eq) \
and len(cmp.comparators) == 1 \
and _is_str_constant(cmp.comparators[0], '__main__')
def is_using_typing_final(expr: Optional[ast.AST],
ctx:'model.Documentable') -> bool:
return is_using_annotations(expr, ("typing.Final", "typing_extensions.Final"), ctx)
def is_using_typing_classvar(expr: Optional[ast.AST],
ctx:'model.Documentable') -> bool:
return is_using_annotations(expr, ('typing.ClassVar', "typing_extensions.ClassVar"), ctx)
def is_using_annotations(expr: Optional[ast.AST],
annotations:Sequence[str],
ctx:'model.Documentable') -> bool:
"""
Detect if this expr is firstly composed by one of the specified annotation(s)' full name.
"""
full_name = node2fullname(expr, ctx)
if full_name in annotations:
return True
if isinstance(expr, ast.Subscript):
# Final[...] or typing.Final[...] expressions
if isinstance(expr.value, (ast.Name, ast.Attribute)):
value = expr.value
full_name = node2fullname(value, ctx)
if full_name in annotations:
return True
return False
def is_none_literal(node: ast.expr) -> bool:
"""Does this AST node represent the literal constant None?"""
return isinstance(node, (ast.Constant, ast.NameConstant)) and node.value is None
def unstring_annotation(node: ast.expr, ctx:'model.Documentable', section:str='annotation') -> ast.expr:
"""Replace all strings in the given expression by parsed versions.
@return: The unstringed node. If parsing fails, an error is logged
and the original node is returned.
"""
try:
expr = _AnnotationStringParser().visit(node)
except SyntaxError as ex:
module = ctx.module
assert module is not None
module.report(f'syntax error in {section}: {ex}', lineno_offset=node.lineno, section=section)
return node
else:
assert isinstance(expr, ast.expr), expr
return expr
class _AnnotationStringParser(ast.NodeTransformer):
"""Implementation of L{unstring_annotation()}.
When given an expression, the node returned by L{ast.NodeVisitor.visit()}
will also be an expression.
If any string literal contained in the original expression is either
invalid Python or not a singular expression, L{SyntaxError} is raised.
"""
def _parse_string(self, value: str) -> ast.expr:
statements = ast.parse(value).body
if len(statements) != 1:
raise SyntaxError("expected expression, found multiple statements")
stmt, = statements
if isinstance(stmt, ast.Expr):
# Expression wrapped in an Expr statement.
expr = self.visit(stmt.value)
assert isinstance(expr, ast.expr), expr
return expr
else:
raise SyntaxError("expected expression, found statement")
def visit_Subscript(self, node: ast.Subscript) -> ast.Subscript:
value = self.visit(node.value)
if isinstance(value, ast.Name) and value.id == 'Literal':
# Literal[...] expression; don't unstring the arguments.
slice = node.slice
elif isinstance(value, ast.Attribute) and value.attr == 'Literal':
# typing.Literal[...] expression; don't unstring the arguments.
slice = node.slice
else:
# Other subscript; unstring the slice.
slice = self.visit(node.slice)
return ast.copy_location(ast.Subscript(value, slice, node.ctx), node)
# For Python >= 3.8:
def visit_Constant(self, node: ast.Constant) -> ast.expr:
value = node.value
if isinstance(value, str):
return ast.copy_location(self._parse_string(value), node)
else:
const = self.generic_visit(node)
assert isinstance(const, ast.Constant), const
return const
# For Python < 3.8:
def visit_Str(self, node: ast.Str) -> ast.expr:
return ast.copy_location(self._parse_string(node.s), node)
TYPING_ALIAS = (
"typing.Hashable",
"typing.Awaitable",
"typing.Coroutine",
"typing.AsyncIterable",
"typing.AsyncIterator",
"typing.Iterable",
"typing.Iterator",
"typing.Reversible",
"typing.Sized",
"typing.Container",
"typing.Collection",
"typing.Callable",
"typing.AbstractSet",
"typing.MutableSet",
"typing.Mapping",
"typing.MutableMapping",
"typing.Sequence",
"typing.MutableSequence",
"typing.ByteString",
"typing.Tuple",
"typing.List",
"typing.Deque",
"typing.Set",
"typing.FrozenSet",
"typing.MappingView",
"typing.KeysView",
"typing.ItemsView",
"typing.ValuesView",
"typing.ContextManager",
"typing.AsyncContextManager",
"typing.Dict",
"typing.DefaultDict",
"typing.OrderedDict",
"typing.Counter",
"typing.ChainMap",
"typing.Generator",
"typing.AsyncGenerator",
"typing.Type",
"typing.Pattern",
"typing.Match",
# Special forms
"typing.Union",
"typing.Literal",
"typing.Optional",
)
SUBSCRIPTABLE_CLASSES_PEP585 = (
"tuple",
"list",
"dict",
"set",
"frozenset",
"type",
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict",
"collections.Counter",
"collections.ChainMap",
"collections.abc.Awaitable",
"collections.abc.Coroutine",
"collections.abc.AsyncIterable",
"collections.abc.AsyncIterator",
"collections.abc.AsyncGenerator",
"collections.abc.Iterable",
"collections.abc.Iterator",
"collections.abc.Generator",
"collections.abc.Reversible",
"collections.abc.Container",
"collections.abc.Collection",
"collections.abc.Callable",
"collections.abc.Set",
"collections.abc.MutableSet",
"collections.abc.Mapping",
"collections.abc.MutableMapping",
"collections.abc.Sequence",
"collections.abc.MutableSequence",
"collections.abc.ByteString",
"collections.abc.MappingView",
"collections.abc.KeysView",
"collections.abc.ItemsView",
"collections.abc.ValuesView",
"contextlib.AbstractContextManager",
"contextlib.AbstractAsyncContextManager",
"re.Pattern",
"re.Match",
)
def is_typing_annotation(node: ast.AST, ctx: 'model.Documentable') -> bool:
"""
Whether this annotation node refers to a typing alias.
"""
return is_using_annotations(node, TYPING_ALIAS, ctx) or \
is_using_annotations(node, SUBSCRIPTABLE_CLASSES_PEP585, ctx)
_string_lineno_is_end = sys.version_info < (3,8) \
and platform.python_implementation() != 'PyPy'
"""True iff the 'lineno' attribute of an AST string node points to the last
line in the string, rather than the first line.
"""
def extract_docstring_linenum(node: ast.Str) -> int:
r"""
In older CPython versions, the AST only tells us the end line
number and we must approximate the start line number.
This approximation is correct if the docstring does not contain
explicit newlines ('\n') or joined lines ('\' at end of line).
Leading blank lines are stripped by cleandoc(), so we must
return the line number of the first non-blank line.
"""
doc = node.s
lineno = node.lineno
if _string_lineno_is_end:
# In older CPython versions, the AST only tells us the end line
# number and we must approximate the start line number.
# This approximation is correct if the docstring does not contain
# explicit newlines ('\n') or joined lines ('\' at end of line).
lineno -= doc.count('\n')
# Leading blank lines are stripped by cleandoc(), so we must
# return the line number of the first non-blank line.
for ch in doc:
if ch == '\n':
lineno += 1
elif not ch.isspace():
break
return lineno
def extract_docstring(node: ast.Str) -> Tuple[int, str]:
"""
Extract docstring information from an ast node that represents the docstring.
@returns:
- The line number of the first non-blank line of the docsring. See L{extract_docstring_linenum}.
- The docstring to be parsed, cleaned by L{inspect.cleandoc}.
"""
lineno = extract_docstring_linenum(node)
return lineno, inspect.cleandoc(node.s) | bind_args | identifier_name |
astutils.py | """
Various bits of reusable code related to L{ast.AST} node processing.
"""
import inspect
import platform
import sys
from numbers import Number
from typing import Iterator, Optional, List, Iterable, Sequence, TYPE_CHECKING, Tuple, Union
from inspect import BoundArguments, Signature
import ast
from pydoctor import visitor
if TYPE_CHECKING:
from pydoctor import model
# AST visitors
def iter_values(node: ast.AST) -> Iterator[ast.AST]:
for _, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
yield item
elif isinstance(value, ast.AST):
yield value
class NodeVisitor(visitor.PartialVisitor[ast.AST]):
"""
Generic AST node visitor. This class does not work like L{ast.NodeVisitor},
it only visits statements directly within a C{B{body}}. Also, visitor methods can't return anything.
:See: L{visitor} for more informations.
"""
def generic_visit(self, node: ast.AST) -> None:
"""
Helper method to visit a node by calling C{visit()} on each child of the node.
This is useful because this vistitor only visits statements inside C{.body} attribute.
So if one wants to visit L{ast.Expr} children with their visitor, they should include::
def visit_Expr(self, node:ast.Expr):
self.generic_visit(node)
"""
for v in iter_values(node):
self.visit(v)
@classmethod
def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:
"""
Returns the nested nodes in the body of a node.
"""
body: Optional[Sequence[ast.AST]] = getattr(node, 'body', None)
if body is not None:
for child in body:
yield child
class NodeVisitorExt(visitor.VisitorExt[ast.AST]):
...
_AssingT = Union[ast.Assign, ast.AnnAssign]
def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:
"""
Utility function to iterate assignments targets.
Useful for all the following AST assignments:
>>> var:int=2
>>> self.var = target = node.astext()
>>> lol = ['extensions']
NOT Useful for the following AST assignments:
>>> x, y = [1,2]
Example:
>>> from pydoctor.astutils import iterassign
>>> from ast import parse
>>> node = parse('self.var = target = thing[0] = node.astext()').body[0]
>>> list(iterassign(node))
"""
for target in node.targets if isinstance(node, ast.Assign) else [node.target]:
dottedname = node2dottedname(target)
yield dottedname
def node2dottedname(node: Optional[ast.AST]) -> Optional[List[str]]:
"""
Resove expression composed by L{ast.Attribute} and L{ast.Name} nodes to a list of names.
"""
parts = []
while isinstance(node, ast.Attribute):
parts.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
parts.append(node.id)
else:
return None
parts.reverse()
return parts
def node2fullname(expr: Optional[ast.AST], ctx: 'model.Documentable') -> Optional[str]:
dottedname = node2dottedname(expr)
if dottedname is None:
return None
return ctx.expandName('.'.join(dottedname))
def bind_args(sig: Signature, call: ast.Call) -> BoundArguments:
"""
Binds the arguments of a function call to that function's signature.
@raise TypeError: If the arguments do not match the signature.
"""
kwargs = {
kw.arg: kw.value
for kw in call.keywords
# When keywords are passed using '**kwargs', the 'arg' field will
# be None. We don't currently support keywords passed that way.
if kw.arg is not None
}
return sig.bind(*call.args, **kwargs)
if sys.version_info[:2] >= (3, 8):
# Since Python 3.8 "foo" is parsed as ast.Constant.
def get_str_value(expr:ast.expr) -> Optional[str]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, str):
return expr.value
return None
def get_num_value(expr:ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, Number):
return expr.value
return None
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Constant) and expr.value == s
else:
# Before Python 3.8 "foo" was parsed as ast.Str.
def get_str_value(expr:ast.expr) -> Optional[str]:
if isinstance(expr, ast.Str):
return expr.s
return None
def get_num_value(expr:ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Num):
return expr.n
return None
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Str) and expr.s == s
def get_int_value(expr: ast.expr) -> Optional[int]:
num = get_num_value(expr)
if isinstance(num, int):
return num # type:ignore[unreachable]
return None
def is__name__equals__main__(cmp: ast.Compare) -> bool:
"""
Returns whether or not the given L{ast.Compare} is equal to C{__name__ == '__main__'}.
"""
return isinstance(cmp.left, ast.Name) \
and cmp.left.id == '__name__' \
and len(cmp.ops) == 1 \
and isinstance(cmp.ops[0], ast.Eq) \
and len(cmp.comparators) == 1 \
and _is_str_constant(cmp.comparators[0], '__main__')
def is_using_typing_final(expr: Optional[ast.AST],
ctx:'model.Documentable') -> bool:
return is_using_annotations(expr, ("typing.Final", "typing_extensions.Final"), ctx)
def is_using_typing_classvar(expr: Optional[ast.AST],
ctx:'model.Documentable') -> bool:
return is_using_annotations(expr, ('typing.ClassVar', "typing_extensions.ClassVar"), ctx)
def is_using_annotations(expr: Optional[ast.AST],
annotations:Sequence[str],
ctx:'model.Documentable') -> bool:
"""
Detect if this expr is firstly composed by one of the specified annotation(s)' full name.
"""
full_name = node2fullname(expr, ctx)
if full_name in annotations:
return True
if isinstance(expr, ast.Subscript):
# Final[...] or typing.Final[...] expressions
if isinstance(expr.value, (ast.Name, ast.Attribute)):
value = expr.value
full_name = node2fullname(value, ctx)
if full_name in annotations:
return True
return False
def is_none_literal(node: ast.expr) -> bool:
"""Does this AST node represent the literal constant None?"""
return isinstance(node, (ast.Constant, ast.NameConstant)) and node.value is None
def unstring_annotation(node: ast.expr, ctx:'model.Documentable', section:str='annotation') -> ast.expr:
"""Replace all strings in the given expression by parsed versions.
@return: The unstringed node. If parsing fails, an error is logged
and the original node is returned. | module = ctx.module
assert module is not None
module.report(f'syntax error in {section}: {ex}', lineno_offset=node.lineno, section=section)
return node
else:
assert isinstance(expr, ast.expr), expr
return expr
class _AnnotationStringParser(ast.NodeTransformer):
"""Implementation of L{unstring_annotation()}.
When given an expression, the node returned by L{ast.NodeVisitor.visit()}
will also be an expression.
If any string literal contained in the original expression is either
invalid Python or not a singular expression, L{SyntaxError} is raised.
"""
def _parse_string(self, value: str) -> ast.expr:
statements = ast.parse(value).body
if len(statements) != 1:
raise SyntaxError("expected expression, found multiple statements")
stmt, = statements
if isinstance(stmt, ast.Expr):
# Expression wrapped in an Expr statement.
expr = self.visit(stmt.value)
assert isinstance(expr, ast.expr), expr
return expr
else:
raise SyntaxError("expected expression, found statement")
def visit_Subscript(self, node: ast.Subscript) -> ast.Subscript:
value = self.visit(node.value)
if isinstance(value, ast.Name) and value.id == 'Literal':
# Literal[...] expression; don't unstring the arguments.
slice = node.slice
elif isinstance(value, ast.Attribute) and value.attr == 'Literal':
# typing.Literal[...] expression; don't unstring the arguments.
slice = node.slice
else:
# Other subscript; unstring the slice.
slice = self.visit(node.slice)
return ast.copy_location(ast.Subscript(value, slice, node.ctx), node)
# For Python >= 3.8:
def visit_Constant(self, node: ast.Constant) -> ast.expr:
value = node.value
if isinstance(value, str):
return ast.copy_location(self._parse_string(value), node)
else:
const = self.generic_visit(node)
assert isinstance(const, ast.Constant), const
return const
# For Python < 3.8:
def visit_Str(self, node: ast.Str) -> ast.expr:
return ast.copy_location(self._parse_string(node.s), node)
TYPING_ALIAS = (
"typing.Hashable",
"typing.Awaitable",
"typing.Coroutine",
"typing.AsyncIterable",
"typing.AsyncIterator",
"typing.Iterable",
"typing.Iterator",
"typing.Reversible",
"typing.Sized",
"typing.Container",
"typing.Collection",
"typing.Callable",
"typing.AbstractSet",
"typing.MutableSet",
"typing.Mapping",
"typing.MutableMapping",
"typing.Sequence",
"typing.MutableSequence",
"typing.ByteString",
"typing.Tuple",
"typing.List",
"typing.Deque",
"typing.Set",
"typing.FrozenSet",
"typing.MappingView",
"typing.KeysView",
"typing.ItemsView",
"typing.ValuesView",
"typing.ContextManager",
"typing.AsyncContextManager",
"typing.Dict",
"typing.DefaultDict",
"typing.OrderedDict",
"typing.Counter",
"typing.ChainMap",
"typing.Generator",
"typing.AsyncGenerator",
"typing.Type",
"typing.Pattern",
"typing.Match",
# Special forms
"typing.Union",
"typing.Literal",
"typing.Optional",
)
SUBSCRIPTABLE_CLASSES_PEP585 = (
"tuple",
"list",
"dict",
"set",
"frozenset",
"type",
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict",
"collections.Counter",
"collections.ChainMap",
"collections.abc.Awaitable",
"collections.abc.Coroutine",
"collections.abc.AsyncIterable",
"collections.abc.AsyncIterator",
"collections.abc.AsyncGenerator",
"collections.abc.Iterable",
"collections.abc.Iterator",
"collections.abc.Generator",
"collections.abc.Reversible",
"collections.abc.Container",
"collections.abc.Collection",
"collections.abc.Callable",
"collections.abc.Set",
"collections.abc.MutableSet",
"collections.abc.Mapping",
"collections.abc.MutableMapping",
"collections.abc.Sequence",
"collections.abc.MutableSequence",
"collections.abc.ByteString",
"collections.abc.MappingView",
"collections.abc.KeysView",
"collections.abc.ItemsView",
"collections.abc.ValuesView",
"contextlib.AbstractContextManager",
"contextlib.AbstractAsyncContextManager",
"re.Pattern",
"re.Match",
)
def is_typing_annotation(node: ast.AST, ctx: 'model.Documentable') -> bool:
"""
Whether this annotation node refers to a typing alias.
"""
return is_using_annotations(node, TYPING_ALIAS, ctx) or \
is_using_annotations(node, SUBSCRIPTABLE_CLASSES_PEP585, ctx)
_string_lineno_is_end = sys.version_info < (3,8) \
and platform.python_implementation() != 'PyPy'
"""True iff the 'lineno' attribute of an AST string node points to the last
line in the string, rather than the first line.
"""
def extract_docstring_linenum(node: ast.Str) -> int:
r"""
In older CPython versions, the AST only tells us the end line
number and we must approximate the start line number.
This approximation is correct if the docstring does not contain
explicit newlines ('\n') or joined lines ('\' at end of line).
Leading blank lines are stripped by cleandoc(), so we must
return the line number of the first non-blank line.
"""
doc = node.s
lineno = node.lineno
if _string_lineno_is_end:
# In older CPython versions, the AST only tells us the end line
# number and we must approximate the start line number.
# This approximation is correct if the docstring does not contain
# explicit newlines ('\n') or joined lines ('\' at end of line).
lineno -= doc.count('\n')
# Leading blank lines are stripped by cleandoc(), so we must
# return the line number of the first non-blank line.
for ch in doc:
if ch == '\n':
lineno += 1
elif not ch.isspace():
break
return lineno
def extract_docstring(node: ast.Str) -> Tuple[int, str]:
"""
Extract docstring information from an ast node that represents the docstring.
@returns:
- The line number of the first non-blank line of the docsring. See L{extract_docstring_linenum}.
- The docstring to be parsed, cleaned by L{inspect.cleandoc}.
"""
lineno = extract_docstring_linenum(node)
return lineno, inspect.cleandoc(node.s) | """
try:
expr = _AnnotationStringParser().visit(node)
except SyntaxError as ex: | random_line_split |
astutils.py | """
Various bits of reusable code related to L{ast.AST} node processing.
"""
import inspect
import platform
import sys
from numbers import Number
from typing import Iterator, Optional, List, Iterable, Sequence, TYPE_CHECKING, Tuple, Union
from inspect import BoundArguments, Signature
import ast
from pydoctor import visitor
if TYPE_CHECKING:
from pydoctor import model
# AST visitors
def iter_values(node: ast.AST) -> Iterator[ast.AST]:
for _, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
yield item
elif isinstance(value, ast.AST):
yield value
class NodeVisitor(visitor.PartialVisitor[ast.AST]):
"""
Generic AST node visitor. This class does not work like L{ast.NodeVisitor},
it only visits statements directly within a C{B{body}}. Also, visitor methods can't return anything.
:See: L{visitor} for more informations.
"""
def generic_visit(self, node: ast.AST) -> None:
"""
Helper method to visit a node by calling C{visit()} on each child of the node.
This is useful because this vistitor only visits statements inside C{.body} attribute.
So if one wants to visit L{ast.Expr} children with their visitor, they should include::
def visit_Expr(self, node:ast.Expr):
self.generic_visit(node)
"""
for v in iter_values(node):
self.visit(v)
@classmethod
def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:
"""
Returns the nested nodes in the body of a node.
"""
body: Optional[Sequence[ast.AST]] = getattr(node, 'body', None)
if body is not None:
for child in body:
yield child
class NodeVisitorExt(visitor.VisitorExt[ast.AST]):
...
_AssingT = Union[ast.Assign, ast.AnnAssign]
def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:
"""
Utility function to iterate assignments targets.
Useful for all the following AST assignments:
>>> var:int=2
>>> self.var = target = node.astext()
>>> lol = ['extensions']
NOT Useful for the following AST assignments:
>>> x, y = [1,2]
Example:
>>> from pydoctor.astutils import iterassign
>>> from ast import parse
>>> node = parse('self.var = target = thing[0] = node.astext()').body[0]
>>> list(iterassign(node))
"""
for target in node.targets if isinstance(node, ast.Assign) else [node.target]:
dottedname = node2dottedname(target)
yield dottedname
def node2dottedname(node: Optional[ast.AST]) -> Optional[List[str]]:
"""
Resove expression composed by L{ast.Attribute} and L{ast.Name} nodes to a list of names.
"""
parts = []
while isinstance(node, ast.Attribute):
parts.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
parts.append(node.id)
else:
return None
parts.reverse()
return parts
def node2fullname(expr: Optional[ast.AST], ctx: 'model.Documentable') -> Optional[str]:
dottedname = node2dottedname(expr)
if dottedname is None:
return None
return ctx.expandName('.'.join(dottedname))
def bind_args(sig: Signature, call: ast.Call) -> BoundArguments:
"""
Binds the arguments of a function call to that function's signature.
@raise TypeError: If the arguments do not match the signature.
"""
kwargs = {
kw.arg: kw.value
for kw in call.keywords
# When keywords are passed using '**kwargs', the 'arg' field will
# be None. We don't currently support keywords passed that way.
if kw.arg is not None
}
return sig.bind(*call.args, **kwargs)
if sys.version_info[:2] >= (3, 8):
# Since Python 3.8 "foo" is parsed as ast.Constant.
def get_str_value(expr:ast.expr) -> Optional[str]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, str):
return expr.value
return None
def get_num_value(expr:ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, Number):
return expr.value
return None
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Constant) and expr.value == s
else:
# Before Python 3.8 "foo" was parsed as ast.Str.
def get_str_value(expr:ast.expr) -> Optional[str]:
if isinstance(expr, ast.Str):
return expr.s
return None
def get_num_value(expr:ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Num):
return expr.n
return None
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Str) and expr.s == s
def get_int_value(expr: ast.expr) -> Optional[int]:
num = get_num_value(expr)
if isinstance(num, int):
return num # type:ignore[unreachable]
return None
def is__name__equals__main__(cmp: ast.Compare) -> bool:
"""
Returns whether or not the given L{ast.Compare} is equal to C{__name__ == '__main__'}.
"""
return isinstance(cmp.left, ast.Name) \
and cmp.left.id == '__name__' \
and len(cmp.ops) == 1 \
and isinstance(cmp.ops[0], ast.Eq) \
and len(cmp.comparators) == 1 \
and _is_str_constant(cmp.comparators[0], '__main__')
def is_using_typing_final(expr: Optional[ast.AST],
ctx:'model.Documentable') -> bool:
return is_using_annotations(expr, ("typing.Final", "typing_extensions.Final"), ctx)
def is_using_typing_classvar(expr: Optional[ast.AST],
ctx:'model.Documentable') -> bool:
return is_using_annotations(expr, ('typing.ClassVar', "typing_extensions.ClassVar"), ctx)
def is_using_annotations(expr: Optional[ast.AST],
annotations:Sequence[str],
ctx:'model.Documentable') -> bool:
"""
Detect if this expr is firstly composed by one of the specified annotation(s)' full name.
"""
full_name = node2fullname(expr, ctx)
if full_name in annotations:
return True
if isinstance(expr, ast.Subscript):
# Final[...] or typing.Final[...] expressions
if isinstance(expr.value, (ast.Name, ast.Attribute)):
value = expr.value
full_name = node2fullname(value, ctx)
if full_name in annotations:
return True
return False
def is_none_literal(node: ast.expr) -> bool:
"""Does this AST node represent the literal constant None?"""
return isinstance(node, (ast.Constant, ast.NameConstant)) and node.value is None
def unstring_annotation(node: ast.expr, ctx:'model.Documentable', section:str='annotation') -> ast.expr:
"""Replace all strings in the given expression by parsed versions.
@return: The unstringed node. If parsing fails, an error is logged
and the original node is returned.
"""
try:
expr = _AnnotationStringParser().visit(node)
except SyntaxError as ex:
module = ctx.module
assert module is not None
module.report(f'syntax error in {section}: {ex}', lineno_offset=node.lineno, section=section)
return node
else:
assert isinstance(expr, ast.expr), expr
return expr
class _AnnotationStringParser(ast.NodeTransformer):
"""Implementation of L{unstring_annotation()}.
When given an expression, the node returned by L{ast.NodeVisitor.visit()}
will also be an expression.
If any string literal contained in the original expression is either
invalid Python or not a singular expression, L{SyntaxError} is raised.
"""
def _parse_string(self, value: str) -> ast.expr:
statements = ast.parse(value).body
if len(statements) != 1:
raise SyntaxError("expected expression, found multiple statements")
stmt, = statements
if isinstance(stmt, ast.Expr):
# Expression wrapped in an Expr statement.
expr = self.visit(stmt.value)
assert isinstance(expr, ast.expr), expr
return expr
else:
raise SyntaxError("expected expression, found statement")
def visit_Subscript(self, node: ast.Subscript) -> ast.Subscript:
value = self.visit(node.value)
if isinstance(value, ast.Name) and value.id == 'Literal':
# Literal[...] expression; don't unstring the arguments.
slice = node.slice
elif isinstance(value, ast.Attribute) and value.attr == 'Literal':
# typing.Literal[...] expression; don't unstring the arguments.
slice = node.slice
else:
# Other subscript; unstring the slice.
slice = self.visit(node.slice)
return ast.copy_location(ast.Subscript(value, slice, node.ctx), node)
# For Python >= 3.8:
def visit_Constant(self, node: ast.Constant) -> ast.expr:
|
# For Python < 3.8:
def visit_Str(self, node: ast.Str) -> ast.expr:
return ast.copy_location(self._parse_string(node.s), node)
TYPING_ALIAS = (
"typing.Hashable",
"typing.Awaitable",
"typing.Coroutine",
"typing.AsyncIterable",
"typing.AsyncIterator",
"typing.Iterable",
"typing.Iterator",
"typing.Reversible",
"typing.Sized",
"typing.Container",
"typing.Collection",
"typing.Callable",
"typing.AbstractSet",
"typing.MutableSet",
"typing.Mapping",
"typing.MutableMapping",
"typing.Sequence",
"typing.MutableSequence",
"typing.ByteString",
"typing.Tuple",
"typing.List",
"typing.Deque",
"typing.Set",
"typing.FrozenSet",
"typing.MappingView",
"typing.KeysView",
"typing.ItemsView",
"typing.ValuesView",
"typing.ContextManager",
"typing.AsyncContextManager",
"typing.Dict",
"typing.DefaultDict",
"typing.OrderedDict",
"typing.Counter",
"typing.ChainMap",
"typing.Generator",
"typing.AsyncGenerator",
"typing.Type",
"typing.Pattern",
"typing.Match",
# Special forms
"typing.Union",
"typing.Literal",
"typing.Optional",
)
SUBSCRIPTABLE_CLASSES_PEP585 = (
"tuple",
"list",
"dict",
"set",
"frozenset",
"type",
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict",
"collections.Counter",
"collections.ChainMap",
"collections.abc.Awaitable",
"collections.abc.Coroutine",
"collections.abc.AsyncIterable",
"collections.abc.AsyncIterator",
"collections.abc.AsyncGenerator",
"collections.abc.Iterable",
"collections.abc.Iterator",
"collections.abc.Generator",
"collections.abc.Reversible",
"collections.abc.Container",
"collections.abc.Collection",
"collections.abc.Callable",
"collections.abc.Set",
"collections.abc.MutableSet",
"collections.abc.Mapping",
"collections.abc.MutableMapping",
"collections.abc.Sequence",
"collections.abc.MutableSequence",
"collections.abc.ByteString",
"collections.abc.MappingView",
"collections.abc.KeysView",
"collections.abc.ItemsView",
"collections.abc.ValuesView",
"contextlib.AbstractContextManager",
"contextlib.AbstractAsyncContextManager",
"re.Pattern",
"re.Match",
)
def is_typing_annotation(node: ast.AST, ctx: 'model.Documentable') -> bool:
"""
Whether this annotation node refers to a typing alias.
"""
return is_using_annotations(node, TYPING_ALIAS, ctx) or \
is_using_annotations(node, SUBSCRIPTABLE_CLASSES_PEP585, ctx)
_string_lineno_is_end = sys.version_info < (3,8) \
and platform.python_implementation() != 'PyPy'
"""True iff the 'lineno' attribute of an AST string node points to the last
line in the string, rather than the first line.
"""
def extract_docstring_linenum(node: ast.Str) -> int:
r"""
In older CPython versions, the AST only tells us the end line
number and we must approximate the start line number.
This approximation is correct if the docstring does not contain
explicit newlines ('\n') or joined lines ('\' at end of line).
Leading blank lines are stripped by cleandoc(), so we must
return the line number of the first non-blank line.
"""
doc = node.s
lineno = node.lineno
if _string_lineno_is_end:
# In older CPython versions, the AST only tells us the end line
# number and we must approximate the start line number.
# This approximation is correct if the docstring does not contain
# explicit newlines ('\n') or joined lines ('\' at end of line).
lineno -= doc.count('\n')
# Leading blank lines are stripped by cleandoc(), so we must
# return the line number of the first non-blank line.
for ch in doc:
if ch == '\n':
lineno += 1
elif not ch.isspace():
break
return lineno
def extract_docstring(node: ast.Str) -> Tuple[int, str]:
"""
Extract docstring information from an ast node that represents the docstring.
@returns:
- The line number of the first non-blank line of the docsring. See L{extract_docstring_linenum}.
- The docstring to be parsed, cleaned by L{inspect.cleandoc}.
"""
lineno = extract_docstring_linenum(node)
return lineno, inspect.cleandoc(node.s) | value = node.value
if isinstance(value, str):
return ast.copy_location(self._parse_string(value), node)
else:
const = self.generic_visit(node)
assert isinstance(const, ast.Constant), const
return const | identifier_body |
lid_shutdown.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::error::PowerManagerError;
use crate::message::{Message, MessageReturn};
use crate::node::Node;
use crate::shutdown_request::ShutdownRequest;
use crate::utils::connect_to_driver;
use anyhow::{format_err, Error};
use async_trait::async_trait;
use fidl_fuchsia_hardware_input::{DeviceMarker as LidMarker, DeviceProxy as LidProxy};
use fuchsia_inspect::{self as inspect, NumericProperty, Property};
use fuchsia_inspect_contrib::{inspect_log, nodes::BoundedListNode};
use fuchsia_vfs_watcher as vfs;
use fuchsia_zircon::{self as zx, AsHandleRef};
use futures::{
future::{FutureExt, LocalBoxFuture},
stream::FuturesUnordered,
TryStreamExt,
};
use io_util::{open_directory_in_namespace, OPEN_RIGHT_READABLE};
use log::*;
use serde_derive::Deserialize;
use serde_json as json;
use std::{
cell::RefCell,
collections::HashMap,
path::{Path, PathBuf},
rc::Rc,
};
/// Node: LidShutdown
///
/// Summary: Responds to lid closed events from devices with a lid sensor by waiting for a report
/// using the input FIDL protocol.
///
/// Handles Messages: N/A
///
/// Sends Messages:
/// - SystemShutdown
///
/// FIDL dependencies:
/// - fuchsia.hardware.input: the node uses this protocol to wait on reports from the
/// lid device
/// The lid sensor is not a real HID device however this descriptor is defined in the lid driver
/// and will be used to send lid HID reports for any ACPI lid sensor.
const HID_LID_DESCRIPTOR: [u8; 9] = [
0x05, 0x01, // Usage Page (Generic Desktop)
0x09, 0x80, // Usage (System Control)
0xA1, 0x01, // Collection (Application)
0x0A, 0xFF, 0x01, // Usage (0x01FF, unique to lid reports)
];
// Lid closed HID report
const LID_CLOSED: u8 = 0x0;
static INPUT_DEVICES_DIRECTORY: &str = "/dev/class/input";
pub struct LidShutdownBuilder<'a> {
proxy: Option<LidProxy>,
lid_report_event: Option<zx::Event>,
system_shutdown_node: Rc<dyn Node>,
inspect_root: Option<&'a inspect::Node>,
}
impl<'a> LidShutdownBuilder<'a> {
pub fn new(system_shutdown_node: Rc<dyn Node>) -> Self {
LidShutdownBuilder {
proxy: None,
lid_report_event: None,
system_shutdown_node,
inspect_root: None,
}
}
#[cfg(test)]
pub fn new_with_event_and_proxy(
proxy: LidProxy,
lid_report_event: zx::Event,
system_shutdown_node: Rc<dyn Node>,
) -> Self {
Self {
proxy: Some(proxy),
lid_report_event: Some(lid_report_event),
system_shutdown_node,
inspect_root: None,
}
}
pub fn new_from_json(json_data: json::Value, nodes: &HashMap<String, Rc<dyn Node>>) -> Self {
#[derive(Deserialize)]
struct Dependencies {
system_shutdown_node: String,
}
#[derive(Deserialize)]
struct JsonData {
dependencies: Dependencies,
}
let data: JsonData = json::from_value(json_data).unwrap();
Self::new(nodes[&data.dependencies.system_shutdown_node].clone())
}
#[cfg(test)]
pub fn with_inspect_root(mut self, root: &'a inspect::Node) -> Self {
self.inspect_root = Some(root);
self
}
pub async fn build<'b>(
self,
futures_out: &FuturesUnordered<LocalBoxFuture<'b, ()>>,
) -> Result<Rc<LidShutdown>, Error> {
// In tests use the default proxy.
let proxy = match self.proxy {
Some(proxy) => proxy,
None => Self::find_lid_sensor().await?,
};
// In tests use the default event.
let report_event = match self.lid_report_event {
Some(report_event) => report_event,
None => match proxy.get_reports_event().await {
Ok((_, report_event)) => report_event,
Err(_e) => return Err(format_err!("Could not get report event.")),
},
};
// In tests use the default inspect root node
let inspect_root = self.inspect_root.unwrap_or(inspect::component::inspector().root());
let node = Rc::new(LidShutdown {
proxy,
report_event,
system_shutdown_node: self.system_shutdown_node,
inspect: InspectData::new(inspect_root, "LidShutdown".to_string()),
});
futures_out.push(node.clone().watch_lid());
Ok(node)
}
/// Checks all the input devices until the lid sensor is found.
async fn find_lid_sensor() -> Result<LidProxy, Error> {
let dir_proxy = open_directory_in_namespace(INPUT_DEVICES_DIRECTORY, OPEN_RIGHT_READABLE)?;
let mut watcher = vfs::Watcher::new(dir_proxy).await?;
while let Some(msg) = watcher.try_next().await? {
match msg.event {
vfs::WatchEvent::EXISTING | vfs::WatchEvent::ADD_FILE => {
match Self::open_sensor(&msg.filename).await {
Ok(device) => return Ok(device),
_ => (),
}
}
_ => (),
}
}
Err(format_err!("No lid device found"))
}
/// Opens the sensor's device file. Returns the device if the correct HID
/// report descriptor is found.
async fn open_sensor(filename: &PathBuf) -> Result<LidProxy, Error> {
let path = Path::new(INPUT_DEVICES_DIRECTORY).join(filename);
let device = connect_to_driver::<LidMarker>(&String::from(
path.to_str().ok_or(format_err!("Could not read path {:?}", path))?,
))
.await?;
if let Ok(device_descriptor) = device.get_report_desc().await {
if device_descriptor.len() < HID_LID_DESCRIPTOR.len() {
return Err(format_err!("Short HID header"));
}
let device_header = &device_descriptor[0..HID_LID_DESCRIPTOR.len()];
if device_header == HID_LID_DESCRIPTOR {
return Ok(device);
} else {
return Err(format_err!("Device is not lid sensor"));
}
}
Err(format_err!("Could not get device HID report descriptor"))
}
}
pub struct LidShutdown {
proxy: LidProxy,
/// Event that will signal |USER_0| when a report is in the lid device's report FIFO.
report_event: zx::Event,
/// Node to provide the system shutdown functionality via the SystemShutdown message.
system_shutdown_node: Rc<dyn Node>,
/// A struct for managing Component Inspection data
inspect: InspectData,
}
impl LidShutdown {
fn watch_lid<'a>(self: Rc<Self>) -> LocalBoxFuture<'a, ()> {
async move {
loop {
self.watch_lid_inner().await;
}
}
.boxed_local()
}
/// Watches the lid device for reports.
async fn watch_lid_inner(&self) {
match self.report_event.wait_handle(zx::Signals::USER_0, zx::Time::INFINITE) {
Err(e) => error!("Could not wait for lid event: {:?}", e),
_ => match self.check_report().await {
Ok(()) => (),
Err(e) => {
self.inspect.read_errors.add(1);
self.inspect.last_read_error.set(format!("{}", e).as_str());
error!("Failed to read lid report: {}", e)
}
},
};
}
/// Reads the report from the lid sensor and sends shutdown signal if lid is closed.
async fn check_report(&self) -> Result<(), Error> {
let (status, report, _time) = self.proxy.read_report().await?;
let status = zx::Status::from_raw(status);
if status != zx::Status::OK {
return Err(format_err!("Error reading report {}", status));
}
if report.len() != 1 {
return Err(format_err!("Expected single byte report, found {:?}", report));
}
self.inspect.log_lid_report(format!("{:?}", report));
let report = report[0];
if report == LID_CLOSED {
info!("Lid closed. Shutting down...");
self.send_message(
&self.system_shutdown_node,
&Message::SystemShutdown(ShutdownRequest::PowerOff),
)
.await
.map_err(|e| format_err!("Failed to shut down the system: {:?}", e))?;
}
Ok(())
}
}
#[async_trait(?Send)]
impl Node for LidShutdown {
fn | (&self) -> String {
"LidShutdown".to_string()
}
async fn handle_message(&self, _msg: &Message) -> Result<MessageReturn, PowerManagerError> {
Err(PowerManagerError::Unsupported)
}
}
struct InspectData {
lid_reports: RefCell<BoundedListNode>,
read_errors: inspect::UintProperty,
last_read_error: inspect::StringProperty,
}
impl InspectData {
/// Number of inspect samples to store in the `lid_reports` BoundedListNode.
// Store the last 60 lid reports
const NUM_INSPECT_LID_REPORTS: usize = 60;
fn new(parent: &inspect::Node, name: String) -> Self {
// Create a local root node and properties
let root = parent.create_child(name);
let lid_reports = RefCell::new(BoundedListNode::new(
root.create_child("lid_reports"),
Self::NUM_INSPECT_LID_REPORTS,
));
let read_errors = root.create_uint("read_lid_report_error_count", 0);
let last_read_error = root.create_string("last_read_error", "");
// Pass ownership of the new node to the parent node, otherwise it'll be dropped
parent.record(root);
InspectData { lid_reports, read_errors, last_read_error }
}
fn log_lid_report(&self, lid_report: String) {
inspect_log!(self.lid_reports.borrow_mut(), lid_report: lid_report);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::mock_node::{create_dummy_node, MessageMatcher, MockNodeMaker};
use crate::{msg_eq, msg_ok_return};
use fidl_fuchsia_hardware_input as finput;
use fuchsia_async as fasync;
use fuchsia_inspect::testing::TreeAssertion;
use fuchsia_zircon::HandleBased;
use inspect::assert_data_tree;
const LID_OPEN: u8 = 0x1;
/// Spawns a new task that acts as a fake device driver for testing purposes. The driver only
/// handles requests for ReadReport - trying to send any other requests to it is a bug.
/// Each ReadReport responds with the |lid_report| specified.
fn setup_fake_driver(lid_report: u8) -> LidProxy {
let (proxy, mut stream) = fidl::endpoints::create_proxy_and_stream::<LidMarker>().unwrap();
fasync::Task::local(async move {
while let Ok(req) = stream.try_next().await {
match req {
Some(finput::DeviceRequest::ReadReport { responder }) => {
let _ = responder.send(zx::Status::OK.into_raw(), &[lid_report], 0 as i64);
}
_ => assert!(false),
}
}
})
.detach();
proxy
}
/// Tests that well-formed configuration JSON does not panic the `new_from_json` function.
#[fasync::run_singlethreaded(test)]
async fn test_new_from_json() {
let json_data = json::json!({
"type": "LidShutdown",
"name": "lid_shutdown",
"dependencies": {
"system_shutdown_node": "shutdown",
},
});
let mut nodes: HashMap<String, Rc<dyn Node>> = HashMap::new();
nodes.insert("shutdown".to_string(), create_dummy_node());
let _ = LidShutdownBuilder::new_from_json(json_data, &nodes);
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid closed report, it triggers a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_triggered_shutdown() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![(
msg_eq!(SystemShutdown(ShutdownRequest::PowerOff)),
msg_ok_return!(SystemShutdown),
)],
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker goes out of scope, it verifies the ShutdownNode received the shutdown
// request.
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid open report, it does NOT trigger a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_event_handling() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![], // the shutdown node is not expected to receive any messages
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_OPEN),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker will verify that the ShutdownNode receives no messages until it goes
// out of scope.
}
/// Tests that an unsupported message is handled gracefully and an error is returned.
#[fasync::run_singlethreaded(test)]
async fn test_unsupported_msg() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
zx::Event::create().unwrap(),
shutdown_node,
)
.build(&node_futures)
.await
.unwrap();
match node.handle_message(&Message::GetCpuLoads).await {
Err(PowerManagerError::Unsupported) => {}
e => panic!("Unexpected return value: {:?}", e),
}
}
/// Tests for the presence and correctness of dynamically-added inspect data
#[fasync::run_singlethreaded(test)]
async fn test_inspect_data() {
let lid_state = LID_OPEN;
let inspector = inspect::Inspector::new();
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let event = zx::Event::create().unwrap();
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(lid_state),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.with_inspect_root(inspector.root())
.build(&node_futures)
.await
.unwrap();
// The node will read the current temperature and log the sample into Inspect. Read enough
// samples to test that the correct number of samples are logged and older ones are dropped.
for _ in 0..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
}
let mut root = TreeAssertion::new("LidShutdown", false);
let mut lid_reports = TreeAssertion::new("lid_reports", true);
// Since we read 10 more samples than our limit allows, the first 10 should be dropped. So
// test that the sample numbering starts at 10 and continues for the expected number of
// samples.
for i in 10..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
let mut sample_child = TreeAssertion::new(&i.to_string(), true);
sample_child
.add_property_assertion("lid_report", Box::new(format!("{:?}", [lid_state])));
sample_child.add_property_assertion("@time", Box::new(inspect::testing::AnyProperty));
lid_reports.add_child_assertion(sample_child);
}
root.add_child_assertion(lid_reports);
assert_data_tree!(inspector, root: { root, });
}
}
| name | identifier_name |
lid_shutdown.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::error::PowerManagerError;
use crate::message::{Message, MessageReturn};
use crate::node::Node;
use crate::shutdown_request::ShutdownRequest;
use crate::utils::connect_to_driver;
use anyhow::{format_err, Error};
use async_trait::async_trait;
use fidl_fuchsia_hardware_input::{DeviceMarker as LidMarker, DeviceProxy as LidProxy};
use fuchsia_inspect::{self as inspect, NumericProperty, Property};
use fuchsia_inspect_contrib::{inspect_log, nodes::BoundedListNode};
use fuchsia_vfs_watcher as vfs;
use fuchsia_zircon::{self as zx, AsHandleRef};
use futures::{
future::{FutureExt, LocalBoxFuture},
stream::FuturesUnordered,
TryStreamExt,
};
use io_util::{open_directory_in_namespace, OPEN_RIGHT_READABLE};
use log::*;
use serde_derive::Deserialize;
use serde_json as json;
use std::{
cell::RefCell,
collections::HashMap,
path::{Path, PathBuf},
rc::Rc,
};
/// Node: LidShutdown
///
/// Summary: Responds to lid closed events from devices with a lid sensor by waiting for a report
/// using the input FIDL protocol.
///
/// Handles Messages: N/A
///
/// Sends Messages:
/// - SystemShutdown
///
/// FIDL dependencies:
/// - fuchsia.hardware.input: the node uses this protocol to wait on reports from the
/// lid device
/// The lid sensor is not a real HID device however this descriptor is defined in the lid driver
/// and will be used to send lid HID reports for any ACPI lid sensor.
const HID_LID_DESCRIPTOR: [u8; 9] = [
0x05, 0x01, // Usage Page (Generic Desktop)
0x09, 0x80, // Usage (System Control)
0xA1, 0x01, // Collection (Application)
0x0A, 0xFF, 0x01, // Usage (0x01FF, unique to lid reports)
];
// Lid closed HID report
const LID_CLOSED: u8 = 0x0;
static INPUT_DEVICES_DIRECTORY: &str = "/dev/class/input";
pub struct LidShutdownBuilder<'a> {
proxy: Option<LidProxy>,
lid_report_event: Option<zx::Event>,
system_shutdown_node: Rc<dyn Node>,
inspect_root: Option<&'a inspect::Node>,
}
impl<'a> LidShutdownBuilder<'a> {
pub fn new(system_shutdown_node: Rc<dyn Node>) -> Self {
LidShutdownBuilder {
proxy: None,
lid_report_event: None,
system_shutdown_node,
inspect_root: None,
}
}
#[cfg(test)]
pub fn new_with_event_and_proxy(
proxy: LidProxy,
lid_report_event: zx::Event,
system_shutdown_node: Rc<dyn Node>,
) -> Self {
Self {
proxy: Some(proxy),
lid_report_event: Some(lid_report_event),
system_shutdown_node,
inspect_root: None,
}
}
pub fn new_from_json(json_data: json::Value, nodes: &HashMap<String, Rc<dyn Node>>) -> Self {
#[derive(Deserialize)]
struct Dependencies {
system_shutdown_node: String,
}
#[derive(Deserialize)]
struct JsonData {
dependencies: Dependencies,
}
let data: JsonData = json::from_value(json_data).unwrap();
Self::new(nodes[&data.dependencies.system_shutdown_node].clone())
}
#[cfg(test)]
pub fn with_inspect_root(mut self, root: &'a inspect::Node) -> Self {
self.inspect_root = Some(root);
self
}
pub async fn build<'b>(
self,
futures_out: &FuturesUnordered<LocalBoxFuture<'b, ()>>,
) -> Result<Rc<LidShutdown>, Error> {
// In tests use the default proxy.
let proxy = match self.proxy {
Some(proxy) => proxy,
None => Self::find_lid_sensor().await?,
};
// In tests use the default event.
let report_event = match self.lid_report_event {
Some(report_event) => report_event,
None => match proxy.get_reports_event().await {
Ok((_, report_event)) => report_event,
Err(_e) => return Err(format_err!("Could not get report event.")),
},
};
// In tests use the default inspect root node
let inspect_root = self.inspect_root.unwrap_or(inspect::component::inspector().root());
let node = Rc::new(LidShutdown {
proxy,
report_event,
system_shutdown_node: self.system_shutdown_node,
inspect: InspectData::new(inspect_root, "LidShutdown".to_string()),
});
futures_out.push(node.clone().watch_lid());
Ok(node)
}
/// Checks all the input devices until the lid sensor is found.
async fn find_lid_sensor() -> Result<LidProxy, Error> {
let dir_proxy = open_directory_in_namespace(INPUT_DEVICES_DIRECTORY, OPEN_RIGHT_READABLE)?;
let mut watcher = vfs::Watcher::new(dir_proxy).await?;
while let Some(msg) = watcher.try_next().await? {
match msg.event {
vfs::WatchEvent::EXISTING | vfs::WatchEvent::ADD_FILE => {
match Self::open_sensor(&msg.filename).await {
Ok(device) => return Ok(device),
_ => (),
}
}
_ => (),
}
}
Err(format_err!("No lid device found"))
}
/// Opens the sensor's device file. Returns the device if the correct HID
/// report descriptor is found.
async fn open_sensor(filename: &PathBuf) -> Result<LidProxy, Error> {
let path = Path::new(INPUT_DEVICES_DIRECTORY).join(filename);
let device = connect_to_driver::<LidMarker>(&String::from(
path.to_str().ok_or(format_err!("Could not read path {:?}", path))?,
))
.await?;
if let Ok(device_descriptor) = device.get_report_desc().await {
if device_descriptor.len() < HID_LID_DESCRIPTOR.len() {
return Err(format_err!("Short HID header"));
}
let device_header = &device_descriptor[0..HID_LID_DESCRIPTOR.len()];
if device_header == HID_LID_DESCRIPTOR {
return Ok(device);
} else {
return Err(format_err!("Device is not lid sensor"));
}
}
Err(format_err!("Could not get device HID report descriptor"))
}
}
pub struct LidShutdown {
proxy: LidProxy,
/// Event that will signal |USER_0| when a report is in the lid device's report FIFO.
report_event: zx::Event,
/// Node to provide the system shutdown functionality via the SystemShutdown message.
system_shutdown_node: Rc<dyn Node>,
/// A struct for managing Component Inspection data
inspect: InspectData,
}
impl LidShutdown {
fn watch_lid<'a>(self: Rc<Self>) -> LocalBoxFuture<'a, ()> {
async move {
loop {
self.watch_lid_inner().await;
}
}
.boxed_local()
}
/// Watches the lid device for reports.
async fn watch_lid_inner(&self) {
match self.report_event.wait_handle(zx::Signals::USER_0, zx::Time::INFINITE) {
Err(e) => error!("Could not wait for lid event: {:?}", e),
_ => match self.check_report().await {
Ok(()) => (),
Err(e) => {
self.inspect.read_errors.add(1);
self.inspect.last_read_error.set(format!("{}", e).as_str());
error!("Failed to read lid report: {}", e)
} | /// Reads the report from the lid sensor and sends shutdown signal if lid is closed.
async fn check_report(&self) -> Result<(), Error> {
let (status, report, _time) = self.proxy.read_report().await?;
let status = zx::Status::from_raw(status);
if status != zx::Status::OK {
return Err(format_err!("Error reading report {}", status));
}
if report.len() != 1 {
return Err(format_err!("Expected single byte report, found {:?}", report));
}
self.inspect.log_lid_report(format!("{:?}", report));
let report = report[0];
if report == LID_CLOSED {
info!("Lid closed. Shutting down...");
self.send_message(
&self.system_shutdown_node,
&Message::SystemShutdown(ShutdownRequest::PowerOff),
)
.await
.map_err(|e| format_err!("Failed to shut down the system: {:?}", e))?;
}
Ok(())
}
}
#[async_trait(?Send)]
impl Node for LidShutdown {
fn name(&self) -> String {
"LidShutdown".to_string()
}
async fn handle_message(&self, _msg: &Message) -> Result<MessageReturn, PowerManagerError> {
Err(PowerManagerError::Unsupported)
}
}
struct InspectData {
lid_reports: RefCell<BoundedListNode>,
read_errors: inspect::UintProperty,
last_read_error: inspect::StringProperty,
}
impl InspectData {
/// Number of inspect samples to store in the `lid_reports` BoundedListNode.
// Store the last 60 lid reports
const NUM_INSPECT_LID_REPORTS: usize = 60;
fn new(parent: &inspect::Node, name: String) -> Self {
// Create a local root node and properties
let root = parent.create_child(name);
let lid_reports = RefCell::new(BoundedListNode::new(
root.create_child("lid_reports"),
Self::NUM_INSPECT_LID_REPORTS,
));
let read_errors = root.create_uint("read_lid_report_error_count", 0);
let last_read_error = root.create_string("last_read_error", "");
// Pass ownership of the new node to the parent node, otherwise it'll be dropped
parent.record(root);
InspectData { lid_reports, read_errors, last_read_error }
}
fn log_lid_report(&self, lid_report: String) {
inspect_log!(self.lid_reports.borrow_mut(), lid_report: lid_report);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::mock_node::{create_dummy_node, MessageMatcher, MockNodeMaker};
use crate::{msg_eq, msg_ok_return};
use fidl_fuchsia_hardware_input as finput;
use fuchsia_async as fasync;
use fuchsia_inspect::testing::TreeAssertion;
use fuchsia_zircon::HandleBased;
use inspect::assert_data_tree;
const LID_OPEN: u8 = 0x1;
/// Spawns a new task that acts as a fake device driver for testing purposes. The driver only
/// handles requests for ReadReport - trying to send any other requests to it is a bug.
/// Each ReadReport responds with the |lid_report| specified.
fn setup_fake_driver(lid_report: u8) -> LidProxy {
let (proxy, mut stream) = fidl::endpoints::create_proxy_and_stream::<LidMarker>().unwrap();
fasync::Task::local(async move {
while let Ok(req) = stream.try_next().await {
match req {
Some(finput::DeviceRequest::ReadReport { responder }) => {
let _ = responder.send(zx::Status::OK.into_raw(), &[lid_report], 0 as i64);
}
_ => assert!(false),
}
}
})
.detach();
proxy
}
/// Tests that well-formed configuration JSON does not panic the `new_from_json` function.
#[fasync::run_singlethreaded(test)]
async fn test_new_from_json() {
let json_data = json::json!({
"type": "LidShutdown",
"name": "lid_shutdown",
"dependencies": {
"system_shutdown_node": "shutdown",
},
});
let mut nodes: HashMap<String, Rc<dyn Node>> = HashMap::new();
nodes.insert("shutdown".to_string(), create_dummy_node());
let _ = LidShutdownBuilder::new_from_json(json_data, &nodes);
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid closed report, it triggers a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_triggered_shutdown() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![(
msg_eq!(SystemShutdown(ShutdownRequest::PowerOff)),
msg_ok_return!(SystemShutdown),
)],
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker goes out of scope, it verifies the ShutdownNode received the shutdown
// request.
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid open report, it does NOT trigger a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_event_handling() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![], // the shutdown node is not expected to receive any messages
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_OPEN),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker will verify that the ShutdownNode receives no messages until it goes
// out of scope.
}
/// Tests that an unsupported message is handled gracefully and an error is returned.
#[fasync::run_singlethreaded(test)]
async fn test_unsupported_msg() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
zx::Event::create().unwrap(),
shutdown_node,
)
.build(&node_futures)
.await
.unwrap();
match node.handle_message(&Message::GetCpuLoads).await {
Err(PowerManagerError::Unsupported) => {}
e => panic!("Unexpected return value: {:?}", e),
}
}
/// Tests for the presence and correctness of dynamically-added inspect data
#[fasync::run_singlethreaded(test)]
async fn test_inspect_data() {
let lid_state = LID_OPEN;
let inspector = inspect::Inspector::new();
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let event = zx::Event::create().unwrap();
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(lid_state),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.with_inspect_root(inspector.root())
.build(&node_futures)
.await
.unwrap();
// The node will read the current temperature and log the sample into Inspect. Read enough
// samples to test that the correct number of samples are logged and older ones are dropped.
for _ in 0..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
}
let mut root = TreeAssertion::new("LidShutdown", false);
let mut lid_reports = TreeAssertion::new("lid_reports", true);
// Since we read 10 more samples than our limit allows, the first 10 should be dropped. So
// test that the sample numbering starts at 10 and continues for the expected number of
// samples.
for i in 10..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
let mut sample_child = TreeAssertion::new(&i.to_string(), true);
sample_child
.add_property_assertion("lid_report", Box::new(format!("{:?}", [lid_state])));
sample_child.add_property_assertion("@time", Box::new(inspect::testing::AnyProperty));
lid_reports.add_child_assertion(sample_child);
}
root.add_child_assertion(lid_reports);
assert_data_tree!(inspector, root: { root, });
}
} | },
};
}
| random_line_split |
lid_shutdown.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::error::PowerManagerError;
use crate::message::{Message, MessageReturn};
use crate::node::Node;
use crate::shutdown_request::ShutdownRequest;
use crate::utils::connect_to_driver;
use anyhow::{format_err, Error};
use async_trait::async_trait;
use fidl_fuchsia_hardware_input::{DeviceMarker as LidMarker, DeviceProxy as LidProxy};
use fuchsia_inspect::{self as inspect, NumericProperty, Property};
use fuchsia_inspect_contrib::{inspect_log, nodes::BoundedListNode};
use fuchsia_vfs_watcher as vfs;
use fuchsia_zircon::{self as zx, AsHandleRef};
use futures::{
future::{FutureExt, LocalBoxFuture},
stream::FuturesUnordered,
TryStreamExt,
};
use io_util::{open_directory_in_namespace, OPEN_RIGHT_READABLE};
use log::*;
use serde_derive::Deserialize;
use serde_json as json;
use std::{
cell::RefCell,
collections::HashMap,
path::{Path, PathBuf},
rc::Rc,
};
/// Node: LidShutdown
///
/// Summary: Responds to lid closed events from devices with a lid sensor by waiting for a report
/// using the input FIDL protocol.
///
/// Handles Messages: N/A
///
/// Sends Messages:
/// - SystemShutdown
///
/// FIDL dependencies:
/// - fuchsia.hardware.input: the node uses this protocol to wait on reports from the
/// lid device
/// The lid sensor is not a real HID device however this descriptor is defined in the lid driver
/// and will be used to send lid HID reports for any ACPI lid sensor.
const HID_LID_DESCRIPTOR: [u8; 9] = [
0x05, 0x01, // Usage Page (Generic Desktop)
0x09, 0x80, // Usage (System Control)
0xA1, 0x01, // Collection (Application)
0x0A, 0xFF, 0x01, // Usage (0x01FF, unique to lid reports)
];
// Lid closed HID report
const LID_CLOSED: u8 = 0x0;
static INPUT_DEVICES_DIRECTORY: &str = "/dev/class/input";
pub struct LidShutdownBuilder<'a> {
proxy: Option<LidProxy>,
lid_report_event: Option<zx::Event>,
system_shutdown_node: Rc<dyn Node>,
inspect_root: Option<&'a inspect::Node>,
}
impl<'a> LidShutdownBuilder<'a> {
pub fn new(system_shutdown_node: Rc<dyn Node>) -> Self {
LidShutdownBuilder {
proxy: None,
lid_report_event: None,
system_shutdown_node,
inspect_root: None,
}
}
#[cfg(test)]
pub fn new_with_event_and_proxy(
proxy: LidProxy,
lid_report_event: zx::Event,
system_shutdown_node: Rc<dyn Node>,
) -> Self {
Self {
proxy: Some(proxy),
lid_report_event: Some(lid_report_event),
system_shutdown_node,
inspect_root: None,
}
}
pub fn new_from_json(json_data: json::Value, nodes: &HashMap<String, Rc<dyn Node>>) -> Self {
#[derive(Deserialize)]
struct Dependencies {
system_shutdown_node: String,
}
#[derive(Deserialize)]
struct JsonData {
dependencies: Dependencies,
}
let data: JsonData = json::from_value(json_data).unwrap();
Self::new(nodes[&data.dependencies.system_shutdown_node].clone())
}
#[cfg(test)]
pub fn with_inspect_root(mut self, root: &'a inspect::Node) -> Self {
self.inspect_root = Some(root);
self
}
pub async fn build<'b>(
self,
futures_out: &FuturesUnordered<LocalBoxFuture<'b, ()>>,
) -> Result<Rc<LidShutdown>, Error> {
// In tests use the default proxy.
let proxy = match self.proxy {
Some(proxy) => proxy,
None => Self::find_lid_sensor().await?,
};
// In tests use the default event.
let report_event = match self.lid_report_event {
Some(report_event) => report_event,
None => match proxy.get_reports_event().await {
Ok((_, report_event)) => report_event,
Err(_e) => return Err(format_err!("Could not get report event.")),
},
};
// In tests use the default inspect root node
let inspect_root = self.inspect_root.unwrap_or(inspect::component::inspector().root());
let node = Rc::new(LidShutdown {
proxy,
report_event,
system_shutdown_node: self.system_shutdown_node,
inspect: InspectData::new(inspect_root, "LidShutdown".to_string()),
});
futures_out.push(node.clone().watch_lid());
Ok(node)
}
/// Checks all the input devices until the lid sensor is found.
async fn find_lid_sensor() -> Result<LidProxy, Error> {
let dir_proxy = open_directory_in_namespace(INPUT_DEVICES_DIRECTORY, OPEN_RIGHT_READABLE)?;
let mut watcher = vfs::Watcher::new(dir_proxy).await?;
while let Some(msg) = watcher.try_next().await? {
match msg.event {
vfs::WatchEvent::EXISTING | vfs::WatchEvent::ADD_FILE => {
match Self::open_sensor(&msg.filename).await {
Ok(device) => return Ok(device),
_ => (),
}
}
_ => (),
}
}
Err(format_err!("No lid device found"))
}
/// Opens the sensor's device file. Returns the device if the correct HID
/// report descriptor is found.
async fn open_sensor(filename: &PathBuf) -> Result<LidProxy, Error> {
let path = Path::new(INPUT_DEVICES_DIRECTORY).join(filename);
let device = connect_to_driver::<LidMarker>(&String::from(
path.to_str().ok_or(format_err!("Could not read path {:?}", path))?,
))
.await?;
if let Ok(device_descriptor) = device.get_report_desc().await {
if device_descriptor.len() < HID_LID_DESCRIPTOR.len() {
return Err(format_err!("Short HID header"));
}
let device_header = &device_descriptor[0..HID_LID_DESCRIPTOR.len()];
if device_header == HID_LID_DESCRIPTOR {
return Ok(device);
} else {
return Err(format_err!("Device is not lid sensor"));
}
}
Err(format_err!("Could not get device HID report descriptor"))
}
}
pub struct LidShutdown {
proxy: LidProxy,
/// Event that will signal |USER_0| when a report is in the lid device's report FIFO.
report_event: zx::Event,
/// Node to provide the system shutdown functionality via the SystemShutdown message.
system_shutdown_node: Rc<dyn Node>,
/// A struct for managing Component Inspection data
inspect: InspectData,
}
impl LidShutdown {
fn watch_lid<'a>(self: Rc<Self>) -> LocalBoxFuture<'a, ()> {
async move {
loop {
self.watch_lid_inner().await;
}
}
.boxed_local()
}
/// Watches the lid device for reports.
async fn watch_lid_inner(&self) {
match self.report_event.wait_handle(zx::Signals::USER_0, zx::Time::INFINITE) {
Err(e) => error!("Could not wait for lid event: {:?}", e),
_ => match self.check_report().await {
Ok(()) => (),
Err(e) => {
self.inspect.read_errors.add(1);
self.inspect.last_read_error.set(format!("{}", e).as_str());
error!("Failed to read lid report: {}", e)
}
},
};
}
/// Reads the report from the lid sensor and sends shutdown signal if lid is closed.
async fn check_report(&self) -> Result<(), Error> {
let (status, report, _time) = self.proxy.read_report().await?;
let status = zx::Status::from_raw(status);
if status != zx::Status::OK {
return Err(format_err!("Error reading report {}", status));
}
if report.len() != 1 {
return Err(format_err!("Expected single byte report, found {:?}", report));
}
self.inspect.log_lid_report(format!("{:?}", report));
let report = report[0];
if report == LID_CLOSED {
info!("Lid closed. Shutting down...");
self.send_message(
&self.system_shutdown_node,
&Message::SystemShutdown(ShutdownRequest::PowerOff),
)
.await
.map_err(|e| format_err!("Failed to shut down the system: {:?}", e))?;
}
Ok(())
}
}
#[async_trait(?Send)]
impl Node for LidShutdown {
fn name(&self) -> String {
"LidShutdown".to_string()
}
async fn handle_message(&self, _msg: &Message) -> Result<MessageReturn, PowerManagerError> {
Err(PowerManagerError::Unsupported)
}
}
struct InspectData {
lid_reports: RefCell<BoundedListNode>,
read_errors: inspect::UintProperty,
last_read_error: inspect::StringProperty,
}
impl InspectData {
/// Number of inspect samples to store in the `lid_reports` BoundedListNode.
// Store the last 60 lid reports
const NUM_INSPECT_LID_REPORTS: usize = 60;
fn new(parent: &inspect::Node, name: String) -> Self {
// Create a local root node and properties
let root = parent.create_child(name);
let lid_reports = RefCell::new(BoundedListNode::new(
root.create_child("lid_reports"),
Self::NUM_INSPECT_LID_REPORTS,
));
let read_errors = root.create_uint("read_lid_report_error_count", 0);
let last_read_error = root.create_string("last_read_error", "");
// Pass ownership of the new node to the parent node, otherwise it'll be dropped
parent.record(root);
InspectData { lid_reports, read_errors, last_read_error }
}
fn log_lid_report(&self, lid_report: String) {
inspect_log!(self.lid_reports.borrow_mut(), lid_report: lid_report);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::mock_node::{create_dummy_node, MessageMatcher, MockNodeMaker};
use crate::{msg_eq, msg_ok_return};
use fidl_fuchsia_hardware_input as finput;
use fuchsia_async as fasync;
use fuchsia_inspect::testing::TreeAssertion;
use fuchsia_zircon::HandleBased;
use inspect::assert_data_tree;
const LID_OPEN: u8 = 0x1;
/// Spawns a new task that acts as a fake device driver for testing purposes. The driver only
/// handles requests for ReadReport - trying to send any other requests to it is a bug.
/// Each ReadReport responds with the |lid_report| specified.
fn setup_fake_driver(lid_report: u8) -> LidProxy {
let (proxy, mut stream) = fidl::endpoints::create_proxy_and_stream::<LidMarker>().unwrap();
fasync::Task::local(async move {
while let Ok(req) = stream.try_next().await {
match req {
Some(finput::DeviceRequest::ReadReport { responder }) => {
let _ = responder.send(zx::Status::OK.into_raw(), &[lid_report], 0 as i64);
}
_ => assert!(false),
}
}
})
.detach();
proxy
}
/// Tests that well-formed configuration JSON does not panic the `new_from_json` function.
#[fasync::run_singlethreaded(test)]
async fn test_new_from_json() {
let json_data = json::json!({
"type": "LidShutdown",
"name": "lid_shutdown",
"dependencies": {
"system_shutdown_node": "shutdown",
},
});
let mut nodes: HashMap<String, Rc<dyn Node>> = HashMap::new();
nodes.insert("shutdown".to_string(), create_dummy_node());
let _ = LidShutdownBuilder::new_from_json(json_data, &nodes);
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid closed report, it triggers a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_triggered_shutdown() |
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid open report, it does NOT trigger a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_event_handling() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![], // the shutdown node is not expected to receive any messages
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_OPEN),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker will verify that the ShutdownNode receives no messages until it goes
// out of scope.
}
/// Tests that an unsupported message is handled gracefully and an error is returned.
#[fasync::run_singlethreaded(test)]
async fn test_unsupported_msg() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
zx::Event::create().unwrap(),
shutdown_node,
)
.build(&node_futures)
.await
.unwrap();
match node.handle_message(&Message::GetCpuLoads).await {
Err(PowerManagerError::Unsupported) => {}
e => panic!("Unexpected return value: {:?}", e),
}
}
/// Tests for the presence and correctness of dynamically-added inspect data
#[fasync::run_singlethreaded(test)]
async fn test_inspect_data() {
let lid_state = LID_OPEN;
let inspector = inspect::Inspector::new();
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let event = zx::Event::create().unwrap();
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(lid_state),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.with_inspect_root(inspector.root())
.build(&node_futures)
.await
.unwrap();
// The node will read the current temperature and log the sample into Inspect. Read enough
// samples to test that the correct number of samples are logged and older ones are dropped.
for _ in 0..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
}
let mut root = TreeAssertion::new("LidShutdown", false);
let mut lid_reports = TreeAssertion::new("lid_reports", true);
// Since we read 10 more samples than our limit allows, the first 10 should be dropped. So
// test that the sample numbering starts at 10 and continues for the expected number of
// samples.
for i in 10..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
let mut sample_child = TreeAssertion::new(&i.to_string(), true);
sample_child
.add_property_assertion("lid_report", Box::new(format!("{:?}", [lid_state])));
sample_child.add_property_assertion("@time", Box::new(inspect::testing::AnyProperty));
lid_reports.add_child_assertion(sample_child);
}
root.add_child_assertion(lid_reports);
assert_data_tree!(inspector, root: { root, });
}
}
| {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![(
msg_eq!(SystemShutdown(ShutdownRequest::PowerOff)),
msg_ok_return!(SystemShutdown),
)],
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker goes out of scope, it verifies the ShutdownNode received the shutdown
// request.
} | identifier_body |
lid_shutdown.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::error::PowerManagerError;
use crate::message::{Message, MessageReturn};
use crate::node::Node;
use crate::shutdown_request::ShutdownRequest;
use crate::utils::connect_to_driver;
use anyhow::{format_err, Error};
use async_trait::async_trait;
use fidl_fuchsia_hardware_input::{DeviceMarker as LidMarker, DeviceProxy as LidProxy};
use fuchsia_inspect::{self as inspect, NumericProperty, Property};
use fuchsia_inspect_contrib::{inspect_log, nodes::BoundedListNode};
use fuchsia_vfs_watcher as vfs;
use fuchsia_zircon::{self as zx, AsHandleRef};
use futures::{
future::{FutureExt, LocalBoxFuture},
stream::FuturesUnordered,
TryStreamExt,
};
use io_util::{open_directory_in_namespace, OPEN_RIGHT_READABLE};
use log::*;
use serde_derive::Deserialize;
use serde_json as json;
use std::{
cell::RefCell,
collections::HashMap,
path::{Path, PathBuf},
rc::Rc,
};
/// Node: LidShutdown
///
/// Summary: Responds to lid closed events from devices with a lid sensor by waiting for a report
/// using the input FIDL protocol.
///
/// Handles Messages: N/A
///
/// Sends Messages:
/// - SystemShutdown
///
/// FIDL dependencies:
/// - fuchsia.hardware.input: the node uses this protocol to wait on reports from the
/// lid device
/// The lid sensor is not a real HID device however this descriptor is defined in the lid driver
/// and will be used to send lid HID reports for any ACPI lid sensor.
const HID_LID_DESCRIPTOR: [u8; 9] = [
0x05, 0x01, // Usage Page (Generic Desktop)
0x09, 0x80, // Usage (System Control)
0xA1, 0x01, // Collection (Application)
0x0A, 0xFF, 0x01, // Usage (0x01FF, unique to lid reports)
];
// Lid closed HID report
const LID_CLOSED: u8 = 0x0;
static INPUT_DEVICES_DIRECTORY: &str = "/dev/class/input";
pub struct LidShutdownBuilder<'a> {
proxy: Option<LidProxy>,
lid_report_event: Option<zx::Event>,
system_shutdown_node: Rc<dyn Node>,
inspect_root: Option<&'a inspect::Node>,
}
impl<'a> LidShutdownBuilder<'a> {
pub fn new(system_shutdown_node: Rc<dyn Node>) -> Self {
LidShutdownBuilder {
proxy: None,
lid_report_event: None,
system_shutdown_node,
inspect_root: None,
}
}
#[cfg(test)]
pub fn new_with_event_and_proxy(
proxy: LidProxy,
lid_report_event: zx::Event,
system_shutdown_node: Rc<dyn Node>,
) -> Self {
Self {
proxy: Some(proxy),
lid_report_event: Some(lid_report_event),
system_shutdown_node,
inspect_root: None,
}
}
pub fn new_from_json(json_data: json::Value, nodes: &HashMap<String, Rc<dyn Node>>) -> Self {
#[derive(Deserialize)]
struct Dependencies {
system_shutdown_node: String,
}
#[derive(Deserialize)]
struct JsonData {
dependencies: Dependencies,
}
let data: JsonData = json::from_value(json_data).unwrap();
Self::new(nodes[&data.dependencies.system_shutdown_node].clone())
}
#[cfg(test)]
pub fn with_inspect_root(mut self, root: &'a inspect::Node) -> Self {
self.inspect_root = Some(root);
self
}
pub async fn build<'b>(
self,
futures_out: &FuturesUnordered<LocalBoxFuture<'b, ()>>,
) -> Result<Rc<LidShutdown>, Error> {
// In tests use the default proxy.
let proxy = match self.proxy {
Some(proxy) => proxy,
None => Self::find_lid_sensor().await?,
};
// In tests use the default event.
let report_event = match self.lid_report_event {
Some(report_event) => report_event,
None => match proxy.get_reports_event().await {
Ok((_, report_event)) => report_event,
Err(_e) => return Err(format_err!("Could not get report event.")),
},
};
// In tests use the default inspect root node
let inspect_root = self.inspect_root.unwrap_or(inspect::component::inspector().root());
let node = Rc::new(LidShutdown {
proxy,
report_event,
system_shutdown_node: self.system_shutdown_node,
inspect: InspectData::new(inspect_root, "LidShutdown".to_string()),
});
futures_out.push(node.clone().watch_lid());
Ok(node)
}
/// Checks all the input devices until the lid sensor is found.
async fn find_lid_sensor() -> Result<LidProxy, Error> {
let dir_proxy = open_directory_in_namespace(INPUT_DEVICES_DIRECTORY, OPEN_RIGHT_READABLE)?;
let mut watcher = vfs::Watcher::new(dir_proxy).await?;
while let Some(msg) = watcher.try_next().await? {
match msg.event {
vfs::WatchEvent::EXISTING | vfs::WatchEvent::ADD_FILE => |
_ => (),
}
}
Err(format_err!("No lid device found"))
}
/// Opens the sensor's device file. Returns the device if the correct HID
/// report descriptor is found.
async fn open_sensor(filename: &PathBuf) -> Result<LidProxy, Error> {
let path = Path::new(INPUT_DEVICES_DIRECTORY).join(filename);
let device = connect_to_driver::<LidMarker>(&String::from(
path.to_str().ok_or(format_err!("Could not read path {:?}", path))?,
))
.await?;
if let Ok(device_descriptor) = device.get_report_desc().await {
if device_descriptor.len() < HID_LID_DESCRIPTOR.len() {
return Err(format_err!("Short HID header"));
}
let device_header = &device_descriptor[0..HID_LID_DESCRIPTOR.len()];
if device_header == HID_LID_DESCRIPTOR {
return Ok(device);
} else {
return Err(format_err!("Device is not lid sensor"));
}
}
Err(format_err!("Could not get device HID report descriptor"))
}
}
pub struct LidShutdown {
proxy: LidProxy,
/// Event that will signal |USER_0| when a report is in the lid device's report FIFO.
report_event: zx::Event,
/// Node to provide the system shutdown functionality via the SystemShutdown message.
system_shutdown_node: Rc<dyn Node>,
/// A struct for managing Component Inspection data
inspect: InspectData,
}
impl LidShutdown {
fn watch_lid<'a>(self: Rc<Self>) -> LocalBoxFuture<'a, ()> {
async move {
loop {
self.watch_lid_inner().await;
}
}
.boxed_local()
}
/// Watches the lid device for reports.
async fn watch_lid_inner(&self) {
match self.report_event.wait_handle(zx::Signals::USER_0, zx::Time::INFINITE) {
Err(e) => error!("Could not wait for lid event: {:?}", e),
_ => match self.check_report().await {
Ok(()) => (),
Err(e) => {
self.inspect.read_errors.add(1);
self.inspect.last_read_error.set(format!("{}", e).as_str());
error!("Failed to read lid report: {}", e)
}
},
};
}
/// Reads the report from the lid sensor and sends shutdown signal if lid is closed.
async fn check_report(&self) -> Result<(), Error> {
let (status, report, _time) = self.proxy.read_report().await?;
let status = zx::Status::from_raw(status);
if status != zx::Status::OK {
return Err(format_err!("Error reading report {}", status));
}
if report.len() != 1 {
return Err(format_err!("Expected single byte report, found {:?}", report));
}
self.inspect.log_lid_report(format!("{:?}", report));
let report = report[0];
if report == LID_CLOSED {
info!("Lid closed. Shutting down...");
self.send_message(
&self.system_shutdown_node,
&Message::SystemShutdown(ShutdownRequest::PowerOff),
)
.await
.map_err(|e| format_err!("Failed to shut down the system: {:?}", e))?;
}
Ok(())
}
}
#[async_trait(?Send)]
impl Node for LidShutdown {
fn name(&self) -> String {
"LidShutdown".to_string()
}
async fn handle_message(&self, _msg: &Message) -> Result<MessageReturn, PowerManagerError> {
Err(PowerManagerError::Unsupported)
}
}
struct InspectData {
lid_reports: RefCell<BoundedListNode>,
read_errors: inspect::UintProperty,
last_read_error: inspect::StringProperty,
}
impl InspectData {
/// Number of inspect samples to store in the `lid_reports` BoundedListNode.
// Store the last 60 lid reports
const NUM_INSPECT_LID_REPORTS: usize = 60;
fn new(parent: &inspect::Node, name: String) -> Self {
// Create a local root node and properties
let root = parent.create_child(name);
let lid_reports = RefCell::new(BoundedListNode::new(
root.create_child("lid_reports"),
Self::NUM_INSPECT_LID_REPORTS,
));
let read_errors = root.create_uint("read_lid_report_error_count", 0);
let last_read_error = root.create_string("last_read_error", "");
// Pass ownership of the new node to the parent node, otherwise it'll be dropped
parent.record(root);
InspectData { lid_reports, read_errors, last_read_error }
}
fn log_lid_report(&self, lid_report: String) {
inspect_log!(self.lid_reports.borrow_mut(), lid_report: lid_report);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::mock_node::{create_dummy_node, MessageMatcher, MockNodeMaker};
use crate::{msg_eq, msg_ok_return};
use fidl_fuchsia_hardware_input as finput;
use fuchsia_async as fasync;
use fuchsia_inspect::testing::TreeAssertion;
use fuchsia_zircon::HandleBased;
use inspect::assert_data_tree;
const LID_OPEN: u8 = 0x1;
/// Spawns a new task that acts as a fake device driver for testing purposes. The driver only
/// handles requests for ReadReport - trying to send any other requests to it is a bug.
/// Each ReadReport responds with the |lid_report| specified.
fn setup_fake_driver(lid_report: u8) -> LidProxy {
let (proxy, mut stream) = fidl::endpoints::create_proxy_and_stream::<LidMarker>().unwrap();
fasync::Task::local(async move {
while let Ok(req) = stream.try_next().await {
match req {
Some(finput::DeviceRequest::ReadReport { responder }) => {
let _ = responder.send(zx::Status::OK.into_raw(), &[lid_report], 0 as i64);
}
_ => assert!(false),
}
}
})
.detach();
proxy
}
/// Tests that well-formed configuration JSON does not panic the `new_from_json` function.
#[fasync::run_singlethreaded(test)]
async fn test_new_from_json() {
let json_data = json::json!({
"type": "LidShutdown",
"name": "lid_shutdown",
"dependencies": {
"system_shutdown_node": "shutdown",
},
});
let mut nodes: HashMap<String, Rc<dyn Node>> = HashMap::new();
nodes.insert("shutdown".to_string(), create_dummy_node());
let _ = LidShutdownBuilder::new_from_json(json_data, &nodes);
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid closed report, it triggers a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_triggered_shutdown() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![(
msg_eq!(SystemShutdown(ShutdownRequest::PowerOff)),
msg_ok_return!(SystemShutdown),
)],
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker goes out of scope, it verifies the ShutdownNode received the shutdown
// request.
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid open report, it does NOT trigger a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_event_handling() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![], // the shutdown node is not expected to receive any messages
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_OPEN),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker will verify that the ShutdownNode receives no messages until it goes
// out of scope.
}
/// Tests that an unsupported message is handled gracefully and an error is returned.
#[fasync::run_singlethreaded(test)]
async fn test_unsupported_msg() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
zx::Event::create().unwrap(),
shutdown_node,
)
.build(&node_futures)
.await
.unwrap();
match node.handle_message(&Message::GetCpuLoads).await {
Err(PowerManagerError::Unsupported) => {}
e => panic!("Unexpected return value: {:?}", e),
}
}
/// Tests for the presence and correctness of dynamically-added inspect data
#[fasync::run_singlethreaded(test)]
async fn test_inspect_data() {
let lid_state = LID_OPEN;
let inspector = inspect::Inspector::new();
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let event = zx::Event::create().unwrap();
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(lid_state),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.with_inspect_root(inspector.root())
.build(&node_futures)
.await
.unwrap();
// The node will read the current temperature and log the sample into Inspect. Read enough
// samples to test that the correct number of samples are logged and older ones are dropped.
for _ in 0..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
}
let mut root = TreeAssertion::new("LidShutdown", false);
let mut lid_reports = TreeAssertion::new("lid_reports", true);
// Since we read 10 more samples than our limit allows, the first 10 should be dropped. So
// test that the sample numbering starts at 10 and continues for the expected number of
// samples.
for i in 10..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
let mut sample_child = TreeAssertion::new(&i.to_string(), true);
sample_child
.add_property_assertion("lid_report", Box::new(format!("{:?}", [lid_state])));
sample_child.add_property_assertion("@time", Box::new(inspect::testing::AnyProperty));
lid_reports.add_child_assertion(sample_child);
}
root.add_child_assertion(lid_reports);
assert_data_tree!(inspector, root: { root, });
}
}
| {
match Self::open_sensor(&msg.filename).await {
Ok(device) => return Ok(device),
_ => (),
}
} | conditional_block |
generate_random_samples.py | import sys
sys.path.append('../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generic_utils import save_set_of_images
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
from copy import deepcopy
from my_utils.photometric_optimization.models import FLAME
from my_utils.photometric_optimization import util
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
def corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma, pose_sigma):
# import ipdb; ipdb.set_trace()
# np.random.seed(2)
corrupted_flame = deepcopy(flm_params)
if corruption_type == 'shape' or corruption_type == 'all':
corrupted_flame[:, :10] = flm_params[:, :10] + \
np.clip(np.random.normal(0, sigma, flm_params[:, :10].shape),
-3 * sigma, 3 * sigma).astype('float32')
if corruption_type == 'exp_jaw'or corruption_type == 'all':
# Expression
|
if corruption_type == 'pose' or corruption_type == 'all':
# pose_perturbation = np.random.normal(0, pose_sigma[i], (corrupted_flame.shape[0], 3))
# corrupted_flame[:, 150:153] += np.clip(pose_perturbation, -3 * pose_sigma[i], 3 * pose_sigma[i])
pose_perturbation = np.random.normal(0, pose_sigma, (corrupted_flame.shape[0],))
corrupted_flame[:, 151] = flm_params[:, 151] + \
np.clip(pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)
return corrupted_flame
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
num_smpl_to_eval_on = 128
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
# Uncomment the appropriate run_id
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
# run_ids_1 = [7]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
np.random.seed(2)
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
shape_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(97))).astype('float32')
exp_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(47))).astype('float32')
# +- pi/4 for bad samples +- pi/8 for good samples
# pose = np.array([0, np.random.uniform(-np.pi/4, np.pi/4, 1), 0,
# np.random.uniform(0, np.pi/12, 1), 0, 0]).astype('float32')
pose = np.array([0, np.random.uniform(-np.pi / 8, np.pi / 8, 1), 0,
np.random.uniform(0, np.pi / 12, 1), 0, 0]).astype('float32')
texture = np.random.normal(0, 1, [50]).astype('float32')
# texture = flame_param['tex']
flame_param = np.hstack((shape_params, exp_params, pose, flame_param['cam'],
texture, flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 32
num_sigmas = 1
corruption_sigma = np.linspace(0, 1.5, num_sigmas)
jaw_rot_range = (0, np.pi/8)
jaw_rot_sigmas = np.linspace(0, (jaw_rot_range[1] - jaw_rot_range[0])/6, num_sigmas)
pose_range = (-np.pi/3, np.pi/3)
pose_sigmas = np.linspace(0, (pose_range[1] - pose_range[0])/6, num_sigmas)
config_obj = util.dict2obj(cnst.flame_config)
flame_decoder = FLAME.FLAME(config_obj).cuda().eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
params_to_save = {'cam': [], 'shape': [], 'exp': [], 'pose': [], 'light_code': [], 'texture_code': [],
'identity_indices': []}
for i, sigma in enumerate(corruption_sigma):
images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
flame_mesh_imgs = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
# print(flm_params[1, :])
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
# flm_batch = eye_cntr_reg.substitute_flame_batch_with_regressed_camera(flm_batch)
flm_batch = position_to_given_location(flame_decoder, flm_batch)
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
batch_size_true = flm_batch.shape[0]
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
params_to_save['cam'].append(cam.cpu().detach().numpy())
params_to_save['shape'].append(shape.cpu().detach().numpy())
params_to_save['shape'].append(shape.cpu().detach().numpy())
params_to_save['exp'].append(exp.cpu().detach().numpy())
params_to_save['pose'].append(pose.cpu().detach().numpy())
params_to_save['light_code'].append(light_code.cpu().detach().numpy())
params_to_save['texture_code'].append(texture_code.cpu().detach().numpy())
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
# import ipdb; ipdb.set_trace()
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
# Render the 2nd time to get backface culling and white texture
# norma_map_img_to_save, _, _, _, rend_flm_to_save = \
# overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
# camera_params=cam, cull_backfaces=True, constant_albedo=0.6)
# Back face culling temporarily un-availabe
norma_map_img_to_save, _, _, _, rend_flm_to_save = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam, cull_backfaces=False, constant_albedo=0.6)
rend_flm_to_save = torch.clamp(rend_flm_to_save, 0, 1) * 2 - 1
# rend_flm_to_save = rend_flm
# norma_map_img_to_save = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm_to_save = fast_image_reshape(rend_flm_to_save, height_out=256, width_out=256, mode='bilinear')
# norma_map_img_to_save = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
params_to_save['identity_indices'].append(identity_embeddings.cpu().detach().numpy())
# import ipdb; ipdb.set_trace()
images[batch_idx:batch_idx+batch_size_true] = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
# if flame_mesh_imgs is None:
flame_mesh_imgs[batch_idx:batch_idx+batch_size_true] = torch.clamp(rend_flm_to_save, -1, 1).cpu().numpy()
if save_images:
mdl_name = settings_for_runs[run_idx]['name']
for key in params_to_save.keys():
params_to_save[key] = np.concatenate(params_to_save[key], axis=0)
save_dir = os.path.join(cnst.output_root, 'sample', str(run_idx), f'random_samples_q_eval_{mdl_name}')
os.makedirs(save_dir, exist_ok=True)
np.save(os.path.join(save_dir, 'params.npy'), params_to_save)
save_path_current_id = os.path.join(save_dir, 'images')
save_set_of_images(path=save_path_current_id, prefix='', images=(images + 1) / 2, show_prog_bar=True)
#save flam rndr
save_path_current_id_flm_rndr = os.path.join(save_dir, 'conditions')
save_set_of_images(path=save_path_current_id_flm_rndr, prefix='mesh', images=(flame_mesh_imgs + 1) / 2,
show_prog_bar=True)
| corrupted_flame[:, 100:110] = flm_params[:, 100:110] + \
np.clip(np.random.normal(0, sigma, flm_params[:, 100:110].shape),
-3 * sigma, 3 * sigma).astype('float32')
# Jaw pose
corrupted_flame[:, 153] = flm_params[:, 153] + \
np.random.normal(0, jaw_sigma, corrupted_flame.shape[0]) | conditional_block |
generate_random_samples.py | import sys
sys.path.append('../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generic_utils import save_set_of_images
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
from copy import deepcopy
from my_utils.photometric_optimization.models import FLAME
from my_utils.photometric_optimization import util
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
|
def corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma, pose_sigma):
# import ipdb; ipdb.set_trace()
# np.random.seed(2)
corrupted_flame = deepcopy(flm_params)
if corruption_type == 'shape' or corruption_type == 'all':
corrupted_flame[:, :10] = flm_params[:, :10] + \
np.clip(np.random.normal(0, sigma, flm_params[:, :10].shape),
-3 * sigma, 3 * sigma).astype('float32')
if corruption_type == 'exp_jaw'or corruption_type == 'all':
# Expression
corrupted_flame[:, 100:110] = flm_params[:, 100:110] + \
np.clip(np.random.normal(0, sigma, flm_params[:, 100:110].shape),
-3 * sigma, 3 * sigma).astype('float32')
# Jaw pose
corrupted_flame[:, 153] = flm_params[:, 153] + \
np.random.normal(0, jaw_sigma, corrupted_flame.shape[0])
if corruption_type == 'pose' or corruption_type == 'all':
# pose_perturbation = np.random.normal(0, pose_sigma[i], (corrupted_flame.shape[0], 3))
# corrupted_flame[:, 150:153] += np.clip(pose_perturbation, -3 * pose_sigma[i], 3 * pose_sigma[i])
pose_perturbation = np.random.normal(0, pose_sigma, (corrupted_flame.shape[0],))
corrupted_flame[:, 151] = flm_params[:, 151] + \
np.clip(pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)
return corrupted_flame
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
num_smpl_to_eval_on = 128
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
# Uncomment the appropriate run_id
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
# run_ids_1 = [7]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
np.random.seed(2)
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
shape_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(97))).astype('float32')
exp_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(47))).astype('float32')
# +- pi/4 for bad samples +- pi/8 for good samples
# pose = np.array([0, np.random.uniform(-np.pi/4, np.pi/4, 1), 0,
# np.random.uniform(0, np.pi/12, 1), 0, 0]).astype('float32')
pose = np.array([0, np.random.uniform(-np.pi / 8, np.pi / 8, 1), 0,
np.random.uniform(0, np.pi / 12, 1), 0, 0]).astype('float32')
texture = np.random.normal(0, 1, [50]).astype('float32')
# texture = flame_param['tex']
flame_param = np.hstack((shape_params, exp_params, pose, flame_param['cam'],
texture, flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 32
num_sigmas = 1
corruption_sigma = np.linspace(0, 1.5, num_sigmas)
jaw_rot_range = (0, np.pi/8)
jaw_rot_sigmas = np.linspace(0, (jaw_rot_range[1] - jaw_rot_range[0])/6, num_sigmas)
pose_range = (-np.pi/3, np.pi/3)
pose_sigmas = np.linspace(0, (pose_range[1] - pose_range[0])/6, num_sigmas)
config_obj = util.dict2obj(cnst.flame_config)
flame_decoder = FLAME.FLAME(config_obj).cuda().eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
params_to_save = {'cam': [], 'shape': [], 'exp': [], 'pose': [], 'light_code': [], 'texture_code': [],
'identity_indices': []}
for i, sigma in enumerate(corruption_sigma):
images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
flame_mesh_imgs = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
# print(flm_params[1, :])
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
# flm_batch = eye_cntr_reg.substitute_flame_batch_with_regressed_camera(flm_batch)
flm_batch = position_to_given_location(flame_decoder, flm_batch)
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
batch_size_true = flm_batch.shape[0]
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
params_to_save['cam'].append(cam.cpu().detach().numpy())
params_to_save['shape'].append(shape.cpu().detach().numpy())
params_to_save['shape'].append(shape.cpu().detach().numpy())
params_to_save['exp'].append(exp.cpu().detach().numpy())
params_to_save['pose'].append(pose.cpu().detach().numpy())
params_to_save['light_code'].append(light_code.cpu().detach().numpy())
params_to_save['texture_code'].append(texture_code.cpu().detach().numpy())
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
# import ipdb; ipdb.set_trace()
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
# Render the 2nd time to get backface culling and white texture
# norma_map_img_to_save, _, _, _, rend_flm_to_save = \
# overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
# camera_params=cam, cull_backfaces=True, constant_albedo=0.6)
# Back face culling temporarily un-availabe
norma_map_img_to_save, _, _, _, rend_flm_to_save = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam, cull_backfaces=False, constant_albedo=0.6)
rend_flm_to_save = torch.clamp(rend_flm_to_save, 0, 1) * 2 - 1
# rend_flm_to_save = rend_flm
# norma_map_img_to_save = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm_to_save = fast_image_reshape(rend_flm_to_save, height_out=256, width_out=256, mode='bilinear')
# norma_map_img_to_save = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
params_to_save['identity_indices'].append(identity_embeddings.cpu().detach().numpy())
# import ipdb; ipdb.set_trace()
images[batch_idx:batch_idx+batch_size_true] = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
# if flame_mesh_imgs is None:
flame_mesh_imgs[batch_idx:batch_idx+batch_size_true] = torch.clamp(rend_flm_to_save, -1, 1).cpu().numpy()
if save_images:
mdl_name = settings_for_runs[run_idx]['name']
for key in params_to_save.keys():
params_to_save[key] = np.concatenate(params_to_save[key], axis=0)
save_dir = os.path.join(cnst.output_root, 'sample', str(run_idx), f'random_samples_q_eval_{mdl_name}')
os.makedirs(save_dir, exist_ok=True)
np.save(os.path.join(save_dir, 'params.npy'), params_to_save)
save_path_current_id = os.path.join(save_dir, 'images')
save_set_of_images(path=save_path_current_id, prefix='', images=(images + 1) / 2, show_prog_bar=True)
#save flam rndr
save_path_current_id_flm_rndr = os.path.join(save_dir, 'conditions')
save_set_of_images(path=save_path_current_id_flm_rndr, prefix='mesh', images=(flame_mesh_imgs + 1) / 2,
show_prog_bar=True)
| if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params | identifier_body |
generate_random_samples.py | import sys
sys.path.append('../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generic_utils import save_set_of_images
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
from copy import deepcopy
from my_utils.photometric_optimization.models import FLAME
from my_utils.photometric_optimization import util
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
def corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma, pose_sigma):
# import ipdb; ipdb.set_trace()
# np.random.seed(2)
corrupted_flame = deepcopy(flm_params)
if corruption_type == 'shape' or corruption_type == 'all':
corrupted_flame[:, :10] = flm_params[:, :10] + \
np.clip(np.random.normal(0, sigma, flm_params[:, :10].shape),
-3 * sigma, 3 * sigma).astype('float32')
if corruption_type == 'exp_jaw'or corruption_type == 'all':
# Expression
corrupted_flame[:, 100:110] = flm_params[:, 100:110] + \
np.clip(np.random.normal(0, sigma, flm_params[:, 100:110].shape),
-3 * sigma, 3 * sigma).astype('float32')
# Jaw pose
corrupted_flame[:, 153] = flm_params[:, 153] + \
np.random.normal(0, jaw_sigma, corrupted_flame.shape[0])
if corruption_type == 'pose' or corruption_type == 'all':
# pose_perturbation = np.random.normal(0, pose_sigma[i], (corrupted_flame.shape[0], 3))
# corrupted_flame[:, 150:153] += np.clip(pose_perturbation, -3 * pose_sigma[i], 3 * pose_sigma[i])
pose_perturbation = np.random.normal(0, pose_sigma, (corrupted_flame.shape[0],))
corrupted_flame[:, 151] = flm_params[:, 151] + \
np.clip(pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)
return corrupted_flame
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
num_smpl_to_eval_on = 128
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
# Uncomment the appropriate run_id
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
# run_ids_1 = [7]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
np.random.seed(2)
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
shape_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(97))).astype('float32')
exp_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(47))).astype('float32')
# +- pi/4 for bad samples +- pi/8 for good samples
# pose = np.array([0, np.random.uniform(-np.pi/4, np.pi/4, 1), 0,
# np.random.uniform(0, np.pi/12, 1), 0, 0]).astype('float32')
pose = np.array([0, np.random.uniform(-np.pi / 8, np.pi / 8, 1), 0,
np.random.uniform(0, np.pi / 12, 1), 0, 0]).astype('float32')
texture = np.random.normal(0, 1, [50]).astype('float32')
# texture = flame_param['tex']
flame_param = np.hstack((shape_params, exp_params, pose, flame_param['cam'],
texture, flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 32
num_sigmas = 1
corruption_sigma = np.linspace(0, 1.5, num_sigmas)
jaw_rot_range = (0, np.pi/8)
jaw_rot_sigmas = np.linspace(0, (jaw_rot_range[1] - jaw_rot_range[0])/6, num_sigmas)
pose_range = (-np.pi/3, np.pi/3)
pose_sigmas = np.linspace(0, (pose_range[1] - pose_range[0])/6, num_sigmas)
config_obj = util.dict2obj(cnst.flame_config)
flame_decoder = FLAME.FLAME(config_obj).cuda().eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx'] | generator_1 = generator_1.eval()
params_to_save = {'cam': [], 'shape': [], 'exp': [], 'pose': [], 'light_code': [], 'texture_code': [],
'identity_indices': []}
for i, sigma in enumerate(corruption_sigma):
images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
flame_mesh_imgs = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
# print(flm_params[1, :])
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
# flm_batch = eye_cntr_reg.substitute_flame_batch_with_regressed_camera(flm_batch)
flm_batch = position_to_given_location(flame_decoder, flm_batch)
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
batch_size_true = flm_batch.shape[0]
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
params_to_save['cam'].append(cam.cpu().detach().numpy())
params_to_save['shape'].append(shape.cpu().detach().numpy())
params_to_save['shape'].append(shape.cpu().detach().numpy())
params_to_save['exp'].append(exp.cpu().detach().numpy())
params_to_save['pose'].append(pose.cpu().detach().numpy())
params_to_save['light_code'].append(light_code.cpu().detach().numpy())
params_to_save['texture_code'].append(texture_code.cpu().detach().numpy())
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
# import ipdb; ipdb.set_trace()
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
# Render the 2nd time to get backface culling and white texture
# norma_map_img_to_save, _, _, _, rend_flm_to_save = \
# overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
# camera_params=cam, cull_backfaces=True, constant_albedo=0.6)
# Back face culling temporarily un-availabe
norma_map_img_to_save, _, _, _, rend_flm_to_save = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam, cull_backfaces=False, constant_albedo=0.6)
rend_flm_to_save = torch.clamp(rend_flm_to_save, 0, 1) * 2 - 1
# rend_flm_to_save = rend_flm
# norma_map_img_to_save = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm_to_save = fast_image_reshape(rend_flm_to_save, height_out=256, width_out=256, mode='bilinear')
# norma_map_img_to_save = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
params_to_save['identity_indices'].append(identity_embeddings.cpu().detach().numpy())
# import ipdb; ipdb.set_trace()
images[batch_idx:batch_idx+batch_size_true] = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
# if flame_mesh_imgs is None:
flame_mesh_imgs[batch_idx:batch_idx+batch_size_true] = torch.clamp(rend_flm_to_save, -1, 1).cpu().numpy()
if save_images:
mdl_name = settings_for_runs[run_idx]['name']
for key in params_to_save.keys():
params_to_save[key] = np.concatenate(params_to_save[key], axis=0)
save_dir = os.path.join(cnst.output_root, 'sample', str(run_idx), f'random_samples_q_eval_{mdl_name}')
os.makedirs(save_dir, exist_ok=True)
np.save(os.path.join(save_dir, 'params.npy'), params_to_save)
save_path_current_id = os.path.join(save_dir, 'images')
save_set_of_images(path=save_path_current_id, prefix='', images=(images + 1) / 2, show_prog_bar=True)
#save flam rndr
save_path_current_id_flm_rndr = os.path.join(save_dir, 'conditions')
save_set_of_images(path=save_path_current_id_flm_rndr, prefix='mesh', images=(flame_mesh_imgs + 1) / 2,
show_prog_bar=True) | ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running']) | random_line_split |
generate_random_samples.py | import sys
sys.path.append('../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generic_utils import save_set_of_images
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
from copy import deepcopy
from my_utils.photometric_optimization.models import FLAME
from my_utils.photometric_optimization import util
def | (flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
def corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma, pose_sigma):
# import ipdb; ipdb.set_trace()
# np.random.seed(2)
corrupted_flame = deepcopy(flm_params)
if corruption_type == 'shape' or corruption_type == 'all':
corrupted_flame[:, :10] = flm_params[:, :10] + \
np.clip(np.random.normal(0, sigma, flm_params[:, :10].shape),
-3 * sigma, 3 * sigma).astype('float32')
if corruption_type == 'exp_jaw'or corruption_type == 'all':
# Expression
corrupted_flame[:, 100:110] = flm_params[:, 100:110] + \
np.clip(np.random.normal(0, sigma, flm_params[:, 100:110].shape),
-3 * sigma, 3 * sigma).astype('float32')
# Jaw pose
corrupted_flame[:, 153] = flm_params[:, 153] + \
np.random.normal(0, jaw_sigma, corrupted_flame.shape[0])
if corruption_type == 'pose' or corruption_type == 'all':
# pose_perturbation = np.random.normal(0, pose_sigma[i], (corrupted_flame.shape[0], 3))
# corrupted_flame[:, 150:153] += np.clip(pose_perturbation, -3 * pose_sigma[i], 3 * pose_sigma[i])
pose_perturbation = np.random.normal(0, pose_sigma, (corrupted_flame.shape[0],))
corrupted_flame[:, 151] = flm_params[:, 151] + \
np.clip(pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)
return corrupted_flame
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
num_smpl_to_eval_on = 128
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
# Uncomment the appropriate run_id
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
# run_ids_1 = [7]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
np.random.seed(2)
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
shape_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(97))).astype('float32')
exp_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(47))).astype('float32')
# +- pi/4 for bad samples +- pi/8 for good samples
# pose = np.array([0, np.random.uniform(-np.pi/4, np.pi/4, 1), 0,
# np.random.uniform(0, np.pi/12, 1), 0, 0]).astype('float32')
pose = np.array([0, np.random.uniform(-np.pi / 8, np.pi / 8, 1), 0,
np.random.uniform(0, np.pi / 12, 1), 0, 0]).astype('float32')
texture = np.random.normal(0, 1, [50]).astype('float32')
# texture = flame_param['tex']
flame_param = np.hstack((shape_params, exp_params, pose, flame_param['cam'],
texture, flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 32
num_sigmas = 1
corruption_sigma = np.linspace(0, 1.5, num_sigmas)
jaw_rot_range = (0, np.pi/8)
jaw_rot_sigmas = np.linspace(0, (jaw_rot_range[1] - jaw_rot_range[0])/6, num_sigmas)
pose_range = (-np.pi/3, np.pi/3)
pose_sigmas = np.linspace(0, (pose_range[1] - pose_range[0])/6, num_sigmas)
config_obj = util.dict2obj(cnst.flame_config)
flame_decoder = FLAME.FLAME(config_obj).cuda().eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
params_to_save = {'cam': [], 'shape': [], 'exp': [], 'pose': [], 'light_code': [], 'texture_code': [],
'identity_indices': []}
for i, sigma in enumerate(corruption_sigma):
images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
flame_mesh_imgs = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
# print(flm_params[1, :])
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
# flm_batch = eye_cntr_reg.substitute_flame_batch_with_regressed_camera(flm_batch)
flm_batch = position_to_given_location(flame_decoder, flm_batch)
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
batch_size_true = flm_batch.shape[0]
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
params_to_save['cam'].append(cam.cpu().detach().numpy())
params_to_save['shape'].append(shape.cpu().detach().numpy())
params_to_save['shape'].append(shape.cpu().detach().numpy())
params_to_save['exp'].append(exp.cpu().detach().numpy())
params_to_save['pose'].append(pose.cpu().detach().numpy())
params_to_save['light_code'].append(light_code.cpu().detach().numpy())
params_to_save['texture_code'].append(texture_code.cpu().detach().numpy())
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
# import ipdb; ipdb.set_trace()
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
# Render the 2nd time to get backface culling and white texture
# norma_map_img_to_save, _, _, _, rend_flm_to_save = \
# overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
# camera_params=cam, cull_backfaces=True, constant_albedo=0.6)
# Back face culling temporarily un-availabe
norma_map_img_to_save, _, _, _, rend_flm_to_save = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam, cull_backfaces=False, constant_albedo=0.6)
rend_flm_to_save = torch.clamp(rend_flm_to_save, 0, 1) * 2 - 1
# rend_flm_to_save = rend_flm
# norma_map_img_to_save = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm_to_save = fast_image_reshape(rend_flm_to_save, height_out=256, width_out=256, mode='bilinear')
# norma_map_img_to_save = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
params_to_save['identity_indices'].append(identity_embeddings.cpu().detach().numpy())
# import ipdb; ipdb.set_trace()
images[batch_idx:batch_idx+batch_size_true] = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
# if flame_mesh_imgs is None:
flame_mesh_imgs[batch_idx:batch_idx+batch_size_true] = torch.clamp(rend_flm_to_save, -1, 1).cpu().numpy()
if save_images:
mdl_name = settings_for_runs[run_idx]['name']
for key in params_to_save.keys():
params_to_save[key] = np.concatenate(params_to_save[key], axis=0)
save_dir = os.path.join(cnst.output_root, 'sample', str(run_idx), f'random_samples_q_eval_{mdl_name}')
os.makedirs(save_dir, exist_ok=True)
np.save(os.path.join(save_dir, 'params.npy'), params_to_save)
save_path_current_id = os.path.join(save_dir, 'images')
save_set_of_images(path=save_path_current_id, prefix='', images=(images + 1) / 2, show_prog_bar=True)
#save flam rndr
save_path_current_id_flm_rndr = os.path.join(save_dir, 'conditions')
save_set_of_images(path=save_path_current_id_flm_rndr, prefix='mesh', images=(flame_mesh_imgs + 1) / 2,
show_prog_bar=True)
| ge_gen_in | identifier_name |
fpl1617.py | import re
import requests
import json
import math
import time
import os
from operator import itemgetter
import traceback
# Parses members of each team from file
def get_all_teams(filenamePath):
myFile=open(filenamePath)
lines=myFile.readlines()
for i in range(0,len(lines)):
if lines[i].startswith("Team:"):
teamName=str(lines[i]).strip().split(':')[1].strip()
teams[teamName]=[]
for j in range(i+1,i+7):
playerName=str(lines[j]).strip().split(',')[0]
playerID=str(lines[j]).strip().split(',')[1]
teams[teamName].append([playerName,playerID,1])
return teams
def isValidCaptain(playername):
if(prvsCaptainFileFound):
for line in prvsCaptains:
if(playername in line):
count=int(line.split(':')[1].strip())
if(count<7):
return True
else:
return False
return True
else:
return True
def isValidViceCaptain(playername):
if(prvsVcFileFound):
for line in prvsViceCaptains:
if(playername in line):
count=int(line.split(':')[1].strip())
if(count<7):
return True
else:
return False
return True
else:
return True
def Captain_ViceCaptainSetup(teams):
for k,v in sorted(teams.items()):
team=k
print("-------------------\nTeam: %s" %(str(k)))
for i in range(0,len(v)):
print(str(i+1)+". "+teams[k][i][0])
captain=int(input("Enter Captain number: "))
teams[k].append(captain-1)
if(isValidCaptain(teams[k][captain-1][0])):
teams[k][captain-1][2]=2
vc=int(input("Enter vc number: "))
teams[k].append(vc-1)
if(isValidViceCaptain(teams[k][vc-1][0])):
teams[k][vc-1][2]=1.5
return teams
def getTeamScoresfromList(TeamList):
orignalScore=[]
orignalScoreDict=[]
multiplierScore=[]
multiplierScoreDict=[]
for player in TeamList[0:6]:
player_score=get_player_score(player[1],gw)
orignalScore.append(player_score)
orignalScoreDict.append({player[0]:player_score})
multiplierScore.append(player_score*player[2])
multiplierScoreDict.append({player[0]:player_score*player[2]})
return(orignalScore,multiplierScore,orignalScoreDict,multiplierScoreDict)
# Get a player's score given player ID and gw number
def get_player_score(id,gw):
url="https://fantasy.premierleague.com/drf/entry/"+str(id)+"/event/"+str(gw)+"/picks"
retryCount=0
while True:
try:
print("\rURL: "+str(url)+" Retry Count: "+str(retryCount),end="")
r = requests.get(url)
except:
print("\nFound exception")
retryCount = retryCount + 1
continue
print("")
break
result = json.loads(r.text)
points=result['entry_history']['points']
deductions=result['entry_history']['event_transfers_cost']
score = int(points)-int(deductions)
return score
def isHome(teamname,fixtures):
for fixture in fixtures:
if teamname in fixture['homeTeamName']:
return True
elif teamname in fixture['awayTeamName']:
return False
else:
continue
def getfix(gw):
res = requests.get("http://www.football-data.org/soccerseasons/426/fixtures?matchday="+str(gw))
result=json.loads(res.text)
return result['fixtures']
def calcResult(n):
score=(int(math.floor(n / 10.0)) * 10)/10
return int(score)
def calcbonus(m):
score = calcResult(m)
if(score <9):
return 0
elif(score <12 and score >=9):
return 1
elif(score <16 and score >=12):
return 2
else:
return 3
try:
print("-----------------------------------------------------")
print("LFC India Fantasy League Score calculator 16-17")
print("Author: kirankaranth1@gmail.com")
print("Source: https://github.com/kirankaranth1/LFCFantasyLeague-16-17")
print("-----------------------------------------------------")
curr_dir=str(os.getcwd())
teams={}
gw=int(input("Enter Gameweek number: "))
prvsCaptainFile="Counts/Captains/CaptainCount_gw"+str(gw-1)+".txt"
prvsVcFile="Counts/ViceCaptains/ViceCaptainCount_gw"+str(gw-1)+".txt"
try:
prvsCaptainStream=open(prvsCaptainFile,'r')
except FileNotFoundError:
prvsCaptainFileFound=False
print("WARNING: Captain count file for previous gw was not found. Hence, count checking of entered captains will be skipped.")
else:
prvsCaptainFileFound=True
prvsCaptains=prvsCaptainStream.readlines()
try:
prvsVcStream=open(prvsVcFile,'r')
except FileNotFoundError:
prvsVcFileFound=False
print("WARNING: Vice Captain count file for previous gw was not found. Hence, count checking of entered captains will be skipped.")
else:
prvsVcFileFound=True
prvsViceCaptains=prvsVcStream.readlines()
fixtures=getfix(gw)
# Streams to all log files
os.makedirs("TeamScores", exist_ok=True)
os.makedirs("Results", exist_ok=True)
os.makedirs("Counts", exist_ok=True) | teamscores_path=curr_dir+"\TeamScores\TeamScore_gw"+str(gw)+".txt"
results_path=curr_dir+"\Results\Results_gw"+str(gw)+".txt"
captain_path=curr_dir+"\Counts\Captains\CaptainCount_gw"+str(gw)+".txt"
vicecaptain_path=curr_dir+"\Counts\ViceCaptains\ViceCaptainCount_gw"+str(gw)+".txt"
f_teamscores=open("TeamScores/TeamScore_gw"+str(gw)+".txt",'w')
f_results=open("Results/Results_gw"+str(gw)+".txt",'w')
f_captain=open("Counts/Captains/CaptainCount_gw"+str(gw)+".txt",'w')
f_vicecaptain=open("Counts/ViceCaptains/ViceCaptainCount_gw"+str(gw)+".txt",'w')
# Main program starts here
teams=get_all_teams("CompletePlayersTeamsIDs.txt")
print("\n-------------------------------------------------------------------------------------------------\nPlease setup captain and vice captain for each team for gameweek "+str(gw)+"\n-------------------------------------------------------------------------------------------------")
C_teams=Captain_ViceCaptainSetup(teams)
allTeamScores = {}
for k,v in sorted(C_teams.items()):
print("\nCalculating Scores of %s" %(str(k)))
print("Team: %s" %(str(k)),file=f_teamscores)
(original,multiplied,oDict,mDict)=getTeamScoresfromList(v)
max_score=max(original)
if isHome(str(k),fixtures):
HA=0.2
print("Home Advantage: YES",file=f_teamscores)
home=True
else:
HA=0
print("Home Advantage: NO",file=f_teamscores)
home=False
#print(v)
print("Captain: %s" %(v[v[6]][0]),file=f_teamscores)
print("Vice Captain: %s" %(v[v[7]][0]),file=f_teamscores)
print("Individual Scores : %s" %(str(oDict)),file=f_teamscores)
print("Team Scores afer multipliers: %s" %(str(mDict)),file=f_teamscores)
t_score=sum(multiplied)+(HA*max_score)
print("Cumulative team Score: %s\n" %(str(round(t_score))),file=f_teamscores)
allTeamScores[str(k)]=round(t_score)
f_teamscores.close()
for fixture in fixtures:
try:
hscore=allTeamScores[fixture['homeTeamName']]
ascore=allTeamScores[fixture['awayTeamName']]
if(9>=math.fabs(hscore-ascore)>=0):
fixture['result']['goalsAwayTeam']=0
fixture['result']['goalsHomeTeam']=0
diff=math.fabs(hscore-ascore)
elif(hscore-ascore>=10):
fixture['result']['goalsAwayTeam']=0
diff=hscore-ascore
fixture['result']['goalsHomeTeam']=calcResult(diff)
else:
diff=ascore-hscore
fixture['result']['goalsAwayTeam']=calcResult(diff)
fixture['result']['goalsHomeTeam']=0
print(str(fixture['homeTeamName'])+" vs "+str(fixture['awayTeamName']),file=f_results)
print(str(allTeamScores[fixture['homeTeamName']])+"-"+str(allTeamScores[fixture['awayTeamName']]),file=f_results)
print("\nBonus points:"+str(calcbonus(diff)),file=f_results)
print("Final Score: "+str(fixture['result']['goalsHomeTeam'])+"-"+str(fixture['result']['goalsAwayTeam']),file=f_results)
print("--------------------------------------------",file=f_results)
except KeyError:
continue
f_results.close()
captains={}
vicecaptains={}
isCountsCalculated=True
for i in range(1,gw+1):
try:
currentFile = open("TeamScores/TeamScore_gw"+str(i)+".txt",'r')
except FileNotFoundError:
print("WARNING: File TeamScores/TeamScore_gw"+str(i)+".txt not found. Skipping captain and vc count calculation.")
isCountsCalculated=False
break
fileLines = currentFile.readlines()
for line in fileLines:
if(line.startswith("Captain")):
try:
captains[line.split(':')[1].strip()] += 1
except KeyError:
captains[line.split(':')[1].strip()] = 1
if(line.startswith("Vice Captain")):
try:
vicecaptains[line.split(':')[1].strip()] += 1
except KeyError:
vicecaptains[line.split(':')[1].strip()] = 1
currentFile.close()
template = "{0:25}:{1:10}"
if(isCountsCalculated):
for k,v in sorted(captains.items(), key=itemgetter(1), reverse=True):
print(template.format(k,v),file=f_captain)
for k,v in sorted(vicecaptains.items(), key=itemgetter(1), reverse=True):
print(template.format(k,v),file=f_vicecaptain)
print("\n")
print("--------------------------------------------------------------------------------------------------")
print("Team scores are logged at the below location:")
print(teamscores_path)
print("------------------------")
print("Final results are logged at the below location:")
print(results_path)
if(isCountsCalculated):
print("------------------------")
print("Captain counts are logged at the below location:")
print(captain_path)
print("------------------------")
print("Vice captain counts are logged at the below location:")
print(vicecaptain_path)
print("--------------------------------------------------------------------------------------------------")
dummy=input("Press Enter to exit the program...")
except:
print("-----------------------------------------------------\n-----------------------------------------------------")
print("Unexpected error encountered. Please report the below message to author.")
print(str(traceback.format_exc()))
dummy=input("Press Enter to exit the program...") | os.makedirs("Counts/Captains", exist_ok=True)
os.makedirs("Counts/ViceCaptains", exist_ok=True)
| random_line_split |
fpl1617.py | import re
import requests
import json
import math
import time
import os
from operator import itemgetter
import traceback
# Parses members of each team from file
def get_all_teams(filenamePath):
myFile=open(filenamePath)
lines=myFile.readlines()
for i in range(0,len(lines)):
if lines[i].startswith("Team:"):
teamName=str(lines[i]).strip().split(':')[1].strip()
teams[teamName]=[]
for j in range(i+1,i+7):
playerName=str(lines[j]).strip().split(',')[0]
playerID=str(lines[j]).strip().split(',')[1]
teams[teamName].append([playerName,playerID,1])
return teams
def isValidCaptain(playername):
if(prvsCaptainFileFound):
for line in prvsCaptains:
if(playername in line):
count=int(line.split(':')[1].strip())
if(count<7):
return True
else:
return False
return True
else:
return True
def isValidViceCaptain(playername):
|
def Captain_ViceCaptainSetup(teams):
for k,v in sorted(teams.items()):
team=k
print("-------------------\nTeam: %s" %(str(k)))
for i in range(0,len(v)):
print(str(i+1)+". "+teams[k][i][0])
captain=int(input("Enter Captain number: "))
teams[k].append(captain-1)
if(isValidCaptain(teams[k][captain-1][0])):
teams[k][captain-1][2]=2
vc=int(input("Enter vc number: "))
teams[k].append(vc-1)
if(isValidViceCaptain(teams[k][vc-1][0])):
teams[k][vc-1][2]=1.5
return teams
def getTeamScoresfromList(TeamList):
orignalScore=[]
orignalScoreDict=[]
multiplierScore=[]
multiplierScoreDict=[]
for player in TeamList[0:6]:
player_score=get_player_score(player[1],gw)
orignalScore.append(player_score)
orignalScoreDict.append({player[0]:player_score})
multiplierScore.append(player_score*player[2])
multiplierScoreDict.append({player[0]:player_score*player[2]})
return(orignalScore,multiplierScore,orignalScoreDict,multiplierScoreDict)
# Get a player's score given player ID and gw number
def get_player_score(id,gw):
url="https://fantasy.premierleague.com/drf/entry/"+str(id)+"/event/"+str(gw)+"/picks"
retryCount=0
while True:
try:
print("\rURL: "+str(url)+" Retry Count: "+str(retryCount),end="")
r = requests.get(url)
except:
print("\nFound exception")
retryCount = retryCount + 1
continue
print("")
break
result = json.loads(r.text)
points=result['entry_history']['points']
deductions=result['entry_history']['event_transfers_cost']
score = int(points)-int(deductions)
return score
def isHome(teamname,fixtures):
for fixture in fixtures:
if teamname in fixture['homeTeamName']:
return True
elif teamname in fixture['awayTeamName']:
return False
else:
continue
def getfix(gw):
res = requests.get("http://www.football-data.org/soccerseasons/426/fixtures?matchday="+str(gw))
result=json.loads(res.text)
return result['fixtures']
def calcResult(n):
score=(int(math.floor(n / 10.0)) * 10)/10
return int(score)
def calcbonus(m):
score = calcResult(m)
if(score <9):
return 0
elif(score <12 and score >=9):
return 1
elif(score <16 and score >=12):
return 2
else:
return 3
try:
print("-----------------------------------------------------")
print("LFC India Fantasy League Score calculator 16-17")
print("Author: kirankaranth1@gmail.com")
print("Source: https://github.com/kirankaranth1/LFCFantasyLeague-16-17")
print("-----------------------------------------------------")
curr_dir=str(os.getcwd())
teams={}
gw=int(input("Enter Gameweek number: "))
prvsCaptainFile="Counts/Captains/CaptainCount_gw"+str(gw-1)+".txt"
prvsVcFile="Counts/ViceCaptains/ViceCaptainCount_gw"+str(gw-1)+".txt"
try:
prvsCaptainStream=open(prvsCaptainFile,'r')
except FileNotFoundError:
prvsCaptainFileFound=False
print("WARNING: Captain count file for previous gw was not found. Hence, count checking of entered captains will be skipped.")
else:
prvsCaptainFileFound=True
prvsCaptains=prvsCaptainStream.readlines()
try:
prvsVcStream=open(prvsVcFile,'r')
except FileNotFoundError:
prvsVcFileFound=False
print("WARNING: Vice Captain count file for previous gw was not found. Hence, count checking of entered captains will be skipped.")
else:
prvsVcFileFound=True
prvsViceCaptains=prvsVcStream.readlines()
fixtures=getfix(gw)
# Streams to all log files
os.makedirs("TeamScores", exist_ok=True)
os.makedirs("Results", exist_ok=True)
os.makedirs("Counts", exist_ok=True)
os.makedirs("Counts/Captains", exist_ok=True)
os.makedirs("Counts/ViceCaptains", exist_ok=True)
teamscores_path=curr_dir+"\TeamScores\TeamScore_gw"+str(gw)+".txt"
results_path=curr_dir+"\Results\Results_gw"+str(gw)+".txt"
captain_path=curr_dir+"\Counts\Captains\CaptainCount_gw"+str(gw)+".txt"
vicecaptain_path=curr_dir+"\Counts\ViceCaptains\ViceCaptainCount_gw"+str(gw)+".txt"
f_teamscores=open("TeamScores/TeamScore_gw"+str(gw)+".txt",'w')
f_results=open("Results/Results_gw"+str(gw)+".txt",'w')
f_captain=open("Counts/Captains/CaptainCount_gw"+str(gw)+".txt",'w')
f_vicecaptain=open("Counts/ViceCaptains/ViceCaptainCount_gw"+str(gw)+".txt",'w')
# Main program starts here
teams=get_all_teams("CompletePlayersTeamsIDs.txt")
print("\n-------------------------------------------------------------------------------------------------\nPlease setup captain and vice captain for each team for gameweek "+str(gw)+"\n-------------------------------------------------------------------------------------------------")
C_teams=Captain_ViceCaptainSetup(teams)
allTeamScores = {}
for k,v in sorted(C_teams.items()):
print("\nCalculating Scores of %s" %(str(k)))
print("Team: %s" %(str(k)),file=f_teamscores)
(original,multiplied,oDict,mDict)=getTeamScoresfromList(v)
max_score=max(original)
if isHome(str(k),fixtures):
HA=0.2
print("Home Advantage: YES",file=f_teamscores)
home=True
else:
HA=0
print("Home Advantage: NO",file=f_teamscores)
home=False
#print(v)
print("Captain: %s" %(v[v[6]][0]),file=f_teamscores)
print("Vice Captain: %s" %(v[v[7]][0]),file=f_teamscores)
print("Individual Scores : %s" %(str(oDict)),file=f_teamscores)
print("Team Scores afer multipliers: %s" %(str(mDict)),file=f_teamscores)
t_score=sum(multiplied)+(HA*max_score)
print("Cumulative team Score: %s\n" %(str(round(t_score))),file=f_teamscores)
allTeamScores[str(k)]=round(t_score)
f_teamscores.close()
for fixture in fixtures:
try:
hscore=allTeamScores[fixture['homeTeamName']]
ascore=allTeamScores[fixture['awayTeamName']]
if(9>=math.fabs(hscore-ascore)>=0):
fixture['result']['goalsAwayTeam']=0
fixture['result']['goalsHomeTeam']=0
diff=math.fabs(hscore-ascore)
elif(hscore-ascore>=10):
fixture['result']['goalsAwayTeam']=0
diff=hscore-ascore
fixture['result']['goalsHomeTeam']=calcResult(diff)
else:
diff=ascore-hscore
fixture['result']['goalsAwayTeam']=calcResult(diff)
fixture['result']['goalsHomeTeam']=0
print(str(fixture['homeTeamName'])+" vs "+str(fixture['awayTeamName']),file=f_results)
print(str(allTeamScores[fixture['homeTeamName']])+"-"+str(allTeamScores[fixture['awayTeamName']]),file=f_results)
print("\nBonus points:"+str(calcbonus(diff)),file=f_results)
print("Final Score: "+str(fixture['result']['goalsHomeTeam'])+"-"+str(fixture['result']['goalsAwayTeam']),file=f_results)
print("--------------------------------------------",file=f_results)
except KeyError:
continue
f_results.close()
captains={}
vicecaptains={}
isCountsCalculated=True
for i in range(1,gw+1):
try:
currentFile = open("TeamScores/TeamScore_gw"+str(i)+".txt",'r')
except FileNotFoundError:
print("WARNING: File TeamScores/TeamScore_gw"+str(i)+".txt not found. Skipping captain and vc count calculation.")
isCountsCalculated=False
break
fileLines = currentFile.readlines()
for line in fileLines:
if(line.startswith("Captain")):
try:
captains[line.split(':')[1].strip()] += 1
except KeyError:
captains[line.split(':')[1].strip()] = 1
if(line.startswith("Vice Captain")):
try:
vicecaptains[line.split(':')[1].strip()] += 1
except KeyError:
vicecaptains[line.split(':')[1].strip()] = 1
currentFile.close()
template = "{0:25}:{1:10}"
if(isCountsCalculated):
for k,v in sorted(captains.items(), key=itemgetter(1), reverse=True):
print(template.format(k,v),file=f_captain)
for k,v in sorted(vicecaptains.items(), key=itemgetter(1), reverse=True):
print(template.format(k,v),file=f_vicecaptain)
print("\n")
print("--------------------------------------------------------------------------------------------------")
print("Team scores are logged at the below location:")
print(teamscores_path)
print("------------------------")
print("Final results are logged at the below location:")
print(results_path)
if(isCountsCalculated):
print("------------------------")
print("Captain counts are logged at the below location:")
print(captain_path)
print("------------------------")
print("Vice captain counts are logged at the below location:")
print(vicecaptain_path)
print("--------------------------------------------------------------------------------------------------")
dummy=input("Press Enter to exit the program...")
except:
print("-----------------------------------------------------\n-----------------------------------------------------")
print("Unexpected error encountered. Please report the below message to author.")
print(str(traceback.format_exc()))
dummy=input("Press Enter to exit the program...") | if(prvsVcFileFound):
for line in prvsViceCaptains:
if(playername in line):
count=int(line.split(':')[1].strip())
if(count<7):
return True
else:
return False
return True
else:
return True | identifier_body |
fpl1617.py | import re
import requests
import json
import math
import time
import os
from operator import itemgetter
import traceback
# Parses members of each team from file
def get_all_teams(filenamePath):
myFile=open(filenamePath)
lines=myFile.readlines()
for i in range(0,len(lines)):
if lines[i].startswith("Team:"):
teamName=str(lines[i]).strip().split(':')[1].strip()
teams[teamName]=[]
for j in range(i+1,i+7):
playerName=str(lines[j]).strip().split(',')[0]
playerID=str(lines[j]).strip().split(',')[1]
teams[teamName].append([playerName,playerID,1])
return teams
def isValidCaptain(playername):
if(prvsCaptainFileFound):
for line in prvsCaptains:
if(playername in line):
count=int(line.split(':')[1].strip())
if(count<7):
return True
else:
return False
return True
else:
return True
def isValidViceCaptain(playername):
if(prvsVcFileFound):
for line in prvsViceCaptains:
if(playername in line):
count=int(line.split(':')[1].strip())
if(count<7):
return True
else:
return False
return True
else:
return True
def Captain_ViceCaptainSetup(teams):
for k,v in sorted(teams.items()):
team=k
print("-------------------\nTeam: %s" %(str(k)))
for i in range(0,len(v)):
print(str(i+1)+". "+teams[k][i][0])
captain=int(input("Enter Captain number: "))
teams[k].append(captain-1)
if(isValidCaptain(teams[k][captain-1][0])):
teams[k][captain-1][2]=2
vc=int(input("Enter vc number: "))
teams[k].append(vc-1)
if(isValidViceCaptain(teams[k][vc-1][0])):
|
return teams
def getTeamScoresfromList(TeamList):
orignalScore=[]
orignalScoreDict=[]
multiplierScore=[]
multiplierScoreDict=[]
for player in TeamList[0:6]:
player_score=get_player_score(player[1],gw)
orignalScore.append(player_score)
orignalScoreDict.append({player[0]:player_score})
multiplierScore.append(player_score*player[2])
multiplierScoreDict.append({player[0]:player_score*player[2]})
return(orignalScore,multiplierScore,orignalScoreDict,multiplierScoreDict)
# Get a player's score given player ID and gw number
def get_player_score(id,gw):
url="https://fantasy.premierleague.com/drf/entry/"+str(id)+"/event/"+str(gw)+"/picks"
retryCount=0
while True:
try:
print("\rURL: "+str(url)+" Retry Count: "+str(retryCount),end="")
r = requests.get(url)
except:
print("\nFound exception")
retryCount = retryCount + 1
continue
print("")
break
result = json.loads(r.text)
points=result['entry_history']['points']
deductions=result['entry_history']['event_transfers_cost']
score = int(points)-int(deductions)
return score
def isHome(teamname,fixtures):
for fixture in fixtures:
if teamname in fixture['homeTeamName']:
return True
elif teamname in fixture['awayTeamName']:
return False
else:
continue
def getfix(gw):
res = requests.get("http://www.football-data.org/soccerseasons/426/fixtures?matchday="+str(gw))
result=json.loads(res.text)
return result['fixtures']
def calcResult(n):
score=(int(math.floor(n / 10.0)) * 10)/10
return int(score)
def calcbonus(m):
score = calcResult(m)
if(score <9):
return 0
elif(score <12 and score >=9):
return 1
elif(score <16 and score >=12):
return 2
else:
return 3
try:
print("-----------------------------------------------------")
print("LFC India Fantasy League Score calculator 16-17")
print("Author: kirankaranth1@gmail.com")
print("Source: https://github.com/kirankaranth1/LFCFantasyLeague-16-17")
print("-----------------------------------------------------")
curr_dir=str(os.getcwd())
teams={}
gw=int(input("Enter Gameweek number: "))
prvsCaptainFile="Counts/Captains/CaptainCount_gw"+str(gw-1)+".txt"
prvsVcFile="Counts/ViceCaptains/ViceCaptainCount_gw"+str(gw-1)+".txt"
try:
prvsCaptainStream=open(prvsCaptainFile,'r')
except FileNotFoundError:
prvsCaptainFileFound=False
print("WARNING: Captain count file for previous gw was not found. Hence, count checking of entered captains will be skipped.")
else:
prvsCaptainFileFound=True
prvsCaptains=prvsCaptainStream.readlines()
try:
prvsVcStream=open(prvsVcFile,'r')
except FileNotFoundError:
prvsVcFileFound=False
print("WARNING: Vice Captain count file for previous gw was not found. Hence, count checking of entered captains will be skipped.")
else:
prvsVcFileFound=True
prvsViceCaptains=prvsVcStream.readlines()
fixtures=getfix(gw)
# Streams to all log files
os.makedirs("TeamScores", exist_ok=True)
os.makedirs("Results", exist_ok=True)
os.makedirs("Counts", exist_ok=True)
os.makedirs("Counts/Captains", exist_ok=True)
os.makedirs("Counts/ViceCaptains", exist_ok=True)
teamscores_path=curr_dir+"\TeamScores\TeamScore_gw"+str(gw)+".txt"
results_path=curr_dir+"\Results\Results_gw"+str(gw)+".txt"
captain_path=curr_dir+"\Counts\Captains\CaptainCount_gw"+str(gw)+".txt"
vicecaptain_path=curr_dir+"\Counts\ViceCaptains\ViceCaptainCount_gw"+str(gw)+".txt"
f_teamscores=open("TeamScores/TeamScore_gw"+str(gw)+".txt",'w')
f_results=open("Results/Results_gw"+str(gw)+".txt",'w')
f_captain=open("Counts/Captains/CaptainCount_gw"+str(gw)+".txt",'w')
f_vicecaptain=open("Counts/ViceCaptains/ViceCaptainCount_gw"+str(gw)+".txt",'w')
# Main program starts here
teams=get_all_teams("CompletePlayersTeamsIDs.txt")
print("\n-------------------------------------------------------------------------------------------------\nPlease setup captain and vice captain for each team for gameweek "+str(gw)+"\n-------------------------------------------------------------------------------------------------")
C_teams=Captain_ViceCaptainSetup(teams)
allTeamScores = {}
for k,v in sorted(C_teams.items()):
print("\nCalculating Scores of %s" %(str(k)))
print("Team: %s" %(str(k)),file=f_teamscores)
(original,multiplied,oDict,mDict)=getTeamScoresfromList(v)
max_score=max(original)
if isHome(str(k),fixtures):
HA=0.2
print("Home Advantage: YES",file=f_teamscores)
home=True
else:
HA=0
print("Home Advantage: NO",file=f_teamscores)
home=False
#print(v)
print("Captain: %s" %(v[v[6]][0]),file=f_teamscores)
print("Vice Captain: %s" %(v[v[7]][0]),file=f_teamscores)
print("Individual Scores : %s" %(str(oDict)),file=f_teamscores)
print("Team Scores afer multipliers: %s" %(str(mDict)),file=f_teamscores)
t_score=sum(multiplied)+(HA*max_score)
print("Cumulative team Score: %s\n" %(str(round(t_score))),file=f_teamscores)
allTeamScores[str(k)]=round(t_score)
f_teamscores.close()
for fixture in fixtures:
try:
hscore=allTeamScores[fixture['homeTeamName']]
ascore=allTeamScores[fixture['awayTeamName']]
if(9>=math.fabs(hscore-ascore)>=0):
fixture['result']['goalsAwayTeam']=0
fixture['result']['goalsHomeTeam']=0
diff=math.fabs(hscore-ascore)
elif(hscore-ascore>=10):
fixture['result']['goalsAwayTeam']=0
diff=hscore-ascore
fixture['result']['goalsHomeTeam']=calcResult(diff)
else:
diff=ascore-hscore
fixture['result']['goalsAwayTeam']=calcResult(diff)
fixture['result']['goalsHomeTeam']=0
print(str(fixture['homeTeamName'])+" vs "+str(fixture['awayTeamName']),file=f_results)
print(str(allTeamScores[fixture['homeTeamName']])+"-"+str(allTeamScores[fixture['awayTeamName']]),file=f_results)
print("\nBonus points:"+str(calcbonus(diff)),file=f_results)
print("Final Score: "+str(fixture['result']['goalsHomeTeam'])+"-"+str(fixture['result']['goalsAwayTeam']),file=f_results)
print("--------------------------------------------",file=f_results)
except KeyError:
continue
f_results.close()
captains={}
vicecaptains={}
isCountsCalculated=True
for i in range(1,gw+1):
try:
currentFile = open("TeamScores/TeamScore_gw"+str(i)+".txt",'r')
except FileNotFoundError:
print("WARNING: File TeamScores/TeamScore_gw"+str(i)+".txt not found. Skipping captain and vc count calculation.")
isCountsCalculated=False
break
fileLines = currentFile.readlines()
for line in fileLines:
if(line.startswith("Captain")):
try:
captains[line.split(':')[1].strip()] += 1
except KeyError:
captains[line.split(':')[1].strip()] = 1
if(line.startswith("Vice Captain")):
try:
vicecaptains[line.split(':')[1].strip()] += 1
except KeyError:
vicecaptains[line.split(':')[1].strip()] = 1
currentFile.close()
template = "{0:25}:{1:10}"
if(isCountsCalculated):
for k,v in sorted(captains.items(), key=itemgetter(1), reverse=True):
print(template.format(k,v),file=f_captain)
for k,v in sorted(vicecaptains.items(), key=itemgetter(1), reverse=True):
print(template.format(k,v),file=f_vicecaptain)
print("\n")
print("--------------------------------------------------------------------------------------------------")
print("Team scores are logged at the below location:")
print(teamscores_path)
print("------------------------")
print("Final results are logged at the below location:")
print(results_path)
if(isCountsCalculated):
print("------------------------")
print("Captain counts are logged at the below location:")
print(captain_path)
print("------------------------")
print("Vice captain counts are logged at the below location:")
print(vicecaptain_path)
print("--------------------------------------------------------------------------------------------------")
dummy=input("Press Enter to exit the program...")
except:
print("-----------------------------------------------------\n-----------------------------------------------------")
print("Unexpected error encountered. Please report the below message to author.")
print(str(traceback.format_exc()))
dummy=input("Press Enter to exit the program...") | teams[k][vc-1][2]=1.5 | conditional_block |
fpl1617.py | import re
import requests
import json
import math
import time
import os
from operator import itemgetter
import traceback
# Parses members of each team from file
def get_all_teams(filenamePath):
myFile=open(filenamePath)
lines=myFile.readlines()
for i in range(0,len(lines)):
if lines[i].startswith("Team:"):
teamName=str(lines[i]).strip().split(':')[1].strip()
teams[teamName]=[]
for j in range(i+1,i+7):
playerName=str(lines[j]).strip().split(',')[0]
playerID=str(lines[j]).strip().split(',')[1]
teams[teamName].append([playerName,playerID,1])
return teams
def isValidCaptain(playername):
if(prvsCaptainFileFound):
for line in prvsCaptains:
if(playername in line):
count=int(line.split(':')[1].strip())
if(count<7):
return True
else:
return False
return True
else:
return True
def isValidViceCaptain(playername):
if(prvsVcFileFound):
for line in prvsViceCaptains:
if(playername in line):
count=int(line.split(':')[1].strip())
if(count<7):
return True
else:
return False
return True
else:
return True
def Captain_ViceCaptainSetup(teams):
for k,v in sorted(teams.items()):
team=k
print("-------------------\nTeam: %s" %(str(k)))
for i in range(0,len(v)):
print(str(i+1)+". "+teams[k][i][0])
captain=int(input("Enter Captain number: "))
teams[k].append(captain-1)
if(isValidCaptain(teams[k][captain-1][0])):
teams[k][captain-1][2]=2
vc=int(input("Enter vc number: "))
teams[k].append(vc-1)
if(isValidViceCaptain(teams[k][vc-1][0])):
teams[k][vc-1][2]=1.5
return teams
def getTeamScoresfromList(TeamList):
orignalScore=[]
orignalScoreDict=[]
multiplierScore=[]
multiplierScoreDict=[]
for player in TeamList[0:6]:
player_score=get_player_score(player[1],gw)
orignalScore.append(player_score)
orignalScoreDict.append({player[0]:player_score})
multiplierScore.append(player_score*player[2])
multiplierScoreDict.append({player[0]:player_score*player[2]})
return(orignalScore,multiplierScore,orignalScoreDict,multiplierScoreDict)
# Get a player's score given player ID and gw number
def | (id,gw):
url="https://fantasy.premierleague.com/drf/entry/"+str(id)+"/event/"+str(gw)+"/picks"
retryCount=0
while True:
try:
print("\rURL: "+str(url)+" Retry Count: "+str(retryCount),end="")
r = requests.get(url)
except:
print("\nFound exception")
retryCount = retryCount + 1
continue
print("")
break
result = json.loads(r.text)
points=result['entry_history']['points']
deductions=result['entry_history']['event_transfers_cost']
score = int(points)-int(deductions)
return score
def isHome(teamname,fixtures):
for fixture in fixtures:
if teamname in fixture['homeTeamName']:
return True
elif teamname in fixture['awayTeamName']:
return False
else:
continue
def getfix(gw):
res = requests.get("http://www.football-data.org/soccerseasons/426/fixtures?matchday="+str(gw))
result=json.loads(res.text)
return result['fixtures']
def calcResult(n):
score=(int(math.floor(n / 10.0)) * 10)/10
return int(score)
def calcbonus(m):
score = calcResult(m)
if(score <9):
return 0
elif(score <12 and score >=9):
return 1
elif(score <16 and score >=12):
return 2
else:
return 3
try:
print("-----------------------------------------------------")
print("LFC India Fantasy League Score calculator 16-17")
print("Author: kirankaranth1@gmail.com")
print("Source: https://github.com/kirankaranth1/LFCFantasyLeague-16-17")
print("-----------------------------------------------------")
curr_dir=str(os.getcwd())
teams={}
gw=int(input("Enter Gameweek number: "))
prvsCaptainFile="Counts/Captains/CaptainCount_gw"+str(gw-1)+".txt"
prvsVcFile="Counts/ViceCaptains/ViceCaptainCount_gw"+str(gw-1)+".txt"
try:
prvsCaptainStream=open(prvsCaptainFile,'r')
except FileNotFoundError:
prvsCaptainFileFound=False
print("WARNING: Captain count file for previous gw was not found. Hence, count checking of entered captains will be skipped.")
else:
prvsCaptainFileFound=True
prvsCaptains=prvsCaptainStream.readlines()
try:
prvsVcStream=open(prvsVcFile,'r')
except FileNotFoundError:
prvsVcFileFound=False
print("WARNING: Vice Captain count file for previous gw was not found. Hence, count checking of entered captains will be skipped.")
else:
prvsVcFileFound=True
prvsViceCaptains=prvsVcStream.readlines()
fixtures=getfix(gw)
# Streams to all log files
os.makedirs("TeamScores", exist_ok=True)
os.makedirs("Results", exist_ok=True)
os.makedirs("Counts", exist_ok=True)
os.makedirs("Counts/Captains", exist_ok=True)
os.makedirs("Counts/ViceCaptains", exist_ok=True)
teamscores_path=curr_dir+"\TeamScores\TeamScore_gw"+str(gw)+".txt"
results_path=curr_dir+"\Results\Results_gw"+str(gw)+".txt"
captain_path=curr_dir+"\Counts\Captains\CaptainCount_gw"+str(gw)+".txt"
vicecaptain_path=curr_dir+"\Counts\ViceCaptains\ViceCaptainCount_gw"+str(gw)+".txt"
f_teamscores=open("TeamScores/TeamScore_gw"+str(gw)+".txt",'w')
f_results=open("Results/Results_gw"+str(gw)+".txt",'w')
f_captain=open("Counts/Captains/CaptainCount_gw"+str(gw)+".txt",'w')
f_vicecaptain=open("Counts/ViceCaptains/ViceCaptainCount_gw"+str(gw)+".txt",'w')
# Main program starts here
teams=get_all_teams("CompletePlayersTeamsIDs.txt")
print("\n-------------------------------------------------------------------------------------------------\nPlease setup captain and vice captain for each team for gameweek "+str(gw)+"\n-------------------------------------------------------------------------------------------------")
C_teams=Captain_ViceCaptainSetup(teams)
allTeamScores = {}
for k,v in sorted(C_teams.items()):
print("\nCalculating Scores of %s" %(str(k)))
print("Team: %s" %(str(k)),file=f_teamscores)
(original,multiplied,oDict,mDict)=getTeamScoresfromList(v)
max_score=max(original)
if isHome(str(k),fixtures):
HA=0.2
print("Home Advantage: YES",file=f_teamscores)
home=True
else:
HA=0
print("Home Advantage: NO",file=f_teamscores)
home=False
#print(v)
print("Captain: %s" %(v[v[6]][0]),file=f_teamscores)
print("Vice Captain: %s" %(v[v[7]][0]),file=f_teamscores)
print("Individual Scores : %s" %(str(oDict)),file=f_teamscores)
print("Team Scores afer multipliers: %s" %(str(mDict)),file=f_teamscores)
t_score=sum(multiplied)+(HA*max_score)
print("Cumulative team Score: %s\n" %(str(round(t_score))),file=f_teamscores)
allTeamScores[str(k)]=round(t_score)
f_teamscores.close()
for fixture in fixtures:
try:
hscore=allTeamScores[fixture['homeTeamName']]
ascore=allTeamScores[fixture['awayTeamName']]
if(9>=math.fabs(hscore-ascore)>=0):
fixture['result']['goalsAwayTeam']=0
fixture['result']['goalsHomeTeam']=0
diff=math.fabs(hscore-ascore)
elif(hscore-ascore>=10):
fixture['result']['goalsAwayTeam']=0
diff=hscore-ascore
fixture['result']['goalsHomeTeam']=calcResult(diff)
else:
diff=ascore-hscore
fixture['result']['goalsAwayTeam']=calcResult(diff)
fixture['result']['goalsHomeTeam']=0
print(str(fixture['homeTeamName'])+" vs "+str(fixture['awayTeamName']),file=f_results)
print(str(allTeamScores[fixture['homeTeamName']])+"-"+str(allTeamScores[fixture['awayTeamName']]),file=f_results)
print("\nBonus points:"+str(calcbonus(diff)),file=f_results)
print("Final Score: "+str(fixture['result']['goalsHomeTeam'])+"-"+str(fixture['result']['goalsAwayTeam']),file=f_results)
print("--------------------------------------------",file=f_results)
except KeyError:
continue
f_results.close()
captains={}
vicecaptains={}
isCountsCalculated=True
for i in range(1,gw+1):
try:
currentFile = open("TeamScores/TeamScore_gw"+str(i)+".txt",'r')
except FileNotFoundError:
print("WARNING: File TeamScores/TeamScore_gw"+str(i)+".txt not found. Skipping captain and vc count calculation.")
isCountsCalculated=False
break
fileLines = currentFile.readlines()
for line in fileLines:
if(line.startswith("Captain")):
try:
captains[line.split(':')[1].strip()] += 1
except KeyError:
captains[line.split(':')[1].strip()] = 1
if(line.startswith("Vice Captain")):
try:
vicecaptains[line.split(':')[1].strip()] += 1
except KeyError:
vicecaptains[line.split(':')[1].strip()] = 1
currentFile.close()
template = "{0:25}:{1:10}"
if(isCountsCalculated):
for k,v in sorted(captains.items(), key=itemgetter(1), reverse=True):
print(template.format(k,v),file=f_captain)
for k,v in sorted(vicecaptains.items(), key=itemgetter(1), reverse=True):
print(template.format(k,v),file=f_vicecaptain)
print("\n")
print("--------------------------------------------------------------------------------------------------")
print("Team scores are logged at the below location:")
print(teamscores_path)
print("------------------------")
print("Final results are logged at the below location:")
print(results_path)
if(isCountsCalculated):
print("------------------------")
print("Captain counts are logged at the below location:")
print(captain_path)
print("------------------------")
print("Vice captain counts are logged at the below location:")
print(vicecaptain_path)
print("--------------------------------------------------------------------------------------------------")
dummy=input("Press Enter to exit the program...")
except:
print("-----------------------------------------------------\n-----------------------------------------------------")
print("Unexpected error encountered. Please report the below message to author.")
print(str(traceback.format_exc()))
dummy=input("Press Enter to exit the program...") | get_player_score | identifier_name |
workspace.go | // Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"golang.org/x/mod/modfile"
"golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/xcontext"
)
// workspaceSource reports how the set of active modules has been derived.
type workspaceSource int
const (
legacyWorkspace = iota // non-module or single module mode
goplsModWorkspace // modules provided by a gopls.mod file
goWorkWorkspace // modules provided by a go.work file
fileSystemWorkspace // modules found by walking the filesystem
)
func (s workspaceSource) String() string {
switch s {
case legacyWorkspace:
return "legacy"
case goplsModWorkspace:
return "gopls.mod"
case goWorkWorkspace:
return "go.work"
case fileSystemWorkspace:
return "file system"
default:
return "!(unknown module source)"
}
}
// workspaceCommon holds immutable information about the workspace setup.
//
// TODO(rfindley): there is some redundancy here with workspaceInformation.
// Reconcile these two types.
type workspaceCommon struct {
root span.URI
excludePath func(string) bool
// explicitGowork is, if non-empty, the URI for the explicit go.work file
// provided via the user's environment.
explicitGowork span.URI
}
// workspace tracks go.mod files in the workspace, along with the
// gopls.mod file, to provide support for multi-module workspaces.
//
// Specifically, it provides:
// - the set of modules contained within in the workspace root considered to
// be 'active'
// - the workspace modfile, to be used for the go command `-modfile` flag
// - the set of workspace directories
//
// This type is immutable (or rather, idempotent), so that it may be shared
// across multiple snapshots.
type workspace struct {
workspaceCommon
// The source of modules in this workspace.
moduleSource workspaceSource
// activeModFiles holds the active go.mod files.
activeModFiles map[span.URI]struct{}
// knownModFiles holds the set of all go.mod files in the workspace.
// In all modes except for legacy, this is equivalent to modFiles.
knownModFiles map[span.URI]struct{}
// workFile, if nonEmpty, is the go.work file for the workspace.
workFile span.URI
// The workspace module is lazily re-built once after being invalidated.
// buildMu+built guards this reconstruction.
//
// file and wsDirs may be non-nil even if built == false, if they were copied
// from the previous workspace module version. In this case, they will be
// preserved if building fails.
buildMu sync.Mutex
built bool
buildErr error
mod *modfile.File
sum []byte
wsDirs map[span.URI]struct{}
}
// newWorkspace creates a new workspace at the given root directory,
// determining its module source based on the presence of a gopls.mod or
// go.work file, and the go111moduleOff and useWsModule settings.
//
// If useWsModule is set, the workspace may use a synthetic mod file replacing
// all modules in the root.
//
// If there is no active workspace file (a gopls.mod or go.work), newWorkspace
// scans the filesystem to find modules.
//
// TODO(rfindley): newWorkspace should perhaps never fail, relying instead on
// the criticalError method to surface problems in the workspace.
func newWorkspace(ctx context.Context, root, explicitGowork span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff, useWsModule bool) (*workspace, error) {
ws := &workspace{
workspaceCommon: workspaceCommon{
root: root,
explicitGowork: explicitGowork,
excludePath: excludePath,
},
}
// The user may have a gopls.mod or go.work file that defines their
// workspace.
//
// TODO(rfindley): if GO111MODULE=off, this looks wrong, though there are
// probably other problems.
if err := ws.loadExplicitWorkspaceFile(ctx, fs); err == nil {
return ws, nil
}
// Otherwise, in all other modes, search for all of the go.mod files in the
// workspace.
knownModFiles, err := findModules(root, excludePath, 0)
if err != nil {
return nil, err
}
ws.knownModFiles = knownModFiles
switch {
case go111moduleOff:
ws.moduleSource = legacyWorkspace
case useWsModule:
ws.activeModFiles = knownModFiles
ws.moduleSource = fileSystemWorkspace
default:
ws.moduleSource = legacyWorkspace
activeModFiles, err := getLegacyModules(ctx, root, fs)
if err != nil {
return nil, err
}
ws.activeModFiles = activeModFiles
}
return ws, nil
}
// loadExplicitWorkspaceFile loads workspace information from go.work or
// gopls.mod files, setting the active modules, mod file, and module source
// accordingly.
func (ws *workspace) loadExplicitWorkspaceFile(ctx context.Context, fs source.FileSource) error {
for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
fh, err := fs.GetFile(ctx, uriForSource(ws.root, ws.explicitGowork, src))
if err != nil {
return err
}
contents, err := fh.Read()
if err != nil {
continue // TODO(rfindley): is it correct to proceed here?
}
var file *modfile.File
var activeModFiles map[span.URI]struct{}
switch src {
case goWorkWorkspace:
file, activeModFiles, err = parseGoWork(ctx, ws.root, fh.URI(), contents, fs)
ws.workFile = fh.URI()
case goplsModWorkspace:
file, activeModFiles, err = parseGoplsMod(ws.root, fh.URI(), contents)
}
if err != nil {
ws.buildMu.Lock()
ws.built = true
ws.buildErr = err
ws.buildMu.Unlock()
}
ws.mod = file
ws.activeModFiles = activeModFiles
ws.moduleSource = src
return nil
}
return noHardcodedWorkspace
}
var noHardcodedWorkspace = errors.New("no hardcoded workspace")
// TODO(rfindley): eliminate getKnownModFiles.
func (w *workspace) getKnownModFiles() map[span.URI]struct{} {
return w.knownModFiles
}
// ActiveModFiles returns the set of active mod files for the current workspace.
func (w *workspace) ActiveModFiles() map[span.URI]struct{} {
return w.activeModFiles
}
// criticalError returns a critical error related to the workspace setup, or
// nil if there is none.
func (w *workspace) criticalError(ctx context.Context, fs source.FileSource) (res *source.CriticalError) {
	// For now, we narrowly report errors related to `go.work` files.
	//
	// TODO(rfindley): investigate whether other workspace validation errors
	// can be consolidated here.
	if w.moduleSource == goWorkWorkspace {
		// We should have already built the modfile, but build here to be
		// consistent about accessing w.mod after w.build.
		//
		// TODO(rfindley): build eagerly. Building lazily is a premature
		// optimization that poses a significant burden on the code.
		w.build(ctx, fs)
		// NOTE(review): w.buildErr is read without holding buildMu; build has
		// completed at this point, so the value is stable.
		if w.buildErr != nil {
			return &source.CriticalError{
				MainError: w.buildErr,
			}
		}
	}
	return nil
}
// modFile gets the workspace modfile associated with this workspace,
// computing it if it doesn't exist.
//
// A fileSource must be passed in to solve a chicken-egg problem: it is not
// correct to pass in the snapshot file source to newWorkspace when
// invalidating, because at the time these are called the snapshot is locked.
// So we must pass it in later on when actually using the modFile.
func (w *workspace) modFile(ctx context.Context, fs source.FileSource) (*modfile.File, error) {
	w.build(ctx, fs)
	return w.mod, w.buildErr
}
// sumFile returns the workspace's go.sum contents, building the workspace
// first if necessary. The error, if any, is the workspace build error.
func (w *workspace) sumFile(ctx context.Context, fs source.FileSource) ([]byte, error) {
	w.build(ctx, fs)
	return w.sum, w.buildErr
}
// build computes the workspace mod file, sum file, and workspace directories,
// recording any failure in w.buildErr. It runs at most once per workspace
// instance, guarded by w.buildMu/w.built.
func (w *workspace) build(ctx context.Context, fs source.FileSource) {
	w.buildMu.Lock()
	defer w.buildMu.Unlock()
	if w.built {
		return
	}
	// Building should never be cancelled. Since the workspace module is shared
	// across multiple snapshots, doing so would put us in a bad state, and it
	// would not be obvious to the user how to recover.
	ctx = xcontext.Detach(ctx)
	// If the module source is from the filesystem, try to build the workspace
	// module from active modules discovered by scanning the filesystem. Fall
	// back on the pre-existing mod file if parsing fails.
	if w.moduleSource == fileSystemWorkspace {
		file, err := buildWorkspaceModFile(ctx, w.activeModFiles, fs)
		switch {
		case err == nil:
			w.mod = file
		case w.mod != nil:
			// Parsing failed, but we have a previous file version.
			event.Error(ctx, "building workspace mod file", err)
		default:
			// No file to fall back on.
			w.buildErr = err
		}
	}
	// Derive workspace directories from the mod file: the root, plus the
	// target of every replace directive that points at a directory (i.e. has
	// no version).
	if w.mod != nil {
		w.wsDirs = map[span.URI]struct{}{
			w.root: {},
		}
		for _, r := range w.mod.Replace {
			// We may be replacing a module with a different version, not a path
			// on disk.
			if r.New.Version != "" {
				continue
			}
			w.wsDirs[span.URIFromPath(r.New.Path)] = struct{}{}
		}
	}
	// Ensure that there is always at least the root dir.
	if len(w.wsDirs) == 0 {
		w.wsDirs = map[span.URI]struct{}{
			w.root: {},
		}
	}
	sum, err := buildWorkspaceSumFile(ctx, w.activeModFiles, fs)
	if err == nil {
		w.sum = sum
	} else {
		event.Error(ctx, "building workspace sum file", err)
	}
	w.built = true
}
// dirs returns the workspace directories for the loaded modules, sorted in
// ascending URI order. The workspace is built first if necessary.
func (w *workspace) dirs(ctx context.Context, fs source.FileSource) []span.URI {
	w.build(ctx, fs)
	var out []span.URI
	for dir := range w.wsDirs {
		out = append(out, dir)
	}
	sort.Slice(out, func(a, b int) bool { return out[a] < out[b] })
	return out
}
// Clone returns a (possibly) new workspace after invalidating the changed
// files. If w is still valid in the presence of changedURIs, it returns itself
// unmodified.
//
// The returned needReinit flag indicates to the caller that the workspace
// needs to be reinitialized (because a relevant go.mod or go.work file has
// been changed).
//
// TODO(rfindley): it looks wrong that we return 'needReinit' here. The caller
// should determine whether to re-initialize..
func (w *workspace) Clone(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, needReinit bool) {
	// Prevent races to w.modFile or w.wsDirs below, if w has not yet been built.
	w.buildMu.Lock()
	defer w.buildMu.Unlock()
	// Clone the workspace. This may be discarded if nothing changed.
	changed := false
	result := &workspace{
		workspaceCommon: w.workspaceCommon,
		moduleSource:    w.moduleSource,
		knownModFiles:   make(map[span.URI]struct{}),
		activeModFiles:  make(map[span.URI]struct{}),
		workFile:        w.workFile,
		mod:             w.mod,
		sum:             w.sum,
		wsDirs:          w.wsDirs,
	}
	// Copy the mod-file sets so that edits below do not mutate w.
	for k, v := range w.knownModFiles {
		result.knownModFiles[k] = v
	}
	for k, v := range w.activeModFiles {
		result.activeModFiles[k] = v
	}
	equalURI := func(a, b span.URI) (r bool) {
		// This query is a strange mix of syntax and file system state:
		// deletion of a file causes a false result if the name doesn't change.
		// Our tests exercise only the first clause.
		return a == b || span.SameExistingFile(a, b)
	}
	// First handle changes to the go.work or gopls.mod file. This must be
	// considered before any changes to go.mod or go.sum files, as these files
	// determine which modules we care about. If go.work/gopls.mod has changed
	// we need to either re-read it if it exists or walk the filesystem if it
	// has been deleted. go.work should override the gopls.mod if both exist.
	changed, needReinit = handleWorkspaceFileChanges(ctx, result, changes, fs)
	// Next, handle go.mod changes that could affect our workspace.
	for uri, change := range changes {
		// Otherwise, we only care about go.mod files in the workspace directory.
		if change.isUnchanged || !isGoMod(uri) || !source.InDir(result.root.Filename(), uri.Filename()) {
			continue
		}
		changed = true
		// In legacy mode only the root go.mod is active; in all other modes
		// every in-workspace go.mod is.
		active := result.moduleSource != legacyWorkspace || equalURI(modURI(w.root), uri)
		needReinit = needReinit || (active && change.fileHandle.Saved())
		// Don't mess with the list of mod files if using go.work or gopls.mod.
		if result.moduleSource == goplsModWorkspace || result.moduleSource == goWorkWorkspace {
			continue
		}
		if change.exists {
			result.knownModFiles[uri] = struct{}{}
			if active {
				result.activeModFiles[uri] = struct{}{}
			}
		} else {
			delete(result.knownModFiles, uri)
			delete(result.activeModFiles, uri)
		}
	}
	// Finally, process go.sum changes for any modules that are now active.
	for uri, change := range changes {
		if !isGoSum(uri) {
			continue
		}
		// TODO(rFindley) factor out this URI mangling.
		dir := filepath.Dir(uri.Filename())
		modURI := span.URIFromPath(filepath.Join(dir, "go.mod"))
		if _, active := result.activeModFiles[modURI]; !active {
			continue
		}
		// Only changes to active go.sum files actually cause the workspace to
		// change.
		changed = true
		needReinit = needReinit || change.fileHandle.Saved()
	}
	if !changed {
		return w, false
	}
	return result, needReinit
}
// handleWorkspaceFileChanges handles changes related to a go.work or gopls.mod
// file, updating ws accordingly. ws.root must be set.
func handleWorkspaceFileChanges(ctx context.Context, ws *workspace, changes map[span.URI]*fileChange, fs source.FileSource) (changed, reload bool) {
	// If go.work/gopls.mod has changed we need to either re-read it if it
	// exists or walk the filesystem if it has been deleted.
	// go.work should override the gopls.mod if both exist.
	for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
		uri := uriForSource(ws.root, ws.explicitGowork, src)
		// File opens/closes are just no-ops.
		change, ok := changes[uri]
		if !ok {
			continue
		}
		if change.isUnchanged {
			// NOTE(review): break, not continue — an unchanged go.work still
			// takes precedence over any gopls.mod change.
			break
		}
		if change.exists {
			// Only invalidate if the file if it actually parses.
			// Otherwise, stick with the current file.
			var parsedFile *modfile.File
			var parsedModules map[span.URI]struct{}
			var err error
			switch src {
			case goWorkWorkspace:
				parsedFile, parsedModules, err = parseGoWork(ctx, ws.root, uri, change.content, fs)
			case goplsModWorkspace:
				parsedFile, parsedModules, err = parseGoplsMod(ws.root, uri, change.content)
			}
			if err != nil {
				// An unparseable file should not invalidate the workspace:
				// nothing good could come from changing the workspace in
				// this case.
				//
				// TODO(rfindley): well actually, it could potentially lead to a better
				// critical error. Evaluate whether we can unify this case with the
				// error returned by newWorkspace, without needlessly invalidating
				// metadata.
				event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err)
			} else {
				// only update the modfile if it parsed.
				changed = true
				reload = change.fileHandle.Saved()
				ws.mod = parsedFile
				ws.moduleSource = src
				ws.knownModFiles = parsedModules
				ws.activeModFiles = make(map[span.URI]struct{})
				for k, v := range parsedModules {
					ws.activeModFiles[k] = v
				}
			}
			break // We've found an explicit workspace file, so can stop looking.
		} else {
			// go.work/gopls.mod is deleted. search for modules again.
			changed = true
			reload = true
			ws.moduleSource = fileSystemWorkspace
			// The parsed file is no longer valid.
			ws.mod = nil
			knownModFiles, err := findModules(ws.root, ws.excludePath, 0)
			if err != nil {
				ws.knownModFiles = nil
				ws.activeModFiles = nil
				event.Error(ctx, "finding file system modules", err)
			} else {
				ws.knownModFiles = knownModFiles
				ws.activeModFiles = make(map[span.URI]struct{})
				for k, v := range ws.knownModFiles {
					ws.activeModFiles[k] = v
				}
			}
		}
	}
	return changed, reload
}
// uriForSource returns the URI of the file that defines the workspace for
// the given source: the explicit go.work override if one is set, otherwise
// the conventional file name ("go.work" or "gopls.mod") joined onto root.
// Sources with no defining file yield the empty URI.
func uriForSource(root, explicitGowork span.URI, src workspaceSource) span.URI {
	if src == goWorkWorkspace && explicitGowork != "" {
		return explicitGowork
	}
	var basename string
	switch src {
	case goplsModWorkspace:
		basename = "gopls.mod"
	case goWorkWorkspace:
		basename = "go.work"
	default:
		return ""
	}
	return span.URIFromPath(filepath.Join(root.Filename(), basename))
}
// modURI returns the URI for the go.mod file contained in root.
func modURI(root span.URI) span.URI {
	modPath := filepath.Join(root.Filename(), "go.mod")
	return span.URIFromPath(modPath)
}
// isGoMod reports whether uri names a go.mod file.
func isGoMod(uri span.URI) bool {
	base := filepath.Base(uri.Filename())
	return base == "go.mod"
}
// isGoSum reports whether uri names a go.sum or go.work.sum file.
func isGoSum(uri span.URI) bool {
	switch filepath.Base(uri.Filename()) {
	case "go.sum", "go.work.sum":
		return true
	}
	return false
}
// fileExists reports whether the file at uri exists, as observed through fs.
func fileExists(ctx context.Context, uri span.URI, fs source.FileSource) (bool, error) {
	fh, err := fs.GetFile(ctx, uri)
	if err != nil {
		return false, err
	}
	return fileHandleExists(fh)
}
// fileHandleExists reports whether the file underlying fh actually exists,
// probing it with a read. Errors other than non-existence are returned.
func fileHandleExists(fh source.FileHandle) (bool, error) {
	if _, err := fh.Read(); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
// dirURI returns the URI of uri's parent directory.
//
// TODO(rFindley): replace this (and similar) with a uripath package analogous
// to filepath.
func dirURI(uri span.URI) span.URI {
	parent := filepath.Dir(uri.Filename())
	return span.URIFromPath(parent)
}
// getLegacyModules returns a module set containing at most the root module:
// root's go.mod if it exists, otherwise an empty set.
func getLegacyModules(ctx context.Context, root span.URI, fs source.FileSource) (map[span.URI]struct{}, error) {
	modules := make(map[span.URI]struct{})
	uri := span.URIFromPath(filepath.Join(root.Filename(), "go.mod"))
	exists, err := fileExists(ctx, uri, fs)
	if err != nil {
		return nil, err
	}
	if exists {
		modules[uri] = struct{}{}
	}
	return modules, nil
}
func parseGoWork(ctx context.Context, root, uri span.URI, contents []byte, fs source.FileSource) (*modfile.File, map[span.URI]struct{}, error) {
workFile, err := modfile.ParseWork(uri.Filename(), contents, nil)
if err != nil {
return nil, nil, fmt.Errorf("parsing go.work: %w", err)
}
modFiles := make(map[span.URI]struct{})
for _, dir := range workFile.Use {
// The resulting modfile must use absolute paths, so that it can be
// written to a temp directory.
dir.Path = absolutePath(root, dir.Path)
modURI := span.URIFromPath(filepath.Join(dir.Path, "go.mod"))
modFiles[modURI] = struct{}{}
}
// TODO(rfindley): we should either not build the workspace modfile here, or
// not fail so hard. A failure in building the workspace modfile should not
// invalidate the active module paths extracted above.
modFile, err := buildWorkspaceModFile(ctx, modFiles, fs)
if err != nil {
return nil, nil, err
}
// Require a go directive, per the spec.
if workFile.Go == nil || workFile.Go.Version == "" |
if err := modFile.AddGoStmt(workFile.Go.Version); err != nil {
return nil, nil, err
}
return modFile, modFiles, nil
}
// parseGoplsMod parses a gopls.mod file at uri, returning the parsed modfile
// and the set of go.mod URIs derived from its replace directives. Every
// replace must point at a directory on disk (i.e. have no version).
func parseGoplsMod(root, uri span.URI, contents []byte) (*modfile.File, map[span.URI]struct{}, error) {
	modFile, err := modfile.Parse(uri.Filename(), contents, nil)
	if err != nil {
		return nil, nil, fmt.Errorf("parsing gopls.mod: %w", err)
	}
	modFiles := make(map[span.URI]struct{})
	for _, replace := range modFile.Replace {
		if replace.New.Version != "" {
			return nil, nil, fmt.Errorf("gopls.mod: replaced module %q@%q must not have version", replace.New.Path, replace.New.Version)
		}
		// The resulting modfile must use absolute paths, so that it can be
		// written to a temp directory.
		replace.New.Path = absolutePath(root, replace.New.Path)
		modURI := span.URIFromPath(filepath.Join(replace.New.Path, "go.mod"))
		modFiles[modURI] = struct{}{}
	}
	return modFile, modFiles, nil
}
// absolutePath interprets path (a slash-separated path taken from a
// workspace file) as a filesystem path relative to root. Paths that are
// already absolute are returned unchanged (after slash conversion).
func absolutePath(root span.URI, path string) string {
	fp := filepath.FromSlash(path)
	if filepath.IsAbs(fp) {
		return fp
	}
	return filepath.Join(root.Filename(), fp)
}
// errExhausted is returned by findModules if the file scan limit is reached.
var errExhausted = errors.New("exhausted")

// fileLimit bounds the go.mod search to 1 million files. As a point of
// reference, Kubernetes has 22K files (as of 2020-11-24).
const fileLimit = 1000000
// findModules recursively walks the root directory looking for go.mod files,
// returning the set of modules it discovers. If modLimit is non-zero,
// searching stops once modLimit modules have been found.
//
// TODO(rfindley): consider overlays.
func findModules(root span.URI, excludePath func(string) bool, modLimit int) (map[span.URI]struct{}, error) {
	// Walk the view's folder to find all modules in the view.
	modFiles := make(map[span.URI]struct{})
	searched := 0
	// errDone is a local sentinel used to stop the walk early once modLimit
	// has been reached; it is translated to a nil error below.
	errDone := errors.New("done")
	err := filepath.Walk(root.Filename(), func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// Probably a permission error. Keep looking.
			return filepath.SkipDir
		}
		// For any path that is not the workspace folder, check if the path
		// would be ignored by the go command. Vendor directories also do not
		// contain workspace modules.
		if info.IsDir() && path != root.Filename() {
			suffix := strings.TrimPrefix(path, root.Filename())
			switch {
			case checkIgnored(suffix),
				strings.Contains(filepath.ToSlash(suffix), "/vendor/"),
				excludePath(suffix):
				return filepath.SkipDir
			}
		}
		// We're only interested in go.mod files.
		uri := span.URIFromPath(path)
		if isGoMod(uri) {
			modFiles[uri] = struct{}{}
		}
		if modLimit > 0 && len(modFiles) >= modLimit {
			return errDone
		}
		searched++
		if fileLimit > 0 && searched >= fileLimit {
			return errExhausted
		}
		return nil
	})
	if err == errDone {
		// Hitting modLimit is not an error: the results so far suffice.
		return modFiles, nil
	}
	return modFiles, err
}
| {
return nil, nil, fmt.Errorf("go.work has missing or incomplete go directive")
} | conditional_block |
workspace.go | // Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"golang.org/x/mod/modfile"
"golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/xcontext"
)
// workspaceSource reports how the set of active modules has been derived.
type workspaceSource int

const (
	legacyWorkspace     = iota // non-module or single module mode
	goplsModWorkspace          // modules provided by a gopls.mod file
	goWorkWorkspace            // modules provided by a go.work file
	fileSystemWorkspace        // modules found by walking the filesystem
)
// String returns a short human-readable name for the module source.
func (s workspaceSource) String() string {
	names := map[workspaceSource]string{
		legacyWorkspace:     "legacy",
		goplsModWorkspace:   "gopls.mod",
		goWorkWorkspace:     "go.work",
		fileSystemWorkspace: "file system",
	}
	if name, ok := names[s]; ok {
		return name
	}
	return "!(unknown module source)"
}
// workspaceCommon holds immutable information about the workspace setup.
//
// TODO(rfindley): there is some redundancy here with workspaceInformation.
// Reconcile these two types.
type workspaceCommon struct {
	// root is the workspace root directory.
	root span.URI
	// excludePath reports whether a (root-relative) path should be skipped
	// when scanning for modules.
	excludePath func(string) bool
	// explicitGowork is, if non-empty, the URI for the explicit go.work file
	// provided via the user's environment.
	explicitGowork span.URI
}
// workspace tracks go.mod files in the workspace, along with the
// gopls.mod file, to provide support for multi-module workspaces.
//
// Specifically, it provides:
//   - the set of modules contained within in the workspace root considered to
//     be 'active'
//   - the workspace modfile, to be used for the go command `-modfile` flag
//   - the set of workspace directories
//
// This type is immutable (or rather, idempotent), so that it may be shared
// across multiple snapshots.
type workspace struct {
	workspaceCommon

	// moduleSource is the source of modules in this workspace.
	moduleSource workspaceSource

	// activeModFiles holds the active go.mod files.
	activeModFiles map[span.URI]struct{}

	// knownModFiles holds the set of all go.mod files in the workspace.
	// In all modes except for legacy, this is equivalent to modFiles.
	knownModFiles map[span.URI]struct{}

	// workFile, if nonEmpty, is the go.work file for the workspace.
	workFile span.URI

	// The workspace module is lazily re-built once after being invalidated.
	// buildMu+built guards this reconstruction.
	//
	// file and wsDirs may be non-nil even if built == false, if they were copied
	// from the previous workspace module version. In this case, they will be
	// preserved if building fails.
	buildMu  sync.Mutex
	built    bool
	buildErr error
	mod      *modfile.File
	sum      []byte
	wsDirs   map[span.URI]struct{}
}
// newWorkspace creates a new workspace at the given root directory,
// determining its module source based on the presence of a gopls.mod or
// go.work file, and the go111moduleOff and useWsModule settings.
//
// If useWsModule is set, the workspace may use a synthetic mod file replacing
// all modules in the root.
//
// If there is no active workspace file (a gopls.mod or go.work), newWorkspace
// scans the filesystem to find modules.
//
// TODO(rfindley): newWorkspace should perhaps never fail, relying instead on
// the criticalError method to surface problems in the workspace.
func newWorkspace(ctx context.Context, root, explicitGowork span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff, useWsModule bool) (*workspace, error) {
	ws := &workspace{
		workspaceCommon: workspaceCommon{
			root:           root,
			explicitGowork: explicitGowork,
			excludePath:    excludePath,
		},
	}
	// The user may have a gopls.mod or go.work file that defines their
	// workspace.
	//
	// TODO(rfindley): if GO111MODULE=off, this looks wrong, though there are
	// probably other problems.
	if err := ws.loadExplicitWorkspaceFile(ctx, fs); err == nil {
		return ws, nil
	}
	// Otherwise, in all other modes, search for all of the go.mod files in the
	// workspace.
	knownModFiles, err := findModules(root, excludePath, 0)
	if err != nil {
		return nil, err
	}
	ws.knownModFiles = knownModFiles
	switch {
	case go111moduleOff:
		ws.moduleSource = legacyWorkspace
	case useWsModule:
		ws.activeModFiles = knownModFiles
		ws.moduleSource = fileSystemWorkspace
	default:
		// Legacy mode: at most the root module is active.
		ws.moduleSource = legacyWorkspace
		activeModFiles, err := getLegacyModules(ctx, root, fs)
		if err != nil {
			return nil, err
		}
		ws.activeModFiles = activeModFiles
	}
	return ws, nil
}
// loadExplicitWorkspaceFile loads workspace information from go.work or
// gopls.mod files, setting the active modules, mod file, and module source
// accordingly. go.work takes precedence over gopls.mod.
//
// If neither file can be read, it returns noHardcodedWorkspace.
func (ws *workspace) loadExplicitWorkspaceFile(ctx context.Context, fs source.FileSource) error {
	for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
		fh, err := fs.GetFile(ctx, uriForSource(ws.root, ws.explicitGowork, src))
		if err != nil {
			return err
		}
		contents, err := fh.Read()
		if err != nil {
			continue // TODO(rfindley): is it correct to proceed here?
		}
		var file *modfile.File
		var activeModFiles map[span.URI]struct{}
		switch src {
		case goWorkWorkspace:
			file, activeModFiles, err = parseGoWork(ctx, ws.root, fh.URI(), contents, fs)
			ws.workFile = fh.URI()
		case goplsModWorkspace:
			file, activeModFiles, err = parseGoplsMod(ws.root, fh.URI(), contents)
		}
		if err != nil {
			// NOTE(review): a parse error does not abort loading. It is
			// recorded as the build error (surfaced later via criticalError),
			// and the nil file/module set below are installed as-is.
			ws.buildMu.Lock()
			ws.built = true
			ws.buildErr = err
			ws.buildMu.Unlock()
		}
		ws.mod = file
		ws.activeModFiles = activeModFiles
		ws.moduleSource = src
		return nil
	}
	return noHardcodedWorkspace
}

// noHardcodedWorkspace reports that no go.work or gopls.mod file was found.
var noHardcodedWorkspace = errors.New("no hardcoded workspace")
// getKnownModFiles returns every go.mod file known to the workspace, whether
// or not it is active. The returned map is the workspace's own (not a copy).
//
// TODO(rfindley): eliminate getKnownModFiles.
func (w *workspace) getKnownModFiles() map[span.URI]struct{} {
	return w.knownModFiles
}
// ActiveModFiles returns the set of active mod files for the current workspace.
// The returned map is the workspace's own (not a copy); callers must not
// mutate it.
func (w *workspace) ActiveModFiles() map[span.URI]struct{} {
	return w.activeModFiles
}
// criticalError returns a critical error related to the workspace setup, or
// nil if there is none.
func (w *workspace) criticalError(ctx context.Context, fs source.FileSource) (res *source.CriticalError) {
	// For now, we narrowly report errors related to `go.work` files.
	//
	// TODO(rfindley): investigate whether other workspace validation errors
	// can be consolidated here.
	if w.moduleSource == goWorkWorkspace {
		// We should have already built the modfile, but build here to be
		// consistent about accessing w.mod after w.build.
		//
		// TODO(rfindley): build eagerly. Building lazily is a premature
		// optimization that poses a significant burden on the code.
		w.build(ctx, fs)
		// NOTE(review): w.buildErr is read without holding buildMu; build has
		// completed at this point, so the value is stable.
		if w.buildErr != nil {
			return &source.CriticalError{
				MainError: w.buildErr,
			}
		}
	}
	return nil
}
// modFile gets the workspace modfile associated with this workspace,
// computing it if it doesn't exist.
//
// A fileSource must be passed in to solve a chicken-egg problem: it is not
// correct to pass in the snapshot file source to newWorkspace when
// invalidating, because at the time these are called the snapshot is locked.
// So we must pass it in later on when actually using the modFile.
func (w *workspace) modFile(ctx context.Context, fs source.FileSource) (*modfile.File, error) {
	w.build(ctx, fs)
	return w.mod, w.buildErr
}
// sumFile returns the workspace's go.sum contents, building the workspace
// first if necessary. The error, if any, is the workspace build error.
func (w *workspace) sumFile(ctx context.Context, fs source.FileSource) ([]byte, error) {
	w.build(ctx, fs)
	return w.sum, w.buildErr
}
// build computes the workspace mod file, sum file, and workspace directories,
// recording any failure in w.buildErr. It runs at most once per workspace
// instance, guarded by w.buildMu/w.built.
func (w *workspace) build(ctx context.Context, fs source.FileSource) {
	w.buildMu.Lock()
	defer w.buildMu.Unlock()
	if w.built {
		return
	}
	// Building should never be cancelled. Since the workspace module is shared
	// across multiple snapshots, doing so would put us in a bad state, and it
	// would not be obvious to the user how to recover.
	ctx = xcontext.Detach(ctx)
	// If the module source is from the filesystem, try to build the workspace
	// module from active modules discovered by scanning the filesystem. Fall
	// back on the pre-existing mod file if parsing fails.
	if w.moduleSource == fileSystemWorkspace {
		file, err := buildWorkspaceModFile(ctx, w.activeModFiles, fs)
		switch {
		case err == nil:
			w.mod = file
		case w.mod != nil:
			// Parsing failed, but we have a previous file version.
			event.Error(ctx, "building workspace mod file", err)
		default:
			// No file to fall back on.
			w.buildErr = err
		}
	}
	// Derive workspace directories from the mod file: the root, plus the
	// target of every replace directive that points at a directory (i.e. has
	// no version).
	if w.mod != nil {
		w.wsDirs = map[span.URI]struct{}{
			w.root: {},
		}
		for _, r := range w.mod.Replace {
			// We may be replacing a module with a different version, not a path
			// on disk.
			if r.New.Version != "" {
				continue
			}
			w.wsDirs[span.URIFromPath(r.New.Path)] = struct{}{}
		}
	}
	// Ensure that there is always at least the root dir.
	if len(w.wsDirs) == 0 {
		w.wsDirs = map[span.URI]struct{}{
			w.root: {},
		}
	}
	sum, err := buildWorkspaceSumFile(ctx, w.activeModFiles, fs)
	if err == nil {
		w.sum = sum
	} else {
		event.Error(ctx, "building workspace sum file", err)
	}
	w.built = true
}
// dirs returns the workspace directories for the loaded modules, sorted in
// ascending URI order. The workspace is built first if necessary.
func (w *workspace) dirs(ctx context.Context, fs source.FileSource) []span.URI {
	w.build(ctx, fs)
	var out []span.URI
	for dir := range w.wsDirs {
		out = append(out, dir)
	}
	sort.Slice(out, func(a, b int) bool { return out[a] < out[b] })
	return out
}
// Clone returns a (possibly) new workspace after invalidating the changed
// files. If w is still valid in the presence of changedURIs, it returns itself
// unmodified.
//
// The returned needReinit flag indicates to the caller that the workspace
// needs to be reinitialized (because a relevant go.mod or go.work file has
// been changed).
//
// TODO(rfindley): it looks wrong that we return 'needReinit' here. The caller
// should determine whether to re-initialize..
func (w *workspace) Clone(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, needReinit bool) {
	// Prevent races to w.modFile or w.wsDirs below, if w has not yet been built.
	w.buildMu.Lock()
	defer w.buildMu.Unlock()
	// Clone the workspace. This may be discarded if nothing changed.
	changed := false
	result := &workspace{
		workspaceCommon: w.workspaceCommon,
		moduleSource:    w.moduleSource,
		knownModFiles:   make(map[span.URI]struct{}),
		activeModFiles:  make(map[span.URI]struct{}),
		workFile:        w.workFile,
		mod:             w.mod,
		sum:             w.sum,
		wsDirs:          w.wsDirs,
	}
	// Copy the mod-file sets so that edits below do not mutate w.
	for k, v := range w.knownModFiles {
		result.knownModFiles[k] = v
	}
	for k, v := range w.activeModFiles {
		result.activeModFiles[k] = v
	}
	equalURI := func(a, b span.URI) (r bool) {
		// This query is a strange mix of syntax and file system state:
		// deletion of a file causes a false result if the name doesn't change.
		// Our tests exercise only the first clause.
		return a == b || span.SameExistingFile(a, b)
	}
	// First handle changes to the go.work or gopls.mod file. This must be
	// considered before any changes to go.mod or go.sum files, as these files
	// determine which modules we care about. If go.work/gopls.mod has changed
	// we need to either re-read it if it exists or walk the filesystem if it
	// has been deleted. go.work should override the gopls.mod if both exist.
	changed, needReinit = handleWorkspaceFileChanges(ctx, result, changes, fs)
	// Next, handle go.mod changes that could affect our workspace.
	for uri, change := range changes {
		// Otherwise, we only care about go.mod files in the workspace directory.
		if change.isUnchanged || !isGoMod(uri) || !source.InDir(result.root.Filename(), uri.Filename()) {
			continue
		}
		changed = true
		// In legacy mode only the root go.mod is active; in all other modes
		// every in-workspace go.mod is.
		active := result.moduleSource != legacyWorkspace || equalURI(modURI(w.root), uri)
		needReinit = needReinit || (active && change.fileHandle.Saved())
		// Don't mess with the list of mod files if using go.work or gopls.mod.
		if result.moduleSource == goplsModWorkspace || result.moduleSource == goWorkWorkspace {
			continue
		}
		if change.exists {
			result.knownModFiles[uri] = struct{}{}
			if active {
				result.activeModFiles[uri] = struct{}{}
			}
		} else {
			delete(result.knownModFiles, uri)
			delete(result.activeModFiles, uri)
		}
	}
	// Finally, process go.sum changes for any modules that are now active.
	for uri, change := range changes {
		if !isGoSum(uri) {
			continue
		}
		// TODO(rFindley) factor out this URI mangling.
		dir := filepath.Dir(uri.Filename())
		modURI := span.URIFromPath(filepath.Join(dir, "go.mod"))
		if _, active := result.activeModFiles[modURI]; !active {
			continue
		}
		// Only changes to active go.sum files actually cause the workspace to
		// change.
		changed = true
		needReinit = needReinit || change.fileHandle.Saved()
	}
	if !changed {
		return w, false
	}
	return result, needReinit
}
// handleWorkspaceFileChanges handles changes related to a go.work or gopls.mod
// file, updating ws accordingly. ws.root must be set.
func handleWorkspaceFileChanges(ctx context.Context, ws *workspace, changes map[span.URI]*fileChange, fs source.FileSource) (changed, reload bool) {
	// If go.work/gopls.mod has changed we need to either re-read it if it
	// exists or walk the filesystem if it has been deleted.
	// go.work should override the gopls.mod if both exist.
	for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
		uri := uriForSource(ws.root, ws.explicitGowork, src)
		// File opens/closes are just no-ops.
		change, ok := changes[uri]
		if !ok {
			continue
		}
		if change.isUnchanged {
			// NOTE(review): break, not continue — an unchanged go.work still
			// takes precedence over any gopls.mod change.
			break
		}
		if change.exists {
			// Only invalidate if the file if it actually parses.
			// Otherwise, stick with the current file.
			var parsedFile *modfile.File
			var parsedModules map[span.URI]struct{}
			var err error
			switch src {
			case goWorkWorkspace:
				parsedFile, parsedModules, err = parseGoWork(ctx, ws.root, uri, change.content, fs)
			case goplsModWorkspace:
				parsedFile, parsedModules, err = parseGoplsMod(ws.root, uri, change.content)
			}
			if err != nil {
				// An unparseable file should not invalidate the workspace:
				// nothing good could come from changing the workspace in
				// this case.
				//
				// TODO(rfindley): well actually, it could potentially lead to a better
				// critical error. Evaluate whether we can unify this case with the
				// error returned by newWorkspace, without needlessly invalidating
				// metadata.
				event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err)
			} else {
				// only update the modfile if it parsed.
				changed = true
				reload = change.fileHandle.Saved()
				ws.mod = parsedFile
				ws.moduleSource = src
				ws.knownModFiles = parsedModules
				ws.activeModFiles = make(map[span.URI]struct{})
				for k, v := range parsedModules {
					ws.activeModFiles[k] = v
				}
			}
			break // We've found an explicit workspace file, so can stop looking.
		} else {
			// go.work/gopls.mod is deleted. search for modules again.
			changed = true
			reload = true
			ws.moduleSource = fileSystemWorkspace
			// The parsed file is no longer valid.
			ws.mod = nil
			knownModFiles, err := findModules(ws.root, ws.excludePath, 0)
			if err != nil {
				ws.knownModFiles = nil
				ws.activeModFiles = nil
				event.Error(ctx, "finding file system modules", err)
			} else {
				ws.knownModFiles = knownModFiles
				ws.activeModFiles = make(map[span.URI]struct{})
				for k, v := range ws.knownModFiles {
					ws.activeModFiles[k] = v
				}
			}
		}
	}
	return changed, reload
}
// uriForSource returns the URI of the file that defines the workspace for
// the given source: the explicit go.work override if one is set, otherwise
// the conventional file name ("go.work" or "gopls.mod") joined onto root.
// Sources with no defining file yield the empty URI.
func uriForSource(root, explicitGowork span.URI, src workspaceSource) span.URI {
	if src == goWorkWorkspace && explicitGowork != "" {
		return explicitGowork
	}
	var basename string
	switch src {
	case goplsModWorkspace:
		basename = "gopls.mod"
	case goWorkWorkspace:
		basename = "go.work"
	default:
		return ""
	}
	return span.URIFromPath(filepath.Join(root.Filename(), basename))
}
// modURI returns the URI for the go.mod file contained in root.
func modURI(root span.URI) span.URI |
// isGoMod reports whether uri names a go.mod file.
func isGoMod(uri span.URI) bool {
	base := filepath.Base(uri.Filename())
	return base == "go.mod"
}
// isGoSum reports whether uri names a go.sum or go.work.sum file.
func isGoSum(uri span.URI) bool {
	switch filepath.Base(uri.Filename()) {
	case "go.sum", "go.work.sum":
		return true
	}
	return false
}
// fileExists reports whether the file at uri exists, as observed through fs.
func fileExists(ctx context.Context, uri span.URI, fs source.FileSource) (bool, error) {
	fh, err := fs.GetFile(ctx, uri)
	if err != nil {
		return false, err
	}
	return fileHandleExists(fh)
}
// fileHandleExists reports whether the file underlying fh actually exists,
// probing it with a read. Errors other than non-existence are returned.
func fileHandleExists(fh source.FileHandle) (bool, error) {
	if _, err := fh.Read(); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
// dirURI returns the URI of uri's parent directory.
//
// TODO(rFindley): replace this (and similar) with a uripath package analogous
// to filepath.
func dirURI(uri span.URI) span.URI {
	parent := filepath.Dir(uri.Filename())
	return span.URIFromPath(parent)
}
// getLegacyModules returns a module set containing at most the root module:
// root's go.mod if it exists, otherwise an empty set.
func getLegacyModules(ctx context.Context, root span.URI, fs source.FileSource) (map[span.URI]struct{}, error) {
	modules := make(map[span.URI]struct{})
	uri := span.URIFromPath(filepath.Join(root.Filename(), "go.mod"))
	exists, err := fileExists(ctx, uri, fs)
	if err != nil {
		return nil, err
	}
	if exists {
		modules[uri] = struct{}{}
	}
	return modules, nil
}
// parseGoWork parses a go.work file at uri, returning a workspace modfile
// built from its use directives (rewritten to absolute paths so the file can
// be written to a temp directory) and the set of go.mod URIs for the used
// modules. A missing or incomplete go directive is an error, per the go.work
// spec.
func parseGoWork(ctx context.Context, root, uri span.URI, contents []byte, fs source.FileSource) (*modfile.File, map[span.URI]struct{}, error) {
	workFile, err := modfile.ParseWork(uri.Filename(), contents, nil)
	if err != nil {
		return nil, nil, fmt.Errorf("parsing go.work: %w", err)
	}
	modFiles := make(map[span.URI]struct{})
	for _, dir := range workFile.Use {
		// The resulting modfile must use absolute paths, so that it can be
		// written to a temp directory.
		dir.Path = absolutePath(root, dir.Path)
		modURI := span.URIFromPath(filepath.Join(dir.Path, "go.mod"))
		modFiles[modURI] = struct{}{}
	}
	// TODO(rfindley): we should either not build the workspace modfile here, or
	// not fail so hard. A failure in building the workspace modfile should not
	// invalidate the active module paths extracted above.
	modFile, err := buildWorkspaceModFile(ctx, modFiles, fs)
	if err != nil {
		return nil, nil, err
	}
	// Require a go directive, per the spec. This guard also protects the
	// workFile.Go.Version dereference below.
	if workFile.Go == nil || workFile.Go.Version == "" {
		return nil, nil, fmt.Errorf("go.work has missing or incomplete go directive")
	}
	if err := modFile.AddGoStmt(workFile.Go.Version); err != nil {
		return nil, nil, err
	}
	return modFile, modFiles, nil
}
// parseGoplsMod parses the gopls.mod file at uri and returns the parsed
// modfile along with the set of go.mod URIs derived from its replace
// directives.
func parseGoplsMod(root, uri span.URI, contents []byte) (*modfile.File, map[span.URI]struct{}, error) {
	modFile, parseErr := modfile.Parse(uri.Filename(), contents, nil)
	if parseErr != nil {
		return nil, nil, fmt.Errorf("parsing gopls.mod: %w", parseErr)
	}
	modFiles := make(map[span.URI]struct{})
	for _, rep := range modFile.Replace {
		// Only directory replacements are allowed; a version implies a
		// module replacement rather than a local path.
		if rep.New.Version != "" {
			return nil, nil, fmt.Errorf("gopls.mod: replaced module %q@%q must not have version", rep.New.Path, rep.New.Version)
		}
		// Rewrite the replacement to an absolute path, so that the resulting
		// modfile can be written to a temp directory.
		rep.New.Path = absolutePath(root, rep.New.Path)
		modFiles[span.URIFromPath(filepath.Join(rep.New.Path, "go.mod"))] = struct{}{}
	}
	return modFile, modFiles, nil
}
// absolutePath interprets path (which may use forward slashes) relative to
// root, returning it unchanged if it is already absolute.
func absolutePath(root span.URI, path string) string {
	p := filepath.FromSlash(path)
	if filepath.IsAbs(p) {
		return p
	}
	return filepath.Join(root.Filename(), p)
}
// errExhausted is returned by findModules if the file scan limit is reached.
// Unlike the local "done" sentinel in findModules, this error is propagated
// to the caller.
var errExhausted = errors.New("exhausted")

// fileLimit bounds the go.mod search to 1 million files. As a point of
// reference, Kubernetes has 22K files (as of 2020-11-24).
const fileLimit = 1000000
// findModules recursively walks the root directory looking for go.mod files,
// returning the set of modules it discovers. If modLimit is non-zero,
// searching stops once modLimit modules have been found.
//
// TODO(rfindley): consider overlays.
func findModules(root span.URI, excludePath func(string) bool, modLimit int) (map[span.URI]struct{}, error) {
	// Walk the view's folder to find all modules in the view.
	modFiles := make(map[span.URI]struct{})
	searched := 0
	// errDone is a local sentinel used to stop the walk early once modLimit
	// modules have been found; it is translated to a nil error below.
	errDone := errors.New("done")
	err := filepath.Walk(root.Filename(), func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// Probably a permission error. Keep looking.
			return filepath.SkipDir
		}
		// For any path that is not the workspace folder, check if the path
		// would be ignored by the go command. Vendor directories also do not
		// contain workspace modules.
		if info.IsDir() && path != root.Filename() {
			// suffix is the path relative to root (with a leading separator).
			suffix := strings.TrimPrefix(path, root.Filename())
			switch {
			case checkIgnored(suffix),
				strings.Contains(filepath.ToSlash(suffix), "/vendor/"),
				excludePath(suffix):
				return filepath.SkipDir
			}
		}
		// We're only interested in go.mod files.
		uri := span.URIFromPath(path)
		if isGoMod(uri) {
			modFiles[uri] = struct{}{}
		}
		if modLimit > 0 && len(modFiles) >= modLimit {
			return errDone
		}
		searched++
		// Give up entirely once the file scan limit is reached; unlike
		// errDone, errExhausted is reported to the caller.
		if fileLimit > 0 && searched >= fileLimit {
			return errExhausted
		}
		return nil
	})
	if err == errDone {
		// Stopping at modLimit is not an error; return what we found.
		return modFiles, nil
	}
	return modFiles, err
}
| {
return span.URIFromPath(filepath.Join(root.Filename(), "go.mod"))
} | identifier_body |
workspace.go | // Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"golang.org/x/mod/modfile"
"golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/xcontext"
)
// workspaceSource reports how the set of active modules has been derived.
type workspaceSource int

// The possible module sources, in the order they were introduced.
const (
	legacyWorkspace     = iota // non-module or single module mode
	goplsModWorkspace          // modules provided by a gopls.mod file
	goWorkWorkspace            // modules provided by a go.work file
	fileSystemWorkspace        // modules found by walking the filesystem
)
// String returns a human-readable label for the module source, for use in
// logs and error messages.
func (s workspaceSource) String() string {
	names := map[workspaceSource]string{
		legacyWorkspace:     "legacy",
		goplsModWorkspace:   "gopls.mod",
		goWorkWorkspace:     "go.work",
		fileSystemWorkspace: "file system",
	}
	if name, ok := names[s]; ok {
		return name
	}
	return "!(unknown module source)"
}
// workspaceCommon holds immutable information about the workspace setup.
//
// TODO(rfindley): there is some redundancy here with workspaceInformation.
// Reconcile these two types.
type workspaceCommon struct {
	// root is the workspace root folder.
	root span.URI
	// excludePath reports whether a path (relative to root, as passed by
	// findModules) should be excluded from module discovery.
	excludePath func(string) bool
	// explicitGowork is, if non-empty, the URI for the explicit go.work file
	// provided via the user's environment.
	explicitGowork span.URI
}
// workspace tracks go.mod files in the workspace, along with the
// gopls.mod file, to provide support for multi-module workspaces.
//
// Specifically, it provides:
//   - the set of modules contained within in the workspace root considered to
//     be 'active'
//   - the workspace modfile, to be used for the go command `-modfile` flag
//   - the set of workspace directories
//
// This type is immutable (or rather, idempotent), so that it may be shared
// across multiple snapshots.
type workspace struct {
	workspaceCommon

	// moduleSource records how the set of modules in this workspace was
	// derived (legacy, gopls.mod, go.work, or filesystem walk).
	moduleSource workspaceSource

	// activeModFiles holds the active go.mod files.
	activeModFiles map[span.URI]struct{}

	// knownModFiles holds the set of all go.mod files in the workspace.
	// In all modes except for legacy, this is equivalent to modFiles.
	knownModFiles map[span.URI]struct{}

	// workFile, if nonEmpty, is the go.work file for the workspace.
	workFile span.URI

	// The workspace module is lazily re-built once after being invalidated.
	// buildMu+built guards this reconstruction.
	//
	// file and wsDirs may be non-nil even if built == false, if they were copied
	// from the previous workspace module version. In this case, they will be
	// preserved if building fails.
	buildMu  sync.Mutex
	built    bool
	buildErr error
	mod      *modfile.File
	sum      []byte
	wsDirs   map[span.URI]struct{}
}
// newWorkspace creates a new workspace at the given root directory,
// determining its module source based on the presence of a gopls.mod or
// go.work file, and the go111moduleOff and useWsModule settings.
//
// If useWsModule is set, the workspace may use a synthetic mod file replacing
// all modules in the root.
//
// If there is no active workspace file (a gopls.mod or go.work), newWorkspace
// scans the filesystem to find modules.
//
// TODO(rfindley): newWorkspace should perhaps never fail, relying instead on
// the criticalError method to surface problems in the workspace.
func newWorkspace(ctx context.Context, root, explicitGowork span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff, useWsModule bool) (*workspace, error) {
	ws := &workspace{
		workspaceCommon: workspaceCommon{
			root:           root,
			explicitGowork: explicitGowork,
			excludePath:    excludePath,
		},
	}

	// The user may have a gopls.mod or go.work file that defines their
	// workspace.
	//
	// TODO(rfindley): if GO111MODULE=off, this looks wrong, though there are
	// probably other problems.
	//
	// NOTE: the error from loadExplicitWorkspaceFile is deliberately
	// discarded here; any failure falls through to the filesystem scan below.
	if err := ws.loadExplicitWorkspaceFile(ctx, fs); err == nil {
		return ws, nil
	}

	// Otherwise, in all other modes, search for all of the go.mod files in the
	// workspace.
	knownModFiles, err := findModules(root, excludePath, 0)
	if err != nil {
		return nil, err
	}
	ws.knownModFiles = knownModFiles

	switch {
	case go111moduleOff:
		ws.moduleSource = legacyWorkspace
	case useWsModule:
		// All discovered modules are active under the synthetic workspace
		// module.
		ws.activeModFiles = knownModFiles
		ws.moduleSource = fileSystemWorkspace
	default:
		// Legacy mode: at most the root module is active.
		ws.moduleSource = legacyWorkspace
		activeModFiles, err := getLegacyModules(ctx, root, fs)
		if err != nil {
			return nil, err
		}
		ws.activeModFiles = activeModFiles
	}
	return ws, nil
}
// loadExplicitWorkspaceFile loads workspace information from go.work or
// gopls.mod files, setting the active modules, mod file, and module source
// accordingly. go.work is preferred over gopls.mod (it is tried first).
func (ws *workspace) loadExplicitWorkspaceFile(ctx context.Context, fs source.FileSource) error {
	for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
		fh, err := fs.GetFile(ctx, uriForSource(ws.root, ws.explicitGowork, src))
		if err != nil {
			return err
		}
		contents, err := fh.Read()
		if err != nil {
			// The file doesn't exist (or is unreadable): try the next source.
			continue // TODO(rfindley): is it correct to proceed here?
		}
		var file *modfile.File
		var activeModFiles map[span.URI]struct{}
		switch src {
		case goWorkWorkspace:
			file, activeModFiles, err = parseGoWork(ctx, ws.root, fh.URI(), contents, fs)
			ws.workFile = fh.URI()
		case goplsModWorkspace:
			file, activeModFiles, err = parseGoplsMod(ws.root, fh.URI(), contents)
		}
		if err != nil {
			// Record the parse failure as the build error, but note that we
			// still fall through below: file (nil) and activeModFiles are
			// assigned, and nil is returned.
			ws.buildMu.Lock()
			ws.built = true
			ws.buildErr = err
			ws.buildMu.Unlock()
		}
		ws.mod = file
		ws.activeModFiles = activeModFiles
		ws.moduleSource = src
		return nil
	}
	return noHardcodedWorkspace
}
var noHardcodedWorkspace = errors.New("no hardcoded workspace")
// TODO(rfindley): eliminate getKnownModFiles.
func (w *workspace) | () map[span.URI]struct{} {
return w.knownModFiles
}
// ActiveModFiles returns the set of active mod files for the current workspace.
//
// Note: this returns the internal map itself, not a copy, so callers should
// treat it as read-only.
func (w *workspace) ActiveModFiles() map[span.URI]struct{} {
	return w.activeModFiles
}
// criticalError returns a critical error related to the workspace setup, or
// nil if there is none.
func (w *workspace) criticalError(ctx context.Context, fs source.FileSource) (res *source.CriticalError) {
	// For now, we narrowly report errors related to `go.work` files.
	//
	// TODO(rfindley): investigate whether other workspace validation errors
	// can be consolidated here.
	if w.moduleSource == goWorkWorkspace {
		// We should have already built the modfile, but build here to be
		// consistent about accessing w.mod after w.build.
		//
		// TODO(rfindley): build eagerly. Building lazily is a premature
		// optimization that poses a significant burden on the code.
		w.build(ctx, fs)
		if w.buildErr != nil {
			return &source.CriticalError{
				MainError: w.buildErr,
			}
		}
	}
	return nil
}
// modFile gets the workspace modfile associated with this workspace,
// computing it if it doesn't exist.
//
// A fileSource must be passed in to solve a chicken-egg problem: it is not
// correct to pass in the snapshot file source to newWorkspace when
// invalidating, because at the time these are called the snapshot is locked.
// So we must pass it in later on when actually using the modFile.
//
// If the lazy build failed, the returned error is the recorded build error.
func (w *workspace) modFile(ctx context.Context, fs source.FileSource) (*modfile.File, error) {
	w.build(ctx, fs)
	return w.mod, w.buildErr
}
// sumFile returns the workspace go.sum contents, building the workspace
// (via w.build) if it has not yet been built. On build failure the recorded
// build error is returned alongside any previously-preserved sum.
func (w *workspace) sumFile(ctx context.Context, fs source.FileSource) ([]byte, error) {
	w.build(ctx, fs)
	return w.sum, w.buildErr
}
// build lazily (re)constructs the workspace module state: the synthetic mod
// file (for filesystem workspaces), the workspace directories, and the
// workspace sum file. It is a no-op if the workspace has already been built.
func (w *workspace) build(ctx context.Context, fs source.FileSource) {
	w.buildMu.Lock()
	defer w.buildMu.Unlock()

	if w.built {
		return
	}

	// Building should never be cancelled. Since the workspace module is shared
	// across multiple snapshots, doing so would put us in a bad state, and it
	// would not be obvious to the user how to recover.
	ctx = xcontext.Detach(ctx)

	// If the module source is from the filesystem, try to build the workspace
	// module from active modules discovered by scanning the filesystem. Fall
	// back on the pre-existing mod file if parsing fails.
	if w.moduleSource == fileSystemWorkspace {
		file, err := buildWorkspaceModFile(ctx, w.activeModFiles, fs)
		switch {
		case err == nil:
			w.mod = file
		case w.mod != nil:
			// Parsing failed, but we have a previous file version.
			event.Error(ctx, "building workspace mod file", err)
		default:
			// No file to fall back on.
			w.buildErr = err
		}
	}

	if w.mod != nil {
		w.wsDirs = map[span.URI]struct{}{
			w.root: {},
		}
		// Add each directory-replacement target as a workspace directory.
		for _, r := range w.mod.Replace {
			// We may be replacing a module with a different version, not a path
			// on disk.
			if r.New.Version != "" {
				continue
			}
			w.wsDirs[span.URIFromPath(r.New.Path)] = struct{}{}
		}
	}

	// Ensure that there is always at least the root dir.
	if len(w.wsDirs) == 0 {
		w.wsDirs = map[span.URI]struct{}{
			w.root: {},
		}
	}

	sum, err := buildWorkspaceSumFile(ctx, w.activeModFiles, fs)
	if err == nil {
		w.sum = sum
	} else {
		// A sum-file failure is logged but does not fail the build.
		event.Error(ctx, "building workspace sum file", err)
	}

	w.built = true
}
// dirs returns the workspace directories for the loaded modules, sorted in
// ascending URI order. The workspace is built first if necessary.
func (w *workspace) dirs(ctx context.Context, fs source.FileSource) []span.URI {
	w.build(ctx, fs)
	var out []span.URI
	for dir := range w.wsDirs {
		out = append(out, dir)
	}
	sort.Slice(out, func(a, b int) bool { return out[a] < out[b] })
	return out
}
// Clone returns a (possibly) new workspace after invalidating the changed
// files. If w is still valid in the presence of changedURIs, it returns itself
// unmodified.
//
// The returned needReinit flag indicates to the caller that the workspace
// needs to be reinitialized (because a relevant go.mod or go.work file has
// been changed).
//
// TODO(rfindley): it looks wrong that we return 'needReinit' here. The caller
// should determine whether to re-initialize..
func (w *workspace) Clone(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, needReinit bool) {
	// Prevent races to w.modFile or w.wsDirs below, if w has not yet been built.
	w.buildMu.Lock()
	defer w.buildMu.Unlock()

	// Clone the workspace. This may be discarded if nothing changed.
	changed := false
	result := &workspace{
		workspaceCommon: w.workspaceCommon,
		moduleSource:    w.moduleSource,
		knownModFiles:   make(map[span.URI]struct{}),
		activeModFiles:  make(map[span.URI]struct{}),
		workFile:        w.workFile,
		mod:             w.mod,
		sum:             w.sum,
		wsDirs:          w.wsDirs,
	}
	// Deep-copy the mod-file sets so mutations below don't affect w.
	for k, v := range w.knownModFiles {
		result.knownModFiles[k] = v
	}
	for k, v := range w.activeModFiles {
		result.activeModFiles[k] = v
	}

	equalURI := func(a, b span.URI) (r bool) {
		// This query is a strange mix of syntax and file system state:
		// deletion of a file causes a false result if the name doesn't change.
		// Our tests exercise only the first clause.
		return a == b || span.SameExistingFile(a, b)
	}

	// First handle changes to the go.work or gopls.mod file. This must be
	// considered before any changes to go.mod or go.sum files, as these files
	// determine which modules we care about. If go.work/gopls.mod has changed
	// we need to either re-read it if it exists or walk the filesystem if it
	// has been deleted. go.work should override the gopls.mod if both exist.
	changed, needReinit = handleWorkspaceFileChanges(ctx, result, changes, fs)
	// Next, handle go.mod changes that could affect our workspace.
	for uri, change := range changes {
		// Otherwise, we only care about go.mod files in the workspace directory.
		if change.isUnchanged || !isGoMod(uri) || !source.InDir(result.root.Filename(), uri.Filename()) {
			continue
		}
		changed = true
		// In legacy mode only the root go.mod is active; otherwise every
		// workspace go.mod is.
		active := result.moduleSource != legacyWorkspace || equalURI(modURI(w.root), uri)
		needReinit = needReinit || (active && change.fileHandle.Saved())
		// Don't mess with the list of mod files if using go.work or gopls.mod.
		if result.moduleSource == goplsModWorkspace || result.moduleSource == goWorkWorkspace {
			continue
		}
		if change.exists {
			result.knownModFiles[uri] = struct{}{}
			if active {
				result.activeModFiles[uri] = struct{}{}
			}
		} else {
			delete(result.knownModFiles, uri)
			delete(result.activeModFiles, uri)
		}
	}

	// Finally, process go.sum changes for any modules that are now active.
	for uri, change := range changes {
		if !isGoSum(uri) {
			continue
		}
		// TODO(rFindley) factor out this URI mangling.
		dir := filepath.Dir(uri.Filename())
		modURI := span.URIFromPath(filepath.Join(dir, "go.mod"))
		if _, active := result.activeModFiles[modURI]; !active {
			continue
		}
		// Only changes to active go.sum files actually cause the workspace to
		// change.
		changed = true
		needReinit = needReinit || change.fileHandle.Saved()
	}

	if !changed {
		return w, false
	}

	return result, needReinit
}
// handleWorkspaceFileChanges handles changes related to a go.work or gopls.mod
// file, updating ws accordingly. ws.root must be set.
//
// It reports whether the workspace changed, and whether a reload is needed.
func handleWorkspaceFileChanges(ctx context.Context, ws *workspace, changes map[span.URI]*fileChange, fs source.FileSource) (changed, reload bool) {
	// If go.work/gopls.mod has changed we need to either re-read it if it
	// exists or walk the filesystem if it has been deleted.
	// go.work should override the gopls.mod if both exist.
	for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
		uri := uriForSource(ws.root, ws.explicitGowork, src)
		// File opens/closes are just no-ops.
		change, ok := changes[uri]
		if !ok {
			continue
		}
		if change.isUnchanged {
			break
		}
		if change.exists {
			// Only invalidate if the file if it actually parses.
			// Otherwise, stick with the current file.
			var parsedFile *modfile.File
			var parsedModules map[span.URI]struct{}
			var err error
			switch src {
			case goWorkWorkspace:
				parsedFile, parsedModules, err = parseGoWork(ctx, ws.root, uri, change.content, fs)
			case goplsModWorkspace:
				parsedFile, parsedModules, err = parseGoplsMod(ws.root, uri, change.content)
			}
			if err != nil {
				// An unparseable file should not invalidate the workspace:
				// nothing good could come from changing the workspace in
				// this case.
				//
				// TODO(rfindley): well actually, it could potentially lead to a better
				// critical error. Evaluate whether we can unify this case with the
				// error returned by newWorkspace, without needlessly invalidating
				// metadata.
				event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err)
			} else {
				// only update the modfile if it parsed.
				changed = true
				reload = change.fileHandle.Saved()
				ws.mod = parsedFile
				ws.moduleSource = src
				ws.knownModFiles = parsedModules
				// All parsed modules become active.
				ws.activeModFiles = make(map[span.URI]struct{})
				for k, v := range parsedModules {
					ws.activeModFiles[k] = v
				}
			}
			break // We've found an explicit workspace file, so can stop looking.
		} else {
			// go.work/gopls.mod is deleted. search for modules again.
			changed = true
			reload = true
			ws.moduleSource = fileSystemWorkspace
			// The parsed file is no longer valid.
			ws.mod = nil
			knownModFiles, err := findModules(ws.root, ws.excludePath, 0)
			if err != nil {
				// Discovery failed: clear both sets and log.
				ws.knownModFiles = nil
				ws.activeModFiles = nil
				event.Error(ctx, "finding file system modules", err)
			} else {
				ws.knownModFiles = knownModFiles
				ws.activeModFiles = make(map[span.URI]struct{})
				for k, v := range ws.knownModFiles {
					ws.activeModFiles[k] = v
				}
			}
		}
	}
	return changed, reload
}
// uriForSource returns the URI of the workspace file (gopls.mod or go.work)
// for the given module source, honoring an explicitly-configured go.work
// path when present. It returns "" for sources with no workspace file.
func uriForSource(root, explicitGowork span.URI, src workspaceSource) span.URI {
	switch src {
	case goplsModWorkspace:
		return span.URIFromPath(filepath.Join(root.Filename(), "gopls.mod"))
	case goWorkWorkspace:
		if explicitGowork != "" {
			return explicitGowork
		}
		return span.URIFromPath(filepath.Join(root.Filename(), "go.work"))
	}
	return ""
}
// modURI returns the URI for the go.mod file contained in root.
func modURI(root span.URI) span.URI {
	path := filepath.Join(root.Filename(), "go.mod")
	return span.URIFromPath(path)
}
// isGoMod reports whether uri names a go.mod file.
func isGoMod(uri span.URI) bool {
	base := filepath.Base(uri.Filename())
	return base == "go.mod"
}
// isGoSum reports whether uri names a go.sum or go.work.sum file.
func isGoSum(uri span.URI) bool {
	switch filepath.Base(uri.Filename()) {
	case "go.sum", "go.work.sum":
		return true
	}
	return false
}
// fileExists reports if the file uri exists within source.
func fileExists(ctx context.Context, uri span.URI, source source.FileSource) (bool, error) {
fh, err := source.GetFile(ctx, uri)
if err != nil {
return false, err
}
return fileHandleExists(fh)
}
// fileHandleExists reports if the file underlying fh actually exits.
func fileHandleExists(fh source.FileHandle) (bool, error) {
_, err := fh.Read()
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
// TODO(rFindley): replace this (and similar) with a uripath package analogous
// to filepath.
func dirURI(uri span.URI) span.URI {
return span.URIFromPath(filepath.Dir(uri.Filename()))
}
// getLegacyModules returns a module set containing at most the root module.
func getLegacyModules(ctx context.Context, root span.URI, fs source.FileSource) (map[span.URI]struct{}, error) {
uri := span.URIFromPath(filepath.Join(root.Filename(), "go.mod"))
modules := make(map[span.URI]struct{})
exists, err := fileExists(ctx, uri, fs)
if err != nil {
return nil, err
}
if exists {
modules[uri] = struct{}{}
}
return modules, nil
}
func parseGoWork(ctx context.Context, root, uri span.URI, contents []byte, fs source.FileSource) (*modfile.File, map[span.URI]struct{}, error) {
workFile, err := modfile.ParseWork(uri.Filename(), contents, nil)
if err != nil {
return nil, nil, fmt.Errorf("parsing go.work: %w", err)
}
modFiles := make(map[span.URI]struct{})
for _, dir := range workFile.Use {
// The resulting modfile must use absolute paths, so that it can be
// written to a temp directory.
dir.Path = absolutePath(root, dir.Path)
modURI := span.URIFromPath(filepath.Join(dir.Path, "go.mod"))
modFiles[modURI] = struct{}{}
}
// TODO(rfindley): we should either not build the workspace modfile here, or
// not fail so hard. A failure in building the workspace modfile should not
// invalidate the active module paths extracted above.
modFile, err := buildWorkspaceModFile(ctx, modFiles, fs)
if err != nil {
return nil, nil, err
}
// Require a go directive, per the spec.
if workFile.Go == nil || workFile.Go.Version == "" {
return nil, nil, fmt.Errorf("go.work has missing or incomplete go directive")
}
if err := modFile.AddGoStmt(workFile.Go.Version); err != nil {
return nil, nil, err
}
return modFile, modFiles, nil
}
func parseGoplsMod(root, uri span.URI, contents []byte) (*modfile.File, map[span.URI]struct{}, error) {
modFile, err := modfile.Parse(uri.Filename(), contents, nil)
if err != nil {
return nil, nil, fmt.Errorf("parsing gopls.mod: %w", err)
}
modFiles := make(map[span.URI]struct{})
for _, replace := range modFile.Replace {
if replace.New.Version != "" {
return nil, nil, fmt.Errorf("gopls.mod: replaced module %q@%q must not have version", replace.New.Path, replace.New.Version)
}
// The resulting modfile must use absolute paths, so that it can be
// written to a temp directory.
replace.New.Path = absolutePath(root, replace.New.Path)
modURI := span.URIFromPath(filepath.Join(replace.New.Path, "go.mod"))
modFiles[modURI] = struct{}{}
}
return modFile, modFiles, nil
}
func absolutePath(root span.URI, path string) string {
dirFP := filepath.FromSlash(path)
if !filepath.IsAbs(dirFP) {
dirFP = filepath.Join(root.Filename(), dirFP)
}
return dirFP
}
// errExhausted is returned by findModules if the file scan limit is reached.
var errExhausted = errors.New("exhausted")
// Limit go.mod search to 1 million files. As a point of reference,
// Kubernetes has 22K files (as of 2020-11-24).
const fileLimit = 1000000
// findModules recursively walks the root directory looking for go.mod files,
// returning the set of modules it discovers. If modLimit is non-zero,
// searching stops once modLimit modules have been found.
//
// TODO(rfindley): consider overlays.
func findModules(root span.URI, excludePath func(string) bool, modLimit int) (map[span.URI]struct{}, error) {
// Walk the view's folder to find all modules in the view.
modFiles := make(map[span.URI]struct{})
searched := 0
errDone := errors.New("done")
err := filepath.Walk(root.Filename(), func(path string, info os.FileInfo, err error) error {
if err != nil {
// Probably a permission error. Keep looking.
return filepath.SkipDir
}
// For any path that is not the workspace folder, check if the path
// would be ignored by the go command. Vendor directories also do not
// contain workspace modules.
if info.IsDir() && path != root.Filename() {
suffix := strings.TrimPrefix(path, root.Filename())
switch {
case checkIgnored(suffix),
strings.Contains(filepath.ToSlash(suffix), "/vendor/"),
excludePath(suffix):
return filepath.SkipDir
}
}
// We're only interested in go.mod files.
uri := span.URIFromPath(path)
if isGoMod(uri) {
modFiles[uri] = struct{}{}
}
if modLimit > 0 && len(modFiles) >= modLimit {
return errDone
}
searched++
if fileLimit > 0 && searched >= fileLimit {
return errExhausted
}
return nil
})
if err == errDone {
return modFiles, nil
}
return modFiles, err
}
| getKnownModFiles | identifier_name |
workspace.go | // Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"golang.org/x/mod/modfile"
"golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/xcontext"
)
// workspaceSource reports how the set of active modules has been derived.
type workspaceSource int
const (
legacyWorkspace = iota // non-module or single module mode
goplsModWorkspace // modules provided by a gopls.mod file
goWorkWorkspace // modules provided by a go.work file
fileSystemWorkspace // modules found by walking the filesystem
)
func (s workspaceSource) String() string {
switch s {
case legacyWorkspace:
return "legacy"
case goplsModWorkspace:
return "gopls.mod"
case goWorkWorkspace:
return "go.work"
case fileSystemWorkspace:
return "file system"
default:
return "!(unknown module source)"
}
}
// workspaceCommon holds immutable information about the workspace setup.
//
// TODO(rfindley): there is some redundancy here with workspaceInformation.
// Reconcile these two types.
type workspaceCommon struct {
root span.URI
excludePath func(string) bool
// explicitGowork is, if non-empty, the URI for the explicit go.work file
// provided via the user's environment.
explicitGowork span.URI
}
// workspace tracks go.mod files in the workspace, along with the
// gopls.mod file, to provide support for multi-module workspaces.
//
// Specifically, it provides:
// - the set of modules contained within in the workspace root considered to
// be 'active'
// - the workspace modfile, to be used for the go command `-modfile` flag
// - the set of workspace directories
//
// This type is immutable (or rather, idempotent), so that it may be shared
// across multiple snapshots.
type workspace struct {
workspaceCommon
// The source of modules in this workspace.
moduleSource workspaceSource
// activeModFiles holds the active go.mod files.
activeModFiles map[span.URI]struct{}
// knownModFiles holds the set of all go.mod files in the workspace.
// In all modes except for legacy, this is equivalent to modFiles.
knownModFiles map[span.URI]struct{}
// workFile, if nonEmpty, is the go.work file for the workspace.
workFile span.URI
// The workspace module is lazily re-built once after being invalidated.
// buildMu+built guards this reconstruction.
//
// file and wsDirs may be non-nil even if built == false, if they were copied
// from the previous workspace module version. In this case, they will be
// preserved if building fails.
buildMu sync.Mutex
built bool
buildErr error
mod *modfile.File
sum []byte
wsDirs map[span.URI]struct{}
}
// newWorkspace creates a new workspace at the given root directory,
// determining its module source based on the presence of a gopls.mod or
// go.work file, and the go111moduleOff and useWsModule settings.
//
// If useWsModule is set, the workspace may use a synthetic mod file replacing
// all modules in the root.
//
// If there is no active workspace file (a gopls.mod or go.work), newWorkspace
// scans the filesystem to find modules.
//
// TODO(rfindley): newWorkspace should perhaps never fail, relying instead on
// the criticalError method to surface problems in the workspace.
func newWorkspace(ctx context.Context, root, explicitGowork span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff, useWsModule bool) (*workspace, error) {
ws := &workspace{
workspaceCommon: workspaceCommon{
root: root,
explicitGowork: explicitGowork,
excludePath: excludePath,
},
}
// The user may have a gopls.mod or go.work file that defines their
// workspace.
//
// TODO(rfindley): if GO111MODULE=off, this looks wrong, though there are
// probably other problems.
if err := ws.loadExplicitWorkspaceFile(ctx, fs); err == nil {
return ws, nil
}
// Otherwise, in all other modes, search for all of the go.mod files in the
// workspace.
knownModFiles, err := findModules(root, excludePath, 0)
if err != nil {
return nil, err
}
ws.knownModFiles = knownModFiles
switch {
case go111moduleOff:
ws.moduleSource = legacyWorkspace
case useWsModule:
ws.activeModFiles = knownModFiles
ws.moduleSource = fileSystemWorkspace
default:
ws.moduleSource = legacyWorkspace
activeModFiles, err := getLegacyModules(ctx, root, fs)
if err != nil {
return nil, err
}
ws.activeModFiles = activeModFiles
}
return ws, nil
}
// loadExplicitWorkspaceFile loads workspace information from go.work or
// gopls.mod files, setting the active modules, mod file, and module source
// accordingly.
func (ws *workspace) loadExplicitWorkspaceFile(ctx context.Context, fs source.FileSource) error {
for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
fh, err := fs.GetFile(ctx, uriForSource(ws.root, ws.explicitGowork, src))
if err != nil {
return err
}
contents, err := fh.Read()
if err != nil {
continue // TODO(rfindley): is it correct to proceed here?
}
var file *modfile.File
var activeModFiles map[span.URI]struct{}
switch src {
case goWorkWorkspace:
file, activeModFiles, err = parseGoWork(ctx, ws.root, fh.URI(), contents, fs)
ws.workFile = fh.URI()
case goplsModWorkspace:
file, activeModFiles, err = parseGoplsMod(ws.root, fh.URI(), contents)
}
if err != nil {
ws.buildMu.Lock()
ws.built = true
ws.buildErr = err
ws.buildMu.Unlock()
}
ws.mod = file
ws.activeModFiles = activeModFiles
ws.moduleSource = src
return nil
}
return noHardcodedWorkspace
}
var noHardcodedWorkspace = errors.New("no hardcoded workspace")
// TODO(rfindley): eliminate getKnownModFiles.
func (w *workspace) getKnownModFiles() map[span.URI]struct{} {
return w.knownModFiles
}
// ActiveModFiles returns the set of active mod files for the current workspace.
func (w *workspace) ActiveModFiles() map[span.URI]struct{} {
return w.activeModFiles
}
// criticalError returns a critical error related to the workspace setup.
func (w *workspace) criticalError(ctx context.Context, fs source.FileSource) (res *source.CriticalError) {
// For now, we narrowly report errors related to `go.work` files.
//
// TODO(rfindley): investigate whether other workspace validation errors
// can be consolidated here.
if w.moduleSource == goWorkWorkspace {
// We should have already built the modfile, but build here to be
// consistent about accessing w.mod after w.build.
//
// TODO(rfindley): build eagerly. Building lazily is a premature
// optimization that poses a significant burden on the code.
w.build(ctx, fs)
if w.buildErr != nil {
return &source.CriticalError{
MainError: w.buildErr,
}
}
}
return nil
}
// modFile gets the workspace modfile associated with this workspace,
// computing it if it doesn't exist.
//
// A fileSource must be passed in to solve a chicken-egg problem: it is not
// correct to pass in the snapshot file source to newWorkspace when
// invalidating, because at the time these are called the snapshot is locked.
// So we must pass it in later on when actually using the modFile.
func (w *workspace) modFile(ctx context.Context, fs source.FileSource) (*modfile.File, error) {
w.build(ctx, fs)
return w.mod, w.buildErr
}
func (w *workspace) sumFile(ctx context.Context, fs source.FileSource) ([]byte, error) {
w.build(ctx, fs)
return w.sum, w.buildErr
}
func (w *workspace) build(ctx context.Context, fs source.FileSource) {
w.buildMu.Lock()
defer w.buildMu.Unlock()
if w.built {
return
}
// Building should never be cancelled. Since the workspace module is shared
// across multiple snapshots, doing so would put us in a bad state, and it
// would not be obvious to the user how to recover.
ctx = xcontext.Detach(ctx)
// If the module source is from the filesystem, try to build the workspace
// module from active modules discovered by scanning the filesystem. Fall
// back on the pre-existing mod file if parsing fails.
if w.moduleSource == fileSystemWorkspace {
file, err := buildWorkspaceModFile(ctx, w.activeModFiles, fs)
switch {
case err == nil:
w.mod = file
case w.mod != nil:
// Parsing failed, but we have a previous file version.
event.Error(ctx, "building workspace mod file", err)
default:
// No file to fall back on.
w.buildErr = err
}
}
if w.mod != nil {
w.wsDirs = map[span.URI]struct{}{
w.root: {},
}
for _, r := range w.mod.Replace {
// We may be replacing a module with a different version, not a path
// on disk.
if r.New.Version != "" {
continue
}
w.wsDirs[span.URIFromPath(r.New.Path)] = struct{}{}
}
}
// Ensure that there is always at least the root dir.
if len(w.wsDirs) == 0 {
w.wsDirs = map[span.URI]struct{}{
w.root: {},
}
}
sum, err := buildWorkspaceSumFile(ctx, w.activeModFiles, fs)
if err == nil {
w.sum = sum
} else {
event.Error(ctx, "building workspace sum file", err)
}
w.built = true
}
// dirs returns the workspace directories for the loaded modules.
func (w *workspace) dirs(ctx context.Context, fs source.FileSource) []span.URI {
w.build(ctx, fs)
var dirs []span.URI
for d := range w.wsDirs {
dirs = append(dirs, d)
}
sort.Slice(dirs, func(i, j int) bool { return dirs[i] < dirs[j] })
return dirs
}
// Clone returns a (possibly) new workspace after invalidating the changed
// files. If w is still valid in the presence of changedURIs, it returns itself
// unmodified.
//
// The returned needReinit flag indicates to the caller that the workspace
// needs to be reinitialized (because a relevant go.mod or go.work file has
// been changed).
//
// TODO(rfindley): it looks wrong that we return 'needReinit' here. The caller
// should determine whether to re-initialize..
func (w *workspace) Clone(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, needReinit bool) {
// Prevent races to w.modFile or w.wsDirs below, if w has not yet been built.
w.buildMu.Lock()
defer w.buildMu.Unlock()
// Clone the workspace. This may be discarded if nothing changed.
changed := false
result := &workspace{
workspaceCommon: w.workspaceCommon,
moduleSource: w.moduleSource,
knownModFiles: make(map[span.URI]struct{}),
activeModFiles: make(map[span.URI]struct{}),
workFile: w.workFile,
mod: w.mod,
sum: w.sum,
wsDirs: w.wsDirs,
}
for k, v := range w.knownModFiles {
result.knownModFiles[k] = v
}
for k, v := range w.activeModFiles {
result.activeModFiles[k] = v
}
equalURI := func(a, b span.URI) (r bool) {
// This query is a strange mix of syntax and file system state:
// deletion of a file causes a false result if the name doesn't change.
// Our tests exercise only the first clause.
return a == b || span.SameExistingFile(a, b)
}
// First handle changes to the go.work or gopls.mod file. This must be
// considered before any changes to go.mod or go.sum files, as these files
// determine which modules we care about. If go.work/gopls.mod has changed
// we need to either re-read it if it exists or walk the filesystem if it
// has been deleted. go.work should override the gopls.mod if both exist.
changed, needReinit = handleWorkspaceFileChanges(ctx, result, changes, fs)
// Next, handle go.mod changes that could affect our workspace.
for uri, change := range changes {
// Otherwise, we only care about go.mod files in the workspace directory.
if change.isUnchanged || !isGoMod(uri) || !source.InDir(result.root.Filename(), uri.Filename()) {
continue
}
changed = true
active := result.moduleSource != legacyWorkspace || equalURI(modURI(w.root), uri)
needReinit = needReinit || (active && change.fileHandle.Saved())
// Don't mess with the list of mod files if using go.work or gopls.mod.
if result.moduleSource == goplsModWorkspace || result.moduleSource == goWorkWorkspace {
continue
}
if change.exists {
result.knownModFiles[uri] = struct{}{}
if active {
result.activeModFiles[uri] = struct{}{}
}
} else {
delete(result.knownModFiles, uri)
delete(result.activeModFiles, uri)
}
}
// Finally, process go.sum changes for any modules that are now active.
for uri, change := range changes {
if !isGoSum(uri) {
continue
}
// TODO(rFindley) factor out this URI mangling.
dir := filepath.Dir(uri.Filename())
modURI := span.URIFromPath(filepath.Join(dir, "go.mod"))
if _, active := result.activeModFiles[modURI]; !active {
continue
}
// Only changes to active go.sum files actually cause the workspace to
// change.
changed = true
needReinit = needReinit || change.fileHandle.Saved()
}
if !changed {
return w, false
}
return result, needReinit
}
// handleWorkspaceFileChanges handles changes related to a go.work or gopls.mod
// file, updating ws accordingly. ws.root must be set.
func handleWorkspaceFileChanges(ctx context.Context, ws *workspace, changes map[span.URI]*fileChange, fs source.FileSource) (changed, reload bool) {
// If go.work/gopls.mod has changed we need to either re-read it if it
// exists or walk the filesystem if it has been deleted.
// go.work should override the gopls.mod if both exist.
for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
uri := uriForSource(ws.root, ws.explicitGowork, src)
// File opens/closes are just no-ops.
change, ok := changes[uri]
if !ok {
continue
}
if change.isUnchanged {
break
}
if change.exists {
// Only invalidate if the file if it actually parses.
// Otherwise, stick with the current file.
var parsedFile *modfile.File
var parsedModules map[span.URI]struct{}
var err error
switch src {
case goWorkWorkspace:
parsedFile, parsedModules, err = parseGoWork(ctx, ws.root, uri, change.content, fs)
case goplsModWorkspace:
parsedFile, parsedModules, err = parseGoplsMod(ws.root, uri, change.content)
}
if err != nil {
// An unparseable file should not invalidate the workspace:
// nothing good could come from changing the workspace in
// this case.
//
// TODO(rfindley): well actually, it could potentially lead to a better
// critical error. Evaluate whether we can unify this case with the
// error returned by newWorkspace, without needlessly invalidating
// metadata.
event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err)
} else {
// only update the modfile if it parsed.
changed = true
reload = change.fileHandle.Saved()
ws.mod = parsedFile
ws.moduleSource = src
ws.knownModFiles = parsedModules
ws.activeModFiles = make(map[span.URI]struct{})
for k, v := range parsedModules {
ws.activeModFiles[k] = v
}
}
break // We've found an explicit workspace file, so can stop looking.
} else {
// go.work/gopls.mod is deleted. search for modules again.
changed = true
reload = true
ws.moduleSource = fileSystemWorkspace
// The parsed file is no longer valid.
ws.mod = nil
knownModFiles, err := findModules(ws.root, ws.excludePath, 0)
if err != nil {
ws.knownModFiles = nil
ws.activeModFiles = nil
event.Error(ctx, "finding file system modules", err)
} else {
ws.knownModFiles = knownModFiles
ws.activeModFiles = make(map[span.URI]struct{})
for k, v := range ws.knownModFiles {
ws.activeModFiles[k] = v
}
}
}
}
return changed, reload
}
// goplsModURI returns the URI for the gopls.mod file contained in root.
func uriForSource(root, explicitGowork span.URI, src workspaceSource) span.URI {
var basename string
switch src {
case goplsModWorkspace:
basename = "gopls.mod"
case goWorkWorkspace:
if explicitGowork != "" {
return explicitGowork
}
basename = "go.work"
default:
return ""
}
return span.URIFromPath(filepath.Join(root.Filename(), basename))
}
// modURI returns the URI for the go.mod file contained in root.
func modURI(root span.URI) span.URI {
return span.URIFromPath(filepath.Join(root.Filename(), "go.mod"))
}
// isGoMod reports if uri is a go.mod file.
func isGoMod(uri span.URI) bool {
return filepath.Base(uri.Filename()) == "go.mod"
}
func isGoSum(uri span.URI) bool {
return filepath.Base(uri.Filename()) == "go.sum" || filepath.Base(uri.Filename()) == "go.work.sum"
}
// fileExists reports if the file uri exists within source.
func fileExists(ctx context.Context, uri span.URI, source source.FileSource) (bool, error) {
fh, err := source.GetFile(ctx, uri)
if err != nil {
return false, err
}
return fileHandleExists(fh)
}
// fileHandleExists reports if the file underlying fh actually exits.
func fileHandleExists(fh source.FileHandle) (bool, error) {
_, err := fh.Read()
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
// TODO(rFindley): replace this (and similar) with a uripath package analogous
// to filepath.
func dirURI(uri span.URI) span.URI {
return span.URIFromPath(filepath.Dir(uri.Filename()))
}
// getLegacyModules returns a module set containing at most the root module.
func getLegacyModules(ctx context.Context, root span.URI, fs source.FileSource) (map[span.URI]struct{}, error) {
uri := span.URIFromPath(filepath.Join(root.Filename(), "go.mod"))
modules := make(map[span.URI]struct{})
exists, err := fileExists(ctx, uri, fs)
if err != nil {
return nil, err
}
if exists {
modules[uri] = struct{}{}
}
return modules, nil
}
func parseGoWork(ctx context.Context, root, uri span.URI, contents []byte, fs source.FileSource) (*modfile.File, map[span.URI]struct{}, error) {
workFile, err := modfile.ParseWork(uri.Filename(), contents, nil)
if err != nil {
return nil, nil, fmt.Errorf("parsing go.work: %w", err)
}
modFiles := make(map[span.URI]struct{})
for _, dir := range workFile.Use {
// The resulting modfile must use absolute paths, so that it can be
// written to a temp directory.
dir.Path = absolutePath(root, dir.Path)
modURI := span.URIFromPath(filepath.Join(dir.Path, "go.mod"))
modFiles[modURI] = struct{}{}
}
// TODO(rfindley): we should either not build the workspace modfile here, or
// not fail so hard. A failure in building the workspace modfile should not
// invalidate the active module paths extracted above.
modFile, err := buildWorkspaceModFile(ctx, modFiles, fs)
if err != nil {
return nil, nil, err
}
// Require a go directive, per the spec.
if workFile.Go == nil || workFile.Go.Version == "" {
return nil, nil, fmt.Errorf("go.work has missing or incomplete go directive")
}
if err := modFile.AddGoStmt(workFile.Go.Version); err != nil {
return nil, nil, err
}
return modFile, modFiles, nil
}
func parseGoplsMod(root, uri span.URI, contents []byte) (*modfile.File, map[span.URI]struct{}, error) {
modFile, err := modfile.Parse(uri.Filename(), contents, nil)
if err != nil {
return nil, nil, fmt.Errorf("parsing gopls.mod: %w", err)
}
modFiles := make(map[span.URI]struct{})
for _, replace := range modFile.Replace {
if replace.New.Version != "" {
return nil, nil, fmt.Errorf("gopls.mod: replaced module %q@%q must not have version", replace.New.Path, replace.New.Version)
}
// The resulting modfile must use absolute paths, so that it can be
// written to a temp directory.
replace.New.Path = absolutePath(root, replace.New.Path)
modURI := span.URIFromPath(filepath.Join(replace.New.Path, "go.mod"))
modFiles[modURI] = struct{}{}
}
return modFile, modFiles, nil
}
func absolutePath(root span.URI, path string) string {
dirFP := filepath.FromSlash(path)
if !filepath.IsAbs(dirFP) {
dirFP = filepath.Join(root.Filename(), dirFP)
}
return dirFP
}
// errExhausted is returned by findModules if the file scan limit is reached.
var errExhausted = errors.New("exhausted")
// Limit go.mod search to 1 million files. As a point of reference,
// Kubernetes has 22K files (as of 2020-11-24).
const fileLimit = 1000000
// findModules recursively walks the root directory looking for go.mod files,
// returning the set of modules it discovers. If modLimit is non-zero,
// searching stops once modLimit modules have been found.
//
// TODO(rfindley): consider overlays.
func findModules(root span.URI, excludePath func(string) bool, modLimit int) (map[span.URI]struct{}, error) {
// Walk the view's folder to find all modules in the view.
modFiles := make(map[span.URI]struct{})
searched := 0
errDone := errors.New("done")
err := filepath.Walk(root.Filename(), func(path string, info os.FileInfo, err error) error { | // Probably a permission error. Keep looking.
return filepath.SkipDir
}
// For any path that is not the workspace folder, check if the path
// would be ignored by the go command. Vendor directories also do not
// contain workspace modules.
if info.IsDir() && path != root.Filename() {
suffix := strings.TrimPrefix(path, root.Filename())
switch {
case checkIgnored(suffix),
strings.Contains(filepath.ToSlash(suffix), "/vendor/"),
excludePath(suffix):
return filepath.SkipDir
}
}
// We're only interested in go.mod files.
uri := span.URIFromPath(path)
if isGoMod(uri) {
modFiles[uri] = struct{}{}
}
if modLimit > 0 && len(modFiles) >= modLimit {
return errDone
}
searched++
if fileLimit > 0 && searched >= fileLimit {
return errExhausted
}
return nil
})
if err == errDone {
return modFiles, nil
}
return modFiles, err
} | if err != nil { | random_line_split |
lib.rs | #![deny(
// missing_copy_implementations,
// missing_debug_implementations,
// missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
// #![warn(rust_2018_idioms)]
#![doc(test(attr(deny(
missing_copy_implementations,
missing_debug_implementations,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences,
))))]
#![doc(test(attr(warn(rust_2018_idioms))))]
// Not needed for 2018 edition and conflicts with `rust_2018_idioms`
#![doc(test(no_crate_inject))]
#![doc(html_root_url = "https://docs.rs/serde_with/1.5.0-alpha.1")]
//! [](https://docs.rs/serde_with/)
//! [](https://crates.io/crates/serde_with/)
//! [](https://github.com/jonasbb/serde_with)
//! [](https://codecov.io/gh/jonasbb/serde_with)
//!
//! ---
//!
//! This crate provides custom de/serialization helpers to use in combination with [serde's with-annotation][with-annotation] and with the improved [`serde_as`][]-annotation.
//! Some common use cases are:
//!
//! * De/Serializing a type using the `Display` and `FromStr` traits, e.g., for `u8`, `url::Url`, or `mime::Mime`.
//! Check [`DisplayFromStr`][] or [`serde_with::rust::display_fromstr`][display_fromstr] for details.
//! * Skip serializing all empty `Option` types with [`#[skip_serializing_none]`][skip_serializing_none].
//! * Apply a prefix to each fieldname of a struct, without changing the de/serialize implementations of the struct using [`with_prefix!`][].
//! * Deserialize a comma separated list like `#hash,#tags,#are,#great` into a `Vec<String>`.
//! Check the documentation for [`serde_with::rust::StringWithSeparator::<CommaSeparator>`][StringWithSeparator].
//!
//! Check out the [**user guide**][user guide] to find out more tips and tricks about this crate.
//!
//! # Use `serde_with` in your Project
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies.serde_with]
//! version = "1.5.0-alpha.1"
//! features = [ "..." ]
//! ```
//!
//! The crate contains different features for integration with other common crates.
//! Check the [feature flags][] section for information about all available features.
//!
//! # Examples
//!
//! Annotate your struct or enum to enable the custom de/serializer.
//!
//! ## `DisplayFromStr`
//!
//! ```rust
//! # #[cfg(feature = "macros")]
//! # use serde_derive::{Deserialize, Serialize};
//! # #[cfg(feature = "macros")]
//! # use serde_with::{serde_as, DisplayFromStr};
//! # #[cfg(feature = "macros")]
//! #[serde_as]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! // Serialize with Display, deserialize with FromStr
//! #[serde_as(as = "DisplayFromStr")]
//! bar: u8,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json"))] {
//! // This will serialize
//! # let foo =
//! Foo {bar: 12}
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {"bar": "12"}
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! ## `skip_serializing_none`
//!
//! This situation often occurs with JSON, but other formats also support optional fields.
//! If many fields are optional, putting the annotations on the structs can become tedious.
//!
//! ```rust
//! # #[cfg(feature = "macros")]
//! # use serde_derive::{Deserialize, Serialize};
//! # #[cfg(feature = "macros")]
//! # use serde_with::{skip_serializing_none, DisplayFromStr};
//! # #[cfg(feature = "macros")]
//! #[skip_serializing_none]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! a: Option<usize>,
//! b: Option<usize>,
//! c: Option<usize>,
//! d: Option<usize>,
//! e: Option<usize>,
//! f: Option<usize>,
//! g: Option<usize>,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json"))] {
//! // This will serialize
//! # let foo =
//! Foo {a: None, b: None, c: None, d: Some(4), e: None, f: None, g: Some(7)}
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {"d": 4, "g": 7}
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! ## Advanced `serde_as` usage
//!
//! This example is mainly supposed to highlight the flexibility of the `serde_as`-annotation compared to [serde's with-annotation][with-annotation].
//! More details about `serde_as` can be found in the [user guide][].
//!
//!
//! ```rust
//! # #[cfg(all(feature = "macros", feature = "hex"))]
//! # use {
//! # serde_derive::{Deserialize, Serialize},
//! # serde_with::{serde_as, DisplayFromStr, DurationSeconds, hex::Hex},
//! # std::time::Duration,
//! # std::collections::BTreeMap,
//! # };
//! # #[cfg(all(feature = "macros", feature = "hex"))]
//! #[serde_as]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! // Serialize them into a list of number as seconds
//! #[serde_as(as = "Vec<DurationSeconds>")]
//! durations: Vec<Duration>,
//! // We can treat a Vec like a map with duplicates.
//! // JSON only allows string keys, so convert i32 to strings
//! // The bytes will be hex encoded
//! #[serde_as(as = "BTreeMap<DisplayFromStr, Hex>")]
//! bytes: Vec<(i32, Vec<u8>)>,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json", feature = "hex"))] {
//! // This will serialize
//! # let foo =
//! Foo {
//! durations: vec![Duration::new(5, 0), Duration::new(3600, 0), Duration::new(0, 0)],
//! bytes: vec![
//! (1, vec![0, 1, 2]),
//! (-100, vec![100, 200, 255]),
//! (1, vec![0, 111, 222]),
//! ],
//! }
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {
//! "durations": [5, 3600, 0],
//! "bytes": {
//! "1": "000102",
//! "-100": "64c8ff",
//! "1": "006fde"
//! }
//! }
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! [`DisplayFromStr`]: https://docs.rs/serde_with/*/serde_with/struct.DisplayFromStr.html
//! [`serde_as`]: https://docs.rs/serde_with/*/serde_with/guide/index.html
//! [`with_prefix!`]: https://docs.rs/serde_with/*/serde_with/macro.with_prefix.html
//! [display_fromstr]: https://docs.rs/serde_with/*/serde_with/rust/display_fromstr/index.html
//! [feature flags]: https://docs.rs/serde_with/*/serde_with/guide/feature_flags/index.html
//! [skip_serializing_none]: https://docs.rs/serde_with/*/serde_with/attr.skip_serializing_none.html
//! [StringWithSeparator]: https://docs.rs/serde_with/*/serde_with/rust/struct.StringWithSeparator.html
//! [user guide]: https://docs.rs/serde_with/*/serde_with/guide/index.html
//! [with-annotation]: https://serde.rs/field-attrs.html#with
#[doc(hidden)]
pub extern crate serde;
#[cfg(feature = "chrono")]
pub mod chrono;
pub mod de;
mod duplicate_key_impls;
mod flatten_maybe;
pub mod formats;
#[cfg(feature = "hex")]
pub mod hex;
#[cfg(feature = "json")]
pub mod json;
pub mod rust;
pub mod ser;
mod utils;
#[doc(hidden)]
pub mod with_prefix;
// Taken from shepmaster/snafu
// Originally licensed as MIT+Apache 2
// https://github.com/shepmaster/snafu/blob/fd37d79d4531ed1d3eebffad0d658928eb860cfe/src/lib.rs#L121-L165
#[cfg(feature = "guide")]
macro_rules! generate_guide {
(pub mod $name:ident; $($rest:tt)*) => {
generate_guide!(@gen ".", pub mod $name { } $($rest)*);
};
(pub mod $name:ident { $($children:tt)* } $($rest:tt)*) => {
generate_guide!(@gen ".", pub mod $name { $($children)* } $($rest)*);
};
(@gen $prefix:expr, ) => {};
(@gen $prefix:expr, pub mod $name:ident; $($rest:tt)*) => {
generate_guide!(@gen $prefix, pub mod $name { } $($rest)*);
};
(@gen $prefix:expr, @code pub mod $name:ident; $($rest:tt)*) => {
pub mod $name;
generate_guide!(@gen $prefix, $($rest)*);
};
(@gen $prefix:expr, pub mod $name:ident { $($children:tt)* } $($rest:tt)*) => {
doc_comment::doc_comment! {
include_str!(concat!($prefix, "/", stringify!($name), ".md")),
pub mod $name {
generate_guide!(@gen concat!($prefix, "/", stringify!($name)), $($children)*);
}
}
generate_guide!(@gen $prefix, $($rest)*);
};
}
#[cfg(feature = "guide")]
generate_guide! {
pub mod guide {
pub mod migrating;
pub mod feature_flags;
}
}
#[doc(inline)]
pub use crate::{de::DeserializeAs, ser::SerializeAs};
use serde::{ser::Serialize, Deserializer, Serializer};
// Re-Export all proc_macros, as these should be seen as part of the serde_with crate
#[cfg(feature = "macros")]
#[doc(inline)]
pub use serde_with_macros::*;
use std::marker::PhantomData;
/// Separator for string-based collection de/serialization
pub trait Separator {
/// Return the string delimiting two elements in the string-based collection
fn separator() -> &'static str;
}
/// Predefined separator using a single space
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct | ;
impl Separator for SpaceSeparator {
#[inline]
fn separator() -> &'static str {
" "
}
}
/// Predefined separator using a single comma
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct CommaSeparator;
impl Separator for CommaSeparator {
#[inline]
fn separator() -> &'static str {
","
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct As<T>(PhantomData<T>);
impl<T> As<T> {
pub fn serialize<S, I>(value: &I, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: SerializeAs<I>,
{
T::serialize_as(value, serializer)
}
pub fn deserialize<'de, D, I>(deserializer: D) -> Result<I, D::Error>
where
T: DeserializeAs<'de, I>,
D: Deserializer<'de>,
{
T::deserialize_as(deserializer)
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct Same;
#[derive(Copy, Clone, Debug, Default)]
pub struct DisplayFromStr;
#[derive(Copy, Clone, Debug, Default)]
pub struct NoneAsEmptyString;
#[derive(Copy, Clone, Debug, Default)]
pub struct DefaultOnError<T>(PhantomData<T>);
#[derive(Copy, Clone, Debug, Default)]
pub struct BytesOrString;
#[derive(Copy, Clone, Debug, Default)]
pub struct DurationSeconds<
FORMAT: formats::Format = u64,
STRICTNESS: formats::Strictness = formats::Strict,
>(PhantomData<(FORMAT, STRICTNESS)>);
#[derive(Copy, Clone, Debug, Default)]
pub struct DurationSecondsWithFrac<
FORMAT: formats::Format = f64,
STRICTNESS: formats::Strictness = formats::Strict,
>(PhantomData<(FORMAT, STRICTNESS)>);
| SpaceSeparator | identifier_name |
lib.rs | #![deny(
// missing_copy_implementations,
// missing_debug_implementations,
// missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
// #![warn(rust_2018_idioms)]
#![doc(test(attr(deny(
missing_copy_implementations,
missing_debug_implementations,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences,
))))]
#![doc(test(attr(warn(rust_2018_idioms))))]
// Not needed for 2018 edition and conflicts with `rust_2018_idioms`
#![doc(test(no_crate_inject))]
#![doc(html_root_url = "https://docs.rs/serde_with/1.5.0-alpha.1")]
//! [](https://docs.rs/serde_with/)
//! [](https://crates.io/crates/serde_with/)
//! [](https://github.com/jonasbb/serde_with)
//! [](https://codecov.io/gh/jonasbb/serde_with)
//!
//! ---
//!
//! This crate provides custom de/serialization helpers to use in combination with [serde's with-annotation][with-annotation] and with the improved [`serde_as`][]-annotation.
//! Some common use cases are:
//!
//! * De/Serializing a type using the `Display` and `FromStr` traits, e.g., for `u8`, `url::Url`, or `mime::Mime`.
//! Check [`DisplayFromStr`][] or [`serde_with::rust::display_fromstr`][display_fromstr] for details.
//! * Skip serializing all empty `Option` types with [`#[skip_serializing_none]`][skip_serializing_none].
//! * Apply a prefix to each fieldname of a struct, without changing the de/serialize implementations of the struct using [`with_prefix!`][].
//! * Deserialize a comma separated list like `#hash,#tags,#are,#great` into a `Vec<String>`.
//! Check the documentation for [`serde_with::rust::StringWithSeparator::<CommaSeparator>`][StringWithSeparator].
//!
//! Check out the [**user guide**][user guide] to find out more tips and tricks about this crate.
//!
//! # Use `serde_with` in your Project
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies.serde_with]
//! version = "1.5.0-alpha.1"
//! features = [ "..." ]
//! ```
//!
//! The crate contains different features for integration with other common crates.
//! Check the [feature flags][] section for information about all available features.
//!
//! # Examples
//!
//! Annotate your struct or enum to enable the custom de/serializer.
//!
//! ## `DisplayFromStr`
//!
//! ```rust
//! # #[cfg(feature = "macros")]
//! # use serde_derive::{Deserialize, Serialize};
//! # #[cfg(feature = "macros")]
//! # use serde_with::{serde_as, DisplayFromStr};
//! # #[cfg(feature = "macros")]
//! #[serde_as]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! // Serialize with Display, deserialize with FromStr
//! #[serde_as(as = "DisplayFromStr")]
//! bar: u8,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json"))] {
//! // This will serialize
//! # let foo =
//! Foo {bar: 12}
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {"bar": "12"}
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! ## `skip_serializing_none`
//!
//! This situation often occurs with JSON, but other formats also support optional fields.
//! If many fields are optional, putting the annotations on the structs can become tedious.
//!
//! ```rust
//! # #[cfg(feature = "macros")]
//! # use serde_derive::{Deserialize, Serialize};
//! # #[cfg(feature = "macros")]
//! # use serde_with::{skip_serializing_none, DisplayFromStr};
//! # #[cfg(feature = "macros")]
//! #[skip_serializing_none]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! a: Option<usize>,
//! b: Option<usize>,
//! c: Option<usize>,
//! d: Option<usize>,
//! e: Option<usize>,
//! f: Option<usize>,
//! g: Option<usize>,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json"))] {
//! // This will serialize
//! # let foo =
//! Foo {a: None, b: None, c: None, d: Some(4), e: None, f: None, g: Some(7)}
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {"d": 4, "g": 7}
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! ## Advanced `serde_as` usage
//!
//! This example is mainly supposed to highlight the flexibility of the `serde_as`-annotation compared to [serde's with-annotation][with-annotation].
//! More details about `serde_as` can be found in the [user guide][].
//!
//!
//! ```rust
//! # #[cfg(all(feature = "macros", feature = "hex"))]
//! # use {
//! # serde_derive::{Deserialize, Serialize},
//! # serde_with::{serde_as, DisplayFromStr, DurationSeconds, hex::Hex},
//! # std::time::Duration,
//! # std::collections::BTreeMap,
//! # };
//! # #[cfg(all(feature = "macros", feature = "hex"))]
//! #[serde_as]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! // Serialize them into a list of number as seconds
//! #[serde_as(as = "Vec<DurationSeconds>")]
//! durations: Vec<Duration>,
//! // We can treat a Vec like a map with duplicates.
//! // JSON only allows string keys, so convert i32 to strings
//! // The bytes will be hex encoded
//! #[serde_as(as = "BTreeMap<DisplayFromStr, Hex>")]
//! bytes: Vec<(i32, Vec<u8>)>,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json", feature = "hex"))] {
//! // This will serialize
//! # let foo =
//! Foo {
//! durations: vec![Duration::new(5, 0), Duration::new(3600, 0), Duration::new(0, 0)],
//! bytes: vec![
//! (1, vec![0, 1, 2]),
//! (-100, vec![100, 200, 255]),
//! (1, vec![0, 111, 222]),
//! ],
//! }
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {
//! "durations": [5, 3600, 0],
//! "bytes": {
//! "1": "000102",
//! "-100": "64c8ff", | //! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! [`DisplayFromStr`]: https://docs.rs/serde_with/*/serde_with/struct.DisplayFromStr.html
//! [`serde_as`]: https://docs.rs/serde_with/*/serde_with/guide/index.html
//! [`with_prefix!`]: https://docs.rs/serde_with/*/serde_with/macro.with_prefix.html
//! [display_fromstr]: https://docs.rs/serde_with/*/serde_with/rust/display_fromstr/index.html
//! [feature flags]: https://docs.rs/serde_with/*/serde_with/guide/feature_flags/index.html
//! [skip_serializing_none]: https://docs.rs/serde_with/*/serde_with/attr.skip_serializing_none.html
//! [StringWithSeparator]: https://docs.rs/serde_with/*/serde_with/rust/struct.StringWithSeparator.html
//! [user guide]: https://docs.rs/serde_with/*/serde_with/guide/index.html
//! [with-annotation]: https://serde.rs/field-attrs.html#with
#[doc(hidden)]
pub extern crate serde;
#[cfg(feature = "chrono")]
pub mod chrono;
pub mod de;
mod duplicate_key_impls;
mod flatten_maybe;
pub mod formats;
#[cfg(feature = "hex")]
pub mod hex;
#[cfg(feature = "json")]
pub mod json;
pub mod rust;
pub mod ser;
mod utils;
#[doc(hidden)]
pub mod with_prefix;
// Taken from shepmaster/snafu
// Originally licensed as MIT+Apache 2
// https://github.com/shepmaster/snafu/blob/fd37d79d4531ed1d3eebffad0d658928eb860cfe/src/lib.rs#L121-L165
#[cfg(feature = "guide")]
macro_rules! generate_guide {
(pub mod $name:ident; $($rest:tt)*) => {
generate_guide!(@gen ".", pub mod $name { } $($rest)*);
};
(pub mod $name:ident { $($children:tt)* } $($rest:tt)*) => {
generate_guide!(@gen ".", pub mod $name { $($children)* } $($rest)*);
};
(@gen $prefix:expr, ) => {};
(@gen $prefix:expr, pub mod $name:ident; $($rest:tt)*) => {
generate_guide!(@gen $prefix, pub mod $name { } $($rest)*);
};
(@gen $prefix:expr, @code pub mod $name:ident; $($rest:tt)*) => {
pub mod $name;
generate_guide!(@gen $prefix, $($rest)*);
};
(@gen $prefix:expr, pub mod $name:ident { $($children:tt)* } $($rest:tt)*) => {
doc_comment::doc_comment! {
include_str!(concat!($prefix, "/", stringify!($name), ".md")),
pub mod $name {
generate_guide!(@gen concat!($prefix, "/", stringify!($name)), $($children)*);
}
}
generate_guide!(@gen $prefix, $($rest)*);
};
}
#[cfg(feature = "guide")]
generate_guide! {
pub mod guide {
pub mod migrating;
pub mod feature_flags;
}
}
#[doc(inline)]
pub use crate::{de::DeserializeAs, ser::SerializeAs};
use serde::{ser::Serialize, Deserializer, Serializer};
// Re-Export all proc_macros, as these should be seen as part of the serde_with crate
#[cfg(feature = "macros")]
#[doc(inline)]
pub use serde_with_macros::*;
use std::marker::PhantomData;
/// Separator for string-based collection de/serialization
pub trait Separator {
/// Return the string delimiting two elements in the string-based collection
fn separator() -> &'static str;
}
/// Predefined separator using a single space
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct SpaceSeparator;
impl Separator for SpaceSeparator {
#[inline]
fn separator() -> &'static str {
" "
}
}
/// Predefined separator using a single comma
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct CommaSeparator;
impl Separator for CommaSeparator {
#[inline]
fn separator() -> &'static str {
","
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct As<T>(PhantomData<T>);
impl<T> As<T> {
pub fn serialize<S, I>(value: &I, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: SerializeAs<I>,
{
T::serialize_as(value, serializer)
}
pub fn deserialize<'de, D, I>(deserializer: D) -> Result<I, D::Error>
where
T: DeserializeAs<'de, I>,
D: Deserializer<'de>,
{
T::deserialize_as(deserializer)
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct Same;
#[derive(Copy, Clone, Debug, Default)]
pub struct DisplayFromStr;
#[derive(Copy, Clone, Debug, Default)]
pub struct NoneAsEmptyString;
#[derive(Copy, Clone, Debug, Default)]
pub struct DefaultOnError<T>(PhantomData<T>);
#[derive(Copy, Clone, Debug, Default)]
pub struct BytesOrString;
#[derive(Copy, Clone, Debug, Default)]
pub struct DurationSeconds<
FORMAT: formats::Format = u64,
STRICTNESS: formats::Strictness = formats::Strict,
>(PhantomData<(FORMAT, STRICTNESS)>);
#[derive(Copy, Clone, Debug, Default)]
pub struct DurationSecondsWithFrac<
FORMAT: formats::Format = f64,
STRICTNESS: formats::Strictness = formats::Strict,
>(PhantomData<(FORMAT, STRICTNESS)>); | //! "1": "006fde"
//! }
//! }
//! # "#; | random_line_split |
notification_client.rs | // Copyright 2023 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for that specific language governing permissions and
// limitations under the License.
use std::{
sync::{Arc, Mutex},
time::Duration,
};
use futures_util::{pin_mut, StreamExt as _};
use matrix_sdk::{room::Room, Client, ClientBuildError, SlidingSyncList, SlidingSyncMode};
use matrix_sdk_base::{deserialized_responses::TimelineEvent, RoomState, StoreError};
use ruma::{
api::client::sync::sync_events::v4::{
AccountDataConfig, RoomSubscription, SyncRequestListFilters,
},
assign,
events::{
room::member::StrippedRoomMemberEvent, AnyFullStateEventContent, AnyStateEvent,
AnySyncTimelineEvent, FullStateEventContent, StateEventType,
},
push::Action,
serde::Raw,
uint, EventId, OwnedEventId, RoomId, UserId,
};
use thiserror::Error;
use tokio::sync::Mutex as AsyncMutex;
use crate::encryption_sync::{EncryptionSync, WithLocking};
/// A client specialized for handling push notifications received over the
/// network, for an app.
///
/// In particular, it takes care of running a full decryption sync, in case the
/// event in the notification was impossible to decrypt beforehand.
pub struct NotificationClient {
/// SDK client that uses an in-memory state store.
client: Client,
/// SDK client that uses the same state store as the caller's context.
parent_client: Client,
/// Should we retry decrypting an event, after it was impossible to decrypt
/// on the first attempt?
retry_decryption: bool,
/// Should the encryption sync happening in case the notification event was
/// encrypted use a cross-process lock?
///
/// Only meaningful if `retry_decryption` is true.
with_cross_process_lock: bool,
/// Should we try to filter out the notification event according to the push
/// rules?
filter_by_push_rules: bool,
/// A mutex to serialize requests to sliding sync.
///
/// If several notifications come in at the same time (e.g. network was
/// unreachable because of airplane mode or something similar), then we
/// need to make sure that repeated calls to `get_notification` won't
/// cause multiple requests with the same `conn_id` we're using for
/// notifications. This mutex solves this by sequentializing the requests.
sliding_sync_mutex: AsyncMutex<()>,
}
impl NotificationClient {
const CONNECTION_ID: &str = "notifications";
const LOCK_ID: &str = "notifications";
/// Create a new builder for a notification client.
pub async fn builder(client: Client) -> Result<NotificationClientBuilder, Error> {
NotificationClientBuilder::new(client).await
}
/// Fetches the content of a notification.
///
/// This will first try to get the notification using a short-lived sliding
/// sync, and if the sliding-sync can't find the event, then it'll use a
/// `/context` query to find the event with associated member information.
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
match self.get_notification_with_sliding_sync(room_id, event_id).await? {
NotificationStatus::Event(event) => Ok(Some(event)),
NotificationStatus::EventFilteredOut => Ok(None),
NotificationStatus::EventNotFound => {
self.get_notification_with_context(room_id, event_id).await
}
}
}
/// Run an encryption sync loop, in case an event is still encrypted.
///
/// Will return true if and only:
/// - retry_decryption was enabled,
/// - the event was encrypted,
/// - we successfully ran an encryption sync.
async fn maybe_retry_decryption(
&self,
room: &Room,
raw_event: &Raw<AnySyncTimelineEvent>,
) -> Result<Option<TimelineEvent>, Error> |
/// Try to run a sliding sync (without encryption) to retrieve the event
/// from the notification.
///
/// This works by requesting explicit state that'll be useful for building
/// the `NotificationItem`, and subscribing to the room which the
/// notification relates to.
async fn try_sliding_sync(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<RawNotificationEvent>, Error> {
// Serialize all the calls to this method by taking a lock at the beginning,
// that will be dropped later.
let _guard = self.sliding_sync_mutex.lock().await;
// Set up a sliding sync that only subscribes to the room that had the
// notification, so we can figure out the full event and associated
// information.
let notification = Arc::new(Mutex::new(None));
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let timeline_event_handler =
self.client.add_event_handler(move |raw: Raw<AnySyncTimelineEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() =
Some(RawNotificationEvent::Timeline(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let stripped_member_handler =
self.client.add_event_handler(move |raw: Raw<StrippedRoomMemberEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() = Some(RawNotificationEvent::Invite(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
// Room power levels are necessary to build the push context.
let required_state = vec![
(StateEventType::RoomAvatar, "".to_owned()),
(StateEventType::RoomEncryption, "".to_owned()),
(StateEventType::RoomMember, "$LAZY".to_owned()),
(StateEventType::RoomMember, "$ME".to_owned()),
(StateEventType::RoomCanonicalAlias, "".to_owned()),
(StateEventType::RoomName, "".to_owned()),
(StateEventType::RoomPowerLevels, "".to_owned()),
];
let invites = SlidingSyncList::builder("invites")
.sync_mode(SlidingSyncMode::new_selective().add_range(0..=16))
.timeline_limit(8)
.required_state(required_state.clone())
.filters(Some(assign!(SyncRequestListFilters::default(), {
is_invite: Some(true),
is_tombstoned: Some(false),
not_room_types: vec!["m.space".to_owned()],
})))
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]);
let sync = self
.client
.sliding_sync(Self::CONNECTION_ID)?
.poll_timeout(Duration::from_secs(1))
.network_timeout(Duration::from_secs(3))
.with_account_data_extension(
assign!(AccountDataConfig::default(), { enabled: Some(true) }),
)
.add_list(invites)
.build()
.await?;
sync.subscribe_to_room(
room_id.to_owned(),
Some(assign!(RoomSubscription::default(), {
required_state,
timeline_limit: Some(uint!(16))
})),
);
let mut remaining_attempts = 3;
let stream = sync.sync();
pin_mut!(stream);
loop {
if stream.next().await.is_none() {
// Sliding sync aborted early.
break;
}
if notification.lock().unwrap().is_some() {
// We got the event.
break;
}
remaining_attempts -= 1;
if remaining_attempts == 0 {
// We're out of luck.
break;
}
}
self.client.remove_event_handler(stripped_member_handler);
self.client.remove_event_handler(timeline_event_handler);
let maybe_event = notification.lock().unwrap().take();
Ok(maybe_event)
}
/// Get a full notification, given a room id and event id.
///
/// This will run a small sliding sync to retrieve the content of the event,
/// along with extra data to form a rich notification context.
pub async fn get_notification_with_sliding_sync(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<NotificationStatus, Error> {
tracing::info!("fetching notification event with a sliding sync");
let Some(mut raw_event) = self.try_sliding_sync(room_id, event_id).await? else {
return Ok(NotificationStatus::EventNotFound);
};
// At this point it should have been added by the sync, if it's not, give up.
let Some(room) = self.client.get_room(room_id) else { return Err(Error::UnknownRoom) };
let push_actions = match &raw_event {
RawNotificationEvent::Timeline(timeline_event) => {
// Timeline events may be encrypted, so make sure they get decrypted first.
if let Some(timeline_event) =
self.maybe_retry_decryption(&room, timeline_event).await?
{
raw_event = RawNotificationEvent::Timeline(timeline_event.event.cast());
timeline_event.push_actions
} else {
room.event_push_actions(timeline_event).await?
}
}
RawNotificationEvent::Invite(invite_event) => {
// Invite events can't be encrypted, so they should be in clear text.
room.event_push_actions(invite_event).await?
}
};
if let Some(push_actions) = &push_actions {
if self.filter_by_push_rules && !push_actions.iter().any(|a| a.should_notify()) {
return Ok(NotificationStatus::EventFilteredOut);
}
}
Ok(NotificationStatus::Event(
NotificationItem::new(&room, &raw_event, push_actions.as_deref(), Vec::new()).await?,
))
}
/// Retrieve a notification using a `/context` query.
///
/// This is for clients that are already running other sliding syncs in the
/// same process, so that most of the contextual information for the
/// notification should already be there. In particular, the room containing
/// the event MUST be known (via a sliding sync for invites, or another
/// sliding sync).
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification_with_context(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
tracing::info!("fetching notification event with a /context query");
// See above comment.
let Some(room) = self.parent_client.get_room(room_id) else {
return Err(Error::UnknownRoom);
};
let (mut timeline_event, state_events) =
room.event_with_context(event_id, true).await?.ok_or(Error::ContextMissingEvent)?;
if let Some(decrypted_event) =
self.maybe_retry_decryption(&room, timeline_event.event.cast_ref()).await?
{
timeline_event = decrypted_event;
}
if self.filter_by_push_rules
&& !timeline_event
.push_actions
.as_ref()
.is_some_and(|actions| actions.iter().any(|a| a.should_notify()))
{
return Ok(None);
}
Ok(Some(
NotificationItem::new(
&room,
&RawNotificationEvent::Timeline(timeline_event.event.cast()),
timeline_event.push_actions.as_deref(),
state_events,
)
.await?,
))
}
}
#[derive(Debug)]
pub enum NotificationStatus {
Event(NotificationItem),
EventNotFound,
EventFilteredOut,
}
/// Builder for a `NotificationClient`.
///
/// Fields have the same meaning as in `NotificationClient`.
#[derive(Clone)]
pub struct NotificationClientBuilder {
/// SDK client that uses an in-memory state store, to be used with the
/// sliding sync method.
client: Client,
/// SDK client that uses the same state store as the caller's context.
parent_client: Client,
retry_decryption: bool,
with_cross_process_lock: bool,
filter_by_push_rules: bool,
}
impl NotificationClientBuilder {
async fn new(parent_client: Client) -> Result<Self, Error> {
let client = parent_client.notification_client().await?;
Ok(Self {
client,
parent_client,
retry_decryption: false,
with_cross_process_lock: false,
filter_by_push_rules: false,
})
}
/// Filter out the notification event according to the push rules present in
/// the event.
pub fn filter_by_push_rules(mut self) -> Self {
self.filter_by_push_rules = true;
self
}
/// Automatically retry decryption once, if the notification was received
/// encrypted.
///
/// The boolean indicates whether we're making use of a cross-process lock
/// for the crypto-store. This should be set to true, if and only if,
/// the notification is received in a process that's different from the
/// main app.
pub fn retry_decryption(mut self, with_cross_process_lock: bool) -> Self {
self.retry_decryption = true;
self.with_cross_process_lock = with_cross_process_lock;
self
}
/// Finishes configuring the `NotificationClient`.
pub fn build(self) -> NotificationClient {
NotificationClient {
client: self.client,
parent_client: self.parent_client,
with_cross_process_lock: self.with_cross_process_lock,
filter_by_push_rules: self.filter_by_push_rules,
retry_decryption: self.retry_decryption,
sliding_sync_mutex: AsyncMutex::new(()),
}
}
}
enum RawNotificationEvent {
Timeline(Raw<AnySyncTimelineEvent>),
Invite(Raw<StrippedRoomMemberEvent>),
}
#[derive(Debug)]
pub enum NotificationEvent {
Timeline(AnySyncTimelineEvent),
Invite(StrippedRoomMemberEvent),
}
impl NotificationEvent {
pub fn sender(&self) -> &UserId {
match self {
NotificationEvent::Timeline(ev) => ev.sender(),
NotificationEvent::Invite(ev) => &ev.sender,
}
}
}
/// A notification with its full content.
#[derive(Debug)]
pub struct NotificationItem {
/// Underlying Ruma event.
pub event: NotificationEvent,
/// Display name of the sender.
pub sender_display_name: Option<String>,
/// Avatar URL of the sender.
pub sender_avatar_url: Option<String>,
/// Room display name.
pub room_display_name: String,
/// Room avatar URL.
pub room_avatar_url: Option<String>,
/// Room canonical alias.
pub room_canonical_alias: Option<String>,
/// Is this room encrypted?
pub is_room_encrypted: Option<bool>,
/// Is this room considered a direct message?
pub is_direct_message_room: bool,
/// Numbers of members who joined the room.
pub joined_members_count: u64,
/// Is it a noisy notification? (i.e. does any push action contain a sound
/// action)
///
/// It is set if and only if the push actions could be determined.
pub is_noisy: Option<bool>,
}
impl NotificationItem {
async fn new(
room: &Room,
raw_event: &RawNotificationEvent,
push_actions: Option<&[Action]>,
state_events: Vec<Raw<AnyStateEvent>>,
) -> Result<Self, Error> {
let event = match raw_event {
RawNotificationEvent::Timeline(raw_event) => NotificationEvent::Timeline(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
RawNotificationEvent::Invite(raw_event) => NotificationEvent::Invite(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
};
let sender = match room.state() {
RoomState::Invited => room.invite_details().await?.inviter,
_ => room.get_member_no_sync(event.sender()).await?,
};
let (mut sender_display_name, mut sender_avatar_url) = match &sender {
Some(sender) => (
sender.display_name().map(|s| s.to_owned()),
sender.avatar_url().map(|s| s.to_string()),
),
None => (None, None),
};
if sender_display_name.is_none() || sender_avatar_url.is_none() {
let sender_id = event.sender();
for ev in state_events {
let Ok(ev) = ev.deserialize() else {
continue;
};
if ev.sender() != sender_id {
continue;
}
if let AnyFullStateEventContent::RoomMember(FullStateEventContent::Original {
content,
..
}) = ev.content()
{
if sender_display_name.is_none() {
sender_display_name = content.displayname;
}
if sender_avatar_url.is_none() {
sender_avatar_url = content.avatar_url.map(|url| url.to_string());
}
}
}
}
let is_noisy = push_actions.map(|actions| actions.iter().any(|a| a.sound().is_some()));
let item = NotificationItem {
event,
sender_display_name,
sender_avatar_url,
room_display_name: room.display_name().await?.to_string(),
room_avatar_url: room.avatar_url().map(|s| s.to_string()),
room_canonical_alias: room.canonical_alias().map(|c| c.to_string()),
is_direct_message_room: room.is_direct().await?,
is_room_encrypted: room.is_encrypted().await.ok(),
joined_members_count: room.joined_members_count(),
is_noisy,
};
Ok(item)
}
}
/// An error for the [`NotificationClient`].
#[derive(Debug, Error)]
pub enum Error {
#[error(transparent)]
BuildingLocalClient(ClientBuildError),
/// The room associated to this event wasn't found.
#[error("unknown room for a notification")]
UnknownRoom,
/// The Ruma event contained within this notification couldn't be parsed.
#[error("invalid ruma event")]
InvalidRumaEvent,
/// When calling `get_notification_with_sliding_sync`, the room was missing
/// in the response.
#[error("the sliding sync response doesn't include the target room")]
SlidingSyncEmptyRoom,
#[error("the event was missing in the `/context` query")]
ContextMissingEvent,
/// An error forwarded from the client.
#[error(transparent)]
SdkError(#[from] matrix_sdk::Error),
/// An error forwarded from the underlying state store.
#[error(transparent)]
StoreError(#[from] StoreError),
}
| {
if !self.retry_decryption {
return Ok(None);
}
let event: AnySyncTimelineEvent =
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?;
let event_type = event.event_type();
let is_still_encrypted =
matches!(event_type, ruma::events::TimelineEventType::RoomEncrypted);
#[cfg(feature = "unstable-msc3956")]
let is_still_encrypted =
is_still_encrypted || matches!(event_type, ruma::events::TimelineEventType::Encrypted);
if !is_still_encrypted {
return Ok(None);
}
// The message is still encrypted, and the client is configured to retry
// decryption.
//
// Spawn an `EncryptionSync` that runs two iterations of the sliding sync loop:
// - the first iteration allows to get SS events as well as send e2ee requests.
// - the second one let the SS proxy forward events triggered by the sending of
// e2ee requests.
//
// Keep timeouts small for both, since we might be short on time.
let with_locking = WithLocking::from(self.with_cross_process_lock);
let encryption_sync = EncryptionSync::new(
Self::LOCK_ID.to_owned(),
self.client.clone(),
Some((Duration::from_secs(3), Duration::from_secs(4))),
with_locking,
)
.await;
// Just log out errors, but don't have them abort the notification processing:
// an undecrypted notification is still better than no
// notifications.
match encryption_sync {
Ok(sync) => match sync.run_fixed_iterations(2).await {
Ok(()) => {
let new_event = room.decrypt_event(raw_event.cast_ref()).await?;
Ok(Some(new_event))
}
Err(err) => {
tracing::warn!(
"error when running encryption_sync in get_notification: {err:#}"
);
Ok(None)
}
},
Err(err) => {
tracing::warn!("error when building encryption_sync in get_notification: {err:#}",);
Ok(None)
}
}
} | identifier_body |
notification_client.rs | // Copyright 2023 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for that specific language governing permissions and
// limitations under the License.
use std::{
sync::{Arc, Mutex},
time::Duration,
};
use futures_util::{pin_mut, StreamExt as _};
use matrix_sdk::{room::Room, Client, ClientBuildError, SlidingSyncList, SlidingSyncMode};
use matrix_sdk_base::{deserialized_responses::TimelineEvent, RoomState, StoreError};
use ruma::{
api::client::sync::sync_events::v4::{
AccountDataConfig, RoomSubscription, SyncRequestListFilters,
},
assign,
events::{
room::member::StrippedRoomMemberEvent, AnyFullStateEventContent, AnyStateEvent,
AnySyncTimelineEvent, FullStateEventContent, StateEventType,
},
push::Action,
serde::Raw,
uint, EventId, OwnedEventId, RoomId, UserId,
};
use thiserror::Error;
use tokio::sync::Mutex as AsyncMutex;
use crate::encryption_sync::{EncryptionSync, WithLocking};
/// A client specialized for handling push notifications received over the
/// network, for an app.
///
/// In particular, it takes care of running a full decryption sync, in case the
/// event in the notification was impossible to decrypt beforehand.
pub struct NotificationClient {
/// SDK client that uses an in-memory state store.
client: Client,
/// SDK client that uses the same state store as the caller's context.
parent_client: Client,
/// Should we retry decrypting an event, after it was impossible to decrypt
/// on the first attempt?
retry_decryption: bool,
/// Should the encryption sync happening in case the notification event was
/// encrypted use a cross-process lock?
///
/// Only meaningful if `retry_decryption` is true.
with_cross_process_lock: bool,
/// Should we try to filter out the notification event according to the push
/// rules?
filter_by_push_rules: bool,
/// A mutex to serialize requests to sliding sync.
///
/// If several notifications come in at the same time (e.g. network was
/// unreachable because of airplane mode or something similar), then we
/// need to make sure that repeated calls to `get_notification` won't
/// cause multiple requests with the same `conn_id` we're using for
/// notifications. This mutex solves this by sequentializing the requests.
sliding_sync_mutex: AsyncMutex<()>,
}
impl NotificationClient {
const CONNECTION_ID: &str = "notifications";
const LOCK_ID: &str = "notifications";
/// Create a new builder for a notification client.
pub async fn builder(client: Client) -> Result<NotificationClientBuilder, Error> {
NotificationClientBuilder::new(client).await
}
/// Fetches the content of a notification.
///
/// This will first try to get the notification using a short-lived sliding
/// sync, and if the sliding-sync can't find the event, then it'll use a
/// `/context` query to find the event with associated member information.
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
match self.get_notification_with_sliding_sync(room_id, event_id).await? {
NotificationStatus::Event(event) => Ok(Some(event)),
NotificationStatus::EventFilteredOut => Ok(None),
NotificationStatus::EventNotFound => {
self.get_notification_with_context(room_id, event_id).await
}
}
}
/// Run an encryption sync loop, in case an event is still encrypted.
///
/// Will return true if and only:
/// - retry_decryption was enabled,
/// - the event was encrypted,
/// - we successfully ran an encryption sync.
async fn maybe_retry_decryption(
&self,
room: &Room,
raw_event: &Raw<AnySyncTimelineEvent>,
) -> Result<Option<TimelineEvent>, Error> {
if !self.retry_decryption {
return Ok(None);
}
let event: AnySyncTimelineEvent =
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?;
let event_type = event.event_type();
let is_still_encrypted =
matches!(event_type, ruma::events::TimelineEventType::RoomEncrypted);
#[cfg(feature = "unstable-msc3956")]
let is_still_encrypted =
is_still_encrypted || matches!(event_type, ruma::events::TimelineEventType::Encrypted);
if !is_still_encrypted {
return Ok(None);
}
// The message is still encrypted, and the client is configured to retry
// decryption.
//
// Spawn an `EncryptionSync` that runs two iterations of the sliding sync loop:
// - the first iteration allows to get SS events as well as send e2ee requests.
// - the second one let the SS proxy forward events triggered by the sending of
// e2ee requests.
//
// Keep timeouts small for both, since we might be short on time.
let with_locking = WithLocking::from(self.with_cross_process_lock);
let encryption_sync = EncryptionSync::new(
Self::LOCK_ID.to_owned(),
self.client.clone(),
Some((Duration::from_secs(3), Duration::from_secs(4))),
with_locking,
)
.await;
// Just log out errors, but don't have them abort the notification processing:
// an undecrypted notification is still better than no
// notifications.
match encryption_sync {
Ok(sync) => match sync.run_fixed_iterations(2).await {
Ok(()) => {
let new_event = room.decrypt_event(raw_event.cast_ref()).await?;
Ok(Some(new_event))
}
Err(err) => {
tracing::warn!(
"error when running encryption_sync in get_notification: {err:#}"
);
Ok(None)
}
},
Err(err) => {
tracing::warn!("error when building encryption_sync in get_notification: {err:#}",);
Ok(None)
}
}
}
/// Try to run a sliding sync (without encryption) to retrieve the event
/// from the notification.
///
/// This works by requesting explicit state that'll be useful for building
/// the `NotificationItem`, and subscribing to the room which the
/// notification relates to.
async fn | (
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<RawNotificationEvent>, Error> {
// Serialize all the calls to this method by taking a lock at the beginning,
// that will be dropped later.
let _guard = self.sliding_sync_mutex.lock().await;
// Set up a sliding sync that only subscribes to the room that had the
// notification, so we can figure out the full event and associated
// information.
let notification = Arc::new(Mutex::new(None));
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let timeline_event_handler =
self.client.add_event_handler(move |raw: Raw<AnySyncTimelineEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() =
Some(RawNotificationEvent::Timeline(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let stripped_member_handler =
self.client.add_event_handler(move |raw: Raw<StrippedRoomMemberEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() = Some(RawNotificationEvent::Invite(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
// Room power levels are necessary to build the push context.
let required_state = vec![
(StateEventType::RoomAvatar, "".to_owned()),
(StateEventType::RoomEncryption, "".to_owned()),
(StateEventType::RoomMember, "$LAZY".to_owned()),
(StateEventType::RoomMember, "$ME".to_owned()),
(StateEventType::RoomCanonicalAlias, "".to_owned()),
(StateEventType::RoomName, "".to_owned()),
(StateEventType::RoomPowerLevels, "".to_owned()),
];
let invites = SlidingSyncList::builder("invites")
.sync_mode(SlidingSyncMode::new_selective().add_range(0..=16))
.timeline_limit(8)
.required_state(required_state.clone())
.filters(Some(assign!(SyncRequestListFilters::default(), {
is_invite: Some(true),
is_tombstoned: Some(false),
not_room_types: vec!["m.space".to_owned()],
})))
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]);
let sync = self
.client
.sliding_sync(Self::CONNECTION_ID)?
.poll_timeout(Duration::from_secs(1))
.network_timeout(Duration::from_secs(3))
.with_account_data_extension(
assign!(AccountDataConfig::default(), { enabled: Some(true) }),
)
.add_list(invites)
.build()
.await?;
sync.subscribe_to_room(
room_id.to_owned(),
Some(assign!(RoomSubscription::default(), {
required_state,
timeline_limit: Some(uint!(16))
})),
);
let mut remaining_attempts = 3;
let stream = sync.sync();
pin_mut!(stream);
loop {
if stream.next().await.is_none() {
// Sliding sync aborted early.
break;
}
if notification.lock().unwrap().is_some() {
// We got the event.
break;
}
remaining_attempts -= 1;
if remaining_attempts == 0 {
// We're out of luck.
break;
}
}
self.client.remove_event_handler(stripped_member_handler);
self.client.remove_event_handler(timeline_event_handler);
let maybe_event = notification.lock().unwrap().take();
Ok(maybe_event)
}
/// Get a full notification, given a room id and event id.
///
/// This will run a small sliding sync to retrieve the content of the event,
/// along with extra data to form a rich notification context.
pub async fn get_notification_with_sliding_sync(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<NotificationStatus, Error> {
tracing::info!("fetching notification event with a sliding sync");
let Some(mut raw_event) = self.try_sliding_sync(room_id, event_id).await? else {
return Ok(NotificationStatus::EventNotFound);
};
// At this point it should have been added by the sync, if it's not, give up.
let Some(room) = self.client.get_room(room_id) else { return Err(Error::UnknownRoom) };
let push_actions = match &raw_event {
RawNotificationEvent::Timeline(timeline_event) => {
// Timeline events may be encrypted, so make sure they get decrypted first.
if let Some(timeline_event) =
self.maybe_retry_decryption(&room, timeline_event).await?
{
raw_event = RawNotificationEvent::Timeline(timeline_event.event.cast());
timeline_event.push_actions
} else {
room.event_push_actions(timeline_event).await?
}
}
RawNotificationEvent::Invite(invite_event) => {
// Invite events can't be encrypted, so they should be in clear text.
room.event_push_actions(invite_event).await?
}
};
if let Some(push_actions) = &push_actions {
if self.filter_by_push_rules && !push_actions.iter().any(|a| a.should_notify()) {
return Ok(NotificationStatus::EventFilteredOut);
}
}
Ok(NotificationStatus::Event(
NotificationItem::new(&room, &raw_event, push_actions.as_deref(), Vec::new()).await?,
))
}
/// Retrieve a notification using a `/context` query.
///
/// This is for clients that are already running other sliding syncs in the
/// same process, so that most of the contextual information for the
/// notification should already be there. In particular, the room containing
/// the event MUST be known (via a sliding sync for invites, or another
/// sliding sync).
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification_with_context(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
tracing::info!("fetching notification event with a /context query");
// See above comment.
let Some(room) = self.parent_client.get_room(room_id) else {
return Err(Error::UnknownRoom);
};
let (mut timeline_event, state_events) =
room.event_with_context(event_id, true).await?.ok_or(Error::ContextMissingEvent)?;
if let Some(decrypted_event) =
self.maybe_retry_decryption(&room, timeline_event.event.cast_ref()).await?
{
timeline_event = decrypted_event;
}
if self.filter_by_push_rules
&& !timeline_event
.push_actions
.as_ref()
.is_some_and(|actions| actions.iter().any(|a| a.should_notify()))
{
return Ok(None);
}
Ok(Some(
NotificationItem::new(
&room,
&RawNotificationEvent::Timeline(timeline_event.event.cast()),
timeline_event.push_actions.as_deref(),
state_events,
)
.await?,
))
}
}
#[derive(Debug)]
pub enum NotificationStatus {
Event(NotificationItem),
EventNotFound,
EventFilteredOut,
}
/// Builder for a `NotificationClient`.
///
/// Fields have the same meaning as in `NotificationClient`.
#[derive(Clone)]
pub struct NotificationClientBuilder {
/// SDK client that uses an in-memory state store, to be used with the
/// sliding sync method.
client: Client,
/// SDK client that uses the same state store as the caller's context.
parent_client: Client,
retry_decryption: bool,
with_cross_process_lock: bool,
filter_by_push_rules: bool,
}
impl NotificationClientBuilder {
async fn new(parent_client: Client) -> Result<Self, Error> {
let client = parent_client.notification_client().await?;
Ok(Self {
client,
parent_client,
retry_decryption: false,
with_cross_process_lock: false,
filter_by_push_rules: false,
})
}
/// Filter out the notification event according to the push rules present in
/// the event.
pub fn filter_by_push_rules(mut self) -> Self {
self.filter_by_push_rules = true;
self
}
/// Automatically retry decryption once, if the notification was received
/// encrypted.
///
/// The boolean indicates whether we're making use of a cross-process lock
/// for the crypto-store. This should be set to true, if and only if,
/// the notification is received in a process that's different from the
/// main app.
pub fn retry_decryption(mut self, with_cross_process_lock: bool) -> Self {
self.retry_decryption = true;
self.with_cross_process_lock = with_cross_process_lock;
self
}
/// Finishes configuring the `NotificationClient`.
pub fn build(self) -> NotificationClient {
NotificationClient {
client: self.client,
parent_client: self.parent_client,
with_cross_process_lock: self.with_cross_process_lock,
filter_by_push_rules: self.filter_by_push_rules,
retry_decryption: self.retry_decryption,
sliding_sync_mutex: AsyncMutex::new(()),
}
}
}
enum RawNotificationEvent {
Timeline(Raw<AnySyncTimelineEvent>),
Invite(Raw<StrippedRoomMemberEvent>),
}
#[derive(Debug)]
pub enum NotificationEvent {
Timeline(AnySyncTimelineEvent),
Invite(StrippedRoomMemberEvent),
}
impl NotificationEvent {
pub fn sender(&self) -> &UserId {
match self {
NotificationEvent::Timeline(ev) => ev.sender(),
NotificationEvent::Invite(ev) => &ev.sender,
}
}
}
/// A notification with its full content.
#[derive(Debug)]
pub struct NotificationItem {
/// Underlying Ruma event.
pub event: NotificationEvent,
/// Display name of the sender.
pub sender_display_name: Option<String>,
/// Avatar URL of the sender.
pub sender_avatar_url: Option<String>,
/// Room display name.
pub room_display_name: String,
/// Room avatar URL.
pub room_avatar_url: Option<String>,
/// Room canonical alias.
pub room_canonical_alias: Option<String>,
/// Is this room encrypted?
pub is_room_encrypted: Option<bool>,
/// Is this room considered a direct message?
pub is_direct_message_room: bool,
/// Numbers of members who joined the room.
pub joined_members_count: u64,
/// Is it a noisy notification? (i.e. does any push action contain a sound
/// action)
///
/// It is set if and only if the push actions could be determined.
pub is_noisy: Option<bool>,
}
impl NotificationItem {
async fn new(
room: &Room,
raw_event: &RawNotificationEvent,
push_actions: Option<&[Action]>,
state_events: Vec<Raw<AnyStateEvent>>,
) -> Result<Self, Error> {
let event = match raw_event {
RawNotificationEvent::Timeline(raw_event) => NotificationEvent::Timeline(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
RawNotificationEvent::Invite(raw_event) => NotificationEvent::Invite(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
};
let sender = match room.state() {
RoomState::Invited => room.invite_details().await?.inviter,
_ => room.get_member_no_sync(event.sender()).await?,
};
let (mut sender_display_name, mut sender_avatar_url) = match &sender {
Some(sender) => (
sender.display_name().map(|s| s.to_owned()),
sender.avatar_url().map(|s| s.to_string()),
),
None => (None, None),
};
if sender_display_name.is_none() || sender_avatar_url.is_none() {
let sender_id = event.sender();
for ev in state_events {
let Ok(ev) = ev.deserialize() else {
continue;
};
if ev.sender() != sender_id {
continue;
}
if let AnyFullStateEventContent::RoomMember(FullStateEventContent::Original {
content,
..
}) = ev.content()
{
if sender_display_name.is_none() {
sender_display_name = content.displayname;
}
if sender_avatar_url.is_none() {
sender_avatar_url = content.avatar_url.map(|url| url.to_string());
}
}
}
}
let is_noisy = push_actions.map(|actions| actions.iter().any(|a| a.sound().is_some()));
let item = NotificationItem {
event,
sender_display_name,
sender_avatar_url,
room_display_name: room.display_name().await?.to_string(),
room_avatar_url: room.avatar_url().map(|s| s.to_string()),
room_canonical_alias: room.canonical_alias().map(|c| c.to_string()),
is_direct_message_room: room.is_direct().await?,
is_room_encrypted: room.is_encrypted().await.ok(),
joined_members_count: room.joined_members_count(),
is_noisy,
};
Ok(item)
}
}
/// An error for the [`NotificationClient`].
#[derive(Debug, Error)]
pub enum Error {
#[error(transparent)]
BuildingLocalClient(ClientBuildError),
/// The room associated to this event wasn't found.
#[error("unknown room for a notification")]
UnknownRoom,
/// The Ruma event contained within this notification couldn't be parsed.
#[error("invalid ruma event")]
InvalidRumaEvent,
/// When calling `get_notification_with_sliding_sync`, the room was missing
/// in the response.
#[error("the sliding sync response doesn't include the target room")]
SlidingSyncEmptyRoom,
#[error("the event was missing in the `/context` query")]
ContextMissingEvent,
/// An error forwarded from the client.
#[error(transparent)]
SdkError(#[from] matrix_sdk::Error),
/// An error forwarded from the underlying state store.
#[error(transparent)]
StoreError(#[from] StoreError),
}
| try_sliding_sync | identifier_name |
notification_client.rs | // Copyright 2023 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for that specific language governing permissions and
// limitations under the License.
use std::{
sync::{Arc, Mutex},
time::Duration,
};
use futures_util::{pin_mut, StreamExt as _};
use matrix_sdk::{room::Room, Client, ClientBuildError, SlidingSyncList, SlidingSyncMode};
use matrix_sdk_base::{deserialized_responses::TimelineEvent, RoomState, StoreError};
use ruma::{
api::client::sync::sync_events::v4::{
AccountDataConfig, RoomSubscription, SyncRequestListFilters,
},
assign,
events::{
room::member::StrippedRoomMemberEvent, AnyFullStateEventContent, AnyStateEvent,
AnySyncTimelineEvent, FullStateEventContent, StateEventType,
},
push::Action,
serde::Raw,
uint, EventId, OwnedEventId, RoomId, UserId,
};
use thiserror::Error;
use tokio::sync::Mutex as AsyncMutex;
use crate::encryption_sync::{EncryptionSync, WithLocking};
/// A client specialized for handling push notifications received over the
/// network, for an app.
///
/// In particular, it takes care of running a full decryption sync, in case the
/// event in the notification was impossible to decrypt beforehand.
pub struct NotificationClient {
/// SDK client that uses an in-memory state store.
client: Client,
/// SDK client that uses the same state store as the caller's context.
parent_client: Client,
/// Should we retry decrypting an event, after it was impossible to decrypt
/// on the first attempt?
retry_decryption: bool,
/// Should the encryption sync happening in case the notification event was
/// encrypted use a cross-process lock?
///
/// Only meaningful if `retry_decryption` is true.
with_cross_process_lock: bool,
/// Should we try to filter out the notification event according to the push
/// rules?
filter_by_push_rules: bool,
/// A mutex to serialize requests to sliding sync.
///
/// If several notifications come in at the same time (e.g. network was
/// unreachable because of airplane mode or something similar), then we
/// need to make sure that repeated calls to `get_notification` won't
/// cause multiple requests with the same `conn_id` we're using for
/// notifications. This mutex solves this by sequentializing the requests.
sliding_sync_mutex: AsyncMutex<()>,
}
impl NotificationClient {
const CONNECTION_ID: &str = "notifications";
const LOCK_ID: &str = "notifications";
/// Create a new builder for a notification client.
pub async fn builder(client: Client) -> Result<NotificationClientBuilder, Error> {
NotificationClientBuilder::new(client).await
}
/// Fetches the content of a notification.
///
/// This will first try to get the notification using a short-lived sliding
/// sync, and if the sliding-sync can't find the event, then it'll use a
/// `/context` query to find the event with associated member information.
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
match self.get_notification_with_sliding_sync(room_id, event_id).await? {
NotificationStatus::Event(event) => Ok(Some(event)),
NotificationStatus::EventFilteredOut => Ok(None),
NotificationStatus::EventNotFound => {
self.get_notification_with_context(room_id, event_id).await
}
}
}
/// Run an encryption sync loop, in case an event is still encrypted.
///
/// Will return true if and only:
/// - retry_decryption was enabled,
/// - the event was encrypted,
/// - we successfully ran an encryption sync.
async fn maybe_retry_decryption(
&self,
room: &Room,
raw_event: &Raw<AnySyncTimelineEvent>,
) -> Result<Option<TimelineEvent>, Error> {
if !self.retry_decryption {
return Ok(None);
}
let event: AnySyncTimelineEvent =
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?;
let event_type = event.event_type();
let is_still_encrypted =
matches!(event_type, ruma::events::TimelineEventType::RoomEncrypted);
#[cfg(feature = "unstable-msc3956")]
let is_still_encrypted =
is_still_encrypted || matches!(event_type, ruma::events::TimelineEventType::Encrypted);
if !is_still_encrypted {
return Ok(None);
}
// The message is still encrypted, and the client is configured to retry
// decryption.
//
// Spawn an `EncryptionSync` that runs two iterations of the sliding sync loop:
// - the first iteration allows to get SS events as well as send e2ee requests.
// - the second one let the SS proxy forward events triggered by the sending of
// e2ee requests.
//
// Keep timeouts small for both, since we might be short on time.
let with_locking = WithLocking::from(self.with_cross_process_lock);
let encryption_sync = EncryptionSync::new(
Self::LOCK_ID.to_owned(),
self.client.clone(),
Some((Duration::from_secs(3), Duration::from_secs(4))),
with_locking,
)
.await;
// Just log out errors, but don't have them abort the notification processing:
// an undecrypted notification is still better than no
// notifications.
match encryption_sync {
Ok(sync) => match sync.run_fixed_iterations(2).await {
Ok(()) => {
let new_event = room.decrypt_event(raw_event.cast_ref()).await?;
Ok(Some(new_event))
}
Err(err) => {
tracing::warn!(
"error when running encryption_sync in get_notification: {err:#}"
);
Ok(None)
}
},
Err(err) => {
tracing::warn!("error when building encryption_sync in get_notification: {err:#}",);
Ok(None)
}
}
}
/// Try to run a sliding sync (without encryption) to retrieve the event
/// from the notification.
///
/// This works by requesting explicit state that'll be useful for building
/// the `NotificationItem`, and subscribing to the room which the
/// notification relates to.
async fn try_sliding_sync(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<RawNotificationEvent>, Error> {
// Serialize all the calls to this method by taking a lock at the beginning,
// that will be dropped later.
let _guard = self.sliding_sync_mutex.lock().await;
// Set up a sliding sync that only subscribes to the room that had the
// notification, so we can figure out the full event and associated
// information.
let notification = Arc::new(Mutex::new(None));
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let timeline_event_handler =
self.client.add_event_handler(move |raw: Raw<AnySyncTimelineEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() =
Some(RawNotificationEvent::Timeline(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let stripped_member_handler =
self.client.add_event_handler(move |raw: Raw<StrippedRoomMemberEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() = Some(RawNotificationEvent::Invite(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
// Room power levels are necessary to build the push context.
let required_state = vec![
(StateEventType::RoomAvatar, "".to_owned()),
(StateEventType::RoomEncryption, "".to_owned()),
(StateEventType::RoomMember, "$LAZY".to_owned()),
(StateEventType::RoomMember, "$ME".to_owned()),
(StateEventType::RoomCanonicalAlias, "".to_owned()),
(StateEventType::RoomName, "".to_owned()),
(StateEventType::RoomPowerLevels, "".to_owned()),
];
let invites = SlidingSyncList::builder("invites")
.sync_mode(SlidingSyncMode::new_selective().add_range(0..=16))
.timeline_limit(8)
.required_state(required_state.clone())
.filters(Some(assign!(SyncRequestListFilters::default(), {
is_invite: Some(true),
is_tombstoned: Some(false),
not_room_types: vec!["m.space".to_owned()],
})))
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]);
let sync = self
.client
.sliding_sync(Self::CONNECTION_ID)?
.poll_timeout(Duration::from_secs(1))
.network_timeout(Duration::from_secs(3))
.with_account_data_extension(
assign!(AccountDataConfig::default(), { enabled: Some(true) }),
)
.add_list(invites)
.build()
.await?;
sync.subscribe_to_room(
room_id.to_owned(),
Some(assign!(RoomSubscription::default(), {
required_state,
timeline_limit: Some(uint!(16))
})),
);
let mut remaining_attempts = 3;
let stream = sync.sync();
pin_mut!(stream);
loop {
if stream.next().await.is_none() {
// Sliding sync aborted early.
break;
}
if notification.lock().unwrap().is_some() {
// We got the event.
break;
}
remaining_attempts -= 1;
if remaining_attempts == 0 {
// We're out of luck.
break;
}
}
self.client.remove_event_handler(stripped_member_handler);
self.client.remove_event_handler(timeline_event_handler);
let maybe_event = notification.lock().unwrap().take();
Ok(maybe_event)
}
/// Get a full notification, given a room id and event id.
///
/// This will run a small sliding sync to retrieve the content of the event,
/// along with extra data to form a rich notification context.
pub async fn get_notification_with_sliding_sync(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<NotificationStatus, Error> {
tracing::info!("fetching notification event with a sliding sync");
let Some(mut raw_event) = self.try_sliding_sync(room_id, event_id).await? else {
return Ok(NotificationStatus::EventNotFound);
};
// At this point it should have been added by the sync, if it's not, give up.
let Some(room) = self.client.get_room(room_id) else { return Err(Error::UnknownRoom) };
let push_actions = match &raw_event {
RawNotificationEvent::Timeline(timeline_event) => {
// Timeline events may be encrypted, so make sure they get decrypted first.
if let Some(timeline_event) = | timeline_event.push_actions
} else {
room.event_push_actions(timeline_event).await?
}
}
RawNotificationEvent::Invite(invite_event) => {
// Invite events can't be encrypted, so they should be in clear text.
room.event_push_actions(invite_event).await?
}
};
if let Some(push_actions) = &push_actions {
if self.filter_by_push_rules && !push_actions.iter().any(|a| a.should_notify()) {
return Ok(NotificationStatus::EventFilteredOut);
}
}
Ok(NotificationStatus::Event(
NotificationItem::new(&room, &raw_event, push_actions.as_deref(), Vec::new()).await?,
))
}
/// Retrieve a notification using a `/context` query.
///
/// This is for clients that are already running other sliding syncs in the
/// same process, so that most of the contextual information for the
/// notification should already be there. In particular, the room containing
/// the event MUST be known (via a sliding sync for invites, or another
/// sliding sync).
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification_with_context(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
tracing::info!("fetching notification event with a /context query");
// See above comment.
let Some(room) = self.parent_client.get_room(room_id) else {
return Err(Error::UnknownRoom);
};
let (mut timeline_event, state_events) =
room.event_with_context(event_id, true).await?.ok_or(Error::ContextMissingEvent)?;
if let Some(decrypted_event) =
self.maybe_retry_decryption(&room, timeline_event.event.cast_ref()).await?
{
timeline_event = decrypted_event;
}
if self.filter_by_push_rules
&& !timeline_event
.push_actions
.as_ref()
.is_some_and(|actions| actions.iter().any(|a| a.should_notify()))
{
return Ok(None);
}
Ok(Some(
NotificationItem::new(
&room,
&RawNotificationEvent::Timeline(timeline_event.event.cast()),
timeline_event.push_actions.as_deref(),
state_events,
)
.await?,
))
}
}
#[derive(Debug)]
pub enum NotificationStatus {
Event(NotificationItem),
EventNotFound,
EventFilteredOut,
}
/// Builder for a `NotificationClient`.
///
/// Fields have the same meaning as in `NotificationClient`.
#[derive(Clone)]
pub struct NotificationClientBuilder {
/// SDK client that uses an in-memory state store, to be used with the
/// sliding sync method.
client: Client,
/// SDK client that uses the same state store as the caller's context.
parent_client: Client,
retry_decryption: bool,
with_cross_process_lock: bool,
filter_by_push_rules: bool,
}
impl NotificationClientBuilder {
async fn new(parent_client: Client) -> Result<Self, Error> {
let client = parent_client.notification_client().await?;
Ok(Self {
client,
parent_client,
retry_decryption: false,
with_cross_process_lock: false,
filter_by_push_rules: false,
})
}
/// Filter out the notification event according to the push rules present in
/// the event.
pub fn filter_by_push_rules(mut self) -> Self {
self.filter_by_push_rules = true;
self
}
/// Automatically retry decryption once, if the notification was received
/// encrypted.
///
/// The boolean indicates whether we're making use of a cross-process lock
/// for the crypto-store. This should be set to true, if and only if,
/// the notification is received in a process that's different from the
/// main app.
pub fn retry_decryption(mut self, with_cross_process_lock: bool) -> Self {
self.retry_decryption = true;
self.with_cross_process_lock = with_cross_process_lock;
self
}
/// Finishes configuring the `NotificationClient`.
pub fn build(self) -> NotificationClient {
NotificationClient {
client: self.client,
parent_client: self.parent_client,
with_cross_process_lock: self.with_cross_process_lock,
filter_by_push_rules: self.filter_by_push_rules,
retry_decryption: self.retry_decryption,
sliding_sync_mutex: AsyncMutex::new(()),
}
}
}
enum RawNotificationEvent {
Timeline(Raw<AnySyncTimelineEvent>),
Invite(Raw<StrippedRoomMemberEvent>),
}
#[derive(Debug)]
pub enum NotificationEvent {
Timeline(AnySyncTimelineEvent),
Invite(StrippedRoomMemberEvent),
}
impl NotificationEvent {
pub fn sender(&self) -> &UserId {
match self {
NotificationEvent::Timeline(ev) => ev.sender(),
NotificationEvent::Invite(ev) => &ev.sender,
}
}
}
/// A notification with its full content.
#[derive(Debug)]
pub struct NotificationItem {
/// Underlying Ruma event.
pub event: NotificationEvent,
/// Display name of the sender.
pub sender_display_name: Option<String>,
/// Avatar URL of the sender.
pub sender_avatar_url: Option<String>,
/// Room display name.
pub room_display_name: String,
/// Room avatar URL.
pub room_avatar_url: Option<String>,
/// Room canonical alias.
pub room_canonical_alias: Option<String>,
/// Is this room encrypted?
pub is_room_encrypted: Option<bool>,
/// Is this room considered a direct message?
pub is_direct_message_room: bool,
/// Numbers of members who joined the room.
pub joined_members_count: u64,
/// Is it a noisy notification? (i.e. does any push action contain a sound
/// action)
///
/// It is set if and only if the push actions could be determined.
pub is_noisy: Option<bool>,
}
impl NotificationItem {
async fn new(
room: &Room,
raw_event: &RawNotificationEvent,
push_actions: Option<&[Action]>,
state_events: Vec<Raw<AnyStateEvent>>,
) -> Result<Self, Error> {
let event = match raw_event {
RawNotificationEvent::Timeline(raw_event) => NotificationEvent::Timeline(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
RawNotificationEvent::Invite(raw_event) => NotificationEvent::Invite(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
};
let sender = match room.state() {
RoomState::Invited => room.invite_details().await?.inviter,
_ => room.get_member_no_sync(event.sender()).await?,
};
let (mut sender_display_name, mut sender_avatar_url) = match &sender {
Some(sender) => (
sender.display_name().map(|s| s.to_owned()),
sender.avatar_url().map(|s| s.to_string()),
),
None => (None, None),
};
if sender_display_name.is_none() || sender_avatar_url.is_none() {
let sender_id = event.sender();
for ev in state_events {
let Ok(ev) = ev.deserialize() else {
continue;
};
if ev.sender() != sender_id {
continue;
}
if let AnyFullStateEventContent::RoomMember(FullStateEventContent::Original {
content,
..
}) = ev.content()
{
if sender_display_name.is_none() {
sender_display_name = content.displayname;
}
if sender_avatar_url.is_none() {
sender_avatar_url = content.avatar_url.map(|url| url.to_string());
}
}
}
}
let is_noisy = push_actions.map(|actions| actions.iter().any(|a| a.sound().is_some()));
let item = NotificationItem {
event,
sender_display_name,
sender_avatar_url,
room_display_name: room.display_name().await?.to_string(),
room_avatar_url: room.avatar_url().map(|s| s.to_string()),
room_canonical_alias: room.canonical_alias().map(|c| c.to_string()),
is_direct_message_room: room.is_direct().await?,
is_room_encrypted: room.is_encrypted().await.ok(),
joined_members_count: room.joined_members_count(),
is_noisy,
};
Ok(item)
}
}
/// An error for the [`NotificationClient`].
#[derive(Debug, Error)]
pub enum Error {
#[error(transparent)]
BuildingLocalClient(ClientBuildError),
/// The room associated to this event wasn't found.
#[error("unknown room for a notification")]
UnknownRoom,
/// The Ruma event contained within this notification couldn't be parsed.
#[error("invalid ruma event")]
InvalidRumaEvent,
/// When calling `get_notification_with_sliding_sync`, the room was missing
/// in the response.
#[error("the sliding sync response doesn't include the target room")]
SlidingSyncEmptyRoom,
#[error("the event was missing in the `/context` query")]
ContextMissingEvent,
/// An error forwarded from the client.
#[error(transparent)]
SdkError(#[from] matrix_sdk::Error),
/// An error forwarded from the underlying state store.
#[error(transparent)]
StoreError(#[from] StoreError),
} | self.maybe_retry_decryption(&room, timeline_event).await?
{
raw_event = RawNotificationEvent::Timeline(timeline_event.event.cast()); | random_line_split |
gateway_bft_test.go | /*
Copyright IBM Corp All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package gateway
import (
"context"
"os"
"path/filepath"
"syscall"
"time"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-protos-go/common"
"github.com/hyperledger/fabric-protos-go/gateway"
"github.com/hyperledger/fabric-protos-go/peer"
"github.com/hyperledger/fabric/integration/channelparticipation"
"github.com/hyperledger/fabric/integration/nwo"
"github.com/hyperledger/fabric/integration/nwo/commands"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
ginkgomon "github.com/tedsuo/ifrit/ginkgomon_v2"
"github.com/tedsuo/ifrit/grouper"
"google.golang.org/grpc/status"
)
var _ = Describe("GatewayService with BFT ordering service", func() {
var (
testDir string
network *nwo.Network
ordererProcesses map[string]ifrit.Process
peerProcesses ifrit.Process
channel = "testchannel1"
)
BeforeEach(func() {
var err error
testDir, err = os.MkdirTemp("", "gateway")
Expect(err).NotTo(HaveOccurred())
client, err := docker.NewClientFromEnv()
Expect(err).NotTo(HaveOccurred())
networkConfig := nwo.MultiNodeSmartBFT()
network = nwo.New(networkConfig, testDir, client, StartPort(), components)
network.GenerateConfigTree()
network.Bootstrap()
ordererProcesses = make(map[string]ifrit.Process)
for _, orderer := range network.Orderers {
runner := network.OrdererRunner(orderer)
proc := ifrit.Invoke(runner)
ordererProcesses[orderer.Name] = proc
Eventually(proc.Ready(), network.EventuallyTimeout).Should(BeClosed())
}
peerGroupRunner, _ := peerGroupRunners(network)
peerProcesses = ifrit.Invoke(peerGroupRunner)
Eventually(peerProcesses.Ready(), network.EventuallyTimeout).Should(BeClosed())
By("Joining orderers to channel")
joinChannel(network, channel)
By("Joining peers to channel")
network.JoinChannel(channel, network.Orderers[0], network.PeersWithChannel(channel)...)
orderer := network.Orderers[0]
By("Deploying chaincode")
chaincode := nwo.Chaincode{
Name: "gatewaycc",
Version: "0.0",
Path: components.Build("github.com/hyperledger/fabric/integration/chaincode/simple/cmd"),
Lang: "binary",
PackageFile: filepath.Join(testDir, "gatewaycc.tar.gz"),
Ctor: `{"Args":["init","a","100","b","200"]}`,
SignaturePolicy: `AND ('Org1MSP.peer')`,
Sequence: "1",
InitRequired: true,
Label: "gatewaycc_label",
}
nwo.DeployChaincode(network, channel, orderer, chaincode)
})
AfterEach(func() {
if peerProcesses != nil {
peerProcesses.Signal(syscall.SIGTERM)
Eventually(peerProcesses.Wait(), network.EventuallyTimeout).Should(Receive())
}
if network != nil {
network.Cleanup()
}
for _, ordererInstance := range ordererProcesses {
ordererInstance.Signal(syscall.SIGTERM)
Eventually(ordererInstance.Wait(), network.EventuallyTimeout).Should(Receive())
}
os.RemoveAll(testDir)
})
It("Submit transaction", func() {
ctx, cancel := context.WithTimeout(context.Background(), network.EventuallyTimeout)
defer cancel()
org1Peer0 := network.Peer("Org1", "peer0")
conn := network.PeerClientConn(org1Peer0)
defer conn.Close()
gw := gateway.NewGatewayClient(conn)
signer := network.PeerUserSigner(org1Peer0, "User1")
By("Submitting a new transaction")
submitRequest := prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err := gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result := evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("90")))
By("Resubmitting the same transaction")
_, err = gw.Submit(ctx, submitRequest)
Expect(err).To(HaveOccurred())
rpcErr := status.Convert(err)
Expect(rpcErr.Message()).To(Equal("insufficient number of orderers could successfully process transaction to satisfy quorum requirement"))
Expect(len(rpcErr.Details())).To(BeNumerically(">", 0))
Expect(rpcErr.Details()[0].(*gateway.ErrorDetail).Message).To(Equal("received unsuccessful response from orderer: status=SERVICE_UNAVAILABLE, info=failed to submit request: request already processed"))
By("Shutting down orderer2")
ordererProcesses["orderer2"].Signal(syscall.SIGTERM)
Eventually(ordererProcesses["orderer2"].Wait(), network.EventuallyTimeout).Should(Receive())
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("80")))
By("Shutting down orderer1 - no longer quorate")
ordererProcesses["orderer1"].Signal(syscall.SIGTERM)
Eventually(ordererProcesses["orderer1"].Wait(), network.EventuallyTimeout).Should(Receive())
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).To(HaveOccurred())
rpcErr = status.Convert(err)
Expect(rpcErr.Message()).To(Equal("insufficient number of orderers could successfully process transaction to satisfy quorum requirement"))
By("Restarting orderer2")
runner := network.OrdererRunner(network.Orderers[1])
ordererProcesses["orderer2"] = ifrit.Invoke(runner)
Eventually(ordererProcesses["orderer2"].Ready(), network.EventuallyTimeout).Should(BeClosed())
time.Sleep(time.Second)
By("Resubmitting the same transaction")
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("70")))
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("60")))
})
})
func prepareTransaction(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
chaincode string,
transactionName string,
arguments []string,
) *gateway.SubmitRequest {
args := [][]byte{}
for _, arg := range arguments {
args = append(args, []byte(arg))
}
proposedTransaction, transactionID := NewProposedTransaction(
signer,
channel,
chaincode,
transactionName,
nil,
args...,
)
endorseRequest := &gateway.EndorseRequest{
TransactionId: transactionID,
ChannelId: channel,
ProposedTransaction: proposedTransaction,
}
endorseResponse, err := gatewayClient.Endorse(ctx, endorseRequest)
Expect(err).NotTo(HaveOccurred())
preparedTransaction := endorseResponse.GetPreparedTransaction()
preparedTransaction.Signature, err = signer.Sign(preparedTransaction.Payload)
Expect(err).NotTo(HaveOccurred())
return &gateway.SubmitRequest{
TransactionId: transactionID,
ChannelId: channel,
PreparedTransaction: preparedTransaction,
}
}
func waitForCommit(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
transactionId string,
) {
idBytes, err := signer.Serialize()
Expect(err).NotTo(HaveOccurred())
statusRequest := &gateway.CommitStatusRequest{
ChannelId: channel,
Identity: idBytes,
TransactionId: transactionId,
}
statusRequestBytes, err := proto.Marshal(statusRequest)
Expect(err).NotTo(HaveOccurred())
signature, err := signer.Sign(statusRequestBytes)
Expect(err).NotTo(HaveOccurred())
signedStatusRequest := &gateway.SignedCommitStatusRequest{
Request: statusRequestBytes,
Signature: signature,
}
statusResponse, err := gatewayClient.CommitStatus(ctx, signedStatusRequest)
Expect(err).NotTo(HaveOccurred())
Expect(statusResponse.Result).To(Equal(peer.TxValidationCode_VALID))
}
func evaluateTransaction(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
chaincode string,
transactionName string,
arguments []string,
) *peer.Response {
args := [][]byte{}
for _, arg := range arguments {
args = append(args, []byte(arg))
}
proposedTransaction, transactionID := NewProposedTransaction(
signer,
channel,
chaincode,
transactionName,
nil,
args...,
)
evaluateRequest := &gateway.EvaluateRequest{
TransactionId: transactionID,
ChannelId: channel,
ProposedTransaction: proposedTransaction,
}
evaluateResponse, err := gatewayClient.Evaluate(ctx, evaluateRequest)
Expect(err).NotTo(HaveOccurred())
return evaluateResponse.GetResult()
}
func peerGroupRunners(n *nwo.Network) (ifrit.Runner, []*ginkgomon.Runner) {
runners := []*ginkgomon.Runner{}
members := grouper.Members{}
for _, p := range n.Peers {
runner := n.PeerRunner(p)
members = append(members, grouper.Member{Name: p.ID(), Runner: runner})
runners = append(runners, runner)
}
return grouper.NewParallel(syscall.SIGTERM, members), runners
}
func joinChannel(network *nwo.Network, channel string) {
sess, err := network.ConfigTxGen(commands.OutputBlock{
ChannelID: channel,
Profile: network.Profiles[0].Name,
ConfigPath: network.RootDir,
OutputBlock: network.OutputBlockPath(channel),
})
Expect(err).NotTo(HaveOccurred())
Eventually(sess, network.EventuallyTimeout).Should(gexec.Exit(0))
genesisBlockBytes, err := os.ReadFile(network.OutputBlockPath(channel))
Expect(err).NotTo(HaveOccurred())
genesisBlock := &common.Block{}
err = proto.Unmarshal(genesisBlockBytes, genesisBlock)
Expect(err).NotTo(HaveOccurred())
expectedChannelInfoPT := channelparticipation.ChannelInfo{
Name: channel,
URL: "/participation/v1/channels/" + channel,
Status: "active",
ConsensusRelation: "consenter",
Height: 1,
}
for _, o := range network.Orderers |
}
| {
By("joining " + o.Name + " to channel as a consenter")
channelparticipation.Join(network, o, channel, genesisBlock, expectedChannelInfoPT)
channelInfo := channelparticipation.ListOne(network, o, channel)
Expect(channelInfo).To(Equal(expectedChannelInfoPT))
} | conditional_block |
gateway_bft_test.go | /*
Copyright IBM Corp All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package gateway
import (
"context"
"os"
"path/filepath"
"syscall"
"time"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-protos-go/common"
"github.com/hyperledger/fabric-protos-go/gateway"
"github.com/hyperledger/fabric-protos-go/peer"
"github.com/hyperledger/fabric/integration/channelparticipation"
"github.com/hyperledger/fabric/integration/nwo"
"github.com/hyperledger/fabric/integration/nwo/commands"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
ginkgomon "github.com/tedsuo/ifrit/ginkgomon_v2"
"github.com/tedsuo/ifrit/grouper"
"google.golang.org/grpc/status"
)
var _ = Describe("GatewayService with BFT ordering service", func() {
var (
testDir string
network *nwo.Network
ordererProcesses map[string]ifrit.Process
peerProcesses ifrit.Process
channel = "testchannel1"
)
BeforeEach(func() {
var err error
testDir, err = os.MkdirTemp("", "gateway")
Expect(err).NotTo(HaveOccurred())
client, err := docker.NewClientFromEnv()
Expect(err).NotTo(HaveOccurred())
networkConfig := nwo.MultiNodeSmartBFT()
network = nwo.New(networkConfig, testDir, client, StartPort(), components)
network.GenerateConfigTree()
network.Bootstrap()
ordererProcesses = make(map[string]ifrit.Process)
for _, orderer := range network.Orderers {
runner := network.OrdererRunner(orderer)
proc := ifrit.Invoke(runner)
ordererProcesses[orderer.Name] = proc
Eventually(proc.Ready(), network.EventuallyTimeout).Should(BeClosed())
}
peerGroupRunner, _ := peerGroupRunners(network)
peerProcesses = ifrit.Invoke(peerGroupRunner)
Eventually(peerProcesses.Ready(), network.EventuallyTimeout).Should(BeClosed())
By("Joining orderers to channel")
joinChannel(network, channel)
By("Joining peers to channel")
network.JoinChannel(channel, network.Orderers[0], network.PeersWithChannel(channel)...)
orderer := network.Orderers[0]
By("Deploying chaincode")
chaincode := nwo.Chaincode{
Name: "gatewaycc",
Version: "0.0",
Path: components.Build("github.com/hyperledger/fabric/integration/chaincode/simple/cmd"),
Lang: "binary",
PackageFile: filepath.Join(testDir, "gatewaycc.tar.gz"),
Ctor: `{"Args":["init","a","100","b","200"]}`,
SignaturePolicy: `AND ('Org1MSP.peer')`,
Sequence: "1",
InitRequired: true,
Label: "gatewaycc_label",
}
nwo.DeployChaincode(network, channel, orderer, chaincode)
})
AfterEach(func() {
if peerProcesses != nil {
peerProcesses.Signal(syscall.SIGTERM)
Eventually(peerProcesses.Wait(), network.EventuallyTimeout).Should(Receive())
}
if network != nil {
network.Cleanup()
}
for _, ordererInstance := range ordererProcesses {
ordererInstance.Signal(syscall.SIGTERM)
Eventually(ordererInstance.Wait(), network.EventuallyTimeout).Should(Receive())
}
os.RemoveAll(testDir)
})
It("Submit transaction", func() {
ctx, cancel := context.WithTimeout(context.Background(), network.EventuallyTimeout)
defer cancel()
org1Peer0 := network.Peer("Org1", "peer0")
conn := network.PeerClientConn(org1Peer0)
defer conn.Close()
gw := gateway.NewGatewayClient(conn)
signer := network.PeerUserSigner(org1Peer0, "User1")
By("Submitting a new transaction")
submitRequest := prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err := gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result := evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("90")))
By("Resubmitting the same transaction")
_, err = gw.Submit(ctx, submitRequest)
Expect(err).To(HaveOccurred())
rpcErr := status.Convert(err)
Expect(rpcErr.Message()).To(Equal("insufficient number of orderers could successfully process transaction to satisfy quorum requirement"))
Expect(len(rpcErr.Details())).To(BeNumerically(">", 0))
Expect(rpcErr.Details()[0].(*gateway.ErrorDetail).Message).To(Equal("received unsuccessful response from orderer: status=SERVICE_UNAVAILABLE, info=failed to submit request: request already processed"))
By("Shutting down orderer2")
ordererProcesses["orderer2"].Signal(syscall.SIGTERM)
Eventually(ordererProcesses["orderer2"].Wait(), network.EventuallyTimeout).Should(Receive())
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("80")))
By("Shutting down orderer1 - no longer quorate")
ordererProcesses["orderer1"].Signal(syscall.SIGTERM)
Eventually(ordererProcesses["orderer1"].Wait(), network.EventuallyTimeout).Should(Receive())
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).To(HaveOccurred())
rpcErr = status.Convert(err)
Expect(rpcErr.Message()).To(Equal("insufficient number of orderers could successfully process transaction to satisfy quorum requirement"))
By("Restarting orderer2")
runner := network.OrdererRunner(network.Orderers[1])
ordererProcesses["orderer2"] = ifrit.Invoke(runner)
Eventually(ordererProcesses["orderer2"].Ready(), network.EventuallyTimeout).Should(BeClosed())
time.Sleep(time.Second)
By("Resubmitting the same transaction")
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("70")))
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("60")))
})
})
func prepareTransaction(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
chaincode string,
transactionName string,
arguments []string,
) *gateway.SubmitRequest {
args := [][]byte{}
for _, arg := range arguments {
args = append(args, []byte(arg))
}
proposedTransaction, transactionID := NewProposedTransaction(
signer,
channel,
chaincode,
transactionName,
nil,
args...,
)
endorseRequest := &gateway.EndorseRequest{
TransactionId: transactionID,
ChannelId: channel,
ProposedTransaction: proposedTransaction,
}
endorseResponse, err := gatewayClient.Endorse(ctx, endorseRequest)
Expect(err).NotTo(HaveOccurred())
preparedTransaction := endorseResponse.GetPreparedTransaction()
preparedTransaction.Signature, err = signer.Sign(preparedTransaction.Payload)
Expect(err).NotTo(HaveOccurred())
return &gateway.SubmitRequest{
TransactionId: transactionID,
ChannelId: channel,
PreparedTransaction: preparedTransaction,
}
}
func waitForCommit(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
transactionId string,
) {
idBytes, err := signer.Serialize()
Expect(err).NotTo(HaveOccurred())
statusRequest := &gateway.CommitStatusRequest{
ChannelId: channel,
Identity: idBytes,
TransactionId: transactionId,
}
statusRequestBytes, err := proto.Marshal(statusRequest)
Expect(err).NotTo(HaveOccurred())
signature, err := signer.Sign(statusRequestBytes)
Expect(err).NotTo(HaveOccurred())
signedStatusRequest := &gateway.SignedCommitStatusRequest{
Request: statusRequestBytes,
Signature: signature,
}
statusResponse, err := gatewayClient.CommitStatus(ctx, signedStatusRequest)
Expect(err).NotTo(HaveOccurred())
Expect(statusResponse.Result).To(Equal(peer.TxValidationCode_VALID))
}
func evaluateTransaction(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
chaincode string,
transactionName string,
arguments []string,
) *peer.Response {
args := [][]byte{}
for _, arg := range arguments {
args = append(args, []byte(arg))
}
proposedTransaction, transactionID := NewProposedTransaction(
signer,
channel,
chaincode,
transactionName,
nil,
args...,
)
evaluateRequest := &gateway.EvaluateRequest{
TransactionId: transactionID,
ChannelId: channel,
ProposedTransaction: proposedTransaction,
}
evaluateResponse, err := gatewayClient.Evaluate(ctx, evaluateRequest)
Expect(err).NotTo(HaveOccurred())
return evaluateResponse.GetResult()
}
func peerGroupRunners(n *nwo.Network) (ifrit.Runner, []*ginkgomon.Runner) {
runners := []*ginkgomon.Runner{}
members := grouper.Members{}
for _, p := range n.Peers {
runner := n.PeerRunner(p)
members = append(members, grouper.Member{Name: p.ID(), Runner: runner})
runners = append(runners, runner)
}
return grouper.NewParallel(syscall.SIGTERM, members), runners
}
func joinChannel(network *nwo.Network, channel string) | {
sess, err := network.ConfigTxGen(commands.OutputBlock{
ChannelID: channel,
Profile: network.Profiles[0].Name,
ConfigPath: network.RootDir,
OutputBlock: network.OutputBlockPath(channel),
})
Expect(err).NotTo(HaveOccurred())
Eventually(sess, network.EventuallyTimeout).Should(gexec.Exit(0))
genesisBlockBytes, err := os.ReadFile(network.OutputBlockPath(channel))
Expect(err).NotTo(HaveOccurred())
genesisBlock := &common.Block{}
err = proto.Unmarshal(genesisBlockBytes, genesisBlock)
Expect(err).NotTo(HaveOccurred())
expectedChannelInfoPT := channelparticipation.ChannelInfo{
Name: channel,
URL: "/participation/v1/channels/" + channel,
Status: "active",
ConsensusRelation: "consenter",
Height: 1,
}
for _, o := range network.Orderers {
By("joining " + o.Name + " to channel as a consenter")
channelparticipation.Join(network, o, channel, genesisBlock, expectedChannelInfoPT)
channelInfo := channelparticipation.ListOne(network, o, channel)
Expect(channelInfo).To(Equal(expectedChannelInfoPT))
}
} | identifier_body | |
gateway_bft_test.go | /*
Copyright IBM Corp All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package gateway
import (
"context"
"os"
"path/filepath"
"syscall"
"time"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-protos-go/common"
"github.com/hyperledger/fabric-protos-go/gateway"
"github.com/hyperledger/fabric-protos-go/peer"
"github.com/hyperledger/fabric/integration/channelparticipation"
"github.com/hyperledger/fabric/integration/nwo"
"github.com/hyperledger/fabric/integration/nwo/commands"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
ginkgomon "github.com/tedsuo/ifrit/ginkgomon_v2"
"github.com/tedsuo/ifrit/grouper"
"google.golang.org/grpc/status"
)
var _ = Describe("GatewayService with BFT ordering service", func() {
var (
testDir string
network *nwo.Network
ordererProcesses map[string]ifrit.Process
peerProcesses ifrit.Process
channel = "testchannel1"
)
BeforeEach(func() {
var err error
testDir, err = os.MkdirTemp("", "gateway")
Expect(err).NotTo(HaveOccurred())
client, err := docker.NewClientFromEnv()
Expect(err).NotTo(HaveOccurred())
networkConfig := nwo.MultiNodeSmartBFT()
network = nwo.New(networkConfig, testDir, client, StartPort(), components)
network.GenerateConfigTree()
network.Bootstrap()
ordererProcesses = make(map[string]ifrit.Process)
for _, orderer := range network.Orderers {
runner := network.OrdererRunner(orderer)
proc := ifrit.Invoke(runner)
ordererProcesses[orderer.Name] = proc
Eventually(proc.Ready(), network.EventuallyTimeout).Should(BeClosed())
}
peerGroupRunner, _ := peerGroupRunners(network)
peerProcesses = ifrit.Invoke(peerGroupRunner)
Eventually(peerProcesses.Ready(), network.EventuallyTimeout).Should(BeClosed())
By("Joining orderers to channel")
joinChannel(network, channel)
By("Joining peers to channel")
network.JoinChannel(channel, network.Orderers[0], network.PeersWithChannel(channel)...)
orderer := network.Orderers[0]
By("Deploying chaincode")
chaincode := nwo.Chaincode{
Name: "gatewaycc",
Version: "0.0",
Path: components.Build("github.com/hyperledger/fabric/integration/chaincode/simple/cmd"),
Lang: "binary",
PackageFile: filepath.Join(testDir, "gatewaycc.tar.gz"),
Ctor: `{"Args":["init","a","100","b","200"]}`,
SignaturePolicy: `AND ('Org1MSP.peer')`,
Sequence: "1",
InitRequired: true,
Label: "gatewaycc_label",
}
nwo.DeployChaincode(network, channel, orderer, chaincode)
})
AfterEach(func() {
if peerProcesses != nil {
peerProcesses.Signal(syscall.SIGTERM)
Eventually(peerProcesses.Wait(), network.EventuallyTimeout).Should(Receive())
}
if network != nil {
network.Cleanup()
}
for _, ordererInstance := range ordererProcesses {
ordererInstance.Signal(syscall.SIGTERM)
Eventually(ordererInstance.Wait(), network.EventuallyTimeout).Should(Receive())
}
os.RemoveAll(testDir)
})
It("Submit transaction", func() {
ctx, cancel := context.WithTimeout(context.Background(), network.EventuallyTimeout)
defer cancel()
org1Peer0 := network.Peer("Org1", "peer0")
conn := network.PeerClientConn(org1Peer0)
defer conn.Close()
gw := gateway.NewGatewayClient(conn)
signer := network.PeerUserSigner(org1Peer0, "User1")
By("Submitting a new transaction")
submitRequest := prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err := gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result := evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("90")))
By("Resubmitting the same transaction")
_, err = gw.Submit(ctx, submitRequest)
Expect(err).To(HaveOccurred())
rpcErr := status.Convert(err)
Expect(rpcErr.Message()).To(Equal("insufficient number of orderers could successfully process transaction to satisfy quorum requirement"))
Expect(len(rpcErr.Details())).To(BeNumerically(">", 0))
Expect(rpcErr.Details()[0].(*gateway.ErrorDetail).Message).To(Equal("received unsuccessful response from orderer: status=SERVICE_UNAVAILABLE, info=failed to submit request: request already processed"))
By("Shutting down orderer2")
ordererProcesses["orderer2"].Signal(syscall.SIGTERM)
Eventually(ordererProcesses["orderer2"].Wait(), network.EventuallyTimeout).Should(Receive())
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("80")))
By("Shutting down orderer1 - no longer quorate")
ordererProcesses["orderer1"].Signal(syscall.SIGTERM)
Eventually(ordererProcesses["orderer1"].Wait(), network.EventuallyTimeout).Should(Receive())
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).To(HaveOccurred())
rpcErr = status.Convert(err)
Expect(rpcErr.Message()).To(Equal("insufficient number of orderers could successfully process transaction to satisfy quorum requirement"))
By("Restarting orderer2")
runner := network.OrdererRunner(network.Orderers[1])
ordererProcesses["orderer2"] = ifrit.Invoke(runner)
Eventually(ordererProcesses["orderer2"].Ready(), network.EventuallyTimeout).Should(BeClosed())
time.Sleep(time.Second)
By("Resubmitting the same transaction")
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("70")))
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("60")))
})
})
func | (
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
chaincode string,
transactionName string,
arguments []string,
) *gateway.SubmitRequest {
args := [][]byte{}
for _, arg := range arguments {
args = append(args, []byte(arg))
}
proposedTransaction, transactionID := NewProposedTransaction(
signer,
channel,
chaincode,
transactionName,
nil,
args...,
)
endorseRequest := &gateway.EndorseRequest{
TransactionId: transactionID,
ChannelId: channel,
ProposedTransaction: proposedTransaction,
}
endorseResponse, err := gatewayClient.Endorse(ctx, endorseRequest)
Expect(err).NotTo(HaveOccurred())
preparedTransaction := endorseResponse.GetPreparedTransaction()
preparedTransaction.Signature, err = signer.Sign(preparedTransaction.Payload)
Expect(err).NotTo(HaveOccurred())
return &gateway.SubmitRequest{
TransactionId: transactionID,
ChannelId: channel,
PreparedTransaction: preparedTransaction,
}
}
func waitForCommit(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
transactionId string,
) {
idBytes, err := signer.Serialize()
Expect(err).NotTo(HaveOccurred())
statusRequest := &gateway.CommitStatusRequest{
ChannelId: channel,
Identity: idBytes,
TransactionId: transactionId,
}
statusRequestBytes, err := proto.Marshal(statusRequest)
Expect(err).NotTo(HaveOccurred())
signature, err := signer.Sign(statusRequestBytes)
Expect(err).NotTo(HaveOccurred())
signedStatusRequest := &gateway.SignedCommitStatusRequest{
Request: statusRequestBytes,
Signature: signature,
}
statusResponse, err := gatewayClient.CommitStatus(ctx, signedStatusRequest)
Expect(err).NotTo(HaveOccurred())
Expect(statusResponse.Result).To(Equal(peer.TxValidationCode_VALID))
}
func evaluateTransaction(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
chaincode string,
transactionName string,
arguments []string,
) *peer.Response {
args := [][]byte{}
for _, arg := range arguments {
args = append(args, []byte(arg))
}
proposedTransaction, transactionID := NewProposedTransaction(
signer,
channel,
chaincode,
transactionName,
nil,
args...,
)
evaluateRequest := &gateway.EvaluateRequest{
TransactionId: transactionID,
ChannelId: channel,
ProposedTransaction: proposedTransaction,
}
evaluateResponse, err := gatewayClient.Evaluate(ctx, evaluateRequest)
Expect(err).NotTo(HaveOccurred())
return evaluateResponse.GetResult()
}
func peerGroupRunners(n *nwo.Network) (ifrit.Runner, []*ginkgomon.Runner) {
runners := []*ginkgomon.Runner{}
members := grouper.Members{}
for _, p := range n.Peers {
runner := n.PeerRunner(p)
members = append(members, grouper.Member{Name: p.ID(), Runner: runner})
runners = append(runners, runner)
}
return grouper.NewParallel(syscall.SIGTERM, members), runners
}
func joinChannel(network *nwo.Network, channel string) {
sess, err := network.ConfigTxGen(commands.OutputBlock{
ChannelID: channel,
Profile: network.Profiles[0].Name,
ConfigPath: network.RootDir,
OutputBlock: network.OutputBlockPath(channel),
})
Expect(err).NotTo(HaveOccurred())
Eventually(sess, network.EventuallyTimeout).Should(gexec.Exit(0))
genesisBlockBytes, err := os.ReadFile(network.OutputBlockPath(channel))
Expect(err).NotTo(HaveOccurred())
genesisBlock := &common.Block{}
err = proto.Unmarshal(genesisBlockBytes, genesisBlock)
Expect(err).NotTo(HaveOccurred())
expectedChannelInfoPT := channelparticipation.ChannelInfo{
Name: channel,
URL: "/participation/v1/channels/" + channel,
Status: "active",
ConsensusRelation: "consenter",
Height: 1,
}
for _, o := range network.Orderers {
By("joining " + o.Name + " to channel as a consenter")
channelparticipation.Join(network, o, channel, genesisBlock, expectedChannelInfoPT)
channelInfo := channelparticipation.ListOne(network, o, channel)
Expect(channelInfo).To(Equal(expectedChannelInfoPT))
}
}
| prepareTransaction | identifier_name |
gateway_bft_test.go | /*
Copyright IBM Corp All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package gateway
import (
"context"
"os"
"path/filepath"
"syscall"
"time"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-protos-go/common"
"github.com/hyperledger/fabric-protos-go/gateway"
"github.com/hyperledger/fabric-protos-go/peer"
"github.com/hyperledger/fabric/integration/channelparticipation"
"github.com/hyperledger/fabric/integration/nwo"
"github.com/hyperledger/fabric/integration/nwo/commands"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
ginkgomon "github.com/tedsuo/ifrit/ginkgomon_v2"
"github.com/tedsuo/ifrit/grouper"
"google.golang.org/grpc/status"
)
var _ = Describe("GatewayService with BFT ordering service", func() {
var (
testDir string
network *nwo.Network
ordererProcesses map[string]ifrit.Process
peerProcesses ifrit.Process
channel = "testchannel1"
) | BeforeEach(func() {
var err error
testDir, err = os.MkdirTemp("", "gateway")
Expect(err).NotTo(HaveOccurred())
client, err := docker.NewClientFromEnv()
Expect(err).NotTo(HaveOccurred())
networkConfig := nwo.MultiNodeSmartBFT()
network = nwo.New(networkConfig, testDir, client, StartPort(), components)
network.GenerateConfigTree()
network.Bootstrap()
ordererProcesses = make(map[string]ifrit.Process)
for _, orderer := range network.Orderers {
runner := network.OrdererRunner(orderer)
proc := ifrit.Invoke(runner)
ordererProcesses[orderer.Name] = proc
Eventually(proc.Ready(), network.EventuallyTimeout).Should(BeClosed())
}
peerGroupRunner, _ := peerGroupRunners(network)
peerProcesses = ifrit.Invoke(peerGroupRunner)
Eventually(peerProcesses.Ready(), network.EventuallyTimeout).Should(BeClosed())
By("Joining orderers to channel")
joinChannel(network, channel)
By("Joining peers to channel")
network.JoinChannel(channel, network.Orderers[0], network.PeersWithChannel(channel)...)
orderer := network.Orderers[0]
By("Deploying chaincode")
chaincode := nwo.Chaincode{
Name: "gatewaycc",
Version: "0.0",
Path: components.Build("github.com/hyperledger/fabric/integration/chaincode/simple/cmd"),
Lang: "binary",
PackageFile: filepath.Join(testDir, "gatewaycc.tar.gz"),
Ctor: `{"Args":["init","a","100","b","200"]}`,
SignaturePolicy: `AND ('Org1MSP.peer')`,
Sequence: "1",
InitRequired: true,
Label: "gatewaycc_label",
}
nwo.DeployChaincode(network, channel, orderer, chaincode)
})
AfterEach(func() {
if peerProcesses != nil {
peerProcesses.Signal(syscall.SIGTERM)
Eventually(peerProcesses.Wait(), network.EventuallyTimeout).Should(Receive())
}
if network != nil {
network.Cleanup()
}
for _, ordererInstance := range ordererProcesses {
ordererInstance.Signal(syscall.SIGTERM)
Eventually(ordererInstance.Wait(), network.EventuallyTimeout).Should(Receive())
}
os.RemoveAll(testDir)
})
It("Submit transaction", func() {
ctx, cancel := context.WithTimeout(context.Background(), network.EventuallyTimeout)
defer cancel()
org1Peer0 := network.Peer("Org1", "peer0")
conn := network.PeerClientConn(org1Peer0)
defer conn.Close()
gw := gateway.NewGatewayClient(conn)
signer := network.PeerUserSigner(org1Peer0, "User1")
By("Submitting a new transaction")
submitRequest := prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err := gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result := evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("90")))
By("Resubmitting the same transaction")
_, err = gw.Submit(ctx, submitRequest)
Expect(err).To(HaveOccurred())
rpcErr := status.Convert(err)
Expect(rpcErr.Message()).To(Equal("insufficient number of orderers could successfully process transaction to satisfy quorum requirement"))
Expect(len(rpcErr.Details())).To(BeNumerically(">", 0))
Expect(rpcErr.Details()[0].(*gateway.ErrorDetail).Message).To(Equal("received unsuccessful response from orderer: status=SERVICE_UNAVAILABLE, info=failed to submit request: request already processed"))
By("Shutting down orderer2")
ordererProcesses["orderer2"].Signal(syscall.SIGTERM)
Eventually(ordererProcesses["orderer2"].Wait(), network.EventuallyTimeout).Should(Receive())
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("80")))
By("Shutting down orderer1 - no longer quorate")
ordererProcesses["orderer1"].Signal(syscall.SIGTERM)
Eventually(ordererProcesses["orderer1"].Wait(), network.EventuallyTimeout).Should(Receive())
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).To(HaveOccurred())
rpcErr = status.Convert(err)
Expect(rpcErr.Message()).To(Equal("insufficient number of orderers could successfully process transaction to satisfy quorum requirement"))
By("Restarting orderer2")
runner := network.OrdererRunner(network.Orderers[1])
ordererProcesses["orderer2"] = ifrit.Invoke(runner)
Eventually(ordererProcesses["orderer2"].Ready(), network.EventuallyTimeout).Should(BeClosed())
time.Sleep(time.Second)
By("Resubmitting the same transaction")
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("70")))
By("Submitting a new transaction")
submitRequest = prepareTransaction(ctx, gw, signer, channel, "gatewaycc", "invoke", []string{"a", "b", "10"})
_, err = gw.Submit(ctx, submitRequest)
Expect(err).NotTo(HaveOccurred())
waitForCommit(ctx, gw, signer, channel, submitRequest.TransactionId)
By("Checking the ledger state")
result = evaluateTransaction(ctx, gw, signer, channel, "gatewaycc", "query", []string{"a"})
Expect(result.Payload).To(Equal([]byte("60")))
})
})
func prepareTransaction(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
chaincode string,
transactionName string,
arguments []string,
) *gateway.SubmitRequest {
args := [][]byte{}
for _, arg := range arguments {
args = append(args, []byte(arg))
}
proposedTransaction, transactionID := NewProposedTransaction(
signer,
channel,
chaincode,
transactionName,
nil,
args...,
)
endorseRequest := &gateway.EndorseRequest{
TransactionId: transactionID,
ChannelId: channel,
ProposedTransaction: proposedTransaction,
}
endorseResponse, err := gatewayClient.Endorse(ctx, endorseRequest)
Expect(err).NotTo(HaveOccurred())
preparedTransaction := endorseResponse.GetPreparedTransaction()
preparedTransaction.Signature, err = signer.Sign(preparedTransaction.Payload)
Expect(err).NotTo(HaveOccurred())
return &gateway.SubmitRequest{
TransactionId: transactionID,
ChannelId: channel,
PreparedTransaction: preparedTransaction,
}
}
func waitForCommit(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
transactionId string,
) {
idBytes, err := signer.Serialize()
Expect(err).NotTo(HaveOccurred())
statusRequest := &gateway.CommitStatusRequest{
ChannelId: channel,
Identity: idBytes,
TransactionId: transactionId,
}
statusRequestBytes, err := proto.Marshal(statusRequest)
Expect(err).NotTo(HaveOccurred())
signature, err := signer.Sign(statusRequestBytes)
Expect(err).NotTo(HaveOccurred())
signedStatusRequest := &gateway.SignedCommitStatusRequest{
Request: statusRequestBytes,
Signature: signature,
}
statusResponse, err := gatewayClient.CommitStatus(ctx, signedStatusRequest)
Expect(err).NotTo(HaveOccurred())
Expect(statusResponse.Result).To(Equal(peer.TxValidationCode_VALID))
}
func evaluateTransaction(
ctx context.Context,
gatewayClient gateway.GatewayClient,
signer *nwo.SigningIdentity,
channel string,
chaincode string,
transactionName string,
arguments []string,
) *peer.Response {
args := [][]byte{}
for _, arg := range arguments {
args = append(args, []byte(arg))
}
proposedTransaction, transactionID := NewProposedTransaction(
signer,
channel,
chaincode,
transactionName,
nil,
args...,
)
evaluateRequest := &gateway.EvaluateRequest{
TransactionId: transactionID,
ChannelId: channel,
ProposedTransaction: proposedTransaction,
}
evaluateResponse, err := gatewayClient.Evaluate(ctx, evaluateRequest)
Expect(err).NotTo(HaveOccurred())
return evaluateResponse.GetResult()
}
func peerGroupRunners(n *nwo.Network) (ifrit.Runner, []*ginkgomon.Runner) {
runners := []*ginkgomon.Runner{}
members := grouper.Members{}
for _, p := range n.Peers {
runner := n.PeerRunner(p)
members = append(members, grouper.Member{Name: p.ID(), Runner: runner})
runners = append(runners, runner)
}
return grouper.NewParallel(syscall.SIGTERM, members), runners
}
func joinChannel(network *nwo.Network, channel string) {
sess, err := network.ConfigTxGen(commands.OutputBlock{
ChannelID: channel,
Profile: network.Profiles[0].Name,
ConfigPath: network.RootDir,
OutputBlock: network.OutputBlockPath(channel),
})
Expect(err).NotTo(HaveOccurred())
Eventually(sess, network.EventuallyTimeout).Should(gexec.Exit(0))
genesisBlockBytes, err := os.ReadFile(network.OutputBlockPath(channel))
Expect(err).NotTo(HaveOccurred())
genesisBlock := &common.Block{}
err = proto.Unmarshal(genesisBlockBytes, genesisBlock)
Expect(err).NotTo(HaveOccurred())
expectedChannelInfoPT := channelparticipation.ChannelInfo{
Name: channel,
URL: "/participation/v1/channels/" + channel,
Status: "active",
ConsensusRelation: "consenter",
Height: 1,
}
for _, o := range network.Orderers {
By("joining " + o.Name + " to channel as a consenter")
channelparticipation.Join(network, o, channel, genesisBlock, expectedChannelInfoPT)
channelInfo := channelparticipation.ListOne(network, o, channel)
Expect(channelInfo).To(Equal(expectedChannelInfoPT))
}
} | random_line_split | |
db.go | /*
* Copyright 2018 Insolar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
import (
"bytes"
"context"
"path/filepath"
"sync"
"github.com/dgraph-io/badger"
"github.com/pkg/errors"
"github.com/ugorji/go/codec"
"github.com/insolar/insolar/configuration"
"github.com/insolar/insolar/core"
"github.com/insolar/insolar/core/message"
"github.com/insolar/insolar/instrumentation/inslogger"
"github.com/insolar/insolar/ledger/index"
"github.com/insolar/insolar/ledger/jetdrop"
"github.com/insolar/insolar/ledger/record"
)
const (
scopeIDLifeline byte = 1
scopeIDRecord byte = 2
scopeIDJetDrop byte = 3
scopeIDPulse byte = 4
scopeIDSystem byte = 5
scopeIDMessage byte = 6
scopeIDBlob byte = 7
scopeIDLocal byte = 8
sysGenesis byte = 1
sysLatestPulse byte = 2
)
// DB represents BadgerDB storage implementation.
type DB struct {
PlatformCryptographyScheme core.PlatformCryptographyScheme `inject:""`
db *badger.DB
genesisRef *core.RecordRef
// dropWG guards inflight updates before jet drop calculated.
dropWG sync.WaitGroup
// for BadgerDB it is normal to have transaction conflicts
// and these conflicts we should resolve by ourself
// so txretiries is our knob to tune up retry logic.
txretiries int
idlocker *IDLocker
// NodeHistory is an in-memory active node storage for each pulse. It's required to calculate node roles
// for past pulses to locate data.
// It should only contain previous N pulses. It should be stored on disk.
nodeHistory map[core.PulseNumber][]core.Node
nodeHistoryLock sync.Mutex
}
// SetTxRetiries sets number of retries on conflict in Update
func (db *DB) SetTxRetiries(n int) {
db.txretiries = n
}
func setOptions(o *badger.Options) *badger.Options {
newo := &badger.Options{}
if o != nil {
*newo = *o
} else {
*newo = badger.DefaultOptions
}
return newo
}
// NewDB returns storage.DB with BadgerDB instance initialized by opts.
// Creates database in provided dir or in current directory if dir parameter is empty.
func NewDB(conf configuration.Ledger, opts *badger.Options) (*DB, error) {
opts = setOptions(opts)
dir, err := filepath.Abs(conf.Storage.DataDirectory)
if err != nil {
return nil, err
}
opts.Dir = dir
opts.ValueDir = dir
bdb, err := badger.Open(*opts)
if err != nil {
return nil, errors.Wrap(err, "local database open failed")
}
db := &DB{
db: bdb,
txretiries: conf.Storage.TxRetriesOnConflict,
idlocker: NewIDLocker(),
nodeHistory: map[core.PulseNumber][]core.Node{},
}
return db, nil
}
// Init creates initial records in storage.
func (db *DB) Init(ctx context.Context) error {
inslog := inslogger.FromContext(ctx)
inslog.Debug("start storage bootstrap")
getGenesisRef := func() (*core.RecordRef, error) {
buff, err := db.get(ctx, prefixkey(scopeIDSystem, []byte{sysGenesis}))
if err != nil {
return nil, err
}
var genesisRef core.RecordRef
copy(genesisRef[:], buff)
return &genesisRef, nil
}
createGenesisRecord := func() (*core.RecordRef, error) {
err := db.AddPulse(
ctx,
core.Pulse{
PulseNumber: core.GenesisPulse.PulseNumber,
Entropy: core.GenesisPulse.Entropy,
},
)
if err != nil {
return nil, err
}
err = db.SetDrop(ctx, &jetdrop.JetDrop{})
if err != nil {
return nil, err
}
lastPulse, err := db.GetLatestPulseNumber(ctx)
if err != nil {
return nil, err
}
genesisID, err := db.SetRecord(ctx, lastPulse, &record.GenesisRecord{})
if err != nil {
return nil, err
}
err = db.SetObjectIndex(
ctx,
genesisID,
&index.ObjectLifeline{LatestState: genesisID, LatestStateApproved: genesisID},
)
if err != nil {
return nil, err
}
genesisRef := core.NewRecordRef(*genesisID, *genesisID)
return genesisRef, db.set(ctx, prefixkey(scopeIDSystem, []byte{sysGenesis}), genesisRef[:])
}
var err error
db.genesisRef, err = getGenesisRef()
if err == ErrNotFound {
db.genesisRef, err = createGenesisRecord()
}
if err != nil {
return errors.Wrap(err, "bootstrap failed")
}
return nil
}
// GenesisRef returns the genesis record reference.
//
// Genesis record is the parent for all top-level records.
func (db *DB) GenesisRef() *core.RecordRef {
return db.genesisRef
}
// Close wraps BadgerDB Close method.
//
// From https://godoc.org/github.com/dgraph-io/badger#DB.Close:
// «It's crucial to call it to ensure all the pending updates make their way to disk.
// Calling DB.Close() multiple times is not safe and wouldcause panic.»
func (db *DB) Close() error {
// TODO: add close flag and mutex guard on Close method
return db.db.Close()
}
// Stop stops DB component.
func (db *DB) Stop(ctx context.Context) error {
return db.Close()
}
// GetBlob returns binary value stored by record ID.
func (db *DB) GetBlob(ctx context.Context, id *core.RecordID) ([]byte, error) {
var (
blob []byte
err error
)
err = db.View(ctx, func(tx *TransactionManager) error {
blob, err = tx.GetBlob(ctx, id)
return err
})
if err != nil {
return nil, err
}
return blob, nil
}
// SetBlob saves binary value for provided pulse.
func (db *DB) SetBlob(ctx context.Context, pulseNumber core.PulseNumber, blob []byte) (*core.RecordID, error) {
var (
id *core.RecordID
err error
)
err = db.Update(ctx, func(tx *TransactionManager) error {
id, err = tx.SetBlob(ctx, pulseNumber, blob)
return err
})
if err != nil {
return nil, err
}
return id, nil
}
// GetRecord wraps matching transaction manager method.
func (db *DB) GetRecord(ctx context.Context, id *core.RecordID) (record.Record, error) {
var (
fetchedRecord record.Record
err error
)
err = db.View(ctx, func(tx *TransactionManager) error {
fetchedRecord, err = tx.GetRecord(ctx, id)
return err
})
if err != nil {
return nil, err
}
return fetchedRecord, nil
}
// SetRecord wraps matching transaction manager method.
func (db *DB) SetRecord(ctx context.Context, pulseNumber core.PulseNumber, rec record.Record) (*core.RecordID, error) {
var (
id *core.RecordID
err error
)
err = db.Update(ctx, func(tx *TransactionManager) error {
id, err = tx.SetRecord(ctx, pulseNumber, rec)
return err
})
if err != nil {
return nil, err
}
return id, nil
}
// GetObjectIndex wraps matching transaction manager method.
func (db *DB) GetObjectIndex(
ctx context.Context,
id *core.RecordID,
forupdate bool,
) (*index.ObjectLifeline, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
idx, err := tx.GetObjectIndex(ctx, id, forupdate)
if err != nil {
return nil, err
}
return idx, nil
}
// SetObjectIndex wraps matching transaction manager method.
func (db *DB) SetObjectIndex(
ctx context.Context,
id *core.RecordID,
idx *index.ObjectLifeline,
) error {
return db.Update(ctx, func(tx *TransactionManager) error {
return tx.SetObjectIndex(ctx, id, idx)
})
}
// GetDrop returns jet drop for a given pulse number.
func (db *DB) GetDrop(ctx context.Context, pulse core.PulseNumber) (*jetdrop.JetDrop, error) {
k := prefixkey(scopeIDJetDrop, pulse.Bytes())
buf, err := db.get(ctx, k)
if err != nil {
return nil, err
}
drop, err := jetdrop.Decode(buf)
if err != nil {
return nil, err
}
return drop, nil
}
func (db *DB) waitinflight() {
db.dropWG.Wait()
}
// CreateDrop creates and stores jet drop for given pulse number.
//
// Previous JetDrop hash should be provided. On success returns saved drop and slot records.
func (db *DB) CreateDrop(ctx context.Context, pulse core.PulseNumber, prevHash []byte) (
*jetdrop.JetDrop,
[][]byte,
error,
) {
var err error
db.waitinflight()
hw := db.PlatformCryptographyScheme.ReferenceHasher()
_, err = hw.Write(prevHash)
if err != nil {
return nil, nil, err
}
prefix := make([]byte, core.PulseNumberSize+1)
prefix[0] = scopeIDMessage
copy(prefix[1:], pulse.Bytes())
var messages [][]byte
err = db.db.View(func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
val, err := it.Item().ValueCopy(nil)
if err != nil {
return err
}
messages = append(messages, val)
}
return nil
})
if err != nil {
return nil, nil, err
}
drop := jetdrop.JetDrop{
Pulse: pulse,
PrevHash: prevHash,
Hash: hw.Sum(nil),
}
return &drop, messages, nil
}
// SetDrop saves provided JetDrop in db.
func (db *DB) SetDrop(ctx context.Context, drop *jetdrop.JetDrop) error {
k := prefixkey(scopeIDJetDrop, drop.Pulse.Bytes())
_, err := db.get(ctx, k)
if err == nil {
return ErrOverride
}
encoded, err := jetdrop.Encode(drop)
if err != nil {
return err
}
return db.set(ctx, k, encoded)
}
// AddPulse saves new pulse data and updates index.
func (db *DB) AddPulse(ctx context.Context, pulse core.Pulse) error {
return db.Update(ctx, func(tx *TransactionManager) error {
var latest core.PulseNumber
latest, err := tx.GetLatestPulseNumber(ctx)
if err != nil && err != ErrNotFound {
return err
}
pulseRec := record.PulseRecord{
PrevPulse: latest,
Entropy: pulse.Entropy,
PredictedNextPulse: pulse.NextPulseNumber,
}
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.CborHandle{})
err = enc.Encode(pulseRec)
if err != nil {
return err
}
err = tx.set(ctx, prefixkey(scopeIDPulse, pulse.PulseNumber.Bytes()), buf.Bytes())
if err != nil {
return err
}
return tx.set(ctx, prefixkey(scopeIDSystem, []byte{sysLatestPulse}), pulse.PulseNumber.Bytes())
})
}
// GetPulse returns pulse for provided pulse number.
func (db *DB) GetPulse(ctx context.Context, num core.PulseNumber) (*record.PulseRecord, error) {
buf, err := db.get(ctx, prefixkey(scopeIDPulse, num.Bytes()))
if err != nil {
return nil, err
}
dec := codec.NewDecoder(bytes.NewReader(buf), &codec.CborHandle{})
var rec record.PulseRecord
err = dec.Decode(&rec)
if err != nil {
return nil, err
}
return &rec, nil
}
// GetLatestPulseNumber returns current pulse number.
func (db *DB) GetLatestPulseNumber(ctx context.Context) (core.PulseNumber, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
return tx.GetLatestPulseNumber(ctx)
}
// BeginTransaction opens a new transaction.
// All methods called on returned transaction manager will persist changes
// only after success on "Commit" call.
func (db *DB) BeginTransaction(update bool) *TransactionManager {
if update {
db.dropWG.Add(1)
}
return &TransactionManager{
db: db,
update: update,
txupdates: make(map[string]keyval),
}
}
// View accepts transaction function. All calls to received transaction manager will be consistent.
func (db *DB) View(ctx context.Context, fn func(*TransactionManager) error) error {
tx := db.BeginTransaction(false)
defer tx.Discard()
return fn(tx)
}
// Update accepts transaction function and commits changes. All calls to received transaction manager will be
// consistent and written tp disk or an error will be returned.
func (db *DB) Update(ctx context.Context, fn func(*TransactionManager) error) error {
tries := db.txretiries
var tx *TransactionManager
var err error
for {
tx = db.BeginTransaction(true)
err = fn(tx)
if err != nil {
break
}
err = tx.Commit()
if err == nil {
break
}
if err != badger.ErrConflict {
break
}
if tries < 1 {
if db.txretiries > 0 {
err = ErrConflictRetriesOver
} else {
err = ErrConflict
}
break
}
tries--
tx.Discard()
}
tx.Discard()
if err != nil {
inslogger.FromContext(ctx).Errorln("DB Update error:", err)
}
return err
}
// GetBadgerDB return badger.DB instance (for internal usage, like tests)
func (db *DB) GetBadgerDB() *badger.DB {
return db.db
}
// SetMessage persists message to the database
func (db *DB) SetMessage(ctx context.Context, pulseNumber core.PulseNumber, genericMessage core.Message) error {
messageBytes := message.ToBytes(genericMessage)
hw := db.PlatformCryptographyScheme.ReferenceHasher()
_, err := hw.Write(messageBytes)
if err != nil {
return err
}
hw.Sum(nil)
return db.set(
ctx,
prefixkey(scopeIDMessage, bytes.Join([][]byte{pulseNumber.Bytes(), hw.Sum(nil)}, nil)),
messageBytes,
)
}
// SetLocalData saves provided data to storage.
func (db *DB) SetLocalData(ctx context.Context, pulse core.PulseNumber, key []byte, data []byte) error {
| // GetLocalData retrieves data from storage.
func (db *DB) GetLocalData(ctx context.Context, pulse core.PulseNumber, key []byte) ([]byte, error) {
return db.get(
ctx,
bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), key}, nil),
)
}
// IterateLocalData iterates over all record with specified prefix and calls handler with key and value of that record.
//
// The key will be returned without prefix (e.g. the remaining slice) and value will be returned as it was saved.
func (db *DB) IterateLocalData(ctx context.Context, pulse core.PulseNumber, prefix []byte, handler func(k, v []byte) error) error {
fullPrefix := bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), prefix}, nil)
return db.db.View(func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(fullPrefix); it.ValidForPrefix(fullPrefix); it.Next() {
key := it.Item().KeyCopy(nil)[len(fullPrefix):]
value, err := it.Item().ValueCopy(nil)
if err != nil {
return err
}
err = handler(key, value)
if err != nil {
return err
}
}
return nil
})
}
// SetActiveNodes saves active nodes for pulse in memory.
func (db *DB) SetActiveNodes(pulse core.PulseNumber, nodes []core.Node) error {
db.nodeHistoryLock.Lock()
defer db.nodeHistoryLock.Unlock()
if _, ok := db.nodeHistory[pulse]; ok {
return errors.New("node history override is forbidden")
}
db.nodeHistory[pulse] = nodes
return nil
}
// GetActiveNodes return active nodes for specified pulse.
func (db *DB) GetActiveNodes(pulse core.PulseNumber) ([]core.Node, error) {
nodes, ok := db.nodeHistory[pulse]
if !ok {
return nil, errors.New("no nodes for this pulse")
}
return nodes, nil
}
// get wraps matching transaction manager method.
func (db *DB) get(ctx context.Context, key []byte) ([]byte, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
return tx.get(ctx, key)
}
// set wraps matching transaction manager method.
func (db *DB) set(ctx context.Context, key, value []byte) error {
return db.Update(ctx, func(tx *TransactionManager) error {
return tx.set(ctx, key, value)
})
}
| return db.set(
ctx,
bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), key}, nil),
data,
)
}
| identifier_body |
db.go | /*
* Copyright 2018 Insolar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
import (
"bytes"
"context"
"path/filepath"
"sync"
"github.com/dgraph-io/badger"
"github.com/pkg/errors"
"github.com/ugorji/go/codec"
"github.com/insolar/insolar/configuration"
"github.com/insolar/insolar/core"
"github.com/insolar/insolar/core/message"
"github.com/insolar/insolar/instrumentation/inslogger"
"github.com/insolar/insolar/ledger/index"
"github.com/insolar/insolar/ledger/jetdrop"
"github.com/insolar/insolar/ledger/record"
)
const (
scopeIDLifeline byte = 1
scopeIDRecord byte = 2
scopeIDJetDrop byte = 3
scopeIDPulse byte = 4
scopeIDSystem byte = 5
scopeIDMessage byte = 6
scopeIDBlob byte = 7
scopeIDLocal byte = 8
sysGenesis byte = 1
sysLatestPulse byte = 2
)
// DB represents BadgerDB storage implementation.
type DB struct {
PlatformCryptographyScheme core.PlatformCryptographyScheme `inject:""`
db *badger.DB
genesisRef *core.RecordRef
// dropWG guards inflight updates before jet drop calculated.
dropWG sync.WaitGroup
// for BadgerDB it is normal to have transaction conflicts
// and these conflicts we should resolve by ourself
// so txretiries is our knob to tune up retry logic.
txretiries int
idlocker *IDLocker
// NodeHistory is an in-memory active node storage for each pulse. It's required to calculate node roles
// for past pulses to locate data.
// It should only contain previous N pulses. It should be stored on disk.
nodeHistory map[core.PulseNumber][]core.Node
nodeHistoryLock sync.Mutex
}
// SetTxRetiries sets number of retries on conflict in Update
func (db *DB) SetTxRetiries(n int) {
db.txretiries = n
}
func setOptions(o *badger.Options) *badger.Options {
newo := &badger.Options{}
if o != nil {
*newo = *o
} else {
*newo = badger.DefaultOptions
}
return newo
}
// NewDB returns storage.DB with BadgerDB instance initialized by opts.
// Creates database in provided dir or in current directory if dir parameter is empty.
func NewDB(conf configuration.Ledger, opts *badger.Options) (*DB, error) {
opts = setOptions(opts)
dir, err := filepath.Abs(conf.Storage.DataDirectory)
if err != nil |
opts.Dir = dir
opts.ValueDir = dir
bdb, err := badger.Open(*opts)
if err != nil {
return nil, errors.Wrap(err, "local database open failed")
}
db := &DB{
db: bdb,
txretiries: conf.Storage.TxRetriesOnConflict,
idlocker: NewIDLocker(),
nodeHistory: map[core.PulseNumber][]core.Node{},
}
return db, nil
}
// Init creates initial records in storage.
func (db *DB) Init(ctx context.Context) error {
inslog := inslogger.FromContext(ctx)
inslog.Debug("start storage bootstrap")
getGenesisRef := func() (*core.RecordRef, error) {
buff, err := db.get(ctx, prefixkey(scopeIDSystem, []byte{sysGenesis}))
if err != nil {
return nil, err
}
var genesisRef core.RecordRef
copy(genesisRef[:], buff)
return &genesisRef, nil
}
createGenesisRecord := func() (*core.RecordRef, error) {
err := db.AddPulse(
ctx,
core.Pulse{
PulseNumber: core.GenesisPulse.PulseNumber,
Entropy: core.GenesisPulse.Entropy,
},
)
if err != nil {
return nil, err
}
err = db.SetDrop(ctx, &jetdrop.JetDrop{})
if err != nil {
return nil, err
}
lastPulse, err := db.GetLatestPulseNumber(ctx)
if err != nil {
return nil, err
}
genesisID, err := db.SetRecord(ctx, lastPulse, &record.GenesisRecord{})
if err != nil {
return nil, err
}
err = db.SetObjectIndex(
ctx,
genesisID,
&index.ObjectLifeline{LatestState: genesisID, LatestStateApproved: genesisID},
)
if err != nil {
return nil, err
}
genesisRef := core.NewRecordRef(*genesisID, *genesisID)
return genesisRef, db.set(ctx, prefixkey(scopeIDSystem, []byte{sysGenesis}), genesisRef[:])
}
var err error
db.genesisRef, err = getGenesisRef()
if err == ErrNotFound {
db.genesisRef, err = createGenesisRecord()
}
if err != nil {
return errors.Wrap(err, "bootstrap failed")
}
return nil
}
// GenesisRef returns the genesis record reference.
//
// Genesis record is the parent for all top-level records.
func (db *DB) GenesisRef() *core.RecordRef {
return db.genesisRef
}
// Close wraps BadgerDB Close method.
//
// From https://godoc.org/github.com/dgraph-io/badger#DB.Close:
// «It's crucial to call it to ensure all the pending updates make their way to disk.
// Calling DB.Close() multiple times is not safe and wouldcause panic.»
func (db *DB) Close() error {
// TODO: add close flag and mutex guard on Close method
return db.db.Close()
}
// Stop stops DB component.
func (db *DB) Stop(ctx context.Context) error {
return db.Close()
}
// GetBlob returns binary value stored by record ID.
func (db *DB) GetBlob(ctx context.Context, id *core.RecordID) ([]byte, error) {
var (
blob []byte
err error
)
err = db.View(ctx, func(tx *TransactionManager) error {
blob, err = tx.GetBlob(ctx, id)
return err
})
if err != nil {
return nil, err
}
return blob, nil
}
// SetBlob saves binary value for provided pulse.
func (db *DB) SetBlob(ctx context.Context, pulseNumber core.PulseNumber, blob []byte) (*core.RecordID, error) {
var (
id *core.RecordID
err error
)
err = db.Update(ctx, func(tx *TransactionManager) error {
id, err = tx.SetBlob(ctx, pulseNumber, blob)
return err
})
if err != nil {
return nil, err
}
return id, nil
}
// GetRecord wraps matching transaction manager method.
func (db *DB) GetRecord(ctx context.Context, id *core.RecordID) (record.Record, error) {
var (
fetchedRecord record.Record
err error
)
err = db.View(ctx, func(tx *TransactionManager) error {
fetchedRecord, err = tx.GetRecord(ctx, id)
return err
})
if err != nil {
return nil, err
}
return fetchedRecord, nil
}
// SetRecord wraps matching transaction manager method.
func (db *DB) SetRecord(ctx context.Context, pulseNumber core.PulseNumber, rec record.Record) (*core.RecordID, error) {
var (
id *core.RecordID
err error
)
err = db.Update(ctx, func(tx *TransactionManager) error {
id, err = tx.SetRecord(ctx, pulseNumber, rec)
return err
})
if err != nil {
return nil, err
}
return id, nil
}
// GetObjectIndex wraps matching transaction manager method.
func (db *DB) GetObjectIndex(
ctx context.Context,
id *core.RecordID,
forupdate bool,
) (*index.ObjectLifeline, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
idx, err := tx.GetObjectIndex(ctx, id, forupdate)
if err != nil {
return nil, err
}
return idx, nil
}
// SetObjectIndex wraps matching transaction manager method.
func (db *DB) SetObjectIndex(
ctx context.Context,
id *core.RecordID,
idx *index.ObjectLifeline,
) error {
return db.Update(ctx, func(tx *TransactionManager) error {
return tx.SetObjectIndex(ctx, id, idx)
})
}
// GetDrop returns jet drop for a given pulse number.
func (db *DB) GetDrop(ctx context.Context, pulse core.PulseNumber) (*jetdrop.JetDrop, error) {
k := prefixkey(scopeIDJetDrop, pulse.Bytes())
buf, err := db.get(ctx, k)
if err != nil {
return nil, err
}
drop, err := jetdrop.Decode(buf)
if err != nil {
return nil, err
}
return drop, nil
}
func (db *DB) waitinflight() {
db.dropWG.Wait()
}
// CreateDrop creates and stores jet drop for given pulse number.
//
// Previous JetDrop hash should be provided. On success returns saved drop and slot records.
func (db *DB) CreateDrop(ctx context.Context, pulse core.PulseNumber, prevHash []byte) (
*jetdrop.JetDrop,
[][]byte,
error,
) {
var err error
db.waitinflight()
hw := db.PlatformCryptographyScheme.ReferenceHasher()
_, err = hw.Write(prevHash)
if err != nil {
return nil, nil, err
}
prefix := make([]byte, core.PulseNumberSize+1)
prefix[0] = scopeIDMessage
copy(prefix[1:], pulse.Bytes())
var messages [][]byte
err = db.db.View(func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
val, err := it.Item().ValueCopy(nil)
if err != nil {
return err
}
messages = append(messages, val)
}
return nil
})
if err != nil {
return nil, nil, err
}
drop := jetdrop.JetDrop{
Pulse: pulse,
PrevHash: prevHash,
Hash: hw.Sum(nil),
}
return &drop, messages, nil
}
// SetDrop saves provided JetDrop in db.
func (db *DB) SetDrop(ctx context.Context, drop *jetdrop.JetDrop) error {
k := prefixkey(scopeIDJetDrop, drop.Pulse.Bytes())
_, err := db.get(ctx, k)
if err == nil {
return ErrOverride
}
encoded, err := jetdrop.Encode(drop)
if err != nil {
return err
}
return db.set(ctx, k, encoded)
}
// AddPulse saves new pulse data and updates index.
func (db *DB) AddPulse(ctx context.Context, pulse core.Pulse) error {
return db.Update(ctx, func(tx *TransactionManager) error {
var latest core.PulseNumber
latest, err := tx.GetLatestPulseNumber(ctx)
if err != nil && err != ErrNotFound {
return err
}
pulseRec := record.PulseRecord{
PrevPulse: latest,
Entropy: pulse.Entropy,
PredictedNextPulse: pulse.NextPulseNumber,
}
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.CborHandle{})
err = enc.Encode(pulseRec)
if err != nil {
return err
}
err = tx.set(ctx, prefixkey(scopeIDPulse, pulse.PulseNumber.Bytes()), buf.Bytes())
if err != nil {
return err
}
return tx.set(ctx, prefixkey(scopeIDSystem, []byte{sysLatestPulse}), pulse.PulseNumber.Bytes())
})
}
// GetPulse returns pulse for provided pulse number.
func (db *DB) GetPulse(ctx context.Context, num core.PulseNumber) (*record.PulseRecord, error) {
buf, err := db.get(ctx, prefixkey(scopeIDPulse, num.Bytes()))
if err != nil {
return nil, err
}
dec := codec.NewDecoder(bytes.NewReader(buf), &codec.CborHandle{})
var rec record.PulseRecord
err = dec.Decode(&rec)
if err != nil {
return nil, err
}
return &rec, nil
}
// GetLatestPulseNumber returns current pulse number.
func (db *DB) GetLatestPulseNumber(ctx context.Context) (core.PulseNumber, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
return tx.GetLatestPulseNumber(ctx)
}
// BeginTransaction opens a new transaction.
// All methods called on returned transaction manager will persist changes
// only after success on "Commit" call.
func (db *DB) BeginTransaction(update bool) *TransactionManager {
if update {
db.dropWG.Add(1)
}
return &TransactionManager{
db: db,
update: update,
txupdates: make(map[string]keyval),
}
}
// View accepts transaction function. All calls to received transaction manager will be consistent.
func (db *DB) View(ctx context.Context, fn func(*TransactionManager) error) error {
tx := db.BeginTransaction(false)
defer tx.Discard()
return fn(tx)
}
// Update accepts transaction function and commits changes. All calls to received transaction manager will be
// consistent and written tp disk or an error will be returned.
func (db *DB) Update(ctx context.Context, fn func(*TransactionManager) error) error {
tries := db.txretiries
var tx *TransactionManager
var err error
for {
tx = db.BeginTransaction(true)
err = fn(tx)
if err != nil {
break
}
err = tx.Commit()
if err == nil {
break
}
if err != badger.ErrConflict {
break
}
if tries < 1 {
if db.txretiries > 0 {
err = ErrConflictRetriesOver
} else {
err = ErrConflict
}
break
}
tries--
tx.Discard()
}
tx.Discard()
if err != nil {
inslogger.FromContext(ctx).Errorln("DB Update error:", err)
}
return err
}
// GetBadgerDB return badger.DB instance (for internal usage, like tests)
func (db *DB) GetBadgerDB() *badger.DB {
return db.db
}
// SetMessage persists message to the database
func (db *DB) SetMessage(ctx context.Context, pulseNumber core.PulseNumber, genericMessage core.Message) error {
messageBytes := message.ToBytes(genericMessage)
hw := db.PlatformCryptographyScheme.ReferenceHasher()
_, err := hw.Write(messageBytes)
if err != nil {
return err
}
hw.Sum(nil)
return db.set(
ctx,
prefixkey(scopeIDMessage, bytes.Join([][]byte{pulseNumber.Bytes(), hw.Sum(nil)}, nil)),
messageBytes,
)
}
// SetLocalData saves provided data to storage.
func (db *DB) SetLocalData(ctx context.Context, pulse core.PulseNumber, key []byte, data []byte) error {
return db.set(
ctx,
bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), key}, nil),
data,
)
}
// GetLocalData retrieves data from storage.
func (db *DB) GetLocalData(ctx context.Context, pulse core.PulseNumber, key []byte) ([]byte, error) {
return db.get(
ctx,
bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), key}, nil),
)
}
// IterateLocalData iterates over all record with specified prefix and calls handler with key and value of that record.
//
// The key will be returned without prefix (e.g. the remaining slice) and value will be returned as it was saved.
func (db *DB) IterateLocalData(ctx context.Context, pulse core.PulseNumber, prefix []byte, handler func(k, v []byte) error) error {
fullPrefix := bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), prefix}, nil)
return db.db.View(func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(fullPrefix); it.ValidForPrefix(fullPrefix); it.Next() {
key := it.Item().KeyCopy(nil)[len(fullPrefix):]
value, err := it.Item().ValueCopy(nil)
if err != nil {
return err
}
err = handler(key, value)
if err != nil {
return err
}
}
return nil
})
}
// SetActiveNodes saves active nodes for pulse in memory.
func (db *DB) SetActiveNodes(pulse core.PulseNumber, nodes []core.Node) error {
db.nodeHistoryLock.Lock()
defer db.nodeHistoryLock.Unlock()
if _, ok := db.nodeHistory[pulse]; ok {
return errors.New("node history override is forbidden")
}
db.nodeHistory[pulse] = nodes
return nil
}
// GetActiveNodes return active nodes for specified pulse.
func (db *DB) GetActiveNodes(pulse core.PulseNumber) ([]core.Node, error) {
nodes, ok := db.nodeHistory[pulse]
if !ok {
return nil, errors.New("no nodes for this pulse")
}
return nodes, nil
}
// get wraps matching transaction manager method.
func (db *DB) get(ctx context.Context, key []byte) ([]byte, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
return tx.get(ctx, key)
}
// set wraps matching transaction manager method.
func (db *DB) set(ctx context.Context, key, value []byte) error {
return db.Update(ctx, func(tx *TransactionManager) error {
return tx.set(ctx, key, value)
})
}
| {
return nil, err
} | conditional_block |
db.go | /*
* Copyright 2018 Insolar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
import (
"bytes"
"context"
"path/filepath"
"sync"
"github.com/dgraph-io/badger"
"github.com/pkg/errors"
"github.com/ugorji/go/codec"
"github.com/insolar/insolar/configuration"
"github.com/insolar/insolar/core"
"github.com/insolar/insolar/core/message"
"github.com/insolar/insolar/instrumentation/inslogger"
"github.com/insolar/insolar/ledger/index"
"github.com/insolar/insolar/ledger/jetdrop"
"github.com/insolar/insolar/ledger/record"
)
const (
scopeIDLifeline byte = 1
scopeIDRecord byte = 2
scopeIDJetDrop byte = 3
scopeIDPulse byte = 4
scopeIDSystem byte = 5
scopeIDMessage byte = 6
scopeIDBlob byte = 7
scopeIDLocal byte = 8
sysGenesis byte = 1
sysLatestPulse byte = 2
)
// DB represents BadgerDB storage implementation.
type DB struct {
PlatformCryptographyScheme core.PlatformCryptographyScheme `inject:""`
db *badger.DB
genesisRef *core.RecordRef
// dropWG guards inflight updates before jet drop calculated.
dropWG sync.WaitGroup
// for BadgerDB it is normal to have transaction conflicts
// and these conflicts we should resolve by ourself
// so txretiries is our knob to tune up retry logic.
txretiries int
idlocker *IDLocker
// NodeHistory is an in-memory active node storage for each pulse. It's required to calculate node roles
// for past pulses to locate data.
// It should only contain previous N pulses. It should be stored on disk.
nodeHistory map[core.PulseNumber][]core.Node
nodeHistoryLock sync.Mutex
}
// SetTxRetiries sets number of retries on conflict in Update
func (db *DB) SetTxRetiries(n int) {
db.txretiries = n
}
func setOptions(o *badger.Options) *badger.Options {
newo := &badger.Options{}
if o != nil {
*newo = *o
} else {
*newo = badger.DefaultOptions
}
return newo
}
// NewDB returns storage.DB with BadgerDB instance initialized by opts.
// Creates database in provided dir or in current directory if dir parameter is empty.
func NewDB(conf configuration.Ledger, opts *badger.Options) (*DB, error) {
opts = setOptions(opts)
dir, err := filepath.Abs(conf.Storage.DataDirectory)
if err != nil {
return nil, err
}
opts.Dir = dir
opts.ValueDir = dir
bdb, err := badger.Open(*opts)
if err != nil {
return nil, errors.Wrap(err, "local database open failed")
}
db := &DB{
db: bdb,
txretiries: conf.Storage.TxRetriesOnConflict,
idlocker: NewIDLocker(),
nodeHistory: map[core.PulseNumber][]core.Node{},
}
return db, nil
}
// Init creates initial records in storage.
func (db *DB) Init(ctx context.Context) error {
inslog := inslogger.FromContext(ctx)
inslog.Debug("start storage bootstrap")
getGenesisRef := func() (*core.RecordRef, error) {
buff, err := db.get(ctx, prefixkey(scopeIDSystem, []byte{sysGenesis}))
if err != nil {
return nil, err
}
var genesisRef core.RecordRef
copy(genesisRef[:], buff)
return &genesisRef, nil
}
createGenesisRecord := func() (*core.RecordRef, error) {
err := db.AddPulse(
ctx,
core.Pulse{
PulseNumber: core.GenesisPulse.PulseNumber,
Entropy: core.GenesisPulse.Entropy,
},
)
if err != nil {
return nil, err
}
err = db.SetDrop(ctx, &jetdrop.JetDrop{})
if err != nil {
return nil, err
}
lastPulse, err := db.GetLatestPulseNumber(ctx)
if err != nil {
return nil, err
}
genesisID, err := db.SetRecord(ctx, lastPulse, &record.GenesisRecord{})
if err != nil {
return nil, err
}
err = db.SetObjectIndex(
ctx,
genesisID,
&index.ObjectLifeline{LatestState: genesisID, LatestStateApproved: genesisID},
)
if err != nil {
return nil, err
}
genesisRef := core.NewRecordRef(*genesisID, *genesisID)
return genesisRef, db.set(ctx, prefixkey(scopeIDSystem, []byte{sysGenesis}), genesisRef[:])
}
var err error
db.genesisRef, err = getGenesisRef()
if err == ErrNotFound {
db.genesisRef, err = createGenesisRecord()
}
if err != nil {
return errors.Wrap(err, "bootstrap failed")
}
return nil
}
// GenesisRef returns the genesis record reference.
//
// Genesis record is the parent for all top-level records.
func (db *DB) GenesisRef() *core.RecordRef {
return db.genesisRef
}
// Close wraps BadgerDB Close method.
//
// From https://godoc.org/github.com/dgraph-io/badger#DB.Close:
// «It's crucial to call it to ensure all the pending updates make their way to disk.
// Calling DB.Close() multiple times is not safe and wouldcause panic.»
func (db *DB) Close() error {
// TODO: add close flag and mutex guard on Close method
return db.db.Close()
}
// Stop stops DB component.
func (db *DB) Stop(ctx context.Context) error {
return db.Close()
}
// GetBlob returns binary value stored by record ID.
func (db *DB) GetBlob(ctx context.Context, id *core.RecordID) ([]byte, error) {
var (
blob []byte
err error
)
err = db.View(ctx, func(tx *TransactionManager) error {
blob, err = tx.GetBlob(ctx, id)
return err
})
if err != nil {
return nil, err
}
return blob, nil
}
// SetBlob saves binary value for provided pulse.
func (db *DB) SetBlob(ctx context.Context, pulseNumber core.PulseNumber, blob []byte) (*core.RecordID, error) {
var (
id *core.RecordID
err error
)
err = db.Update(ctx, func(tx *TransactionManager) error {
id, err = tx.SetBlob(ctx, pulseNumber, blob)
return err
})
if err != nil {
return nil, err
}
return id, nil
}
// GetRecord wraps matching transaction manager method.
func (db *DB) GetRecord(ctx context.Context, id *core.RecordID) (record.Record, error) {
var (
fetchedRecord record.Record
err error
)
err = db.View(ctx, func(tx *TransactionManager) error {
fetchedRecord, err = tx.GetRecord(ctx, id)
return err
})
if err != nil {
return nil, err
}
return fetchedRecord, nil
}
// SetRecord wraps matching transaction manager method.
func (db *DB) SetRecord(ctx context.Context, pulseNumber core.PulseNumber, rec record.Record) (*core.RecordID, error) {
var (
id *core.RecordID
err error
)
err = db.Update(ctx, func(tx *TransactionManager) error {
id, err = tx.SetRecord(ctx, pulseNumber, rec)
return err
})
if err != nil {
return nil, err
}
return id, nil
}
// GetObjectIndex wraps matching transaction manager method.
func (db *DB) GetObjectIndex(
ctx context.Context,
id *core.RecordID,
forupdate bool,
) (*index.ObjectLifeline, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
idx, err := tx.GetObjectIndex(ctx, id, forupdate)
if err != nil {
return nil, err
}
return idx, nil
}
// SetObjectIndex wraps matching transaction manager method.
func (db *DB) SetObjectIndex(
ctx context.Context,
id *core.RecordID,
idx *index.ObjectLifeline,
) error {
return db.Update(ctx, func(tx *TransactionManager) error {
return tx.SetObjectIndex(ctx, id, idx)
})
}
// GetDrop returns jet drop for a given pulse number.
func (db *DB) GetDrop(ctx context.Context, pulse core.PulseNumber) (*jetdrop.JetDrop, error) {
k := prefixkey(scopeIDJetDrop, pulse.Bytes())
buf, err := db.get(ctx, k)
if err != nil {
return nil, err
}
drop, err := jetdrop.Decode(buf)
if err != nil {
return nil, err
}
return drop, nil
}
func (db *DB) waitinflight() {
db.dropWG.Wait()
}
// CreateDrop creates and stores jet drop for given pulse number.
//
// Previous JetDrop hash should be provided. On success returns saved drop and slot records.
func (db *DB) CreateDrop(ctx context.Context, pulse core.PulseNumber, prevHash []byte) (
*jetdrop.JetDrop,
[][]byte,
error,
) {
var err error
db.waitinflight()
hw := db.PlatformCryptographyScheme.ReferenceHasher()
_, err = hw.Write(prevHash)
if err != nil {
return nil, nil, err
}
prefix := make([]byte, core.PulseNumberSize+1)
prefix[0] = scopeIDMessage
copy(prefix[1:], pulse.Bytes())
var messages [][]byte
err = db.db.View(func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
val, err := it.Item().ValueCopy(nil)
if err != nil {
return err
}
messages = append(messages, val)
}
return nil
})
if err != nil {
return nil, nil, err
}
drop := jetdrop.JetDrop{
Pulse: pulse,
PrevHash: prevHash,
Hash: hw.Sum(nil),
}
return &drop, messages, nil
}
// SetDrop saves provided JetDrop in db.
func (db *DB) SetDrop(ctx context.Context, drop *jetdrop.JetDrop) error {
k := prefixkey(scopeIDJetDrop, drop.Pulse.Bytes())
_, err := db.get(ctx, k)
if err == nil {
return ErrOverride
}
encoded, err := jetdrop.Encode(drop)
if err != nil {
return err
}
return db.set(ctx, k, encoded)
}
// AddPulse saves new pulse data and updates index.
func (db *DB) AddPulse(ctx context.Context, pulse core.Pulse) error {
return db.Update(ctx, func(tx *TransactionManager) error {
var latest core.PulseNumber
latest, err := tx.GetLatestPulseNumber(ctx)
if err != nil && err != ErrNotFound {
return err
}
pulseRec := record.PulseRecord{
PrevPulse: latest,
Entropy: pulse.Entropy,
PredictedNextPulse: pulse.NextPulseNumber,
}
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.CborHandle{})
err = enc.Encode(pulseRec)
if err != nil {
return err
}
err = tx.set(ctx, prefixkey(scopeIDPulse, pulse.PulseNumber.Bytes()), buf.Bytes())
if err != nil {
return err
}
return tx.set(ctx, prefixkey(scopeIDSystem, []byte{sysLatestPulse}), pulse.PulseNumber.Bytes())
})
}
// GetPulse returns pulse for provided pulse number.
func (db *DB) GetPulse(ctx context.Context, num core.PulseNumber) (*record.PulseRecord, error) {
buf, err := db.get(ctx, prefixkey(scopeIDPulse, num.Bytes()))
if err != nil {
return nil, err
}
dec := codec.NewDecoder(bytes.NewReader(buf), &codec.CborHandle{})
var rec record.PulseRecord
err = dec.Decode(&rec)
if err != nil {
return nil, err
}
return &rec, nil
}
// GetLatestPulseNumber returns current pulse number.
func (db *DB) GetLatestPulseNumber(ctx context.Context) (core.PulseNumber, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
return tx.GetLatestPulseNumber(ctx)
}
// BeginTransaction opens a new transaction.
// All methods called on returned transaction manager will persist changes
// only after success on "Commit" call.
func (db *DB) BeginTransaction(update bool) *TransactionManager {
if update {
db.dropWG.Add(1)
}
return &TransactionManager{
db: db,
update: update,
txupdates: make(map[string]keyval),
}
}
// View accepts transaction function. All calls to received transaction manager will be consistent.
func (db *DB) View(ctx context.Context, fn func(*TransactionManager) error) error {
tx := db.BeginTransaction(false)
defer tx.Discard()
return fn(tx)
}
// Update accepts transaction function and commits changes. All calls to received transaction manager will be
// consistent and written tp disk or an error will be returned.
func (db *DB) Update(ctx context.Context, fn func(*TransactionManager) error) error {
tries := db.txretiries
var tx *TransactionManager
var err error
for {
tx = db.BeginTransaction(true)
err = fn(tx)
if err != nil {
break
}
err = tx.Commit()
if err == nil {
break
}
if err != badger.ErrConflict {
break
}
if tries < 1 {
if db.txretiries > 0 {
err = ErrConflictRetriesOver
} else {
err = ErrConflict
}
break
}
tries--
tx.Discard()
}
tx.Discard()
if err != nil {
inslogger.FromContext(ctx).Errorln("DB Update error:", err)
}
return err
}
// GetBadgerDB return badger.DB instance (for internal usage, like tests)
func (db *DB) GetBadgerDB() *badger.DB {
return db.db
}
// SetMessage persists message to the database
func (db *DB) SetMessage(ctx context.Context, pulseNumber core.PulseNumber, genericMessage core.Message) error {
messageBytes := message.ToBytes(genericMessage)
hw := db.PlatformCryptographyScheme.ReferenceHasher()
_, err := hw.Write(messageBytes)
if err != nil {
return err
}
hw.Sum(nil)
return db.set(
ctx,
prefixkey(scopeIDMessage, bytes.Join([][]byte{pulseNumber.Bytes(), hw.Sum(nil)}, nil)),
messageBytes,
)
}
// SetLocalData saves provided data to storage.
func (db *DB) SetLocalData(ctx context.Context, pulse core.PulseNumber, key []byte, data []byte) error {
return db.set(
ctx,
bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), key}, nil),
data,
)
}
// GetLocalData retrieves data from storage.
func (db *DB) GetLocalData(ctx context.Context, pulse core.PulseNumber, key []byte) ([]byte, error) {
return db.get(
ctx,
bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), key}, nil),
)
}
// IterateLocalData iterates over all record with specified prefix and calls handler with key and value of that record.
//
// The key will be returned without prefix (e.g. the remaining slice) and value will be returned as it was saved.
func (db *DB) IterateLocalData(ctx context.Context, pulse core.PulseNumber, prefix []byte, handler func(k, v []byte) error) error {
fullPrefix := bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), prefix}, nil)
return db.db.View(func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(fullPrefix); it.ValidForPrefix(fullPrefix); it.Next() {
key := it.Item().KeyCopy(nil)[len(fullPrefix):]
value, err := it.Item().ValueCopy(nil)
if err != nil {
return err
}
err = handler(key, value)
if err != nil {
return err
}
}
return nil
})
}
// SetActiveNodes saves active nodes for pulse in memory.
func (db *DB) SetActiveNodes(pulse core.PulseNumber, nodes []core.Node) error {
db.nodeHistoryLock.Lock()
defer db.nodeHistoryLock.Unlock()
if _, ok := db.nodeHistory[pulse]; ok {
return errors.New("node history override is forbidden")
}
db.nodeHistory[pulse] = nodes
return nil
}
// GetActiveNodes return active nodes for specified pulse.
func (db *DB) GetActiveNodes(pulse core.PulseNumber) ([]core.Node, error) {
nodes, ok := db.nodeHistory[pulse]
if !ok {
return nil, errors.New("no nodes for this pulse")
}
return nodes, nil
}
// get wraps matching transaction manager method.
func (db *DB) get(ctx context.Context, key []byte) ([]byte, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
return tx.get(ctx, key)
}
// set wraps matching transaction manager method. | return db.Update(ctx, func(tx *TransactionManager) error {
return tx.set(ctx, key, value)
})
} | func (db *DB) set(ctx context.Context, key, value []byte) error { | random_line_split |
db.go | /*
* Copyright 2018 Insolar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
import (
"bytes"
"context"
"path/filepath"
"sync"
"github.com/dgraph-io/badger"
"github.com/pkg/errors"
"github.com/ugorji/go/codec"
"github.com/insolar/insolar/configuration"
"github.com/insolar/insolar/core"
"github.com/insolar/insolar/core/message"
"github.com/insolar/insolar/instrumentation/inslogger"
"github.com/insolar/insolar/ledger/index"
"github.com/insolar/insolar/ledger/jetdrop"
"github.com/insolar/insolar/ledger/record"
)
const (
scopeIDLifeline byte = 1
scopeIDRecord byte = 2
scopeIDJetDrop byte = 3
scopeIDPulse byte = 4
scopeIDSystem byte = 5
scopeIDMessage byte = 6
scopeIDBlob byte = 7
scopeIDLocal byte = 8
sysGenesis byte = 1
sysLatestPulse byte = 2
)
// DB represents BadgerDB storage implementation.
type DB struct {
PlatformCryptographyScheme core.PlatformCryptographyScheme `inject:""`
db *badger.DB
genesisRef *core.RecordRef
// dropWG guards inflight updates before jet drop calculated.
dropWG sync.WaitGroup
// for BadgerDB it is normal to have transaction conflicts
// and these conflicts we should resolve by ourself
// so txretiries is our knob to tune up retry logic.
txretiries int
idlocker *IDLocker
// NodeHistory is an in-memory active node storage for each pulse. It's required to calculate node roles
// for past pulses to locate data.
// It should only contain previous N pulses. It should be stored on disk.
nodeHistory map[core.PulseNumber][]core.Node
nodeHistoryLock sync.Mutex
}
// SetTxRetiries sets number of retries on conflict in Update
func (db *DB) SetTxRetiries(n int) {
db.txretiries = n
}
func setOptions(o *badger.Options) *badger.Options {
newo := &badger.Options{}
if o != nil {
*newo = *o
} else {
*newo = badger.DefaultOptions
}
return newo
}
// NewDB returns storage.DB with BadgerDB instance initialized by opts.
// Creates database in provided dir or in current directory if dir parameter is empty.
func NewDB(conf configuration.Ledger, opts *badger.Options) (*DB, error) {
opts = setOptions(opts)
dir, err := filepath.Abs(conf.Storage.DataDirectory)
if err != nil {
return nil, err
}
opts.Dir = dir
opts.ValueDir = dir
bdb, err := badger.Open(*opts)
if err != nil {
return nil, errors.Wrap(err, "local database open failed")
}
db := &DB{
db: bdb,
txretiries: conf.Storage.TxRetriesOnConflict,
idlocker: NewIDLocker(),
nodeHistory: map[core.PulseNumber][]core.Node{},
}
return db, nil
}
// Init creates initial records in storage.
func (db *DB) Init(ctx context.Context) error {
inslog := inslogger.FromContext(ctx)
inslog.Debug("start storage bootstrap")
getGenesisRef := func() (*core.RecordRef, error) {
buff, err := db.get(ctx, prefixkey(scopeIDSystem, []byte{sysGenesis}))
if err != nil {
return nil, err
}
var genesisRef core.RecordRef
copy(genesisRef[:], buff)
return &genesisRef, nil
}
createGenesisRecord := func() (*core.RecordRef, error) {
err := db.AddPulse(
ctx,
core.Pulse{
PulseNumber: core.GenesisPulse.PulseNumber,
Entropy: core.GenesisPulse.Entropy,
},
)
if err != nil {
return nil, err
}
err = db.SetDrop(ctx, &jetdrop.JetDrop{})
if err != nil {
return nil, err
}
lastPulse, err := db.GetLatestPulseNumber(ctx)
if err != nil {
return nil, err
}
genesisID, err := db.SetRecord(ctx, lastPulse, &record.GenesisRecord{})
if err != nil {
return nil, err
}
err = db.SetObjectIndex(
ctx,
genesisID,
&index.ObjectLifeline{LatestState: genesisID, LatestStateApproved: genesisID},
)
if err != nil {
return nil, err
}
genesisRef := core.NewRecordRef(*genesisID, *genesisID)
return genesisRef, db.set(ctx, prefixkey(scopeIDSystem, []byte{sysGenesis}), genesisRef[:])
}
var err error
db.genesisRef, err = getGenesisRef()
if err == ErrNotFound {
db.genesisRef, err = createGenesisRecord()
}
if err != nil {
return errors.Wrap(err, "bootstrap failed")
}
return nil
}
// GenesisRef returns the genesis record reference.
//
// Genesis record is the parent for all top-level records.
func (db *DB) GenesisRef() *core.RecordRef {
return db.genesisRef
}
// Close wraps BadgerDB Close method.
//
// From https://godoc.org/github.com/dgraph-io/badger#DB.Close:
// «It's crucial to call it to ensure all the pending updates make their way to disk.
// Calling DB.Close() multiple times is not safe and wouldcause panic.»
func (db *DB) Close() error {
// TODO: add close flag and mutex guard on Close method
return db.db.Close()
}
// Stop stops DB component.
func (db *DB) Stop(ctx context.Context) error {
return db.Close()
}
// GetBlob returns binary value stored by record ID.
func (db *DB) GetBlob(ctx context.Context, id *core.RecordID) ([]byte, error) {
var (
blob []byte
err error
)
err = db.View(ctx, func(tx *TransactionManager) error {
blob, err = tx.GetBlob(ctx, id)
return err
})
if err != nil {
return nil, err
}
return blob, nil
}
// SetBlob saves binary value for provided pulse.
func (db *DB) SetBlob(ctx context.Context, pulseNumber core.PulseNumber, blob []byte) (*core.RecordID, error) {
var (
id *core.RecordID
err error
)
err = db.Update(ctx, func(tx *TransactionManager) error {
id, err = tx.SetBlob(ctx, pulseNumber, blob)
return err
})
if err != nil {
return nil, err
}
return id, nil
}
// GetRecord wraps matching transaction manager method.
func (db *DB) GetRecord(ctx context.Context, id *core.RecordID) (record.Record, error) {
var (
fetchedRecord record.Record
err error
)
err = db.View(ctx, func(tx *TransactionManager) error {
fetchedRecord, err = tx.GetRecord(ctx, id)
return err
})
if err != nil {
return nil, err
}
return fetchedRecord, nil
}
// SetRecord wraps matching transaction manager method.
func (db *DB) SetRecord(ctx context.Context, pulseNumber core.PulseNumber, rec record.Record) (*core.RecordID, error) {
var (
id *core.RecordID
err error
)
err = db.Update(ctx, func(tx *TransactionManager) error {
id, err = tx.SetRecord(ctx, pulseNumber, rec)
return err
})
if err != nil {
return nil, err
}
return id, nil
}
// GetObjectIndex wraps matching transaction manager method.
func (db *DB) GetObjectIndex(
ctx context.Context,
id *core.RecordID,
forupdate bool,
) (*index.ObjectLifeline, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
idx, err := tx.GetObjectIndex(ctx, id, forupdate)
if err != nil {
return nil, err
}
return idx, nil
}
// SetObjectIndex wraps matching transaction manager method.
func (db *DB) SetObjectIndex(
ctx context.Context,
id *core.RecordID,
idx *index.ObjectLifeline,
) error {
return db.Update(ctx, func(tx *TransactionManager) error {
return tx.SetObjectIndex(ctx, id, idx)
})
}
// GetDrop returns jet drop for a given pulse number.
func (db *DB) GetDrop(ctx context.Context, pulse core.PulseNumber) (*jetdrop.JetDrop, error) {
k := prefixkey(scopeIDJetDrop, pulse.Bytes())
buf, err := db.get(ctx, k)
if err != nil {
return nil, err
}
drop, err := jetdrop.Decode(buf)
if err != nil {
return nil, err
}
return drop, nil
}
func (db *DB) waitinflight() {
db.dropWG.Wait()
}
// CreateDrop creates and stores jet drop for given pulse number.
//
// Previous JetDrop hash should be provided. On success returns saved drop and slot records.
func (db *DB) CreateDrop(ctx context.Context, pulse core.PulseNumber, prevHash []byte) (
*jetdrop.JetDrop,
[][]byte,
error,
) {
var err error
db.waitinflight()
hw := db.PlatformCryptographyScheme.ReferenceHasher()
_, err = hw.Write(prevHash)
if err != nil {
return nil, nil, err
}
prefix := make([]byte, core.PulseNumberSize+1)
prefix[0] = scopeIDMessage
copy(prefix[1:], pulse.Bytes())
var messages [][]byte
err = db.db.View(func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
val, err := it.Item().ValueCopy(nil)
if err != nil {
return err
}
messages = append(messages, val)
}
return nil
})
if err != nil {
return nil, nil, err
}
drop := jetdrop.JetDrop{
Pulse: pulse,
PrevHash: prevHash,
Hash: hw.Sum(nil),
}
return &drop, messages, nil
}
// SetDrop saves provided JetDrop in db.
func (db *DB) SetDrop(ctx context.Context, drop *jetdrop.JetDrop) error {
k := prefixkey(scopeIDJetDrop, drop.Pulse.Bytes())
_, err := db.get(ctx, k)
if err == nil {
return ErrOverride
}
encoded, err := jetdrop.Encode(drop)
if err != nil {
return err
}
return db.set(ctx, k, encoded)
}
// AddPulse saves new pulse data and updates index.
func (db *DB) AddPulse(ctx context.Context, pulse core.Pulse) error {
return db.Update(ctx, func(tx *TransactionManager) error {
var latest core.PulseNumber
latest, err := tx.GetLatestPulseNumber(ctx)
if err != nil && err != ErrNotFound {
return err
}
pulseRec := record.PulseRecord{
PrevPulse: latest,
Entropy: pulse.Entropy,
PredictedNextPulse: pulse.NextPulseNumber,
}
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.CborHandle{})
err = enc.Encode(pulseRec)
if err != nil {
return err
}
err = tx.set(ctx, prefixkey(scopeIDPulse, pulse.PulseNumber.Bytes()), buf.Bytes())
if err != nil {
return err
}
return tx.set(ctx, prefixkey(scopeIDSystem, []byte{sysLatestPulse}), pulse.PulseNumber.Bytes())
})
}
// GetPulse returns pulse for provided pulse number.
func (db *DB) GetPulse(ctx context.Context, num core.PulseNumber) (*record.PulseRecord, error) {
buf, err := db.get(ctx, prefixkey(scopeIDPulse, num.Bytes()))
if err != nil {
return nil, err
}
dec := codec.NewDecoder(bytes.NewReader(buf), &codec.CborHandle{})
var rec record.PulseRecord
err = dec.Decode(&rec)
if err != nil {
return nil, err
}
return &rec, nil
}
// GetLatestPulseNumber returns current pulse number.
func (db *DB) GetLatestPulseNumber(ctx context.Context) (core.PulseNumber, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
return tx.GetLatestPulseNumber(ctx)
}
// BeginTransaction opens a new transaction.
// All methods called on returned transaction manager will persist changes
// only after success on "Commit" call.
func (db *DB) BeginTransaction(update bool) *TransactionManager {
if update {
db.dropWG.Add(1)
}
return &TransactionManager{
db: db,
update: update,
txupdates: make(map[string]keyval),
}
}
// View accepts transaction function. All calls to received transaction manager will be consistent.
func (db *DB) View(ctx context.Context, fn func(*TransactionManager) error) error {
tx := db.BeginTransaction(false)
defer tx.Discard()
return fn(tx)
}
// Update accepts transaction function and commits changes. All calls to received transaction manager will be
// consistent and written tp disk or an error will be returned.
func (db *DB) Update(ctx context.Context, fn func(*TransactionManager) error) error {
tries := db.txretiries
var tx *TransactionManager
var err error
for {
tx = db.BeginTransaction(true)
err = fn(tx)
if err != nil {
break
}
err = tx.Commit()
if err == nil {
break
}
if err != badger.ErrConflict {
break
}
if tries < 1 {
if db.txretiries > 0 {
err = ErrConflictRetriesOver
} else {
err = ErrConflict
}
break
}
tries--
tx.Discard()
}
tx.Discard()
if err != nil {
inslogger.FromContext(ctx).Errorln("DB Update error:", err)
}
return err
}
// GetBadgerDB return badger.DB instance (for internal usage, like tests)
func (db *DB) GetBadgerDB() *badger.DB {
return db.db
}
// SetMessage persists message to the database
func (db *DB) SetMessage(ctx context.Context, pulseNumber core.PulseNumber, genericMessage core.Message) error {
messageBytes := message.ToBytes(genericMessage)
hw := db.PlatformCryptographyScheme.ReferenceHasher()
_, err := hw.Write(messageBytes)
if err != nil {
return err
}
hw.Sum(nil)
return db.set(
ctx,
prefixkey(scopeIDMessage, bytes.Join([][]byte{pulseNumber.Bytes(), hw.Sum(nil)}, nil)),
messageBytes,
)
}
// SetLocalData saves provided data to storage.
func (db *DB) SetLocalData(ctx context.Context, pulse core.PulseNumber, key []byte, data []byte) error {
return db.set(
ctx,
bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), key}, nil),
data,
)
}
// GetLocalData retrieves data from storage.
func (db *DB) GetLocalData(ctx context.Context, pulse core.PulseNumber, key []byte) ([]byte, error) {
return db.get(
ctx,
bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), key}, nil),
)
}
// IterateLocalData iterates over all record with specified prefix and calls handler with key and value of that record.
//
// The key will be returned without prefix (e.g. the remaining slice) and value will be returned as it was saved.
func (db *DB) IterateLocalData(ctx context.Context, pulse core.PulseNumber, prefix []byte, handler func(k, v []byte) error) error {
fullPrefix := bytes.Join([][]byte{{scopeIDLocal}, pulse.Bytes(), prefix}, nil)
return db.db.View(func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(fullPrefix); it.ValidForPrefix(fullPrefix); it.Next() {
key := it.Item().KeyCopy(nil)[len(fullPrefix):]
value, err := it.Item().ValueCopy(nil)
if err != nil {
return err
}
err = handler(key, value)
if err != nil {
return err
}
}
return nil
})
}
// SetActiveNodes saves active nodes for pulse in memory.
func (db *DB) Se | ulse core.PulseNumber, nodes []core.Node) error {
db.nodeHistoryLock.Lock()
defer db.nodeHistoryLock.Unlock()
if _, ok := db.nodeHistory[pulse]; ok {
return errors.New("node history override is forbidden")
}
db.nodeHistory[pulse] = nodes
return nil
}
// GetActiveNodes return active nodes for specified pulse.
func (db *DB) GetActiveNodes(pulse core.PulseNumber) ([]core.Node, error) {
nodes, ok := db.nodeHistory[pulse]
if !ok {
return nil, errors.New("no nodes for this pulse")
}
return nodes, nil
}
// get wraps matching transaction manager method.
func (db *DB) get(ctx context.Context, key []byte) ([]byte, error) {
tx := db.BeginTransaction(false)
defer tx.Discard()
return tx.get(ctx, key)
}
// set wraps matching transaction manager method.
func (db *DB) set(ctx context.Context, key, value []byte) error {
return db.Update(ctx, func(tx *TransactionManager) error {
return tx.set(ctx, key, value)
})
}
| tActiveNodes(p | identifier_name |
strutil.go | // Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package strutil
import (
"bytes"
"fmt"
"math/rand"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"unicode"
)
// Trim 两边裁剪 `s`, 如果不指定 `cutset`, 默认cutset=space
//
// Trim("trim ") => "trim"
//
// Trim(" this ") => "this"
//
// Trim("athisb", "abs") => "this"
func Trim(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimSpace(s)
}
return strings.Trim(s, cutset[0])
}
// TrimLeft 裁剪 `s` 左边, 如果不指定 `cutset`, 默认cutset=space
//
// TrimLeft("trim ") => "trim "
//
// TrimLeft(" this") => "this"
//
// TrimLeft("athisa", "a") => "thisa"
func TrimLeft(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimLeftFunc(s, unicode.IsSpace)
}
return strings.TrimLeft(s, cutset[0])
}
// TrimRight 裁剪 `s` 右边,如果不指定 `cutset`, 默认cutset=space
//
// TrimRight("trim ") => "trim"
//
// TrimRight(" this") => " this"
//
// TrimRight("athisa", "a") => "athis"
func TrimRight(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimRightFunc(s, unicode.IsSpace)
}
return strings.TrimRight(s, cutset[0])
}
// TrimSuffixes 裁剪 `s` 的后缀
//
// TrimSuffixes("test.go", ".go") => "test"
//
// TrimSuffixes("test.go", ".md", ".go", ".sh") => "test"
//
// TrimSuffixes("test.go.tmp", ".go", ".tmp") => "test.go"
func TrimSuffixes(s string, suffixes ...string) string {
originLen := len(s)
for i := range suffixes {
trimmed := strings.TrimSuffix(s, suffixes[i])
if len(trimmed) != originLen {
return trimmed
}
}
return s
}
// TrimPrefixes 裁剪 `s` 的前缀
//
// TrimPrefixes("/tmp/file", "/tmp") => "/file"
//
// TrimPrefixes("/tmp/tmp/file", "/tmp", "/tmp/tmp") => "/tmp/file"
func TrimPrefixes(s string, prefixes ...string) string {
originLen := len(s)
for i := range prefixes {
trimmed := strings.TrimPrefix(s, prefixes[i])
if len(trimmed) != originLen {
return trimmed
}
}
return s
}
// TrimSlice Trim 的 Slice 版本
//
// TrimSlice([]string{"trim ", " trim", " trim "}) => []string{"trim", "trim", "trim"}
func TrimSlice(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = Trim(ss[i], cutset...)
}
return r
}
// TrimSliceLeft TrimLeft 的 Slice 版本
//
// TrimSliceLeft([]string{"trim ", " trim", " trim "}) => []string{"trim ", "trim", "trim "}
func TrimSliceLeft(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimLeft(ss[i], cutset...)
}
return r
}
// TrimSliceRight TrimRight 的 Slice 版本
//
// TrimSliceRight([]string{"trim ", " trim", " trim "}) => []string{"trim", " trim", " trim"}
func TrimSliceRight(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimRight(ss[i], cutset...)
}
return r
}
// TrimSliceSuffixes TrimSuffixes 的 Slice 版本
//
// TrimSliceSuffixes([]string{"test.go", "test.go.tmp"}, ".go", ".tmp") => []string{"test", "test.go"}
func TrimSliceSuffixes(ss []string, suffixes ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimSuffixes(ss[i], suffixes...)
}
return r
}
// TrimSlicePrefixes TrimPrefixes 的 Slice 版本
//
// TrimSlicePrefixes([]string{"/tmp/file", "/tmp/tmp/file"}, "/tmp", "/tmp/tmp") => []string{"/file", "/tmp/file"}
func TrimSlicePrefixes(ss []string, prefixes ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimPrefixes(ss[i], prefixes...)
}
return r
}
// HasPrefixes `prefixes` 中是否存在 `s` 的前缀
//
// HasPrefixes("asd", "ddd", "uuu") => false
//
// HasPrefixes("asd", "sd", "as") => true
//
// HasPrefixes("asd", "asd") => true
func HasPrefixes(s string, prefixes ...string) bool {
for i := range prefixes {
if strings.HasPrefix(s, prefixes[i]) {
return true
}
}
return false
}
// HasSuffixes `suffixes` 中是否存在 `s` 的后缀
//
// HasSuffixes("asd", "ddd", "d") => true
//
// HasSuffixes("asd", "sd") => true
//
// HasSuffixes("asd", "iid", "as") => false
func HasSuffixes(s string, suffixes ...string) bool {
for i := range suffixes {
if strings.HasSuffix(s, suffixes[i]) {
return true
}
}
return false
}
var (
collapseWhitespaceRegex = regexp.MustCompile("[ \t\n\r]+")
)
// CollapseWhitespace 转化连续的 space 为 _一个_ 空格
//
// CollapseWhitespace("only one space") => "only one space"
//
// CollapseWhitespace("collapse \n all \t sorts of \r \n \r\n whitespace") => "collapse all sorts of whitespace"
func CollapseWhitespace(s string) string {
return collapseWhitespaceRegex.ReplaceAllString(s, " ")
}
// Center 居中 `s`
//
// Center("a", 5) => " a "
//
// Center("ab", 5) => " ab "
//
// Center("abc", 1) => "abc"
func Center(s string, length int) string {
minus := length - len(s)
if minus <= 0 {
return s
}
right := minus / 2
mod := minus % 2
return strings.Join([]string{Repeat(" ", right+mod), s, Repeat(" ", right)}, "")
}
// Truncate 截断 `s` 到 `length`-3 的长度,末尾增加 "..."
//
// Truncate("it is too long", 6) => "it ..."
//
// Truncate("it is too long", 13) => "it is too ..."
//
// Truncate("but it is not", 16) => "but it is not"
func Truncate(s string, length int) string {
if len(s) > length {
return s[:length-3] + "..."
}
return s
}
// Split 根据 `sep` 来切分 `s`, `omitEmptyOpt`=true 时,忽略结果中的空字符串
//
// Split("a|bc|12||3", "|") => []string{"a", "bc", "12", "", "3"}
//
// Split("a|bc|12||3", "|", true) => []string{"a", "bc", "12", "3"}
//
// Split("a,b,c", ":") => []string{"a,b,c"}
func Split(s string, sep string, omitEmptyOpt ...bool) []string {
var omitEmpty bool
if len(omitEmptyOpt) > 0 && omitEmptyOpt[0] {
omitEmpty = true
}
parts := strings.Split(s, sep)
if !omitEmpty {
return parts
}
result := []string{}
for _, v := range parts {
if v != "" {
result = append(result, v)
}
}
return result
}
// Splits 根据 `seps` 中的每个元素来切分 `s`, `omitEmptyOpt`=true 时,忽略结果中的空字符串
//
// Splits("a,bc,,12,3", []string{",", ","}) => []string{"a", "bc", "12", "", "3"}
//
// Splits("a,bc,,12,3", []string{",", ","}, true) => []string{"a", "bc", "12", "3"}
func Splits(s string, seps []string, omitEmptyOpt ...bool) []string {
if len(seps) == 0 {
return []string{s}
}
sep, seps := seps[0], seps[1:]
parts := Split(s, sep, omitEmptyOpt...)
result := []string{}
for _, part := range parts {
result = append(result, Splits(part, seps, omitEmptyOpt...)...)
}
return result
}
var (
linesRegex = regexp.MustCompile("\r\n|\n|\r")
)
// Lines 将 `s` 按 newline 切分成 string slice, omitEmptyOpt=true 时,忽略结果中的空字符串
//
// Lines("abc\ndef\nghi") => []string{"abc", "def", "ghi"}
//
// Lines("abc\rdef\rghi") => []string{"abc", "def", "ghi"}
//
// Lines("abc\r\ndef\r\nghi\n") => []string{"abc", "def", "ghi", ""}
//
// Lines("abc\r\ndef\r\nghi\n", true) => []string{"abc", "def", "ghi"}
func Lines(s string, omitEmptyOpt ...bool) []string {
lines := linesRegex.Split(s, -1)
if len(omitEmptyOpt) == 0 || !omitEmptyOpt[0] {
return lines
}
r := []string{}
for i := range lines {
if lines[i] != "" {
r = append(r, lines[i])
}
}
return r
}
// Repeat see also strings.Repeat
func Repeat(s string, count int) string {
return strings.Repeat(s, count)
}
// Concat 合并字符串
func Concat(s ...string) string {
return strings.Join(s, "")
}
// Join see also strings.Join,
// omitEmptyOpt = true 时,不拼接 `ss` 中空字符串
func Join(ss []string, sep string, omitEmptyOpt ...bool) string {
if len(omitEmptyOpt) == 0 || !omitEmptyOpt[0] {
return strings.Join(ss, sep)
}
r := []string{}
for i := range ss {
if ss[i] != "" {
r = append(r, ss[i])
}
}
return strings.Join(r, sep)
}
// JoinPath see also filepath.Join
func JoinPath(ss ...string) string {
return filepath.Join(ss...)
}
// ToLower see also strings.ToLower
func ToLower(s string) string {
return strings.ToLower(s)
}
// ToUpper see also strings.ToUpper
func ToUpper(s string) string {
return strings.ToUpper(s)
}
// ToTitle see also strings.ToTitle
func ToTitle(s string) string {
return strings.ToTitle(s)
}
// Title see also strings.Title
func Title(s string) string {
return strings.Title(s)
}
// Contains 检查 `s` 中是否存在 `substrs` 中的某个字符串
//
// Contains("test contains.", "t c", "iii") => true
//
// Contains("test contains.", "t cc", "test ") => false
//
// Contains("test contains.", "iii", "uuu", "ont") => true
func Contains(s string, substrs ...string) bool {
for i := range substrs {
if strings.Contains(s, substrs[i]) {
return true
}
}
return false
}
// Equal 判断 `s` 和 `other` 是否相同,如果 ignorecase = true, 忽略大小写
//
// Equal("aaa", "AAA") => false
//
// Equal("aaa", "AaA", true) => true
func Equal[T ~string, S ~string](s S, t T, ignorecase ...bool) bool {
return string(s) == string(t) || (len(ignorecase) > 0 && ignorecase[0] && strings.EqualFold(string(s), string(t)))
}
// Atoi64 parse string to int64
//
// Atoi64("6") => (6, nil)
func Atoi64(s string) (int64, error) {
return strconv.ParseInt(s, 10, 64)
}
// Map 对 `ss` 中的每个元素执行 `f`, 返回f返回的结果列表
//
// Map([]string{"1", "2", "3"}, func(s string) string {return Concat("X", s)}) => []string{"X1", "X2", "X3"}
//
// Map([]string{"Aa", "bB", "cc"}, ToLower, Title) => []string{"Aa", "Bb", "Cc"}
func Map(ss []string, fs ...func(s string) string) []string {
r := []string{}
r = append(r, ss...)
r2 := []string{}
for _, f := range fs {
for i := range r {
r2 = append(r2, f(r[i]))
}
r = r2[:]
r2 = []string{}
}
return r
}
// DedupSlice 返回不含重复元素的 slice,各元素按第一次出现顺序排序。如果 omitEmptyOpt = true, 忽略空字符串
//
// DedupSlice([]string{"c", "", "b", "a", "", "a", "b", "c", "", "d"}) => []string{"c", "", "b", "a", "d"}
//
// DedupSlice([]string{"c", "", "b", "a", "", "a", "b", "c", "", "d"}, true) => []string{"c", "b", "a", "d"}
func DedupSlice(ss []string, omitEmptyOpt .. | omitEmpty = true
}
result := make([]string, 0, len(ss))
m := make(map[string]struct{}, len(ss))
for _, s := range ss {
if s == "" && omitEmpty {
continue
}
if _, ok := m[s]; ok {
continue
}
result = append(result, s)
m[s] = struct{}{}
}
return result
}
// DedupUint64Slice 返回不含重复元素的 slice,各元素按第一次出现顺序排序。
//
// DedupUint64Slice([]uint64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}) => []uint64{3, 1, 2, 0}
//
// DedupUint64Slice([]uint64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}, true) => []uint64{3, 1, 2}
func DedupUint64Slice(ii []uint64, omitZeroOpt ...bool) []uint64 {
var omitZero bool
if len(omitZeroOpt) > 0 && omitZeroOpt[0] {
omitZero = true
}
result := make([]uint64, 0, len(ii))
m := make(map[uint64]struct{}, len(ii))
for _, i := range ii {
if i == 0 && omitZero {
continue
}
if _, ok := m[i]; ok {
continue
}
result = append(result, i)
m[i] = struct{}{}
}
return result
}
// DedupInt64Slice ([]int64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}, true) => []int64{3, 1, 2}
func DedupInt64Slice(ii []int64, omitZeroOpt ...bool) []int64 {
var omitZero bool
if len(omitZeroOpt) > 0 && omitZeroOpt[0] {
omitZero = true
}
result := make([]int64, 0, len(ii))
m := make(map[int64]struct{}, len(ii))
for _, i := range ii {
if i == 0 && omitZero {
continue
}
if _, ok := m[i]; ok {
continue
}
result = append(result, i)
m[i] = struct{}{}
}
return result
}
// IntersectionUin64Slice 返回两个 uint64 slice 的交集,复杂度 O(m * n),待优化
//
// IntersectionUin64Slice([]uint64{3, 1, 2, 0}, []uint64{0, 3}) => []uint64{3, 0}
//
// IntersectionUin64Slice([]uint64{3, 1, 2, 1, 0}, []uint64{1, 2, 0}) => []uint64{1, 2, 1, 0}
func IntersectionUin64Slice(s1, s2 []uint64) []uint64 {
if len(s1) == 0 {
return nil
}
if len(s2) == 0 {
return s1
}
var result []uint64
for _, i := range s1 {
for _, j := range s2 {
if i == j {
result = append(result, i)
break
}
}
}
return result
}
// IntersectionIn64Slice 返回两个 int64 slice 的交集,复杂度 O(m * log(m))
//
// IntersectionIn64Slice([]int64{3, 1, 2, 0}, []int64{0, 3}) => []int64{3, 0}
//
// IntersectionIn64Slice([]int64{3, 1, 2, 1, 0}, []int64{1, 2, 0}) => []int64{1, 2, 1, 0}
func IntersectionInt64Slice(s1, s2 []int64) []int64 {
m := make(map[int64]bool)
nn := make([]int64, 0)
for _, v := range s1 {
m[v] = true
}
for _, v := range s2 {
if _, ok := m[v]; ok {
nn = append(nn, v)
}
}
return nn
}
// Remove 删除 slice 在 removes 中存在的元素。
//
// RemoveSlice([]string{"a", "b", "c", "a"}, "a") => []string{"b", "c"})
//
// RemoveSlice([]string{"a", "b", "c", "a"}, "b", "c") => []string{"a", "a"})
func RemoveSlice(ss []string, removes ...string) []string {
m := make(map[string]struct{})
for _, rm := range removes {
m[rm] = struct{}{}
}
result := make([]string, 0, len(ss))
for _, s := range ss {
if _, ok := m[s]; ok {
continue
}
result = append(result, s)
}
return result
}
func Exist(slice []string, val string) bool {
for _, v := range slice {
if v == val {
return true
}
}
return false
}
// NormalizeNewlines normalizes \r\n (windows) and \r (mac)
// into \n (unix).
//
// There are 3 ways to represent a newline.
//
// Unix: using single character LF, which is byte 10 (0x0a), represented as “” in Go string literal.
// Windows: using 2 characters: CR LF, which is bytes 13 10 (0x0d, 0x0a), represented as “” in Go string literal.
// Mac OS: using 1 character CR (byte 13 (0x0d)), represented as “” in Go string literal. This is the least popular.
func NormalizeNewlines(d []byte) []byte {
// replace CR LF \r\n (windows) with LF \n (unix)
d = bytes.Replace(d, []byte{13, 10}, []byte{10}, -1)
// replace CF \r (mac) with LF \n (unix)
d = bytes.Replace(d, []byte{13}, []byte{10}, -1)
return d
}
func SplitIfEmptyString(s, sep string) []string {
if s == "" {
return []string{}
}
return strings.SplitN(s, sep, -1)
}
var fontKinds = [][]int{{10, 48}, {26, 97}, {26, 65}}
// RandStr 获取随机字符串
func RandStr(size int) string {
result := make([]byte, size)
rand.Seed(time.Now().UnixNano())
for i := 0; i < size; i++ {
ikind := rand.Intn(3)
scope, base := fontKinds[ikind][0], fontKinds[ikind][1]
result[i] = uint8(base + rand.Intn(scope))
}
return string(result)
}
// ParseVersion 序列化版本 "1.05.1" --> "1.5.1",
func ParseVersion(version string) string {
// ISO/IEC 14651:2011
const maxByte = 1<<8 - 1
vo := make([]byte, 0, len(version)+8)
j := -1
for i := 0; i < len(version); i++ {
b := version[i]
if '0' > b || b > '9' {
vo = append(vo, b)
j = -1
continue
}
if j == -1 {
vo = append(vo, 0x00)
j = len(vo) - 1
}
if vo[j] == 1 && vo[j+1] == '0' {
vo[j+1] = b
continue
}
if vo[j]+1 > maxByte {
panic("VersionOrdinal: invalid version")
}
vo = append(vo, b)
vo[j]++
}
return string(vo)
}
// FlatErrors 将 errors 打平为一个 error
func FlatErrors(errs []error, sep string) error {
var errMsgs []string
for _, err := range errs {
errMsgs = append(errMsgs, err.Error())
}
return fmt.Errorf("%s", Join(errMsgs, sep, true))
}
func ContainsOrEmpty(source, target string) bool {
return target == "" || strings.Contains(source, target)
}
func FirstNoneEmpty(strs ...string) string {
for _, str := range strs {
if len(str) > 0 {
return str
}
}
return ""
}
func HandleQuotes(data []byte, quotes [2]byte, handler func([]byte)) {
var heap []byte
var buf []byte
var left, right = quotes[0], quotes[1]
for _, c := range data {
if c == left {
buf = append(buf, c)
heap = append(heap, c)
continue
}
if len(heap) == 0 {
if len(buf) != 0 {
handler(buf)
buf = nil
}
continue
}
buf = append(buf, c)
if c == right && heap[len(heap)-1] == left {
heap = heap[:len(heap)-1]
continue
}
}
}
| .bool) []string {
var omitEmpty bool
if len(omitEmptyOpt) > 0 && omitEmptyOpt[0] {
| conditional_block |
strutil.go | // Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package strutil
import (
"bytes"
"fmt"
"math/rand"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"unicode"
)
// Trim 两边裁剪 `s`, 如果不指定 `cutset`, 默认cutset=space
//
// Trim("trim ") => "trim"
//
// Trim(" this ") => "this"
//
// Trim("athisb", "abs") => "this"
func Trim(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimSpace(s)
}
return strings.Trim(s, cutset[0])
}
// TrimLeft 裁剪 `s` 左边, 如果不指定 `cutset`, 默认cutset=space
//
// TrimLeft("trim ") => "trim "
//
// TrimLeft(" this") => "this"
//
// TrimLeft("athisa", "a") => "thisa"
func TrimLeft(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimLeftFunc(s, unicode.IsSpace)
}
return strings.TrimLeft(s, cutset[0])
}
// TrimRight 裁剪 `s` 右边,如果不指定 `cutset`, 默认cutset=space
//
// TrimRight("trim ") => "trim"
//
// TrimRight(" this") => " this"
//
// TrimRight("athisa", "a") => "athis"
func TrimRight(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimRightFunc(s, unicode.IsSpace)
}
return strings.TrimRight(s, cutset[0])
}
// TrimSuffixes 裁剪 `s` 的后缀
//
// TrimSuffixes("test.go", ".go") => "test"
//
// TrimSuffixes("test.go", ".md", ".go", ".sh") => "test"
//
// TrimSuffixes("test.go.tmp", ".go", ".tmp") => "test.go"
func TrimSuffixes(s string, suffixes ...string) string {
originLen := len(s)
for i := range suffixes {
trimmed := strings.TrimSuffix(s, suffixes[i])
if len(trimmed) != originLen {
return trimmed
}
}
return s
}
// TrimPrefixes 裁剪 `s` 的前缀
//
// TrimPrefixes("/tmp/file", "/tmp") => "/file"
//
// TrimPrefixes("/tmp/tmp/file", "/tmp", "/tmp/tmp") => "/tmp/file"
func TrimPrefixes(s string, prefixes ...string) string {
originLen := len(s)
for i := range prefixes {
trimmed := strings.TrimPrefix(s, prefixes[i])
if len(trimmed) != originLen {
return trimmed
}
}
return s
}
// TrimSlice Trim 的 Slice 版本
//
// TrimSlice([]string{"trim ", " trim", " trim "}) => []string{"trim", "trim", "trim"}
func TrimSlice(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = Trim(ss[i], cutset...)
}
return r
}
// TrimSliceLeft TrimLeft 的 Slice 版本
//
// TrimSliceLeft([]string{"trim ", " trim", " trim "}) => []string{"trim ", "trim", "trim "}
func TrimSliceLeft(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimLeft(ss[i], cutset...)
}
return r
}
// TrimSliceRight TrimRight 的 Slice 版本
//
// TrimSliceRight([]string{"trim ", " trim", " trim "}) => []string{"trim", " trim", " trim"}
func TrimSliceRight(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimRight(ss[i], cutset...)
}
return r
}
// TrimSliceSuffixes TrimSuffixes 的 Slice 版本
//
// TrimSliceSuffixes([]string{"test.go", "test.go.tmp"}, ".go", ".tmp") => []string{"test", "test.go"}
func TrimSliceSuffixes(ss []string, suffixes ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimSuffixes(ss[i], suffixes...)
}
return r
}
// TrimSlicePrefixes TrimPrefixes 的 Slice 版本
//
// TrimSlicePrefixes([]string{"/tmp/file", "/tmp/tmp/file"}, "/tmp", "/tmp/tmp") => []string{"/file", "/tmp/file"}
func TrimSlicePrefixes(ss []string, prefixes ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimPrefixes(ss[i], prefixes...)
}
return r
}
// HasPrefixes `prefixes` 中是否存在 `s` 的前缀
//
// HasPrefixes("asd", "ddd", "uuu") => false
//
// HasPrefixes("asd", "sd", "as") => true
//
// HasPrefixes("asd", "asd") => true
func HasPrefixes(s string, prefixes ...string) bool {
for i := range prefixes {
if strings.HasPrefix(s, prefixes[i]) {
return true
}
}
return false
}
// HasSuffixes `suffixes` 中是否存在 `s` 的后缀
//
// HasSuffixes("asd", "ddd", "d") => true
//
// HasSuffixes("asd", "sd") => true
//
// HasSuffixes("asd", "iid", "as") => false
func HasSuffixes(s string, suffixes ...string) bool {
for i := range suffixes {
if strings.HasSuffix(s, suffixes[i]) {
return true
}
}
return false
}
var (
collapseWhitespaceRegex = regexp.MustCompile("[ \t\n\r]+")
)
// CollapseWhitespace 转化连续的 space 为 _一个_ 空格
//
// CollapseWhitespace("only one space") => "only one space"
//
// CollapseWhitespace("collapse \n all \t sorts of \r \n \r\n whitespace") => "collapse all sorts of whitespace"
func CollapseWhitespace(s string) string {
return collapseWhitespaceRegex.ReplaceAllString(s, " ")
}
// Center 居中 `s`
//
// Center("a", 5) => " a "
//
// Center("ab", 5) => " ab "
//
// Center("abc", 1) => "abc"
func Center(s string, length int) string {
minus := length - len(s)
if minus <= 0 {
return s
}
right := minus / 2
mod := minus % 2
return strings.Join([]string{Repeat(" ", right+mod), s, Repeat(" ", right)}, "")
}
// Truncate 截断 `s` 到 `length`-3 的长度,末尾增加 "..."
//
// Truncate("it is too long", 6) => "it ..."
//
// Truncate("it is too long", 13) => "it is too ..."
//
// Truncate("but it is not", 16) => "but it is not"
func Truncate(s string, length int) string {
if len(s) > length {
return s[:length-3] + "..."
}
return s
}
// Split 根据 `sep` 来切分 `s`, `omitEmptyOpt`=true 时,忽略结果中的空字符串
//
// Split("a|bc|12||3", "|") => []string{"a", "bc", "12", "", "3"}
//
// Split("a|bc|12||3", "|", true) => []string{"a", "bc", "12", "3"}
//
// Split("a,b,c", ":") => []string{"a,b,c"}
func Split(s string, sep string, omitEmptyOpt ...bool) []string {
var omitEmpty bool
if len(omitEmptyOpt) > 0 && omitEmptyOpt[0] {
omitEmpty = true
}
parts := strings.Split(s, sep)
if !omitEmpty {
return parts
}
result := []string{}
for _, v := range parts {
if v != "" {
result = append(result, v)
}
}
return result
}
// Splits 根据 `seps` 中的每个元素来切分 `s`, `omitEmptyOpt`=true 时,忽略结果中的空字符串
//
// Splits("a,bc,,12,3", []string{",", ","}) => []string{"a", "bc", "12", "", "3"}
//
// Splits("a,bc,,12,3", []string{",", ","}, true) => []string{"a", "bc", "12", "3"}
func Splits(s string, seps []string, omitEmptyOpt ...bool) []string {
if len(seps) == 0 {
return []string{s}
}
sep, seps := seps[0], seps[1:]
parts := Split(s, sep, omitEmptyOpt...)
result := []string{}
for _, part := range parts {
result = append(result, Splits(part, seps, omitEmptyOpt...)...)
}
return result
}
var (
linesRegex = regexp.MustCompile("\r\n|\n|\r")
)
// Lines 将 `s` 按 newline 切分成 string slice, omitEmptyOpt=true 时,忽略结果中的空字符串
//
// Lines("abc\ndef\nghi") => []string{"abc", "def", "ghi"}
//
// Lines("abc\rdef\rghi") => []string{"abc", "def", "ghi"}
//
// Lines("abc\r\ndef\r\nghi\n") => []string{"abc", "def", "ghi", ""}
//
// Lines("abc\r\ndef\r\nghi\n", true) => []string{"abc", "def", "ghi"}
func Lines(s string, omitEmptyOpt ...bool) []string {
lines := linesRegex.Split(s, -1)
if len(omitEmptyOpt) == 0 || !omitEmptyOpt[0] {
return lines
}
r := []string{}
for i := range lines {
if lines[i] != "" {
r = append(r, lines[i])
}
}
return r
}
// Repeat see also strings.Repeat
func Repeat(s string, count int) string {
return strings.Repeat(s, cou | ool) string {
if len(omitEmptyOpt) == 0 || !omitEmptyOpt[0] {
return strings.Join(ss, sep)
}
r := []string{}
for i := range ss {
if ss[i] != "" {
r = append(r, ss[i])
}
}
return strings.Join(r, sep)
}
// JoinPath see also filepath.Join
func JoinPath(ss ...string) string {
return filepath.Join(ss...)
}
// ToLower see also strings.ToLower
func ToLower(s string) string {
return strings.ToLower(s)
}
// ToUpper see also strings.ToUpper
func ToUpper(s string) string {
return strings.ToUpper(s)
}
// ToTitle see also strings.ToTitle
func ToTitle(s string) string {
return strings.ToTitle(s)
}
// Title see also strings.Title
func Title(s string) string {
return strings.Title(s)
}
// Contains 检查 `s` 中是否存在 `substrs` 中的某个字符串
//
// Contains("test contains.", "t c", "iii") => true
//
// Contains("test contains.", "t cc", "test ") => false
//
// Contains("test contains.", "iii", "uuu", "ont") => true
func Contains(s string, substrs ...string) bool {
for i := range substrs {
if strings.Contains(s, substrs[i]) {
return true
}
}
return false
}
// Equal 判断 `s` 和 `other` 是否相同,如果 ignorecase = true, 忽略大小写
//
// Equal("aaa", "AAA") => false
//
// Equal("aaa", "AaA", true) => true
func Equal[T ~string, S ~string](s S, t T, ignorecase ...bool) bool {
return string(s) == string(t) || (len(ignorecase) > 0 && ignorecase[0] && strings.EqualFold(string(s), string(t)))
}
// Atoi64 parse string to int64
//
// Atoi64("6") => (6, nil)
func Atoi64(s string) (int64, error) {
return strconv.ParseInt(s, 10, 64)
}
// Map 对 `ss` 中的每个元素执行 `f`, 返回f返回的结果列表
//
// Map([]string{"1", "2", "3"}, func(s string) string {return Concat("X", s)}) => []string{"X1", "X2", "X3"}
//
// Map([]string{"Aa", "bB", "cc"}, ToLower, Title) => []string{"Aa", "Bb", "Cc"}
func Map(ss []string, fs ...func(s string) string) []string {
r := []string{}
r = append(r, ss...)
r2 := []string{}
for _, f := range fs {
for i := range r {
r2 = append(r2, f(r[i]))
}
r = r2[:]
r2 = []string{}
}
return r
}
// DedupSlice 返回不含重复元素的 slice,各元素按第一次出现顺序排序。如果 omitEmptyOpt = true, 忽略空字符串
//
// DedupSlice([]string{"c", "", "b", "a", "", "a", "b", "c", "", "d"}) => []string{"c", "", "b", "a", "d"}
//
// DedupSlice([]string{"c", "", "b", "a", "", "a", "b", "c", "", "d"}, true) => []string{"c", "b", "a", "d"}
func DedupSlice(ss []string, omitEmptyOpt ...bool) []string {
var omitEmpty bool
if len(omitEmptyOpt) > 0 && omitEmptyOpt[0] {
omitEmpty = true
}
result := make([]string, 0, len(ss))
m := make(map[string]struct{}, len(ss))
for _, s := range ss {
if s == "" && omitEmpty {
continue
}
if _, ok := m[s]; ok {
continue
}
result = append(result, s)
m[s] = struct{}{}
}
return result
}
// DedupUint64Slice 返回不含重复元素的 slice,各元素按第一次出现顺序排序。
//
// DedupUint64Slice([]uint64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}) => []uint64{3, 1, 2, 0}
//
// DedupUint64Slice([]uint64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}, true) => []uint64{3, 1, 2}
func DedupUint64Slice(ii []uint64, omitZeroOpt ...bool) []uint64 {
var omitZero bool
if len(omitZeroOpt) > 0 && omitZeroOpt[0] {
omitZero = true
}
result := make([]uint64, 0, len(ii))
m := make(map[uint64]struct{}, len(ii))
for _, i := range ii {
if i == 0 && omitZero {
continue
}
if _, ok := m[i]; ok {
continue
}
result = append(result, i)
m[i] = struct{}{}
}
return result
}
// DedupInt64Slice ([]int64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}, true) => []int64{3, 1, 2}
func DedupInt64Slice(ii []int64, omitZeroOpt ...bool) []int64 {
var omitZero bool
if len(omitZeroOpt) > 0 && omitZeroOpt[0] {
omitZero = true
}
result := make([]int64, 0, len(ii))
m := make(map[int64]struct{}, len(ii))
for _, i := range ii {
if i == 0 && omitZero {
continue
}
if _, ok := m[i]; ok {
continue
}
result = append(result, i)
m[i] = struct{}{}
}
return result
}
// IntersectionUin64Slice 返回两个 uint64 slice 的交集,复杂度 O(m * n),待优化
//
// IntersectionUin64Slice([]uint64{3, 1, 2, 0}, []uint64{0, 3}) => []uint64{3, 0}
//
// IntersectionUin64Slice([]uint64{3, 1, 2, 1, 0}, []uint64{1, 2, 0}) => []uint64{1, 2, 1, 0}
func IntersectionUin64Slice(s1, s2 []uint64) []uint64 {
if len(s1) == 0 {
return nil
}
if len(s2) == 0 {
return s1
}
var result []uint64
for _, i := range s1 {
for _, j := range s2 {
if i == j {
result = append(result, i)
break
}
}
}
return result
}
// IntersectionIn64Slice 返回两个 int64 slice 的交集,复杂度 O(m * log(m))
//
// IntersectionIn64Slice([]int64{3, 1, 2, 0}, []int64{0, 3}) => []int64{3, 0}
//
// IntersectionIn64Slice([]int64{3, 1, 2, 1, 0}, []int64{1, 2, 0}) => []int64{1, 2, 1, 0}
func IntersectionInt64Slice(s1, s2 []int64) []int64 {
m := make(map[int64]bool)
nn := make([]int64, 0)
for _, v := range s1 {
m[v] = true
}
for _, v := range s2 {
if _, ok := m[v]; ok {
nn = append(nn, v)
}
}
return nn
}
// Remove 删除 slice 在 removes 中存在的元素。
//
// RemoveSlice([]string{"a", "b", "c", "a"}, "a") => []string{"b", "c"})
//
// RemoveSlice([]string{"a", "b", "c", "a"}, "b", "c") => []string{"a", "a"})
func RemoveSlice(ss []string, removes ...string) []string {
m := make(map[string]struct{})
for _, rm := range removes {
m[rm] = struct{}{}
}
result := make([]string, 0, len(ss))
for _, s := range ss {
if _, ok := m[s]; ok {
continue
}
result = append(result, s)
}
return result
}
func Exist(slice []string, val string) bool {
for _, v := range slice {
if v == val {
return true
}
}
return false
}
// NormalizeNewlines normalizes \r\n (windows) and \r (mac)
// into \n (unix).
//
// There are 3 ways to represent a newline.
//
// Unix: using single character LF, which is byte 10 (0x0a), represented as “” in Go string literal.
// Windows: using 2 characters: CR LF, which is bytes 13 10 (0x0d, 0x0a), represented as “” in Go string literal.
// Mac OS: using 1 character CR (byte 13 (0x0d)), represented as “” in Go string literal. This is the least popular.
func NormalizeNewlines(d []byte) []byte {
// replace CR LF \r\n (windows) with LF \n (unix)
d = bytes.Replace(d, []byte{13, 10}, []byte{10}, -1)
// replace CF \r (mac) with LF \n (unix)
d = bytes.Replace(d, []byte{13}, []byte{10}, -1)
return d
}
func SplitIfEmptyString(s, sep string) []string {
if s == "" {
return []string{}
}
return strings.SplitN(s, sep, -1)
}
var fontKinds = [][]int{{10, 48}, {26, 97}, {26, 65}}
// RandStr 获取随机字符串
func RandStr(size int) string {
result := make([]byte, size)
rand.Seed(time.Now().UnixNano())
for i := 0; i < size; i++ {
ikind := rand.Intn(3)
scope, base := fontKinds[ikind][0], fontKinds[ikind][1]
result[i] = uint8(base + rand.Intn(scope))
}
return string(result)
}
// ParseVersion 序列化版本 "1.05.1" --> "1.5.1",
func ParseVersion(version string) string {
// ISO/IEC 14651:2011
const maxByte = 1<<8 - 1
vo := make([]byte, 0, len(version)+8)
j := -1
for i := 0; i < len(version); i++ {
b := version[i]
if '0' > b || b > '9' {
vo = append(vo, b)
j = -1
continue
}
if j == -1 {
vo = append(vo, 0x00)
j = len(vo) - 1
}
if vo[j] == 1 && vo[j+1] == '0' {
vo[j+1] = b
continue
}
if vo[j]+1 > maxByte {
panic("VersionOrdinal: invalid version")
}
vo = append(vo, b)
vo[j]++
}
return string(vo)
}
// FlatErrors 将 errors 打平为一个 error
func FlatErrors(errs []error, sep string) error {
var errMsgs []string
for _, err := range errs {
errMsgs = append(errMsgs, err.Error())
}
return fmt.Errorf("%s", Join(errMsgs, sep, true))
}
func ContainsOrEmpty(source, target string) bool {
return target == "" || strings.Contains(source, target)
}
func FirstNoneEmpty(strs ...string) string {
for _, str := range strs {
if len(str) > 0 {
return str
}
}
return ""
}
func HandleQuotes(data []byte, quotes [2]byte, handler func([]byte)) {
var heap []byte
var buf []byte
var left, right = quotes[0], quotes[1]
for _, c := range data {
if c == left {
buf = append(buf, c)
heap = append(heap, c)
continue
}
if len(heap) == 0 {
if len(buf) != 0 {
handler(buf)
buf = nil
}
continue
}
buf = append(buf, c)
if c == right && heap[len(heap)-1] == left {
heap = heap[:len(heap)-1]
continue
}
}
}
| nt)
}
// Concat 合并字符串
func Concat(s ...string) string {
return strings.Join(s, "")
}
// Join see also strings.Join,
// omitEmptyOpt = true 时,不拼接 `ss` 中空字符串
func Join(ss []string, sep string, omitEmptyOpt ...b | identifier_body |
strutil.go | // Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package strutil
import (
"bytes"
"fmt"
"math/rand"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"unicode"
)
// Trim 两边裁剪 `s`, 如果不指定 `cutset`, 默认cutset=space
//
// Trim("trim ") => "trim"
//
// Trim(" this ") => "this"
//
// Trim("athisb", "abs") => "this"
func Trim(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimSpace(s)
}
return strings.Trim(s, cutset[0])
}
// TrimLeft 裁剪 `s` 左边, 如果不指定 `cutset`, 默认cutset=space
//
// TrimLeft("trim ") => "trim "
//
// TrimLeft(" this") => "this"
//
// TrimLeft("athisa", "a") => "thisa"
func TrimLeft(s string, cutset ...string) string | n(cutset) == 0 {
return strings.TrimLeftFunc(s, unicode.IsSpace)
}
return strings.TrimLeft(s, cutset[0])
}
// TrimRight 裁剪 `s` 右边,如果不指定 `cutset`, 默认cutset=space
//
// TrimRight("trim ") => "trim"
//
// TrimRight(" this") => " this"
//
// TrimRight("athisa", "a") => "athis"
func TrimRight(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimRightFunc(s, unicode.IsSpace)
}
return strings.TrimRight(s, cutset[0])
}
// TrimSuffixes 裁剪 `s` 的后缀
//
// TrimSuffixes("test.go", ".go") => "test"
//
// TrimSuffixes("test.go", ".md", ".go", ".sh") => "test"
//
// TrimSuffixes("test.go.tmp", ".go", ".tmp") => "test.go"
func TrimSuffixes(s string, suffixes ...string) string {
originLen := len(s)
for i := range suffixes {
trimmed := strings.TrimSuffix(s, suffixes[i])
if len(trimmed) != originLen {
return trimmed
}
}
return s
}
// TrimPrefixes 裁剪 `s` 的前缀
//
// TrimPrefixes("/tmp/file", "/tmp") => "/file"
//
// TrimPrefixes("/tmp/tmp/file", "/tmp", "/tmp/tmp") => "/tmp/file"
func TrimPrefixes(s string, prefixes ...string) string {
originLen := len(s)
for i := range prefixes {
trimmed := strings.TrimPrefix(s, prefixes[i])
if len(trimmed) != originLen {
return trimmed
}
}
return s
}
// TrimSlice Trim 的 Slice 版本
//
// TrimSlice([]string{"trim ", " trim", " trim "}) => []string{"trim", "trim", "trim"}
func TrimSlice(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = Trim(ss[i], cutset...)
}
return r
}
// TrimSliceLeft TrimLeft 的 Slice 版本
//
// TrimSliceLeft([]string{"trim ", " trim", " trim "}) => []string{"trim ", "trim", "trim "}
func TrimSliceLeft(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimLeft(ss[i], cutset...)
}
return r
}
// TrimSliceRight TrimRight 的 Slice 版本
//
// TrimSliceRight([]string{"trim ", " trim", " trim "}) => []string{"trim", " trim", " trim"}
func TrimSliceRight(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimRight(ss[i], cutset...)
}
return r
}
// TrimSliceSuffixes TrimSuffixes 的 Slice 版本
//
// TrimSliceSuffixes([]string{"test.go", "test.go.tmp"}, ".go", ".tmp") => []string{"test", "test.go"}
func TrimSliceSuffixes(ss []string, suffixes ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimSuffixes(ss[i], suffixes...)
}
return r
}
// TrimSlicePrefixes TrimPrefixes 的 Slice 版本
//
// TrimSlicePrefixes([]string{"/tmp/file", "/tmp/tmp/file"}, "/tmp", "/tmp/tmp") => []string{"/file", "/tmp/file"}
func TrimSlicePrefixes(ss []string, prefixes ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimPrefixes(ss[i], prefixes...)
}
return r
}
// HasPrefixes `prefixes` 中是否存在 `s` 的前缀
//
// HasPrefixes("asd", "ddd", "uuu") => false
//
// HasPrefixes("asd", "sd", "as") => true
//
// HasPrefixes("asd", "asd") => true
func HasPrefixes(s string, prefixes ...string) bool {
for i := range prefixes {
if strings.HasPrefix(s, prefixes[i]) {
return true
}
}
return false
}
// HasSuffixes `suffixes` 中是否存在 `s` 的后缀
//
// HasSuffixes("asd", "ddd", "d") => true
//
// HasSuffixes("asd", "sd") => true
//
// HasSuffixes("asd", "iid", "as") => false
func HasSuffixes(s string, suffixes ...string) bool {
for i := range suffixes {
if strings.HasSuffix(s, suffixes[i]) {
return true
}
}
return false
}
var (
collapseWhitespaceRegex = regexp.MustCompile("[ \t\n\r]+")
)
// CollapseWhitespace 转化连续的 space 为 _一个_ 空格
//
// CollapseWhitespace("only one space") => "only one space"
//
// CollapseWhitespace("collapse \n all \t sorts of \r \n \r\n whitespace") => "collapse all sorts of whitespace"
func CollapseWhitespace(s string) string {
return collapseWhitespaceRegex.ReplaceAllString(s, " ")
}
// Center 居中 `s`
//
// Center("a", 5) => " a "
//
// Center("ab", 5) => " ab "
//
// Center("abc", 1) => "abc"
func Center(s string, length int) string {
minus := length - len(s)
if minus <= 0 {
return s
}
right := minus / 2
mod := minus % 2
return strings.Join([]string{Repeat(" ", right+mod), s, Repeat(" ", right)}, "")
}
// Truncate 截断 `s` 到 `length`-3 的长度,末尾增加 "..."
//
// Truncate("it is too long", 6) => "it ..."
//
// Truncate("it is too long", 13) => "it is too ..."
//
// Truncate("but it is not", 16) => "but it is not"
func Truncate(s string, length int) string {
if len(s) > length {
return s[:length-3] + "..."
}
return s
}
// Split 根据 `sep` 来切分 `s`, `omitEmptyOpt`=true 时,忽略结果中的空字符串
//
// Split("a|bc|12||3", "|") => []string{"a", "bc", "12", "", "3"}
//
// Split("a|bc|12||3", "|", true) => []string{"a", "bc", "12", "3"}
//
// Split("a,b,c", ":") => []string{"a,b,c"}
func Split(s string, sep string, omitEmptyOpt ...bool) []string {
var omitEmpty bool
if len(omitEmptyOpt) > 0 && omitEmptyOpt[0] {
omitEmpty = true
}
parts := strings.Split(s, sep)
if !omitEmpty {
return parts
}
result := []string{}
for _, v := range parts {
if v != "" {
result = append(result, v)
}
}
return result
}
// Splits 根据 `seps` 中的每个元素来切分 `s`, `omitEmptyOpt`=true 时,忽略结果中的空字符串
//
// Splits("a,bc,,12,3", []string{",", ","}) => []string{"a", "bc", "12", "", "3"}
//
// Splits("a,bc,,12,3", []string{",", ","}, true) => []string{"a", "bc", "12", "3"}
func Splits(s string, seps []string, omitEmptyOpt ...bool) []string {
if len(seps) == 0 {
return []string{s}
}
sep, seps := seps[0], seps[1:]
parts := Split(s, sep, omitEmptyOpt...)
result := []string{}
for _, part := range parts {
result = append(result, Splits(part, seps, omitEmptyOpt...)...)
}
return result
}
var (
linesRegex = regexp.MustCompile("\r\n|\n|\r")
)
// Lines 将 `s` 按 newline 切分成 string slice, omitEmptyOpt=true 时,忽略结果中的空字符串
//
// Lines("abc\ndef\nghi") => []string{"abc", "def", "ghi"}
//
// Lines("abc\rdef\rghi") => []string{"abc", "def", "ghi"}
//
// Lines("abc\r\ndef\r\nghi\n") => []string{"abc", "def", "ghi", ""}
//
// Lines("abc\r\ndef\r\nghi\n", true) => []string{"abc", "def", "ghi"}
func Lines(s string, omitEmptyOpt ...bool) []string {
lines := linesRegex.Split(s, -1)
if len(omitEmptyOpt) == 0 || !omitEmptyOpt[0] {
return lines
}
r := []string{}
for i := range lines {
if lines[i] != "" {
r = append(r, lines[i])
}
}
return r
}
// Repeat see also strings.Repeat
func Repeat(s string, count int) string {
return strings.Repeat(s, count)
}
// Concat 合并字符串
func Concat(s ...string) string {
return strings.Join(s, "")
}
// Join see also strings.Join,
// omitEmptyOpt = true 时,不拼接 `ss` 中空字符串
func Join(ss []string, sep string, omitEmptyOpt ...bool) string {
if len(omitEmptyOpt) == 0 || !omitEmptyOpt[0] {
return strings.Join(ss, sep)
}
r := []string{}
for i := range ss {
if ss[i] != "" {
r = append(r, ss[i])
}
}
return strings.Join(r, sep)
}
// JoinPath see also filepath.Join
func JoinPath(ss ...string) string {
return filepath.Join(ss...)
}
// ToLower see also strings.ToLower
func ToLower(s string) string {
return strings.ToLower(s)
}
// ToUpper see also strings.ToUpper
func ToUpper(s string) string {
return strings.ToUpper(s)
}
// ToTitle see also strings.ToTitle
func ToTitle(s string) string {
return strings.ToTitle(s)
}
// Title see also strings.Title
func Title(s string) string {
return strings.Title(s)
}
// Contains 检查 `s` 中是否存在 `substrs` 中的某个字符串
//
// Contains("test contains.", "t c", "iii") => true
//
// Contains("test contains.", "t cc", "test ") => false
//
// Contains("test contains.", "iii", "uuu", "ont") => true
func Contains(s string, substrs ...string) bool {
for i := range substrs {
if strings.Contains(s, substrs[i]) {
return true
}
}
return false
}
// Equal 判断 `s` 和 `other` 是否相同,如果 ignorecase = true, 忽略大小写
//
// Equal("aaa", "AAA") => false
//
// Equal("aaa", "AaA", true) => true
func Equal[T ~string, S ~string](s S, t T, ignorecase ...bool) bool {
return string(s) == string(t) || (len(ignorecase) > 0 && ignorecase[0] && strings.EqualFold(string(s), string(t)))
}
// Atoi64 parse string to int64
//
// Atoi64("6") => (6, nil)
func Atoi64(s string) (int64, error) {
return strconv.ParseInt(s, 10, 64)
}
// Map 对 `ss` 中的每个元素执行 `f`, 返回f返回的结果列表
//
// Map([]string{"1", "2", "3"}, func(s string) string {return Concat("X", s)}) => []string{"X1", "X2", "X3"}
//
// Map([]string{"Aa", "bB", "cc"}, ToLower, Title) => []string{"Aa", "Bb", "Cc"}
func Map(ss []string, fs ...func(s string) string) []string {
r := []string{}
r = append(r, ss...)
r2 := []string{}
for _, f := range fs {
for i := range r {
r2 = append(r2, f(r[i]))
}
r = r2[:]
r2 = []string{}
}
return r
}
// DedupSlice 返回不含重复元素的 slice,各元素按第一次出现顺序排序。如果 omitEmptyOpt = true, 忽略空字符串
//
// DedupSlice([]string{"c", "", "b", "a", "", "a", "b", "c", "", "d"}) => []string{"c", "", "b", "a", "d"}
//
// DedupSlice([]string{"c", "", "b", "a", "", "a", "b", "c", "", "d"}, true) => []string{"c", "b", "a", "d"}
func DedupSlice(ss []string, omitEmptyOpt ...bool) []string {
var omitEmpty bool
if len(omitEmptyOpt) > 0 && omitEmptyOpt[0] {
omitEmpty = true
}
result := make([]string, 0, len(ss))
m := make(map[string]struct{}, len(ss))
for _, s := range ss {
if s == "" && omitEmpty {
continue
}
if _, ok := m[s]; ok {
continue
}
result = append(result, s)
m[s] = struct{}{}
}
return result
}
// DedupUint64Slice 返回不含重复元素的 slice,各元素按第一次出现顺序排序。
//
// DedupUint64Slice([]uint64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}) => []uint64{3, 1, 2, 0}
//
// DedupUint64Slice([]uint64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}, true) => []uint64{3, 1, 2}
func DedupUint64Slice(ii []uint64, omitZeroOpt ...bool) []uint64 {
var omitZero bool
if len(omitZeroOpt) > 0 && omitZeroOpt[0] {
omitZero = true
}
result := make([]uint64, 0, len(ii))
m := make(map[uint64]struct{}, len(ii))
for _, i := range ii {
if i == 0 && omitZero {
continue
}
if _, ok := m[i]; ok {
continue
}
result = append(result, i)
m[i] = struct{}{}
}
return result
}
// DedupInt64Slice ([]int64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}, true) => []int64{3, 1, 2}
func DedupInt64Slice(ii []int64, omitZeroOpt ...bool) []int64 {
var omitZero bool
if len(omitZeroOpt) > 0 && omitZeroOpt[0] {
omitZero = true
}
result := make([]int64, 0, len(ii))
m := make(map[int64]struct{}, len(ii))
for _, i := range ii {
if i == 0 && omitZero {
continue
}
if _, ok := m[i]; ok {
continue
}
result = append(result, i)
m[i] = struct{}{}
}
return result
}
// IntersectionUin64Slice 返回两个 uint64 slice 的交集,复杂度 O(m * n),待优化
//
// IntersectionUin64Slice([]uint64{3, 1, 2, 0}, []uint64{0, 3}) => []uint64{3, 0}
//
// IntersectionUin64Slice([]uint64{3, 1, 2, 1, 0}, []uint64{1, 2, 0}) => []uint64{1, 2, 1, 0}
func IntersectionUin64Slice(s1, s2 []uint64) []uint64 {
if len(s1) == 0 {
return nil
}
if len(s2) == 0 {
return s1
}
var result []uint64
for _, i := range s1 {
for _, j := range s2 {
if i == j {
result = append(result, i)
break
}
}
}
return result
}
// IntersectionIn64Slice 返回两个 int64 slice 的交集,复杂度 O(m * log(m))
//
// IntersectionIn64Slice([]int64{3, 1, 2, 0}, []int64{0, 3}) => []int64{3, 0}
//
// IntersectionIn64Slice([]int64{3, 1, 2, 1, 0}, []int64{1, 2, 0}) => []int64{1, 2, 1, 0}
func IntersectionInt64Slice(s1, s2 []int64) []int64 {
m := make(map[int64]bool)
nn := make([]int64, 0)
for _, v := range s1 {
m[v] = true
}
for _, v := range s2 {
if _, ok := m[v]; ok {
nn = append(nn, v)
}
}
return nn
}
// Remove 删除 slice 在 removes 中存在的元素。
//
// RemoveSlice([]string{"a", "b", "c", "a"}, "a") => []string{"b", "c"})
//
// RemoveSlice([]string{"a", "b", "c", "a"}, "b", "c") => []string{"a", "a"})
func RemoveSlice(ss []string, removes ...string) []string {
m := make(map[string]struct{})
for _, rm := range removes {
m[rm] = struct{}{}
}
result := make([]string, 0, len(ss))
for _, s := range ss {
if _, ok := m[s]; ok {
continue
}
result = append(result, s)
}
return result
}
func Exist(slice []string, val string) bool {
for _, v := range slice {
if v == val {
return true
}
}
return false
}
// NormalizeNewlines normalizes \r\n (windows) and \r (mac)
// into \n (unix).
//
// There are 3 ways to represent a newline.
//
// Unix: using single character LF, which is byte 10 (0x0a), represented as “” in Go string literal.
// Windows: using 2 characters: CR LF, which is bytes 13 10 (0x0d, 0x0a), represented as “” in Go string literal.
// Mac OS: using 1 character CR (byte 13 (0x0d)), represented as “” in Go string literal. This is the least popular.
func NormalizeNewlines(d []byte) []byte {
// replace CR LF \r\n (windows) with LF \n (unix)
d = bytes.Replace(d, []byte{13, 10}, []byte{10}, -1)
// replace CF \r (mac) with LF \n (unix)
d = bytes.Replace(d, []byte{13}, []byte{10}, -1)
return d
}
func SplitIfEmptyString(s, sep string) []string {
if s == "" {
return []string{}
}
return strings.SplitN(s, sep, -1)
}
var fontKinds = [][]int{{10, 48}, {26, 97}, {26, 65}}
// RandStr 获取随机字符串
func RandStr(size int) string {
result := make([]byte, size)
rand.Seed(time.Now().UnixNano())
for i := 0; i < size; i++ {
ikind := rand.Intn(3)
scope, base := fontKinds[ikind][0], fontKinds[ikind][1]
result[i] = uint8(base + rand.Intn(scope))
}
return string(result)
}
// ParseVersion 序列化版本 "1.05.1" --> "1.5.1",
func ParseVersion(version string) string {
// ISO/IEC 14651:2011
const maxByte = 1<<8 - 1
vo := make([]byte, 0, len(version)+8)
j := -1
for i := 0; i < len(version); i++ {
b := version[i]
if '0' > b || b > '9' {
vo = append(vo, b)
j = -1
continue
}
if j == -1 {
vo = append(vo, 0x00)
j = len(vo) - 1
}
if vo[j] == 1 && vo[j+1] == '0' {
vo[j+1] = b
continue
}
if vo[j]+1 > maxByte {
panic("VersionOrdinal: invalid version")
}
vo = append(vo, b)
vo[j]++
}
return string(vo)
}
// FlatErrors 将 errors 打平为一个 error
func FlatErrors(errs []error, sep string) error {
var errMsgs []string
for _, err := range errs {
errMsgs = append(errMsgs, err.Error())
}
return fmt.Errorf("%s", Join(errMsgs, sep, true))
}
func ContainsOrEmpty(source, target string) bool {
return target == "" || strings.Contains(source, target)
}
func FirstNoneEmpty(strs ...string) string {
for _, str := range strs {
if len(str) > 0 {
return str
}
}
return ""
}
func HandleQuotes(data []byte, quotes [2]byte, handler func([]byte)) {
var heap []byte
var buf []byte
var left, right = quotes[0], quotes[1]
for _, c := range data {
if c == left {
buf = append(buf, c)
heap = append(heap, c)
continue
}
if len(heap) == 0 {
if len(buf) != 0 {
handler(buf)
buf = nil
}
continue
}
buf = append(buf, c)
if c == right && heap[len(heap)-1] == left {
heap = heap[:len(heap)-1]
continue
}
}
}
| {
if le | identifier_name |
strutil.go | // Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package strutil
import (
"bytes"
"fmt"
"math/rand"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"unicode"
)
// Trim 两边裁剪 `s`, 如果不指定 `cutset`, 默认cutset=space
//
// Trim("trim ") => "trim"
//
// Trim(" this ") => "this"
//
// Trim("athisb", "abs") => "this"
func Trim(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimSpace(s)
}
return strings.Trim(s, cutset[0])
}
// TrimLeft 裁剪 `s` 左边, 如果不指定 `cutset`, 默认cutset=space
//
// TrimLeft("trim ") => "trim "
//
// TrimLeft(" this") => "this"
//
// TrimLeft("athisa", "a") => "thisa"
func TrimLeft(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimLeftFunc(s, unicode.IsSpace)
}
return strings.TrimLeft(s, cutset[0])
}
// TrimRight 裁剪 `s` 右边,如果不指定 `cutset`, 默认cutset=space
//
// TrimRight("trim ") => "trim"
//
// TrimRight(" this") => " this"
//
// TrimRight("athisa", "a") => "athis"
func TrimRight(s string, cutset ...string) string {
if len(cutset) == 0 {
return strings.TrimRightFunc(s, unicode.IsSpace)
}
return strings.TrimRight(s, cutset[0])
}
// TrimSuffixes 裁剪 `s` 的后缀
//
// TrimSuffixes("test.go", ".go") => "test"
//
// TrimSuffixes("test.go", ".md", ".go", ".sh") => "test"
//
// TrimSuffixes("test.go.tmp", ".go", ".tmp") => "test.go"
func TrimSuffixes(s string, suffixes ...string) string {
originLen := len(s)
for i := range suffixes {
trimmed := strings.TrimSuffix(s, suffixes[i])
if len(trimmed) != originLen {
return trimmed
}
}
return s | //
// TrimPrefixes("/tmp/file", "/tmp") => "/file"
//
// TrimPrefixes("/tmp/tmp/file", "/tmp", "/tmp/tmp") => "/tmp/file"
func TrimPrefixes(s string, prefixes ...string) string {
originLen := len(s)
for i := range prefixes {
trimmed := strings.TrimPrefix(s, prefixes[i])
if len(trimmed) != originLen {
return trimmed
}
}
return s
}
// TrimSlice Trim 的 Slice 版本
//
// TrimSlice([]string{"trim ", " trim", " trim "}) => []string{"trim", "trim", "trim"}
func TrimSlice(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = Trim(ss[i], cutset...)
}
return r
}
// TrimSliceLeft TrimLeft 的 Slice 版本
//
// TrimSliceLeft([]string{"trim ", " trim", " trim "}) => []string{"trim ", "trim", "trim "}
func TrimSliceLeft(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimLeft(ss[i], cutset...)
}
return r
}
// TrimSliceRight TrimRight 的 Slice 版本
//
// TrimSliceRight([]string{"trim ", " trim", " trim "}) => []string{"trim", " trim", " trim"}
func TrimSliceRight(ss []string, cutset ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimRight(ss[i], cutset...)
}
return r
}
// TrimSliceSuffixes TrimSuffixes 的 Slice 版本
//
// TrimSliceSuffixes([]string{"test.go", "test.go.tmp"}, ".go", ".tmp") => []string{"test", "test.go"}
func TrimSliceSuffixes(ss []string, suffixes ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimSuffixes(ss[i], suffixes...)
}
return r
}
// TrimSlicePrefixes TrimPrefixes 的 Slice 版本
//
// TrimSlicePrefixes([]string{"/tmp/file", "/tmp/tmp/file"}, "/tmp", "/tmp/tmp") => []string{"/file", "/tmp/file"}
func TrimSlicePrefixes(ss []string, prefixes ...string) []string {
r := make([]string, len(ss))
for i := range ss {
r[i] = TrimPrefixes(ss[i], prefixes...)
}
return r
}
// HasPrefixes `prefixes` 中是否存在 `s` 的前缀
//
// HasPrefixes("asd", "ddd", "uuu") => false
//
// HasPrefixes("asd", "sd", "as") => true
//
// HasPrefixes("asd", "asd") => true
func HasPrefixes(s string, prefixes ...string) bool {
for i := range prefixes {
if strings.HasPrefix(s, prefixes[i]) {
return true
}
}
return false
}
// HasSuffixes `suffixes` 中是否存在 `s` 的后缀
//
// HasSuffixes("asd", "ddd", "d") => true
//
// HasSuffixes("asd", "sd") => true
//
// HasSuffixes("asd", "iid", "as") => false
func HasSuffixes(s string, suffixes ...string) bool {
for i := range suffixes {
if strings.HasSuffix(s, suffixes[i]) {
return true
}
}
return false
}
var (
collapseWhitespaceRegex = regexp.MustCompile("[ \t\n\r]+")
)
// CollapseWhitespace 转化连续的 space 为 _一个_ 空格
//
// CollapseWhitespace("only one space") => "only one space"
//
// CollapseWhitespace("collapse \n all \t sorts of \r \n \r\n whitespace") => "collapse all sorts of whitespace"
func CollapseWhitespace(s string) string {
return collapseWhitespaceRegex.ReplaceAllString(s, " ")
}
// Center 居中 `s`
//
// Center("a", 5) => " a "
//
// Center("ab", 5) => " ab "
//
// Center("abc", 1) => "abc"
func Center(s string, length int) string {
minus := length - len(s)
if minus <= 0 {
return s
}
right := minus / 2
mod := minus % 2
return strings.Join([]string{Repeat(" ", right+mod), s, Repeat(" ", right)}, "")
}
// Truncate 截断 `s` 到 `length`-3 的长度,末尾增加 "..."
//
// Truncate("it is too long", 6) => "it ..."
//
// Truncate("it is too long", 13) => "it is too ..."
//
// Truncate("but it is not", 16) => "but it is not"
func Truncate(s string, length int) string {
if len(s) > length {
return s[:length-3] + "..."
}
return s
}
// Split 根据 `sep` 来切分 `s`, `omitEmptyOpt`=true 时,忽略结果中的空字符串
//
// Split("a|bc|12||3", "|") => []string{"a", "bc", "12", "", "3"}
//
// Split("a|bc|12||3", "|", true) => []string{"a", "bc", "12", "3"}
//
// Split("a,b,c", ":") => []string{"a,b,c"}
func Split(s string, sep string, omitEmptyOpt ...bool) []string {
var omitEmpty bool
if len(omitEmptyOpt) > 0 && omitEmptyOpt[0] {
omitEmpty = true
}
parts := strings.Split(s, sep)
if !omitEmpty {
return parts
}
result := []string{}
for _, v := range parts {
if v != "" {
result = append(result, v)
}
}
return result
}
// Splits 根据 `seps` 中的每个元素来切分 `s`, `omitEmptyOpt`=true 时,忽略结果中的空字符串
//
// Splits("a,bc,,12,3", []string{",", ","}) => []string{"a", "bc", "12", "", "3"}
//
// Splits("a,bc,,12,3", []string{",", ","}, true) => []string{"a", "bc", "12", "3"}
func Splits(s string, seps []string, omitEmptyOpt ...bool) []string {
if len(seps) == 0 {
return []string{s}
}
sep, seps := seps[0], seps[1:]
parts := Split(s, sep, omitEmptyOpt...)
result := []string{}
for _, part := range parts {
result = append(result, Splits(part, seps, omitEmptyOpt...)...)
}
return result
}
var (
linesRegex = regexp.MustCompile("\r\n|\n|\r")
)
// Lines 将 `s` 按 newline 切分成 string slice, omitEmptyOpt=true 时,忽略结果中的空字符串
//
// Lines("abc\ndef\nghi") => []string{"abc", "def", "ghi"}
//
// Lines("abc\rdef\rghi") => []string{"abc", "def", "ghi"}
//
// Lines("abc\r\ndef\r\nghi\n") => []string{"abc", "def", "ghi", ""}
//
// Lines("abc\r\ndef\r\nghi\n", true) => []string{"abc", "def", "ghi"}
func Lines(s string, omitEmptyOpt ...bool) []string {
lines := linesRegex.Split(s, -1)
if len(omitEmptyOpt) == 0 || !omitEmptyOpt[0] {
return lines
}
r := []string{}
for i := range lines {
if lines[i] != "" {
r = append(r, lines[i])
}
}
return r
}
// Repeat see also strings.Repeat
func Repeat(s string, count int) string {
return strings.Repeat(s, count)
}
// Concat 合并字符串
func Concat(s ...string) string {
return strings.Join(s, "")
}
// Join see also strings.Join,
// omitEmptyOpt = true 时,不拼接 `ss` 中空字符串
func Join(ss []string, sep string, omitEmptyOpt ...bool) string {
if len(omitEmptyOpt) == 0 || !omitEmptyOpt[0] {
return strings.Join(ss, sep)
}
r := []string{}
for i := range ss {
if ss[i] != "" {
r = append(r, ss[i])
}
}
return strings.Join(r, sep)
}
// JoinPath see also filepath.Join
func JoinPath(ss ...string) string {
return filepath.Join(ss...)
}
// ToLower see also strings.ToLower
func ToLower(s string) string {
return strings.ToLower(s)
}
// ToUpper see also strings.ToUpper
func ToUpper(s string) string {
return strings.ToUpper(s)
}
// ToTitle see also strings.ToTitle
func ToTitle(s string) string {
return strings.ToTitle(s)
}
// Title see also strings.Title
func Title(s string) string {
return strings.Title(s)
}
// Contains 检查 `s` 中是否存在 `substrs` 中的某个字符串
//
// Contains("test contains.", "t c", "iii") => true
//
// Contains("test contains.", "t cc", "test ") => false
//
// Contains("test contains.", "iii", "uuu", "ont") => true
func Contains(s string, substrs ...string) bool {
for i := range substrs {
if strings.Contains(s, substrs[i]) {
return true
}
}
return false
}
// Equal 判断 `s` 和 `other` 是否相同,如果 ignorecase = true, 忽略大小写
//
// Equal("aaa", "AAA") => false
//
// Equal("aaa", "AaA", true) => true
func Equal[T ~string, S ~string](s S, t T, ignorecase ...bool) bool {
return string(s) == string(t) || (len(ignorecase) > 0 && ignorecase[0] && strings.EqualFold(string(s), string(t)))
}
// Atoi64 parse string to int64
//
// Atoi64("6") => (6, nil)
func Atoi64(s string) (int64, error) {
return strconv.ParseInt(s, 10, 64)
}
// Map 对 `ss` 中的每个元素执行 `f`, 返回f返回的结果列表
//
// Map([]string{"1", "2", "3"}, func(s string) string {return Concat("X", s)}) => []string{"X1", "X2", "X3"}
//
// Map([]string{"Aa", "bB", "cc"}, ToLower, Title) => []string{"Aa", "Bb", "Cc"}
func Map(ss []string, fs ...func(s string) string) []string {
r := []string{}
r = append(r, ss...)
r2 := []string{}
for _, f := range fs {
for i := range r {
r2 = append(r2, f(r[i]))
}
r = r2[:]
r2 = []string{}
}
return r
}
// DedupSlice 返回不含重复元素的 slice,各元素按第一次出现顺序排序。如果 omitEmptyOpt = true, 忽略空字符串
//
// DedupSlice([]string{"c", "", "b", "a", "", "a", "b", "c", "", "d"}) => []string{"c", "", "b", "a", "d"}
//
// DedupSlice([]string{"c", "", "b", "a", "", "a", "b", "c", "", "d"}, true) => []string{"c", "b", "a", "d"}
func DedupSlice(ss []string, omitEmptyOpt ...bool) []string {
var omitEmpty bool
if len(omitEmptyOpt) > 0 && omitEmptyOpt[0] {
omitEmpty = true
}
result := make([]string, 0, len(ss))
m := make(map[string]struct{}, len(ss))
for _, s := range ss {
if s == "" && omitEmpty {
continue
}
if _, ok := m[s]; ok {
continue
}
result = append(result, s)
m[s] = struct{}{}
}
return result
}
// DedupUint64Slice 返回不含重复元素的 slice,各元素按第一次出现顺序排序。
//
// DedupUint64Slice([]uint64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}) => []uint64{3, 1, 2, 0}
//
// DedupUint64Slice([]uint64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}, true) => []uint64{3, 1, 2}
func DedupUint64Slice(ii []uint64, omitZeroOpt ...bool) []uint64 {
var omitZero bool
if len(omitZeroOpt) > 0 && omitZeroOpt[0] {
omitZero = true
}
result := make([]uint64, 0, len(ii))
m := make(map[uint64]struct{}, len(ii))
for _, i := range ii {
if i == 0 && omitZero {
continue
}
if _, ok := m[i]; ok {
continue
}
result = append(result, i)
m[i] = struct{}{}
}
return result
}
// DedupInt64Slice ([]int64{3, 3, 1, 2, 1, 2, 3, 3, 2, 1, 0, 1, 2}, true) => []int64{3, 1, 2}
func DedupInt64Slice(ii []int64, omitZeroOpt ...bool) []int64 {
var omitZero bool
if len(omitZeroOpt) > 0 && omitZeroOpt[0] {
omitZero = true
}
result := make([]int64, 0, len(ii))
m := make(map[int64]struct{}, len(ii))
for _, i := range ii {
if i == 0 && omitZero {
continue
}
if _, ok := m[i]; ok {
continue
}
result = append(result, i)
m[i] = struct{}{}
}
return result
}
// IntersectionUin64Slice 返回两个 uint64 slice 的交集,复杂度 O(m * n),待优化
//
// IntersectionUin64Slice([]uint64{3, 1, 2, 0}, []uint64{0, 3}) => []uint64{3, 0}
//
// IntersectionUin64Slice([]uint64{3, 1, 2, 1, 0}, []uint64{1, 2, 0}) => []uint64{1, 2, 1, 0}
func IntersectionUin64Slice(s1, s2 []uint64) []uint64 {
if len(s1) == 0 {
return nil
}
if len(s2) == 0 {
return s1
}
var result []uint64
for _, i := range s1 {
for _, j := range s2 {
if i == j {
result = append(result, i)
break
}
}
}
return result
}
// IntersectionIn64Slice 返回两个 int64 slice 的交集,复杂度 O(m * log(m))
//
// IntersectionIn64Slice([]int64{3, 1, 2, 0}, []int64{0, 3}) => []int64{3, 0}
//
// IntersectionIn64Slice([]int64{3, 1, 2, 1, 0}, []int64{1, 2, 0}) => []int64{1, 2, 1, 0}
func IntersectionInt64Slice(s1, s2 []int64) []int64 {
m := make(map[int64]bool)
nn := make([]int64, 0)
for _, v := range s1 {
m[v] = true
}
for _, v := range s2 {
if _, ok := m[v]; ok {
nn = append(nn, v)
}
}
return nn
}
// Remove 删除 slice 在 removes 中存在的元素。
//
// RemoveSlice([]string{"a", "b", "c", "a"}, "a") => []string{"b", "c"})
//
// RemoveSlice([]string{"a", "b", "c", "a"}, "b", "c") => []string{"a", "a"})
func RemoveSlice(ss []string, removes ...string) []string {
m := make(map[string]struct{})
for _, rm := range removes {
m[rm] = struct{}{}
}
result := make([]string, 0, len(ss))
for _, s := range ss {
if _, ok := m[s]; ok {
continue
}
result = append(result, s)
}
return result
}
func Exist(slice []string, val string) bool {
for _, v := range slice {
if v == val {
return true
}
}
return false
}
// NormalizeNewlines normalizes \r\n (windows) and \r (mac)
// into \n (unix).
//
// There are 3 ways to represent a newline.
//
// Unix: using single character LF, which is byte 10 (0x0a), represented as “” in Go string literal.
// Windows: using 2 characters: CR LF, which is bytes 13 10 (0x0d, 0x0a), represented as “” in Go string literal.
// Mac OS: using 1 character CR (byte 13 (0x0d)), represented as “” in Go string literal. This is the least popular.
func NormalizeNewlines(d []byte) []byte {
// replace CR LF \r\n (windows) with LF \n (unix)
d = bytes.Replace(d, []byte{13, 10}, []byte{10}, -1)
// replace CF \r (mac) with LF \n (unix)
d = bytes.Replace(d, []byte{13}, []byte{10}, -1)
return d
}
func SplitIfEmptyString(s, sep string) []string {
if s == "" {
return []string{}
}
return strings.SplitN(s, sep, -1)
}
var fontKinds = [][]int{{10, 48}, {26, 97}, {26, 65}}
// RandStr 获取随机字符串
func RandStr(size int) string {
result := make([]byte, size)
rand.Seed(time.Now().UnixNano())
for i := 0; i < size; i++ {
ikind := rand.Intn(3)
scope, base := fontKinds[ikind][0], fontKinds[ikind][1]
result[i] = uint8(base + rand.Intn(scope))
}
return string(result)
}
// ParseVersion 序列化版本 "1.05.1" --> "1.5.1",
func ParseVersion(version string) string {
// ISO/IEC 14651:2011
const maxByte = 1<<8 - 1
vo := make([]byte, 0, len(version)+8)
j := -1
for i := 0; i < len(version); i++ {
b := version[i]
if '0' > b || b > '9' {
vo = append(vo, b)
j = -1
continue
}
if j == -1 {
vo = append(vo, 0x00)
j = len(vo) - 1
}
if vo[j] == 1 && vo[j+1] == '0' {
vo[j+1] = b
continue
}
if vo[j]+1 > maxByte {
panic("VersionOrdinal: invalid version")
}
vo = append(vo, b)
vo[j]++
}
return string(vo)
}
// FlatErrors 将 errors 打平为一个 error
func FlatErrors(errs []error, sep string) error {
var errMsgs []string
for _, err := range errs {
errMsgs = append(errMsgs, err.Error())
}
return fmt.Errorf("%s", Join(errMsgs, sep, true))
}
func ContainsOrEmpty(source, target string) bool {
return target == "" || strings.Contains(source, target)
}
func FirstNoneEmpty(strs ...string) string {
for _, str := range strs {
if len(str) > 0 {
return str
}
}
return ""
}
func HandleQuotes(data []byte, quotes [2]byte, handler func([]byte)) {
var heap []byte
var buf []byte
var left, right = quotes[0], quotes[1]
for _, c := range data {
if c == left {
buf = append(buf, c)
heap = append(heap, c)
continue
}
if len(heap) == 0 {
if len(buf) != 0 {
handler(buf)
buf = nil
}
continue
}
buf = append(buf, c)
if c == right && heap[len(heap)-1] == left {
heap = heap[:len(heap)-1]
continue
}
}
} | }
// TrimPrefixes 裁剪 `s` 的前缀 | random_line_split |
converter.go | package scope
import (
"fmt"
rbacv1 "k8s.io/api/rbac/v1"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
kutilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
kauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
rbaclisters "k8s.io/client-go/listers/rbac/v1"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
authorizerrbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
scopemetadata "github.com/openshift/library-go/pkg/authorization/scopemetadata"
)
const (
scopesAllNamespaces = "*"
legacyGroupName = ""
coreGroupName = ""
kubeAuthorizationGroupName = "authorization.k8s.io"
openshiftAuthorizationGroupName = "authorization.openshift.io"
imageGroupName = "image.openshift.io"
networkGroupName = "network.openshift.io"
oauthGroupName = "oauth.openshift.io"
projectGroupName = "project.openshift.io"
userGroupName = "user.openshift.io"
)
// scopeDiscoveryRule is a rule that allows a client to discover the API resources available on this server
var scopeDiscoveryRule = rbacv1.PolicyRule{
Verbs: []string{"get"},
NonResourceURLs: []string{
// Server version checking
"/version", "/version/*",
// API discovery/negotiation
"/api", "/api/*",
"/apis", "/apis/*",
"/oapi", "/oapi/*",
"/openapi/v2",
"/swaggerapi", "/swaggerapi/*", "/swagger.json", "/swagger-2.0.0.pb-v1",
"/osapi", "/osapi/", // these cannot be removed until we can drop support for pre 3.1 clients
"/.well-known", "/.well-known/*",
// we intentionally allow all to here
"/",
},
}
// ScopesToRules takes the scopes and return the rules back. We ALWAYS add the discovery rules and it is possible to get some rules and and
// an error since errors aren't fatal to evaluation
func ScopesToRules(scopes []string, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
rules := append([]rbacv1.PolicyRule{}, scopeDiscoveryRule)
errors := []error{}
for _, scope := range scopes {
found := false
for _, evaluator := range ScopeEvaluators {
if evaluator.Handles(scope) {
found = true
currRules, err := evaluator.ResolveRules(scope, namespace, clusterRoleGetter)
if err != nil {
errors = append(errors, err)
continue
}
rules = append(rules, currRules...)
}
}
if !found {
errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
}
}
return rules, kutilerrors.NewAggregate(errors)
}
// ScopesToVisibleNamespaces returns a list of namespaces that the provided scopes have "get" access to.
// This exists only to support efficiently list/watch of projects (ACLed namespaces)
func ScopesToVisibleNamespaces(scopes []string, clusterRoleGetter rbaclisters.ClusterRoleLister, ignoreUnhandledScopes bool) (sets.String, error) {
if len(scopes) == 0 {
return sets.NewString("*"), nil
}
visibleNamespaces := sets.String{}
errors := []error{}
for _, scope := range scopes {
found := false
for _, evaluator := range ScopeEvaluators {
if evaluator.Handles(scope) {
found = true
allowedNamespaces, err := evaluator.ResolveGettableNamespaces(scope, clusterRoleGetter)
if err != nil {
errors = append(errors, err)
continue
}
visibleNamespaces.Insert(allowedNamespaces...)
break
}
}
if !found && !ignoreUnhandledScopes {
errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
}
}
return visibleNamespaces, kutilerrors.NewAggregate(errors)
}
const (
UserIndicator = "user:"
ClusterRoleIndicator = "role:"
)
// ScopeEvaluator takes a scope and returns the rules that express it
type ScopeEvaluator interface {
// Handles returns true if this evaluator can evaluate this scope
Handles(scope string) bool
// Validate returns an error if the scope is malformed
Validate(scope string) error
// Describe returns a description, warning (typically used to warn about escalation dangers), or an error if the scope is malformed
Describe(scope string) (description string, warning string, err error)
// ResolveRules returns the policy rules that this scope allows
ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error)
ResolveGettableNamespaces(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error)
}
// ScopeEvaluators map prefixes to a function that handles that prefix
var ScopeEvaluators = []ScopeEvaluator{
userEvaluator{},
clusterRoleEvaluator{},
}
// scopes are in the format
// <indicator><indicator choice>
// we have the following formats:
// user:<scope name>
// role:<clusterrole name>:<namespace to allow the cluster role, * means all>
// TODO
// cluster:<comma-delimited verbs>:<comma-delimited resources>
// namespace:<namespace name>:<comma-delimited verbs>:<comma-delimited resources>
const (
UserInfo = UserIndicator + "info"
UserAccessCheck = UserIndicator + "check-access"
// UserListScopedProjects gives explicit permission to see the projects that this token can see.
UserListScopedProjects = UserIndicator + "list-scoped-projects"
// UserListAllProjects gives explicit permission to see the projects a user can see. This is often used to prime secondary ACL systems
// unrelated to openshift and to display projects for selection in a secondary UI.
UserListAllProjects = UserIndicator + "list-projects"
// UserFull includes all permissions of the user
UserFull = UserIndicator + "full"
)
var defaultSupportedScopesMap = map[string]string{
UserInfo: "Read-only access to your user information (including username, identities, and group membership)",
UserAccessCheck: `Read-only access to view your privileges (for example, "can I create builds?")`,
UserListScopedProjects: `Read-only access to list your projects viewable with this token and view their metadata (display name, description, etc.)`,
UserListAllProjects: `Read-only access to list your projects and view their metadata (display name, description, etc.)`,
UserFull: `Full read/write access with all of your permissions`,
}
func DefaultSupportedScopes() []string {
return sets.StringKeySet(defaultSupportedScopesMap).List()
}
func DescribeScopes(scopes []string) map[string]string {
ret := map[string]string{}
for _, s := range scopes {
val, ok := defaultSupportedScopesMap[s]
if ok {
ret[s] = val
} else {
ret[s] = ""
}
}
return ret
}
// user:<scope name>
type userEvaluator struct {
scopemetadata.UserEvaluator
}
func (userEvaluator) ResolveRules(scope, namespace string, _ rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
switch scope {
case UserInfo:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get").
Groups(userGroupName, legacyGroupName).
Resources("users").
Names("~").
RuleOrDie(),
}, nil
case UserAccessCheck:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create").
Groups(kubeAuthorizationGroupName).
Resources("selfsubjectaccessreviews").
RuleOrDie(),
rbacv1helpers.NewRule("create").
Groups(openshiftAuthorizationGroupName, legacyGroupName).
Resources("selfsubjectrulesreviews").
RuleOrDie(),
}, nil
case UserListScopedProjects:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").
Groups(projectGroupName, legacyGroupName).
Resources("projects").
RuleOrDie(),
}, nil
case UserListAllProjects:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").
Groups(projectGroupName, legacyGroupName).
Resources("projects").
RuleOrDie(),
rbacv1helpers.NewRule("get").
Groups(coreGroupName).
Resources("namespaces").
RuleOrDie(),
}, nil
case UserFull:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule(rbacv1.VerbAll).
Groups(rbacv1.APIGroupAll).
Resources(rbacv1.ResourceAll).
RuleOrDie(),
rbacv1helpers.NewRule(rbacv1.VerbAll).
URLs(rbacv1.NonResourceAll).
RuleOrDie(),
}, nil
default:
return nil, fmt.Errorf("unrecognized scope: %v", scope)
}
}
func (userEvaluator) ResolveGettableNamespaces(scope string, _ rbaclisters.ClusterRoleLister) ([]string, error) {
switch scope {
case UserFull, UserListAllProjects:
return []string{"*"}, nil
default:
return []string{}, nil
}
}
// escalatingScopeResources are resources that are considered escalating for scope evaluation
var escalatingScopeResources = []schema.GroupResource{
{Group: coreGroupName, Resource: "secrets"},
{Group: imageGroupName, Resource: "imagestreams/secrets"},
{Group: oauthGroupName, Resource: "oauthauthorizetokens"},
{Group: oauthGroupName, Resource: "oauthaccesstokens"},
{Group: openshiftAuthorizationGroupName, Resource: "roles"},
{Group: openshiftAuthorizationGroupName, Resource: "rolebindings"},
{Group: openshiftAuthorizationGroupName, Resource: "clusterroles"},
{Group: openshiftAuthorizationGroupName, Resource: "clusterrolebindings"},
// used in Service admission to create a service with external IP outside the allowed range
{Group: networkGroupName, Resource: "service/externalips"},
{Group: legacyGroupName, Resource: "imagestreams/secrets"},
{Group: legacyGroupName, Resource: "oauthauthorizetokens"},
{Group: legacyGroupName, Resource: "oauthaccesstokens"},
{Group: legacyGroupName, Resource: "roles"},
{Group: legacyGroupName, Resource: "rolebindings"},
{Group: legacyGroupName, Resource: "clusterroles"},
{Group: legacyGroupName, Resource: "clusterrolebindings"},
}
// role:<clusterrole name>:<namespace to allow the cluster role, * means all>
type clusterRoleEvaluator struct {
scopemetadata.ClusterRoleEvaluator
}
var clusterRoleEvaluatorInstance = clusterRoleEvaluator{}
func (e clusterRoleEvaluator) ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
_, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
// if the scope limit on the clusterrole doesn't match, then don't add any rules, but its not an error
if !(scopeNamespace == scopesAllNamespaces || scopeNamespace == namespace) {
return []rbacv1.PolicyRule{}, nil
}
return e.resolveRules(scope, clusterRoleGetter)
}
func has(set []string, value string) bool {
for _, element := range set {
if value == element {
return true
}
}
return false
}
// resolveRules doesn't enforce namespace checks
func (e clusterRoleEvaluator) resolveRules(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
roleName, _, escalating, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
role, err := clusterRoleGetter.Get(roleName)
if err != nil {
if kapierrors.IsNotFound(err) {
return []rbacv1.PolicyRule{}, nil
}
return nil, err
}
rules := []rbacv1.PolicyRule{}
for _, rule := range role.Rules {
if escalating {
rules = append(rules, rule)
continue
}
// rules with unbounded access shouldn't be allowed in scopes.
if has(rule.Verbs, rbacv1.VerbAll) ||
has(rule.Resources, rbacv1.ResourceAll) ||
has(rule.APIGroups, rbacv1.APIGroupAll) {
continue
}
// rules that allow escalating resource access should be cleaned.
safeRule := removeEscalatingResources(rule)
rules = append(rules, safeRule)
}
return rules, nil
}
func (e clusterRoleEvaluator) | (scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error) {
_, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
rules, err := e.resolveRules(scope, clusterRoleGetter)
if err != nil {
return nil, err
}
attributes := kauthorizer.AttributesRecord{
APIGroup: coreGroupName,
Verb: "get",
Resource: "namespaces",
ResourceRequest: true,
}
if authorizerrbac.RulesAllow(attributes, rules...) {
return []string{scopeNamespace}, nil
}
return []string{}, nil
}
func remove(array []string, item string) []string {
newar := array[:0]
for _, element := range array {
if element != item {
newar = append(newar, element)
}
}
return newar
}
// removeEscalatingResources inspects a PolicyRule and removes any references to escalating resources.
// It has coarse logic for now. It is possible to rewrite one rule into many for the finest grain control
// but removing the entire matching resource regardless of verb or secondary group is cheaper, easier, and errs on the side removing
// too much, not too little
func removeEscalatingResources(in rbacv1.PolicyRule) rbacv1.PolicyRule {
var ruleCopy *rbacv1.PolicyRule
for _, resource := range escalatingScopeResources {
if !(has(in.APIGroups, resource.Group) && has(in.Resources, resource.Resource)) {
continue
}
if ruleCopy == nil {
// we're using a cache of cache of an object that uses pointers to data. I'm pretty sure we need to do a copy to avoid
// muddying the cache
ruleCopy = in.DeepCopy()
}
ruleCopy.Resources = remove(ruleCopy.Resources, resource.Resource)
}
if ruleCopy != nil {
return *ruleCopy
}
return in
}
| ResolveGettableNamespaces | identifier_name |
converter.go | package scope
import (
"fmt"
rbacv1 "k8s.io/api/rbac/v1"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
kutilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
kauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
rbaclisters "k8s.io/client-go/listers/rbac/v1"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
authorizerrbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
scopemetadata "github.com/openshift/library-go/pkg/authorization/scopemetadata"
)
const (
scopesAllNamespaces = "*"
legacyGroupName = ""
coreGroupName = ""
kubeAuthorizationGroupName = "authorization.k8s.io"
openshiftAuthorizationGroupName = "authorization.openshift.io"
imageGroupName = "image.openshift.io"
networkGroupName = "network.openshift.io"
oauthGroupName = "oauth.openshift.io"
projectGroupName = "project.openshift.io"
userGroupName = "user.openshift.io"
)
// scopeDiscoveryRule is a rule that allows a client to discover the API resources available on this server
var scopeDiscoveryRule = rbacv1.PolicyRule{
Verbs: []string{"get"},
NonResourceURLs: []string{
// Server version checking
"/version", "/version/*",
// API discovery/negotiation
"/api", "/api/*",
"/apis", "/apis/*",
"/oapi", "/oapi/*",
"/openapi/v2",
"/swaggerapi", "/swaggerapi/*", "/swagger.json", "/swagger-2.0.0.pb-v1",
"/osapi", "/osapi/", // these cannot be removed until we can drop support for pre 3.1 clients
"/.well-known", "/.well-known/*",
// we intentionally allow all to here
"/",
},
}
// ScopesToRules takes the scopes and return the rules back. We ALWAYS add the discovery rules and it is possible to get some rules and and
// an error since errors aren't fatal to evaluation
func ScopesToRules(scopes []string, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
rules := append([]rbacv1.PolicyRule{}, scopeDiscoveryRule)
errors := []error{}
for _, scope := range scopes {
found := false
for _, evaluator := range ScopeEvaluators {
if evaluator.Handles(scope) {
found = true
currRules, err := evaluator.ResolveRules(scope, namespace, clusterRoleGetter)
if err != nil {
errors = append(errors, err)
continue
}
rules = append(rules, currRules...)
}
}
if !found {
errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
}
}
return rules, kutilerrors.NewAggregate(errors)
}
// ScopesToVisibleNamespaces returns a list of namespaces that the provided scopes have "get" access to.
// This exists only to support efficiently list/watch of projects (ACLed namespaces)
func ScopesToVisibleNamespaces(scopes []string, clusterRoleGetter rbaclisters.ClusterRoleLister, ignoreUnhandledScopes bool) (sets.String, error) {
if len(scopes) == 0 {
return sets.NewString("*"), nil
}
visibleNamespaces := sets.String{}
errors := []error{}
for _, scope := range scopes {
found := false
for _, evaluator := range ScopeEvaluators {
if evaluator.Handles(scope) {
found = true
allowedNamespaces, err := evaluator.ResolveGettableNamespaces(scope, clusterRoleGetter)
if err != nil {
errors = append(errors, err)
continue
}
visibleNamespaces.Insert(allowedNamespaces...)
break
}
}
if !found && !ignoreUnhandledScopes {
errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
}
}
return visibleNamespaces, kutilerrors.NewAggregate(errors)
}
const (
UserIndicator = "user:"
ClusterRoleIndicator = "role:"
)
// ScopeEvaluator takes a scope and returns the rules that express it
type ScopeEvaluator interface {
// Handles returns true if this evaluator can evaluate this scope
Handles(scope string) bool
// Validate returns an error if the scope is malformed
Validate(scope string) error
// Describe returns a description, warning (typically used to warn about escalation dangers), or an error if the scope is malformed
Describe(scope string) (description string, warning string, err error)
// ResolveRules returns the policy rules that this scope allows
ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error)
ResolveGettableNamespaces(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error)
}
// ScopeEvaluators map prefixes to a function that handles that prefix
var ScopeEvaluators = []ScopeEvaluator{
userEvaluator{},
clusterRoleEvaluator{},
}
// scopes are in the format
// <indicator><indicator choice>
// we have the following formats:
// user:<scope name>
// role:<clusterrole name>:<namespace to allow the cluster role, * means all>
// TODO
// cluster:<comma-delimited verbs>:<comma-delimited resources>
// namespace:<namespace name>:<comma-delimited verbs>:<comma-delimited resources>
const (
UserInfo = UserIndicator + "info"
UserAccessCheck = UserIndicator + "check-access"
// UserListScopedProjects gives explicit permission to see the projects that this token can see.
UserListScopedProjects = UserIndicator + "list-scoped-projects"
// UserListAllProjects gives explicit permission to see the projects a user can see. This is often used to prime secondary ACL systems
// unrelated to openshift and to display projects for selection in a secondary UI.
UserListAllProjects = UserIndicator + "list-projects"
// UserFull includes all permissions of the user
UserFull = UserIndicator + "full"
)
var defaultSupportedScopesMap = map[string]string{
UserInfo: "Read-only access to your user information (including username, identities, and group membership)",
UserAccessCheck: `Read-only access to view your privileges (for example, "can I create builds?")`,
UserListScopedProjects: `Read-only access to list your projects viewable with this token and view their metadata (display name, description, etc.)`,
UserListAllProjects: `Read-only access to list your projects and view their metadata (display name, description, etc.)`,
UserFull: `Full read/write access with all of your permissions`,
}
func DefaultSupportedScopes() []string {
return sets.StringKeySet(defaultSupportedScopesMap).List()
}
func DescribeScopes(scopes []string) map[string]string {
ret := map[string]string{}
for _, s := range scopes {
val, ok := defaultSupportedScopesMap[s]
if ok {
ret[s] = val
} else {
ret[s] = ""
}
}
return ret
}
// user:<scope name>
type userEvaluator struct {
scopemetadata.UserEvaluator
}
func (userEvaluator) ResolveRules(scope, namespace string, _ rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
switch scope {
case UserInfo:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get").
Groups(userGroupName, legacyGroupName).
Resources("users").
Names("~").
RuleOrDie(),
}, nil
case UserAccessCheck:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create").
Groups(kubeAuthorizationGroupName).
Resources("selfsubjectaccessreviews").
RuleOrDie(),
rbacv1helpers.NewRule("create").
Groups(openshiftAuthorizationGroupName, legacyGroupName).
Resources("selfsubjectrulesreviews").
RuleOrDie(),
}, nil
case UserListScopedProjects:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").
Groups(projectGroupName, legacyGroupName).
Resources("projects").
RuleOrDie(),
}, nil
case UserListAllProjects:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").
Groups(projectGroupName, legacyGroupName).
Resources("projects").
RuleOrDie(),
rbacv1helpers.NewRule("get").
Groups(coreGroupName).
Resources("namespaces").
RuleOrDie(),
}, nil
case UserFull:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule(rbacv1.VerbAll).
Groups(rbacv1.APIGroupAll).
Resources(rbacv1.ResourceAll).
RuleOrDie(),
rbacv1helpers.NewRule(rbacv1.VerbAll).
URLs(rbacv1.NonResourceAll).
RuleOrDie(),
}, nil
default:
return nil, fmt.Errorf("unrecognized scope: %v", scope)
}
}
func (userEvaluator) ResolveGettableNamespaces(scope string, _ rbaclisters.ClusterRoleLister) ([]string, error) {
switch scope {
case UserFull, UserListAllProjects:
return []string{"*"}, nil
default:
return []string{}, nil
}
}
// escalatingScopeResources are resources that are considered escalating for scope evaluation
var escalatingScopeResources = []schema.GroupResource{
{Group: coreGroupName, Resource: "secrets"},
{Group: imageGroupName, Resource: "imagestreams/secrets"},
{Group: oauthGroupName, Resource: "oauthauthorizetokens"},
{Group: oauthGroupName, Resource: "oauthaccesstokens"},
{Group: openshiftAuthorizationGroupName, Resource: "roles"},
{Group: openshiftAuthorizationGroupName, Resource: "rolebindings"},
{Group: openshiftAuthorizationGroupName, Resource: "clusterroles"},
{Group: openshiftAuthorizationGroupName, Resource: "clusterrolebindings"},
// used in Service admission to create a service with external IP outside the allowed range
{Group: networkGroupName, Resource: "service/externalips"},
{Group: legacyGroupName, Resource: "imagestreams/secrets"},
{Group: legacyGroupName, Resource: "oauthauthorizetokens"},
{Group: legacyGroupName, Resource: "oauthaccesstokens"},
{Group: legacyGroupName, Resource: "roles"},
{Group: legacyGroupName, Resource: "rolebindings"},
{Group: legacyGroupName, Resource: "clusterroles"},
{Group: legacyGroupName, Resource: "clusterrolebindings"},
}
// role:<clusterrole name>:<namespace to allow the cluster role, * means all>
type clusterRoleEvaluator struct {
scopemetadata.ClusterRoleEvaluator
}
var clusterRoleEvaluatorInstance = clusterRoleEvaluator{}
func (e clusterRoleEvaluator) ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) |
func has(set []string, value string) bool {
for _, element := range set {
if value == element {
return true
}
}
return false
}
// resolveRules doesn't enforce namespace checks
func (e clusterRoleEvaluator) resolveRules(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
roleName, _, escalating, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
role, err := clusterRoleGetter.Get(roleName)
if err != nil {
if kapierrors.IsNotFound(err) {
return []rbacv1.PolicyRule{}, nil
}
return nil, err
}
rules := []rbacv1.PolicyRule{}
for _, rule := range role.Rules {
if escalating {
rules = append(rules, rule)
continue
}
// rules with unbounded access shouldn't be allowed in scopes.
if has(rule.Verbs, rbacv1.VerbAll) ||
has(rule.Resources, rbacv1.ResourceAll) ||
has(rule.APIGroups, rbacv1.APIGroupAll) {
continue
}
// rules that allow escalating resource access should be cleaned.
safeRule := removeEscalatingResources(rule)
rules = append(rules, safeRule)
}
return rules, nil
}
func (e clusterRoleEvaluator) ResolveGettableNamespaces(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error) {
_, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
rules, err := e.resolveRules(scope, clusterRoleGetter)
if err != nil {
return nil, err
}
attributes := kauthorizer.AttributesRecord{
APIGroup: coreGroupName,
Verb: "get",
Resource: "namespaces",
ResourceRequest: true,
}
if authorizerrbac.RulesAllow(attributes, rules...) {
return []string{scopeNamespace}, nil
}
return []string{}, nil
}
func remove(array []string, item string) []string {
newar := array[:0]
for _, element := range array {
if element != item {
newar = append(newar, element)
}
}
return newar
}
// removeEscalatingResources inspects a PolicyRule and removes any references to escalating resources.
// It has coarse logic for now. It is possible to rewrite one rule into many for the finest grain control
// but removing the entire matching resource regardless of verb or secondary group is cheaper, easier, and errs on the side removing
// too much, not too little
func removeEscalatingResources(in rbacv1.PolicyRule) rbacv1.PolicyRule {
var ruleCopy *rbacv1.PolicyRule
for _, resource := range escalatingScopeResources {
if !(has(in.APIGroups, resource.Group) && has(in.Resources, resource.Resource)) {
continue
}
if ruleCopy == nil {
// we're using a cache of cache of an object that uses pointers to data. I'm pretty sure we need to do a copy to avoid
// muddying the cache
ruleCopy = in.DeepCopy()
}
ruleCopy.Resources = remove(ruleCopy.Resources, resource.Resource)
}
if ruleCopy != nil {
return *ruleCopy
}
return in
}
| {
_, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
// if the scope limit on the clusterrole doesn't match, then don't add any rules, but its not an error
if !(scopeNamespace == scopesAllNamespaces || scopeNamespace == namespace) {
return []rbacv1.PolicyRule{}, nil
}
return e.resolveRules(scope, clusterRoleGetter)
} | identifier_body |
converter.go | package scope
import (
"fmt"
rbacv1 "k8s.io/api/rbac/v1"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
kutilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
kauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
rbaclisters "k8s.io/client-go/listers/rbac/v1"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
authorizerrbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
scopemetadata "github.com/openshift/library-go/pkg/authorization/scopemetadata"
)
const (
scopesAllNamespaces = "*"
legacyGroupName = ""
coreGroupName = ""
kubeAuthorizationGroupName = "authorization.k8s.io"
openshiftAuthorizationGroupName = "authorization.openshift.io"
imageGroupName = "image.openshift.io"
networkGroupName = "network.openshift.io"
oauthGroupName = "oauth.openshift.io"
projectGroupName = "project.openshift.io"
userGroupName = "user.openshift.io"
)
// scopeDiscoveryRule is a rule that allows a client to discover the API resources available on this server
var scopeDiscoveryRule = rbacv1.PolicyRule{
Verbs: []string{"get"},
NonResourceURLs: []string{
// Server version checking
"/version", "/version/*",
// API discovery/negotiation
"/api", "/api/*",
"/apis", "/apis/*",
"/oapi", "/oapi/*",
"/openapi/v2",
"/swaggerapi", "/swaggerapi/*", "/swagger.json", "/swagger-2.0.0.pb-v1",
"/osapi", "/osapi/", // these cannot be removed until we can drop support for pre 3.1 clients
"/.well-known", "/.well-known/*",
// we intentionally allow all to here
"/",
},
}
// ScopesToRules takes the scopes and return the rules back. We ALWAYS add the discovery rules and it is possible to get some rules and and
// an error since errors aren't fatal to evaluation
func ScopesToRules(scopes []string, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
rules := append([]rbacv1.PolicyRule{}, scopeDiscoveryRule)
errors := []error{}
for _, scope := range scopes {
found := false
for _, evaluator := range ScopeEvaluators |
if !found {
errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
}
}
return rules, kutilerrors.NewAggregate(errors)
}
// ScopesToVisibleNamespaces returns a list of namespaces that the provided scopes have "get" access to.
// This exists only to support efficiently list/watch of projects (ACLed namespaces)
func ScopesToVisibleNamespaces(scopes []string, clusterRoleGetter rbaclisters.ClusterRoleLister, ignoreUnhandledScopes bool) (sets.String, error) {
if len(scopes) == 0 {
return sets.NewString("*"), nil
}
visibleNamespaces := sets.String{}
errors := []error{}
for _, scope := range scopes {
found := false
for _, evaluator := range ScopeEvaluators {
if evaluator.Handles(scope) {
found = true
allowedNamespaces, err := evaluator.ResolveGettableNamespaces(scope, clusterRoleGetter)
if err != nil {
errors = append(errors, err)
continue
}
visibleNamespaces.Insert(allowedNamespaces...)
break
}
}
if !found && !ignoreUnhandledScopes {
errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
}
}
return visibleNamespaces, kutilerrors.NewAggregate(errors)
}
const (
UserIndicator = "user:"
ClusterRoleIndicator = "role:"
)
// ScopeEvaluator takes a scope and returns the rules that express it
type ScopeEvaluator interface {
// Handles returns true if this evaluator can evaluate this scope
Handles(scope string) bool
// Validate returns an error if the scope is malformed
Validate(scope string) error
// Describe returns a description, warning (typically used to warn about escalation dangers), or an error if the scope is malformed
Describe(scope string) (description string, warning string, err error)
// ResolveRules returns the policy rules that this scope allows
ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error)
ResolveGettableNamespaces(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error)
}
// ScopeEvaluators map prefixes to a function that handles that prefix
var ScopeEvaluators = []ScopeEvaluator{
userEvaluator{},
clusterRoleEvaluator{},
}
// scopes are in the format
// <indicator><indicator choice>
// we have the following formats:
// user:<scope name>
// role:<clusterrole name>:<namespace to allow the cluster role, * means all>
// TODO
// cluster:<comma-delimited verbs>:<comma-delimited resources>
// namespace:<namespace name>:<comma-delimited verbs>:<comma-delimited resources>
const (
UserInfo = UserIndicator + "info"
UserAccessCheck = UserIndicator + "check-access"
// UserListScopedProjects gives explicit permission to see the projects that this token can see.
UserListScopedProjects = UserIndicator + "list-scoped-projects"
// UserListAllProjects gives explicit permission to see the projects a user can see. This is often used to prime secondary ACL systems
// unrelated to openshift and to display projects for selection in a secondary UI.
UserListAllProjects = UserIndicator + "list-projects"
// UserFull includes all permissions of the user
UserFull = UserIndicator + "full"
)
var defaultSupportedScopesMap = map[string]string{
UserInfo: "Read-only access to your user information (including username, identities, and group membership)",
UserAccessCheck: `Read-only access to view your privileges (for example, "can I create builds?")`,
UserListScopedProjects: `Read-only access to list your projects viewable with this token and view their metadata (display name, description, etc.)`,
UserListAllProjects: `Read-only access to list your projects and view their metadata (display name, description, etc.)`,
UserFull: `Full read/write access with all of your permissions`,
}
func DefaultSupportedScopes() []string {
return sets.StringKeySet(defaultSupportedScopesMap).List()
}
func DescribeScopes(scopes []string) map[string]string {
ret := map[string]string{}
for _, s := range scopes {
val, ok := defaultSupportedScopesMap[s]
if ok {
ret[s] = val
} else {
ret[s] = ""
}
}
return ret
}
// user:<scope name>
type userEvaluator struct {
scopemetadata.UserEvaluator
}
func (userEvaluator) ResolveRules(scope, namespace string, _ rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
switch scope {
case UserInfo:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get").
Groups(userGroupName, legacyGroupName).
Resources("users").
Names("~").
RuleOrDie(),
}, nil
case UserAccessCheck:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create").
Groups(kubeAuthorizationGroupName).
Resources("selfsubjectaccessreviews").
RuleOrDie(),
rbacv1helpers.NewRule("create").
Groups(openshiftAuthorizationGroupName, legacyGroupName).
Resources("selfsubjectrulesreviews").
RuleOrDie(),
}, nil
case UserListScopedProjects:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").
Groups(projectGroupName, legacyGroupName).
Resources("projects").
RuleOrDie(),
}, nil
case UserListAllProjects:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").
Groups(projectGroupName, legacyGroupName).
Resources("projects").
RuleOrDie(),
rbacv1helpers.NewRule("get").
Groups(coreGroupName).
Resources("namespaces").
RuleOrDie(),
}, nil
case UserFull:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule(rbacv1.VerbAll).
Groups(rbacv1.APIGroupAll).
Resources(rbacv1.ResourceAll).
RuleOrDie(),
rbacv1helpers.NewRule(rbacv1.VerbAll).
URLs(rbacv1.NonResourceAll).
RuleOrDie(),
}, nil
default:
return nil, fmt.Errorf("unrecognized scope: %v", scope)
}
}
func (userEvaluator) ResolveGettableNamespaces(scope string, _ rbaclisters.ClusterRoleLister) ([]string, error) {
switch scope {
case UserFull, UserListAllProjects:
return []string{"*"}, nil
default:
return []string{}, nil
}
}
// escalatingScopeResources are resources that are considered escalating for scope evaluation
var escalatingScopeResources = []schema.GroupResource{
{Group: coreGroupName, Resource: "secrets"},
{Group: imageGroupName, Resource: "imagestreams/secrets"},
{Group: oauthGroupName, Resource: "oauthauthorizetokens"},
{Group: oauthGroupName, Resource: "oauthaccesstokens"},
{Group: openshiftAuthorizationGroupName, Resource: "roles"},
{Group: openshiftAuthorizationGroupName, Resource: "rolebindings"},
{Group: openshiftAuthorizationGroupName, Resource: "clusterroles"},
{Group: openshiftAuthorizationGroupName, Resource: "clusterrolebindings"},
// used in Service admission to create a service with external IP outside the allowed range
{Group: networkGroupName, Resource: "service/externalips"},
{Group: legacyGroupName, Resource: "imagestreams/secrets"},
{Group: legacyGroupName, Resource: "oauthauthorizetokens"},
{Group: legacyGroupName, Resource: "oauthaccesstokens"},
{Group: legacyGroupName, Resource: "roles"},
{Group: legacyGroupName, Resource: "rolebindings"},
{Group: legacyGroupName, Resource: "clusterroles"},
{Group: legacyGroupName, Resource: "clusterrolebindings"},
}
// role:<clusterrole name>:<namespace to allow the cluster role, * means all>
type clusterRoleEvaluator struct {
scopemetadata.ClusterRoleEvaluator
}
var clusterRoleEvaluatorInstance = clusterRoleEvaluator{}
func (e clusterRoleEvaluator) ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
_, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
// if the scope limit on the clusterrole doesn't match, then don't add any rules, but its not an error
if !(scopeNamespace == scopesAllNamespaces || scopeNamespace == namespace) {
return []rbacv1.PolicyRule{}, nil
}
return e.resolveRules(scope, clusterRoleGetter)
}
func has(set []string, value string) bool {
for _, element := range set {
if value == element {
return true
}
}
return false
}
// resolveRules doesn't enforce namespace checks
func (e clusterRoleEvaluator) resolveRules(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
roleName, _, escalating, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
role, err := clusterRoleGetter.Get(roleName)
if err != nil {
if kapierrors.IsNotFound(err) {
return []rbacv1.PolicyRule{}, nil
}
return nil, err
}
rules := []rbacv1.PolicyRule{}
for _, rule := range role.Rules {
if escalating {
rules = append(rules, rule)
continue
}
// rules with unbounded access shouldn't be allowed in scopes.
if has(rule.Verbs, rbacv1.VerbAll) ||
has(rule.Resources, rbacv1.ResourceAll) ||
has(rule.APIGroups, rbacv1.APIGroupAll) {
continue
}
// rules that allow escalating resource access should be cleaned.
safeRule := removeEscalatingResources(rule)
rules = append(rules, safeRule)
}
return rules, nil
}
func (e clusterRoleEvaluator) ResolveGettableNamespaces(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error) {
_, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
rules, err := e.resolveRules(scope, clusterRoleGetter)
if err != nil {
return nil, err
}
attributes := kauthorizer.AttributesRecord{
APIGroup: coreGroupName,
Verb: "get",
Resource: "namespaces",
ResourceRequest: true,
}
if authorizerrbac.RulesAllow(attributes, rules...) {
return []string{scopeNamespace}, nil
}
return []string{}, nil
}
func remove(array []string, item string) []string {
newar := array[:0]
for _, element := range array {
if element != item {
newar = append(newar, element)
}
}
return newar
}
// removeEscalatingResources inspects a PolicyRule and removes any references to escalating resources.
// It has coarse logic for now. It is possible to rewrite one rule into many for the finest grain control
// but removing the entire matching resource regardless of verb or secondary group is cheaper, easier, and errs on the side removing
// too much, not too little
func removeEscalatingResources(in rbacv1.PolicyRule) rbacv1.PolicyRule {
var ruleCopy *rbacv1.PolicyRule
for _, resource := range escalatingScopeResources {
if !(has(in.APIGroups, resource.Group) && has(in.Resources, resource.Resource)) {
continue
}
if ruleCopy == nil {
// we're using a cache of cache of an object that uses pointers to data. I'm pretty sure we need to do a copy to avoid
// muddying the cache
ruleCopy = in.DeepCopy()
}
ruleCopy.Resources = remove(ruleCopy.Resources, resource.Resource)
}
if ruleCopy != nil {
return *ruleCopy
}
return in
}
| {
if evaluator.Handles(scope) {
found = true
currRules, err := evaluator.ResolveRules(scope, namespace, clusterRoleGetter)
if err != nil {
errors = append(errors, err)
continue
}
rules = append(rules, currRules...)
}
} | conditional_block |
converter.go | package scope
import (
"fmt"
rbacv1 "k8s.io/api/rbac/v1"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
kutilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
kauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
rbaclisters "k8s.io/client-go/listers/rbac/v1"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
authorizerrbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
scopemetadata "github.com/openshift/library-go/pkg/authorization/scopemetadata"
)
const (
scopesAllNamespaces = "*"
legacyGroupName = ""
coreGroupName = ""
kubeAuthorizationGroupName = "authorization.k8s.io"
openshiftAuthorizationGroupName = "authorization.openshift.io"
imageGroupName = "image.openshift.io"
networkGroupName = "network.openshift.io"
oauthGroupName = "oauth.openshift.io"
projectGroupName = "project.openshift.io"
userGroupName = "user.openshift.io"
)
// scopeDiscoveryRule is a rule that allows a client to discover the API resources available on this server
var scopeDiscoveryRule = rbacv1.PolicyRule{
Verbs: []string{"get"},
NonResourceURLs: []string{
// Server version checking
"/version", "/version/*",
// API discovery/negotiation
"/api", "/api/*",
"/apis", "/apis/*",
"/oapi", "/oapi/*",
"/openapi/v2",
"/swaggerapi", "/swaggerapi/*", "/swagger.json", "/swagger-2.0.0.pb-v1",
"/osapi", "/osapi/", // these cannot be removed until we can drop support for pre 3.1 clients
"/.well-known", "/.well-known/*",
// we intentionally allow all to here
"/",
},
}
// ScopesToRules takes the scopes and return the rules back. We ALWAYS add the discovery rules and it is possible to get some rules and and
// an error since errors aren't fatal to evaluation
func ScopesToRules(scopes []string, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
rules := append([]rbacv1.PolicyRule{}, scopeDiscoveryRule)
errors := []error{}
for _, scope := range scopes {
found := false
for _, evaluator := range ScopeEvaluators {
if evaluator.Handles(scope) {
found = true
currRules, err := evaluator.ResolveRules(scope, namespace, clusterRoleGetter)
if err != nil {
errors = append(errors, err)
continue
}
rules = append(rules, currRules...)
}
}
if !found {
errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
}
}
return rules, kutilerrors.NewAggregate(errors)
}
// ScopesToVisibleNamespaces returns a list of namespaces that the provided scopes have "get" access to.
// This exists only to support efficiently list/watch of projects (ACLed namespaces)
func ScopesToVisibleNamespaces(scopes []string, clusterRoleGetter rbaclisters.ClusterRoleLister, ignoreUnhandledScopes bool) (sets.String, error) {
if len(scopes) == 0 {
return sets.NewString("*"), nil
}
visibleNamespaces := sets.String{}
errors := []error{}
for _, scope := range scopes {
found := false
for _, evaluator := range ScopeEvaluators {
if evaluator.Handles(scope) {
found = true
allowedNamespaces, err := evaluator.ResolveGettableNamespaces(scope, clusterRoleGetter)
if err != nil {
errors = append(errors, err)
continue
}
visibleNamespaces.Insert(allowedNamespaces...)
break
}
}
if !found && !ignoreUnhandledScopes {
errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
}
}
return visibleNamespaces, kutilerrors.NewAggregate(errors)
}
const (
UserIndicator = "user:"
ClusterRoleIndicator = "role:"
)
// ScopeEvaluator takes a scope and returns the rules that express it
type ScopeEvaluator interface {
// Handles returns true if this evaluator can evaluate this scope
Handles(scope string) bool
// Validate returns an error if the scope is malformed
Validate(scope string) error
// Describe returns a description, warning (typically used to warn about escalation dangers), or an error if the scope is malformed
Describe(scope string) (description string, warning string, err error)
// ResolveRules returns the policy rules that this scope allows
ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error)
ResolveGettableNamespaces(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error)
}
// ScopeEvaluators map prefixes to a function that handles that prefix
var ScopeEvaluators = []ScopeEvaluator{
userEvaluator{},
clusterRoleEvaluator{},
}
| // <indicator><indicator choice>
// we have the following formats:
// user:<scope name>
// role:<clusterrole name>:<namespace to allow the cluster role, * means all>
// TODO
// cluster:<comma-delimited verbs>:<comma-delimited resources>
// namespace:<namespace name>:<comma-delimited verbs>:<comma-delimited resources>
const (
UserInfo = UserIndicator + "info"
UserAccessCheck = UserIndicator + "check-access"
// UserListScopedProjects gives explicit permission to see the projects that this token can see.
UserListScopedProjects = UserIndicator + "list-scoped-projects"
// UserListAllProjects gives explicit permission to see the projects a user can see. This is often used to prime secondary ACL systems
// unrelated to openshift and to display projects for selection in a secondary UI.
UserListAllProjects = UserIndicator + "list-projects"
// UserFull includes all permissions of the user
UserFull = UserIndicator + "full"
)
var defaultSupportedScopesMap = map[string]string{
UserInfo: "Read-only access to your user information (including username, identities, and group membership)",
UserAccessCheck: `Read-only access to view your privileges (for example, "can I create builds?")`,
UserListScopedProjects: `Read-only access to list your projects viewable with this token and view their metadata (display name, description, etc.)`,
UserListAllProjects: `Read-only access to list your projects and view their metadata (display name, description, etc.)`,
UserFull: `Full read/write access with all of your permissions`,
}
func DefaultSupportedScopes() []string {
return sets.StringKeySet(defaultSupportedScopesMap).List()
}
func DescribeScopes(scopes []string) map[string]string {
ret := map[string]string{}
for _, s := range scopes {
val, ok := defaultSupportedScopesMap[s]
if ok {
ret[s] = val
} else {
ret[s] = ""
}
}
return ret
}
// user:<scope name>
type userEvaluator struct {
scopemetadata.UserEvaluator
}
func (userEvaluator) ResolveRules(scope, namespace string, _ rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
switch scope {
case UserInfo:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get").
Groups(userGroupName, legacyGroupName).
Resources("users").
Names("~").
RuleOrDie(),
}, nil
case UserAccessCheck:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create").
Groups(kubeAuthorizationGroupName).
Resources("selfsubjectaccessreviews").
RuleOrDie(),
rbacv1helpers.NewRule("create").
Groups(openshiftAuthorizationGroupName, legacyGroupName).
Resources("selfsubjectrulesreviews").
RuleOrDie(),
}, nil
case UserListScopedProjects:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").
Groups(projectGroupName, legacyGroupName).
Resources("projects").
RuleOrDie(),
}, nil
case UserListAllProjects:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").
Groups(projectGroupName, legacyGroupName).
Resources("projects").
RuleOrDie(),
rbacv1helpers.NewRule("get").
Groups(coreGroupName).
Resources("namespaces").
RuleOrDie(),
}, nil
case UserFull:
return []rbacv1.PolicyRule{
rbacv1helpers.NewRule(rbacv1.VerbAll).
Groups(rbacv1.APIGroupAll).
Resources(rbacv1.ResourceAll).
RuleOrDie(),
rbacv1helpers.NewRule(rbacv1.VerbAll).
URLs(rbacv1.NonResourceAll).
RuleOrDie(),
}, nil
default:
return nil, fmt.Errorf("unrecognized scope: %v", scope)
}
}
func (userEvaluator) ResolveGettableNamespaces(scope string, _ rbaclisters.ClusterRoleLister) ([]string, error) {
switch scope {
case UserFull, UserListAllProjects:
return []string{"*"}, nil
default:
return []string{}, nil
}
}
// escalatingScopeResources are resources that are considered escalating for scope evaluation
var escalatingScopeResources = []schema.GroupResource{
{Group: coreGroupName, Resource: "secrets"},
{Group: imageGroupName, Resource: "imagestreams/secrets"},
{Group: oauthGroupName, Resource: "oauthauthorizetokens"},
{Group: oauthGroupName, Resource: "oauthaccesstokens"},
{Group: openshiftAuthorizationGroupName, Resource: "roles"},
{Group: openshiftAuthorizationGroupName, Resource: "rolebindings"},
{Group: openshiftAuthorizationGroupName, Resource: "clusterroles"},
{Group: openshiftAuthorizationGroupName, Resource: "clusterrolebindings"},
// used in Service admission to create a service with external IP outside the allowed range
{Group: networkGroupName, Resource: "service/externalips"},
{Group: legacyGroupName, Resource: "imagestreams/secrets"},
{Group: legacyGroupName, Resource: "oauthauthorizetokens"},
{Group: legacyGroupName, Resource: "oauthaccesstokens"},
{Group: legacyGroupName, Resource: "roles"},
{Group: legacyGroupName, Resource: "rolebindings"},
{Group: legacyGroupName, Resource: "clusterroles"},
{Group: legacyGroupName, Resource: "clusterrolebindings"},
}
// role:<clusterrole name>:<namespace to allow the cluster role, * means all>
type clusterRoleEvaluator struct {
scopemetadata.ClusterRoleEvaluator
}
var clusterRoleEvaluatorInstance = clusterRoleEvaluator{}
func (e clusterRoleEvaluator) ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
_, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
// if the scope limit on the clusterrole doesn't match, then don't add any rules, but its not an error
if !(scopeNamespace == scopesAllNamespaces || scopeNamespace == namespace) {
return []rbacv1.PolicyRule{}, nil
}
return e.resolveRules(scope, clusterRoleGetter)
}
func has(set []string, value string) bool {
for _, element := range set {
if value == element {
return true
}
}
return false
}
// resolveRules doesn't enforce namespace checks
func (e clusterRoleEvaluator) resolveRules(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
roleName, _, escalating, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
role, err := clusterRoleGetter.Get(roleName)
if err != nil {
if kapierrors.IsNotFound(err) {
return []rbacv1.PolicyRule{}, nil
}
return nil, err
}
rules := []rbacv1.PolicyRule{}
for _, rule := range role.Rules {
if escalating {
rules = append(rules, rule)
continue
}
// rules with unbounded access shouldn't be allowed in scopes.
if has(rule.Verbs, rbacv1.VerbAll) ||
has(rule.Resources, rbacv1.ResourceAll) ||
has(rule.APIGroups, rbacv1.APIGroupAll) {
continue
}
// rules that allow escalating resource access should be cleaned.
safeRule := removeEscalatingResources(rule)
rules = append(rules, safeRule)
}
return rules, nil
}
func (e clusterRoleEvaluator) ResolveGettableNamespaces(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error) {
_, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope)
if err != nil {
return nil, err
}
rules, err := e.resolveRules(scope, clusterRoleGetter)
if err != nil {
return nil, err
}
attributes := kauthorizer.AttributesRecord{
APIGroup: coreGroupName,
Verb: "get",
Resource: "namespaces",
ResourceRequest: true,
}
if authorizerrbac.RulesAllow(attributes, rules...) {
return []string{scopeNamespace}, nil
}
return []string{}, nil
}
func remove(array []string, item string) []string {
newar := array[:0]
for _, element := range array {
if element != item {
newar = append(newar, element)
}
}
return newar
}
// removeEscalatingResources inspects a PolicyRule and removes any references to escalating resources.
// It has coarse logic for now. It is possible to rewrite one rule into many for the finest grain control
// but removing the entire matching resource regardless of verb or secondary group is cheaper, easier, and errs on the side removing
// too much, not too little
func removeEscalatingResources(in rbacv1.PolicyRule) rbacv1.PolicyRule {
var ruleCopy *rbacv1.PolicyRule
for _, resource := range escalatingScopeResources {
if !(has(in.APIGroups, resource.Group) && has(in.Resources, resource.Resource)) {
continue
}
if ruleCopy == nil {
// we're using a cache of cache of an object that uses pointers to data. I'm pretty sure we need to do a copy to avoid
// muddying the cache
ruleCopy = in.DeepCopy()
}
ruleCopy.Resources = remove(ruleCopy.Resources, resource.Resource)
}
if ruleCopy != nil {
return *ruleCopy
}
return in
} | // scopes are in the format | random_line_split |
bond_monitor.py | import pandas as pd
import pymongo
import pymssql
import tkinter as tk
from tkinter import ttk
import re
convert_list = [ "128", "117", "125", "126", "110", "113", "131"]
def get_bond_names():
bond_names = {}
conn = pymssql.connect(host='192.168.8.120', port=14333, user='GuestUser', password='GuestUser', database='JYDB',charset='GBK')
with conn.cursor() as cursor:
sql = ''' SELECT SecuCode, SecuAbbr,SecuMarket FROM Bond_Code '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
if i[2] == 83:
bond_names[i[0]+".SH"]=i[1]
if i[2] == 90:
bond_names[i[0]+".SZ"]=i[1]
return bond_names
def is_number(s):
try:
float(s)
return True
except:
pass
return False
def treeview_sort_column(tv,col,reverse):
l = [(tv.set(k,col),k) for k in tv.get_children('')]
if is_number(l[0][0]):
l.sort(key = lambda x: float(x[0]),reverse=reverse)
else:
l.sort(reverse = reverse)
for index,(val,k) in enumerate(l):
tv.move(k,'',index)
tv.heading(col,command=lambda :treeview_sort_column(tv,col,not reverse))
class basedesk():
def __init__(self, master):
self.root = master
self.root.title('future monitor')
self.root.geometry('1080x720')
self.table_init = False
self.signal_data = {}
self.bond_names = get_bond_names()
myclient = pymongo.MongoClient("mongodb://192.168.9.189:15009/")
self.mongo = myclient.data
self.mongo.authenticate("zlt01", "zlt_ujYH")
self.mysql = pymssql.connect(host='192.168.9.85', user='sa', password='lhtzb.123', database='BondMonitor')
self.target_bond = []
self.signal_list = []
self.get_target_bond()
self.db_lookup()
def get_target_bond(self):
with self.mysql.cursor() as cursor:
##取日常要订的表
sql = ''' SELECT * FROM Bond_list '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
if i[5] != 90 and i[5] != 83:
print(i)
if i[5] == 90:
self.target_bond.append(str(i[2]) + ".SZ")
if i[5] == 83:
self.target_bond.append(str(i[2]) + ".SH")
sql = ''' SELECT * FROM Abnormal_Bond_list '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
print(i)
self.target_bond.append(i[0])
def add_new_abnormal_bond(self,code):
with self.mysql.cursor() as cursor:
sql = "insert into Abnormal_Bond_list values (" +" ' "+code + "'"+ ","+"'"+self.bond_names[code]+ "'"+")"
print(sql)
cursor.execute(sql)
self.mysql.commit()
def show_warning(self,code):
top = tk.Toplevel()
top.geometry('640x480')
top.title('warnnig')
l3 =tk.Label(top,text='{} {}'.format(code,self.bond_names[code]))
l3.pack(side='top')
def db_lookup(self):
mongo = self.mongo
temp = mongo["quote_data"]
sample = temp.find({"code_name":re.compile("^1")})
# print(str(sample[0]["time"])[:4])
self.data = []
self.table_list = []
for s in sample:
if s["code_name"][0:3] not in convert_list:
if ((int(s["code_name"][0:3]) >= 150 or s["code_name"][0:3] == '127' or s["code_name"][0:3] == '123') and s["code_name"][-2::] == "SZ"):
pass
else:
rate = 0
if s["pre_close"] != 0:
rate = (s["last_price"] - s["pre_close"])/s["pre_close"]
if rate > 0.05 or (s["code_name"] in self.target_bond and (s["volume"] > 5000 or s["total_ask_vol"] > 2000 or s["total_bid_vol"] > 2000)):
self.signal_calc(s)
tags = ""
if self.signal_data[s["code_name"]]["signal"]:
tags= "warning"
self.data.append({
"code_name":s["code_name"],
"bond_name":self.bond_names[s["code_name"]],
"volume":s["volume"],
"signal":self.signal_data[s["code_name"]]["signal"],
"total_ask_vol":s["total_ask_vol"],
"total_bid_vol":s["total_bid_vol"],
"price":"{:.2f}".format(s["last_price"]),
"tags":tags
})
self.table_list.append(s["code_name"])
if rate > 0.05 and (s["code_name"] not in self.target_bond):
self.target_bond.append(s["code_name"])
self.add_new_abnormal_bond(s["code_name"])
if s["code_name"] not in self.signal_list:
self.signal_list.append(s["code_name"])
if s["code_name"] in self.signal_list:
self.signal_calc(s)
print("bond total:",len(self.data))
self.show_table()
self.root.after(10000,self.db_lookup)
def signal_calc(self,s):
minute = str(s["time"])[:4]
if s["code_name"] not in self.signal_data.keys():
self.signal_data[s["code_name"]] = {}
self.signal_data[s["code_name"]]["time"] = minute
self.signal_data[s["code_name"]]["pirce"] = []
self.signal_data[s["code_name"]]["pirce"].append(s["last_price"])
self.signal_data[s["code_name"]]["signal"] = False
else:
if self.signal_data[s["code_name"]]["time"] != minute :
self.signal_data[s["code_name"]]["time"] = minute
self.signal_data[s["code_name"]]["pirce"].append(s["last_price"])
pirce_len = len(self.signal_data[s["code_name"]]["pirce"])
if pirce_len >= 5 :
pirce_base = self.signal_data[s["code_name"]]["pirce"][-5]
if pirce_base != 0:
rate = (s["last_price"] - pirce_base) / pirce_base
if abs(rate) > 0.01:
self.show_warning(s["code_name"])
if pirce_len < 14:
pass
else:
total = 0.0
if len(self.signal_data[s["code_name"]]["pirce"]) != 15:
print("signal cacl error")
for i in self.signal_data[s["code_name"]]["pirce"]:
total = total + i
avg = total / 15
if s["last_price"] > avg:
self.signal_data[s["code_name"]]["signal"] = True
del self.signal_data[s["code_name"]]["pirce"][0]
def set_tv_head(self,tv):
tv["columns"] = self.title
for i in range(len(self.title)):
if self.title[i] == "account_name":
tv.column(self.title[i],width=180,anchor='center')
else:
tv.column(self.title[i],width=100,anchor='center')
tv.heading(self.title[i],text=self.title[i],command=lambda _col=self.title[i]:treeview_sort_column(tv,_col,False))
def show_table(self):
if not self.table_init:
self.title = ["code_name","bond_name","volume","signal","total_ask_vol","total_bid_vol","price"]
scrollbar = tk.Scrollbar(self.root)
scrollbar.pack(side=tk.RIGHT,fill=tk.Y)
self.main_tv = ttk.Treeview(self.root,columns=self.title,
yscrollcommand=scrollbar.set,
show='headings')
self.set_tv_head(self.main_tv)
for data in [self.data]:
for i in range(len(data)):
self.main_tv.insert('','end',values=[data[i][y] for y in self.title],tags=data[i]["tags"])
scrollbar.config(command=self.main_tv.yview)
self.main_tv.tag_configure('warning', background='red')
self.main_tv.pack(side="top",expand="yes",fill="both")
self.table_init = True
else:
all_items = self.main_tv.get_children("");
for item in all_items:
values = self.main_tv.item(item,"values")
if (len(values) != 0) and (values[0] not in self.table_list):
self.main_tv.delete(item)
continue
all_items = self.main_tv.get_children("");
data = self.data
for i in range(len(data)):
showed = False
for item in all_items:
values = self.main_tv.item(item,"values")
if len(values) != 0 and values[0] == data[i]["code_name"]:
self.main_tv.item(item,values = [data[i][y] for y in self.title],tags=data[i]["tags"])
showed = True
break
if not showed:
self.main_tv.insert('','end',values=[data[i][y] for y in self.title],tags=data[i]["tags"])
if __name__ == '__main__':
root = tk.Tk() |
basedesk(root)
root.mainloop()
| conditional_block | |
bond_monitor.py | import pandas as pd
import pymongo
import pymssql
import tkinter as tk
from tkinter import ttk
import re
convert_list = [ "128", "117", "125", "126", "110", "113", "131"]
def get_bond_names():
bond_names = {}
conn = pymssql.connect(host='192.168.8.120', port=14333, user='GuestUser', password='GuestUser', database='JYDB',charset='GBK')
with conn.cursor() as cursor:
sql = ''' SELECT SecuCode, SecuAbbr,SecuMarket FROM Bond_Code '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
if i[2] == 83:
bond_names[i[0]+".SH"]=i[1]
if i[2] == 90:
bond_names[i[0]+".SZ"]=i[1]
return bond_names
def is_number(s):
try:
float(s)
return True
except:
pass
return False
def treeview_sort_column(tv,col,reverse):
l = [(tv.set(k,col),k) for k in tv.get_children('')]
if is_number(l[0][0]):
l.sort(key = lambda x: float(x[0]),reverse=reverse)
else:
l.sort(reverse = reverse)
for index,(val,k) in enumerate(l):
tv.move(k,'',index)
tv.heading(col,command=lambda :treeview_sort_column(tv,col,not reverse))
class basedesk():
def | (self, master):
self.root = master
self.root.title('future monitor')
self.root.geometry('1080x720')
self.table_init = False
self.signal_data = {}
self.bond_names = get_bond_names()
myclient = pymongo.MongoClient("mongodb://192.168.9.189:15009/")
self.mongo = myclient.data
self.mongo.authenticate("zlt01", "zlt_ujYH")
self.mysql = pymssql.connect(host='192.168.9.85', user='sa', password='lhtzb.123', database='BondMonitor')
self.target_bond = []
self.signal_list = []
self.get_target_bond()
self.db_lookup()
def get_target_bond(self):
with self.mysql.cursor() as cursor:
##取日常要订的表
sql = ''' SELECT * FROM Bond_list '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
if i[5] != 90 and i[5] != 83:
print(i)
if i[5] == 90:
self.target_bond.append(str(i[2]) + ".SZ")
if i[5] == 83:
self.target_bond.append(str(i[2]) + ".SH")
sql = ''' SELECT * FROM Abnormal_Bond_list '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
print(i)
self.target_bond.append(i[0])
def add_new_abnormal_bond(self,code):
with self.mysql.cursor() as cursor:
sql = "insert into Abnormal_Bond_list values (" +" ' "+code + "'"+ ","+"'"+self.bond_names[code]+ "'"+")"
print(sql)
cursor.execute(sql)
self.mysql.commit()
def show_warning(self,code):
top = tk.Toplevel()
top.geometry('640x480')
top.title('warnnig')
l3 =tk.Label(top,text='{} {}'.format(code,self.bond_names[code]))
l3.pack(side='top')
def db_lookup(self):
mongo = self.mongo
temp = mongo["quote_data"]
sample = temp.find({"code_name":re.compile("^1")})
# print(str(sample[0]["time"])[:4])
self.data = []
self.table_list = []
for s in sample:
if s["code_name"][0:3] not in convert_list:
if ((int(s["code_name"][0:3]) >= 150 or s["code_name"][0:3] == '127' or s["code_name"][0:3] == '123') and s["code_name"][-2::] == "SZ"):
pass
else:
rate = 0
if s["pre_close"] != 0:
rate = (s["last_price"] - s["pre_close"])/s["pre_close"]
if rate > 0.05 or (s["code_name"] in self.target_bond and (s["volume"] > 5000 or s["total_ask_vol"] > 2000 or s["total_bid_vol"] > 2000)):
self.signal_calc(s)
tags = ""
if self.signal_data[s["code_name"]]["signal"]:
tags= "warning"
self.data.append({
"code_name":s["code_name"],
"bond_name":self.bond_names[s["code_name"]],
"volume":s["volume"],
"signal":self.signal_data[s["code_name"]]["signal"],
"total_ask_vol":s["total_ask_vol"],
"total_bid_vol":s["total_bid_vol"],
"price":"{:.2f}".format(s["last_price"]),
"tags":tags
})
self.table_list.append(s["code_name"])
if rate > 0.05 and (s["code_name"] not in self.target_bond):
self.target_bond.append(s["code_name"])
self.add_new_abnormal_bond(s["code_name"])
if s["code_name"] not in self.signal_list:
self.signal_list.append(s["code_name"])
if s["code_name"] in self.signal_list:
self.signal_calc(s)
print("bond total:",len(self.data))
self.show_table()
self.root.after(10000,self.db_lookup)
def signal_calc(self,s):
minute = str(s["time"])[:4]
if s["code_name"] not in self.signal_data.keys():
self.signal_data[s["code_name"]] = {}
self.signal_data[s["code_name"]]["time"] = minute
self.signal_data[s["code_name"]]["pirce"] = []
self.signal_data[s["code_name"]]["pirce"].append(s["last_price"])
self.signal_data[s["code_name"]]["signal"] = False
else:
if self.signal_data[s["code_name"]]["time"] != minute :
self.signal_data[s["code_name"]]["time"] = minute
self.signal_data[s["code_name"]]["pirce"].append(s["last_price"])
pirce_len = len(self.signal_data[s["code_name"]]["pirce"])
if pirce_len >= 5 :
pirce_base = self.signal_data[s["code_name"]]["pirce"][-5]
if pirce_base != 0:
rate = (s["last_price"] - pirce_base) / pirce_base
if abs(rate) > 0.01:
self.show_warning(s["code_name"])
if pirce_len < 14:
pass
else:
total = 0.0
if len(self.signal_data[s["code_name"]]["pirce"]) != 15:
print("signal cacl error")
for i in self.signal_data[s["code_name"]]["pirce"]:
total = total + i
avg = total / 15
if s["last_price"] > avg:
self.signal_data[s["code_name"]]["signal"] = True
del self.signal_data[s["code_name"]]["pirce"][0]
def set_tv_head(self,tv):
tv["columns"] = self.title
for i in range(len(self.title)):
if self.title[i] == "account_name":
tv.column(self.title[i],width=180,anchor='center')
else:
tv.column(self.title[i],width=100,anchor='center')
tv.heading(self.title[i],text=self.title[i],command=lambda _col=self.title[i]:treeview_sort_column(tv,_col,False))
def show_table(self):
if not self.table_init:
self.title = ["code_name","bond_name","volume","signal","total_ask_vol","total_bid_vol","price"]
scrollbar = tk.Scrollbar(self.root)
scrollbar.pack(side=tk.RIGHT,fill=tk.Y)
self.main_tv = ttk.Treeview(self.root,columns=self.title,
yscrollcommand=scrollbar.set,
show='headings')
self.set_tv_head(self.main_tv)
for data in [self.data]:
for i in range(len(data)):
self.main_tv.insert('','end',values=[data[i][y] for y in self.title],tags=data[i]["tags"])
scrollbar.config(command=self.main_tv.yview)
self.main_tv.tag_configure('warning', background='red')
self.main_tv.pack(side="top",expand="yes",fill="both")
self.table_init = True
else:
all_items = self.main_tv.get_children("");
for item in all_items:
values = self.main_tv.item(item,"values")
if (len(values) != 0) and (values[0] not in self.table_list):
self.main_tv.delete(item)
continue
all_items = self.main_tv.get_children("");
data = self.data
for i in range(len(data)):
showed = False
for item in all_items:
values = self.main_tv.item(item,"values")
if len(values) != 0 and values[0] == data[i]["code_name"]:
self.main_tv.item(item,values = [data[i][y] for y in self.title],tags=data[i]["tags"])
showed = True
break
if not showed:
self.main_tv.insert('','end',values=[data[i][y] for y in self.title],tags=data[i]["tags"])
if __name__ == '__main__':
root = tk.Tk()
basedesk(root)
root.mainloop()
| __init__ | identifier_name |
bond_monitor.py | import pandas as pd
import pymongo
import pymssql
import tkinter as tk
from tkinter import ttk
import re
convert_list = [ "128", "117", "125", "126", "110", "113", "131"]
def get_bond_names():
bond_names = {}
conn = pymssql.connect(host='192.168.8.120', port=14333, user='GuestUser', password='GuestUser', database='JYDB',charset='GBK')
with conn.cursor() as cursor:
sql = ''' SELECT SecuCode, SecuAbbr,SecuMarket FROM Bond_Code '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
if i[2] == 83:
bond_names[i[0]+".SH"]=i[1]
if i[2] == 90:
bond_names[i[0]+".SZ"]=i[1]
return bond_names
def is_number(s):
try:
float(s)
return True
except:
pass
return False
def treeview_sort_column(tv,col,reverse):
l = [(tv.set(k,col),k) for k in tv.get_children('')]
if is_number(l[0][0]):
l.sort(key = lambda x: float(x[0]),reverse=reverse)
else:
l.sort(reverse = reverse)
for index,(val,k) in enumerate(l):
tv.move(k,'',index)
tv.heading(col,command=lambda :treeview_sort_column(tv,col,not reverse))
class basedesk():
def __init__(self, master):
self.root = master
self.root.title('future monitor')
self.root.geometry('1080x720')
self.table_init = False
self.signal_data = {}
self.bond_names = get_bond_names()
myclient = pymongo.MongoClient("mongodb://192.168.9.189:15009/")
self.mongo = myclient.data
self.mongo.authenticate("zlt01", "zlt_ujYH")
| self.target_bond = []
self.signal_list = []
self.get_target_bond()
self.db_lookup()
def get_target_bond(self):
with self.mysql.cursor() as cursor:
##取日常要订的表
sql = ''' SELECT * FROM Bond_list '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
if i[5] != 90 and i[5] != 83:
print(i)
if i[5] == 90:
self.target_bond.append(str(i[2]) + ".SZ")
if i[5] == 83:
self.target_bond.append(str(i[2]) + ".SH")
sql = ''' SELECT * FROM Abnormal_Bond_list '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
print(i)
self.target_bond.append(i[0])
def add_new_abnormal_bond(self,code):
with self.mysql.cursor() as cursor:
sql = "insert into Abnormal_Bond_list values (" +" ' "+code + "'"+ ","+"'"+self.bond_names[code]+ "'"+")"
print(sql)
cursor.execute(sql)
self.mysql.commit()
def show_warning(self,code):
top = tk.Toplevel()
top.geometry('640x480')
top.title('warnnig')
l3 =tk.Label(top,text='{} {}'.format(code,self.bond_names[code]))
l3.pack(side='top')
def db_lookup(self):
mongo = self.mongo
temp = mongo["quote_data"]
sample = temp.find({"code_name":re.compile("^1")})
# print(str(sample[0]["time"])[:4])
self.data = []
self.table_list = []
for s in sample:
if s["code_name"][0:3] not in convert_list:
if ((int(s["code_name"][0:3]) >= 150 or s["code_name"][0:3] == '127' or s["code_name"][0:3] == '123') and s["code_name"][-2::] == "SZ"):
pass
else:
rate = 0
if s["pre_close"] != 0:
rate = (s["last_price"] - s["pre_close"])/s["pre_close"]
if rate > 0.05 or (s["code_name"] in self.target_bond and (s["volume"] > 5000 or s["total_ask_vol"] > 2000 or s["total_bid_vol"] > 2000)):
self.signal_calc(s)
tags = ""
if self.signal_data[s["code_name"]]["signal"]:
tags= "warning"
self.data.append({
"code_name":s["code_name"],
"bond_name":self.bond_names[s["code_name"]],
"volume":s["volume"],
"signal":self.signal_data[s["code_name"]]["signal"],
"total_ask_vol":s["total_ask_vol"],
"total_bid_vol":s["total_bid_vol"],
"price":"{:.2f}".format(s["last_price"]),
"tags":tags
})
self.table_list.append(s["code_name"])
if rate > 0.05 and (s["code_name"] not in self.target_bond):
self.target_bond.append(s["code_name"])
self.add_new_abnormal_bond(s["code_name"])
if s["code_name"] not in self.signal_list:
self.signal_list.append(s["code_name"])
if s["code_name"] in self.signal_list:
self.signal_calc(s)
print("bond total:",len(self.data))
self.show_table()
self.root.after(10000,self.db_lookup)
def signal_calc(self,s):
minute = str(s["time"])[:4]
if s["code_name"] not in self.signal_data.keys():
self.signal_data[s["code_name"]] = {}
self.signal_data[s["code_name"]]["time"] = minute
self.signal_data[s["code_name"]]["pirce"] = []
self.signal_data[s["code_name"]]["pirce"].append(s["last_price"])
self.signal_data[s["code_name"]]["signal"] = False
else:
if self.signal_data[s["code_name"]]["time"] != minute :
self.signal_data[s["code_name"]]["time"] = minute
self.signal_data[s["code_name"]]["pirce"].append(s["last_price"])
pirce_len = len(self.signal_data[s["code_name"]]["pirce"])
if pirce_len >= 5 :
pirce_base = self.signal_data[s["code_name"]]["pirce"][-5]
if pirce_base != 0:
rate = (s["last_price"] - pirce_base) / pirce_base
if abs(rate) > 0.01:
self.show_warning(s["code_name"])
if pirce_len < 14:
pass
else:
total = 0.0
if len(self.signal_data[s["code_name"]]["pirce"]) != 15:
print("signal cacl error")
for i in self.signal_data[s["code_name"]]["pirce"]:
total = total + i
avg = total / 15
if s["last_price"] > avg:
self.signal_data[s["code_name"]]["signal"] = True
del self.signal_data[s["code_name"]]["pirce"][0]
def set_tv_head(self,tv):
tv["columns"] = self.title
for i in range(len(self.title)):
if self.title[i] == "account_name":
tv.column(self.title[i],width=180,anchor='center')
else:
tv.column(self.title[i],width=100,anchor='center')
tv.heading(self.title[i],text=self.title[i],command=lambda _col=self.title[i]:treeview_sort_column(tv,_col,False))
def show_table(self):
if not self.table_init:
self.title = ["code_name","bond_name","volume","signal","total_ask_vol","total_bid_vol","price"]
scrollbar = tk.Scrollbar(self.root)
scrollbar.pack(side=tk.RIGHT,fill=tk.Y)
self.main_tv = ttk.Treeview(self.root,columns=self.title,
yscrollcommand=scrollbar.set,
show='headings')
self.set_tv_head(self.main_tv)
for data in [self.data]:
for i in range(len(data)):
self.main_tv.insert('','end',values=[data[i][y] for y in self.title],tags=data[i]["tags"])
scrollbar.config(command=self.main_tv.yview)
self.main_tv.tag_configure('warning', background='red')
self.main_tv.pack(side="top",expand="yes",fill="both")
self.table_init = True
else:
all_items = self.main_tv.get_children("");
for item in all_items:
values = self.main_tv.item(item,"values")
if (len(values) != 0) and (values[0] not in self.table_list):
self.main_tv.delete(item)
continue
all_items = self.main_tv.get_children("");
data = self.data
for i in range(len(data)):
showed = False
for item in all_items:
values = self.main_tv.item(item,"values")
if len(values) != 0 and values[0] == data[i]["code_name"]:
self.main_tv.item(item,values = [data[i][y] for y in self.title],tags=data[i]["tags"])
showed = True
break
if not showed:
self.main_tv.insert('','end',values=[data[i][y] for y in self.title],tags=data[i]["tags"])
if __name__ == '__main__':
root = tk.Tk()
basedesk(root)
root.mainloop() | self.mysql = pymssql.connect(host='192.168.9.85', user='sa', password='lhtzb.123', database='BondMonitor')
| random_line_split |
bond_monitor.py | import pandas as pd
import pymongo
import pymssql
import tkinter as tk
from tkinter import ttk
import re
convert_list = [ "128", "117", "125", "126", "110", "113", "131"]
def get_bond_names():
bond_names = {}
conn = pymssql.connect(host='192.168.8.120', port=14333, user='GuestUser', password='GuestUser', database='JYDB',charset='GBK')
with conn.cursor() as cursor:
sql = ''' SELECT SecuCode, SecuAbbr,SecuMarket FROM Bond_Code '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
if i[2] == 83:
bond_names[i[0]+".SH"]=i[1]
if i[2] == 90:
bond_names[i[0]+".SZ"]=i[1]
return bond_names
def is_number(s):
try:
float(s)
return True
except:
pass
return False
def treeview_sort_column(tv,col,reverse):
l = [(tv.set(k,col),k) for k in tv.get_children('')]
if is_number(l[0][0]):
l.sort(key = lambda x: float(x[0]),reverse=reverse)
else:
l.sort(reverse = reverse)
for index,(val,k) in enumerate(l):
tv.move(k,'',index)
tv.heading(col,command=lambda :treeview_sort_column(tv,col,not reverse))
class basedesk():
def __init__(self, master):
self.root = master
self.root.title('future monitor')
self.root.geometry('1080x720')
self.table_init = False
self.signal_data = {}
self.bond_names = get_bond_names()
myclient = pymongo.MongoClient("mongodb://192.168.9.189:15009/")
self.mongo = myclient.data
self.mongo.authenticate("zlt01", "zlt_ujYH")
self.mysql = pymssql.connect(host='192.168.9.85', user='sa', password='lhtzb.123', database='BondMonitor')
self.target_bond = []
self.signal_list = []
self.get_target_bond()
self.db_lookup()
def get_target_bond(self):
with self.mysql.cursor() as cursor:
##取日常要订的表
sql = ''' SELECT * FROM Bond_list '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
if i[5] != 90 and i[5] != 83:
print(i)
if i[5] == 90:
self.target_bond.append(str(i[2]) + ".SZ")
if i[5] == 83:
self.target_bond.append(str(i[2]) + ".SH")
sql = ''' SELECT * FROM Abnormal_Bond_list '''
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
print(i)
self.target_bond.append(i[0])
def add_new_abnormal_bond(self,code):
with self.mysql.cursor() as cursor:
sql = "insert into Abnormal_Bond_list values (" +" ' "+code + "'"+ ","+"'"+self.bond_names[code]+ "'"+")"
print(sql)
cursor.execute(sql)
self.mysql.commit()
def show_warning(self,code):
top = tk.Toplevel()
top.geometry('640x480')
top.title('warnnig')
l3 =tk.Label(top,text='{} {}'.format(code,self.bond_names[code]))
l3.pack(side='top')
def db_lookup(self):
mongo = self.mongo
temp = mongo["quote_data"]
sample = temp.find({"code_name":re.compile("^1")})
# print(str(sample[0]["time"])[:4])
self.data = []
self.table_list = []
for s in sample:
if s["code_name"][0:3] not in convert_list:
if ((int(s["code_name"][0:3]) >= 150 or s["code_name"][0:3] == '127' or s["code_name"][0:3] == '123') and s["code_name"][-2::] == "SZ"):
pass
else:
rate = 0
if s["pre_close"] != 0:
rate = (s["last_price"] - s["pre_close"])/s["pre_close"]
if rate > 0.05 or (s["code_name"] in self.target_bond and (s["volume"] > 5000 or s["total_ask_vol"] > 2000 or s["total_bid_vol"] > 2000)):
self.signal_calc(s)
tags = ""
if self.signal_data[s["code_name"]]["signal"]:
tags= "warning"
self.data.append({
"code_name":s["code_name"],
"bond_name":self.bond_names[s["code_name"]],
"volume":s["volume"],
"signal":self.signal_data[s["code_name"]]["signal"],
"total_ask_vol":s["total_ask_vol"],
"total_bid_vol":s["total_bid_vol"],
"price":"{:.2f}".format(s["last_price"]),
"tags":tags
})
self.table_list.append(s["code_name"])
if rate > 0.05 and (s["code_name"] not in self.target_bond):
self.target_bond.append(s["code_name"])
self.add_new_abnormal_bond(s["code_name"])
if s["code_name"] not in self.signal_list:
self.signal_list.append(s["code_name"])
if s["code_name"] in self.signal_list:
self.signal_calc(s)
print("bond total:",len(self.data))
self.show_table()
self.root.after(10000,self.db_lookup)
def signal_calc(self,s):
minute = str(s | def set_tv_head(self,tv):
tv["columns"] = self.title
for i in range(len(self.title)):
if self.title[i] == "account_name":
tv.column(self.title[i],width=180,anchor='center')
else:
tv.column(self.title[i],width=100,anchor='center')
tv.heading(self.title[i],text=self.title[i],command=lambda _col=self.title[i]:treeview_sort_column(tv,_col,False))
def show_table(self):
if not self.table_init:
self.title = ["code_name","bond_name","volume","signal","total_ask_vol","total_bid_vol","price"]
scrollbar = tk.Scrollbar(self.root)
scrollbar.pack(side=tk.RIGHT,fill=tk.Y)
self.main_tv = ttk.Treeview(self.root,columns=self.title,
yscrollcommand=scrollbar.set,
show='headings')
self.set_tv_head(self.main_tv)
for data in [self.data]:
for i in range(len(data)):
self.main_tv.insert('','end',values=[data[i][y] for y in self.title],tags=data[i]["tags"])
scrollbar.config(command=self.main_tv.yview)
self.main_tv.tag_configure('warning', background='red')
self.main_tv.pack(side="top",expand="yes",fill="both")
self.table_init = True
else:
all_items = self.main_tv.get_children("");
for item in all_items:
values = self.main_tv.item(item,"values")
if (len(values) != 0) and (values[0] not in self.table_list):
self.main_tv.delete(item)
continue
all_items = self.main_tv.get_children("");
data = self.data
for i in range(len(data)):
showed = False
for item in all_items:
values = self.main_tv.item(item,"values")
if len(values) != 0 and values[0] == data[i]["code_name"]:
self.main_tv.item(item,values = [data[i][y] for y in self.title],tags=data[i]["tags"])
showed = True
break
if not showed:
self.main_tv.insert('','end',values=[data[i][y] for y in self.title],tags=data[i]["tags"])
if __name__ == '__main__':
root = tk.Tk()
basedesk(root)
root.mainloop()
| ["time"])[:4]
if s["code_name"] not in self.signal_data.keys():
self.signal_data[s["code_name"]] = {}
self.signal_data[s["code_name"]]["time"] = minute
self.signal_data[s["code_name"]]["pirce"] = []
self.signal_data[s["code_name"]]["pirce"].append(s["last_price"])
self.signal_data[s["code_name"]]["signal"] = False
else:
if self.signal_data[s["code_name"]]["time"] != minute :
self.signal_data[s["code_name"]]["time"] = minute
self.signal_data[s["code_name"]]["pirce"].append(s["last_price"])
pirce_len = len(self.signal_data[s["code_name"]]["pirce"])
if pirce_len >= 5 :
pirce_base = self.signal_data[s["code_name"]]["pirce"][-5]
if pirce_base != 0:
rate = (s["last_price"] - pirce_base) / pirce_base
if abs(rate) > 0.01:
self.show_warning(s["code_name"])
if pirce_len < 14:
pass
else:
total = 0.0
if len(self.signal_data[s["code_name"]]["pirce"]) != 15:
print("signal cacl error")
for i in self.signal_data[s["code_name"]]["pirce"]:
total = total + i
avg = total / 15
if s["last_price"] > avg:
self.signal_data[s["code_name"]]["signal"] = True
del self.signal_data[s["code_name"]]["pirce"][0]
| identifier_body |
main.rs | use std::env;
use std::fs;
extern crate rgsl;
use std::rc::Rc;
use std::cell::RefCell;
use plotters::prelude::*;
use std::time::Instant;
use std::path::Path;
use std::io::{Error, ErrorKind};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use image::{imageops::FilterType, ImageFormat};
#[macro_use]
extern crate lazy_static;
lazy_static!{
static ref AIR_REFRACTIVE_INDEX:f64 = 1f64;
static ref CORNEA_REFRACTIVE_INDEX:f64 = 1.37;
}
macro_rules! clone {
(@param _) => ( _ );
(@param $x:ident) => ( mut $x );
($($n:ident),+ => move || $body:expr) => (
{
$( let $n = $n.clone(); )+
move || $body
}
);
($($n:ident),+ => move |$($p:tt),+| $body:expr) => (
{
$( let $n = $n.clone(); )+
move |$(clone!(@param $p),)+| $body
}
);
}
fn exp_f(x: &rgsl::VectorF64, f: &mut rgsl::VectorF64, data: &Data) -> rgsl::Value {
let a = x.get(0);
let b = x.get(1);
let c = x.get(2);
for (i, (x, y)) in data.x.iter().zip(data.y.iter()).enumerate(){
/* Model Yi = a * x^2 + b * x + c*/
let yi = a * x.powi(2) + b * x + c;
f.set(i, yi - y);
}
rgsl::Value::Success
}
fn exp_df(x: &rgsl::VectorF64, J: &mut rgsl::MatrixF64, data: &Data) -> rgsl::Value {
for (i, x) in data.x.iter().enumerate(){
/* Jacobian matrix J(i,j) = dfi / dxj, */
/* where fi = (Yi - yi)/sigma[i], */
/* Yi = A * exp(-lambda * i) + b */
/* and the xj are the parameters (A,lambda,b) */
J.set(i, 0, x.powi(2));
J.set(i, 1, *x);
J.set(i, 2, 1f64);
}
rgsl::Value::Success
}
fn print_state(iter: usize, s: &rgsl::MultiFitFdfSolver) {
println!("iter: {} x = {} {} {} |f(x)| = {}", iter,
s.x().get(0), s.x().get(1), s.x().get(2), rgsl::blas::level1::dnrm2(&s.f()));
}
#[derive(Debug)]
pub struct Data{
pub x: Vec<f64>,
pub y: Vec<f64>,
n: usize
}
fn read_file_into_lines(filename: &str)-> Vec<String>{
let contents = fs::read_to_string(
filename).expect(&format!("can't read file {}", filename));
let rows: Vec<String> = contents.split('\n').map(|s| s.to_string()).collect();
rows
}
fn bound_in_axis(x: f32, y: f32) -> bool{
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
x > x_low as f32 && x < x_high as f32 && y > y_low as f32 && y < y_high as f32
}
fn plot_parabola(data: &Data, params: &rgsl::VectorF64, additional_points: Vec<Vec<f64>>, fig_path: &String, image_path: &String){
let root = BitMapBackend::new(fig_path, (1024, 1024)).into_drawing_area();
root.fill(&WHITE).unwrap();
let mut chart = ChartBuilder::on(&root)
.caption("Optical Distortion Correction", ("sans-serif", 10).into_font())
.margin(5)
.x_label_area_size(30)
.y_label_area_size(30)
.build_ranged(8f32..-8f32, 16f32..0f32).unwrap();
//chart.configure_mesh().draw().unwrap();
// Plot background image
//let (w, h) = chart.plotting_area().dim_in_pixel();
//println!("plotting area: {}, {}", w, h);
//let image = image::load(
//BufReader::new(File::open(image_path).unwrap()),
//ImageFormat::Bmp,
//).unwrap()
//.resize_exact(16, 16, FilterType::Nearest);
//let elem: BitMapElement<_> = ((-8.0, 8.0), image).into();
//chart.draw_series(std::iter::once(elem)).unwrap();
// Draw Overlay points
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
// Draw parabola
chart.draw_series(LineSeries::new(
(x_low..=x_high).map(|x| {x as f32}).map(|x| (x, x.powi(2) * a as f32 + x * b as f32 + c as f32)),
&RED,
)).unwrap()
.label(format!("y = {:.3}x^2 + {:.3}x + {:.3}", a, b, c)).legend(|(x, y)| PathElement::new(vec![(x, y), (x + 2, y)], &RED));
// Draw cornea points
chart.draw_series(PointSeries::of_element(data.x.iter().zip(data.y.iter()).map(|(x, y)| (*x as f32, *y as f32)), 2, ShapeStyle::from(&RED).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Plot pupil margins
chart.draw_series(PointSeries::of_element(additional_points.iter().map(|v| (v[0] as f32, v[1] as f32)), 3, ShapeStyle::from(&BLUE).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Draw inbound ray
let pm0_x = additional_points[0][0];
let pm0_boundary = vec![pm0_x, a * pm0_x.powi(2) + b * pm0_x + c];
let pm1_x = additional_points[1][0];
let pm1_boundary = vec![pm1_x, a * pm1_x.powi(2) + b * pm1_x + c];
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm0_x as f32, y)),
&BLUE)).unwrap();
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm1_x as f32, y)),
&BLUE)).unwrap();
// Draw tangent line
let tangent_line_0_k = 2f64 * a * pm0_x + b;
let tangent_line_0_b = pm0_boundary[1] - tangent_line_0_k * pm0_x;
let mut tangent_line_low = ((pm0_x - 2f64) * 10f64) as i32;
let mut tangent_line_high = ((pm0_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k as f32 * x + tangent_line_0_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_0_k_vert = -1f64 / tangent_line_0_k;
let tangent_line_0_b_vert = pm0_boundary[1] - (-1f64 / tangent_line_0_k) * pm0_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k_vert as f32 * x + tangent_line_0_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Draw tangent line
let tangent_line_1_k = 2f64 * a * pm1_x + b;
let tangent_line_1_b = pm1_boundary[1] - tangent_line_1_k * pm1_x;
tangent_line_low = ((pm1_x - 2f64) * 10f64) as i32;
tangent_line_high = ((pm1_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k as f32 * x + tangent_line_1_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_1_k_vert = -1f64 / tangent_line_1_k;
let tangent_line_1_b_vert = pm1_boundary[1] - (-1f64 / tangent_line_1_k) * pm1_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k_vert as f32 * x + tangent_line_1_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
chart
.configure_series_labels()
.background_style(&WHITE.mix(0.8))
.border_style(&BLACK)
.draw().unwrap();
}
fn curve_fitting(data: Rc<Data>) -> Result<rgsl::MultiFitFdfSolver, Error>{
//Initialize Parameters
let num_params = 3;
let num_instances = data.n;
let mut status = rgsl::Value::Success;
let mut J = match rgsl::MatrixF64::new(num_params, num_params){
Some(mat) => mat,
None => return Err(Error::new(ErrorKind::Other, "Can't create Jacobian matrix"))
};
let mut params_init = [0f64, -1f64, 100f64];
let mut params = rgsl::VectorView::from_array(&mut params_init);
rgsl::RngType::env_setup();
let rng_type = rgsl::rng::default();
let mut rng = match rgsl::Rng::new(&rng_type){
Some(r) => r,
None => return Err(Error::new(ErrorKind::Other, "Can't create rng"))
};
let solver_type = rgsl::MultiFitFdfSolverType::lmsder();
let mut func = rgsl::MultiFitFunctionFdf::new(num_instances, num_params);
let expb_f = clone!(data => move |x, f| {
exp_f(&x, &mut f, &*data)
});
func.f = Some(Box::new(expb_f));
let expb_df = clone!(data => move |x, J| {
exp_df(&x, &mut J, &*data)
});
func.df = Some(Box::new(expb_df));
let expb_fdf = clone!(data => move |x, f, J| {
exp_f(&x, &mut f, &*data);
exp_df(&x, &mut J, &*data);
rgsl::Value::Success
});
func.fdf = Some(Box::new(expb_fdf));
// Create a solver
let mut solver = match rgsl::MultiFitFdfSolver::new(&solver_type, num_instances, num_params){
Some(s) => s,
None => return Err(Error::new(ErrorKind::Other, "can't create solver"))
};
solver.set(&mut func, ¶ms.vector());
let mut iter = 0;
loop {
iter += 1;
status = solver.iterate();
println!("status = {}", rgsl::error::str_error(status));
print_state(iter, &solver);
if status != rgsl::Value::Success {
//return Err(Error::new(ErrorKind::TimedOut, "Reconstruction failed"));
break;
}
//println!("dx: {:?}", &solver.dx());
//println!("position: {:?}", &s.position());
status = rgsl::multifit::test_delta(&solver.dx(), &solver.x(), 1e-4, 1e-4);
if status != rgsl::Value::Continue || iter >= 500 {
break;
}
}
println!("Done");
println!("params: {:?}", &solver.x());
Ok(solver)
}
fn apply_optical_distortion_correction(params: &rgsl::VectorF64, points: &Vec<Vec<f64>>) -> Vec<Vec<f64>>{
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
let mut new_points: Vec<Vec<f64>> = Vec::new();
for point in points.iter(){
let k1 = 2f64 * a * point[0] + b;
let theta_1 = k1.atan();
let mut theta_2 = 0f64;
let mut theta = 0f64;
let mut new_point: Vec<f64> = Vec::new();
if theta_1 > 0f64{
theta_2 = theta_1 * *AIR_REFRACTIVE_INDEX/ *CORNEA_REFRACTIVE_INDEX;
theta = theta_1 - theta_2;
}else{
theta_2 = theta_1.abs() / *CORNEA_REFRACTIVE_INDEX;
theta = -1f64 * (theta_1.abs() - theta_2);
}
println!("theta: {}", theta);
let boundary_point = vec![point[0], a * point[0].powi(2) + b * point[0] + c];
let new_length = (boundary_point[1] - point[1]) / *CORNEA_REFRACTIVE_INDEX;
new_point.push(boundary_point[0] + new_length * theta.sin());
new_point.push(boundary_point[1] - new_length * theta.cos());
println!("old: {:?}, new: {:?}", point, new_point);
new_points.push(new_point);
}
new_points
}
/*
fn run_on_folder(folder: &str){
let mut total_time_ms = 0;
let mut iter = 0;
for entry in fs::read_dir(folder).unwrap(){
let entry = entry.unwrap();
let file_path = entry.path();
if !file_path.is_dir(){
let lines = read_file_into_lines(file_path.to_str().unwrap());
println!("process: {:?}", file_path);
// Create Data
let mut data = Rc::new(RefCell::new(Data{
x: Vec::<f64>::new(),
y: Vec::<f64>::new(),
n: 0
}));
for (i, line) in lines.iter().enumerate(){
let mut data = *data.borrow_mut();
let numbers: Vec<f64> = line.split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
//println!("data: {:?}", numbers);
if numbers.len() > 1{
data.x.push(*numbers.get(0).unwrap());
data.y.push(*numbers.get(1).unwrap());
data.n += 1;
}
}
//println!("Data: {:?}", data);
if data.borrow().n >= 3{
let fig_path: String = format!("./figs/{}.png", file_path.file_name().unwrap().to_str().unwrap());
let now = Instant::now();
let curve_params = curve_fitting(data);
total_time_ms += now.elapsed().as_micros();
iter += 1;
}
}
}
println!("run {} iterations", iter);
println!("average time: {} microsecond", total_time_ms/ iter);
}
*/
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
if path.is_dir(){
//run_on_folder(&path.to_str().unwrap());
println!("TODO: fix bug in run folder");
}else{
let lines = read_file_into_lines(path.to_str().unwrap());
let mut total_time_micros = 0;
let mut iter = 0;
let mut file = File::create("cross_corrected.keypoint").unwrap();
file.write_all(b"scanId\teyeId\tbscanId\tpm0xmm\tpm0ymm\tpm0zmm\tpm1xmm\tpm1ymm\tpm1zmm\n");
for line in lines.iter().skip(1){
let elements: Vec<&str> = line.split(",").collect();
if elements.len() > 8 | else{
println!("total elements: {}", elements.len());
println!("Can't process {}", line);
}
}
println!("Total iteration: {}", iter);
println!("Average time: {}", total_time_micros / iter);
}
}
| {
let pm0: Vec<f64> = vec![elements[3], elements[4]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let pm1: Vec<f64> = vec![elements[5], elements[6]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let cp_x: Vec<f64> = elements[7].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let cp_y: Vec<f64> = elements[8].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let num_kp = cp_y.len();
let mut data = Rc::new(Data{
x: cp_x,
y: cp_y,
n: num_kp
});
//println!("{:?}", data.x);
if data.n >= 3{
let now = Instant::now();
let solver = match curve_fitting(Rc::clone(&data)){
Ok(p) => p,
Err(e) => panic!("can't reconstruct curve")
};
println!("{:?}", solver.x());
let mut add_points = vec![pm0, pm1];
let corrected_points = apply_optical_distortion_correction(&solver.x(), &add_points);
if elements[2] == "x"{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
corrected_points[0][0], 0, corrected_points[0][1],
corrected_points[1][0], 0, corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}else{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
-1.53, corrected_points[0][0], corrected_points[0][1],
-1.53, corrected_points[1][0], corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}
//println!("{:?}", corrected_points);
total_time_micros += now.elapsed().as_micros();
iter += 1;
add_points.extend(corrected_points);
let image_path = format!("./resource/TestMotility/OD/{}/0{}.bmp", elements[2], elements[0]);
println!("image: {}", image_path);
let fig_path: String = format!("./figs_mm/{}_{}.png", elements[0], elements[2]);
//plot_parabola(&data, &solver.x(), add_points, &fig_path, &image_path);
}
} | conditional_block |
main.rs | use std::env;
use std::fs;
extern crate rgsl;
use std::rc::Rc;
use std::cell::RefCell;
use plotters::prelude::*;
use std::time::Instant;
use std::path::Path;
use std::io::{Error, ErrorKind};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use image::{imageops::FilterType, ImageFormat};
#[macro_use]
extern crate lazy_static;
lazy_static!{
static ref AIR_REFRACTIVE_INDEX:f64 = 1f64;
static ref CORNEA_REFRACTIVE_INDEX:f64 = 1.37;
}
macro_rules! clone {
(@param _) => ( _ );
(@param $x:ident) => ( mut $x );
($($n:ident),+ => move || $body:expr) => (
{
$( let $n = $n.clone(); )+
move || $body
}
);
($($n:ident),+ => move |$($p:tt),+| $body:expr) => (
{
$( let $n = $n.clone(); )+
move |$(clone!(@param $p),)+| $body
}
);
}
fn exp_f(x: &rgsl::VectorF64, f: &mut rgsl::VectorF64, data: &Data) -> rgsl::Value {
let a = x.get(0);
let b = x.get(1);
let c = x.get(2);
for (i, (x, y)) in data.x.iter().zip(data.y.iter()).enumerate(){
/* Model Yi = a * x^2 + b * x + c*/
let yi = a * x.powi(2) + b * x + c;
f.set(i, yi - y);
}
rgsl::Value::Success
}
fn exp_df(x: &rgsl::VectorF64, J: &mut rgsl::MatrixF64, data: &Data) -> rgsl::Value {
for (i, x) in data.x.iter().enumerate(){
/* Jacobian matrix J(i,j) = dfi / dxj, */
/* where fi = (Yi - yi)/sigma[i], */
/* Yi = A * exp(-lambda * i) + b */
/* and the xj are the parameters (A,lambda,b) */
J.set(i, 0, x.powi(2));
J.set(i, 1, *x);
J.set(i, 2, 1f64);
}
rgsl::Value::Success
}
fn print_state(iter: usize, s: &rgsl::MultiFitFdfSolver) {
println!("iter: {} x = {} {} {} |f(x)| = {}", iter,
s.x().get(0), s.x().get(1), s.x().get(2), rgsl::blas::level1::dnrm2(&s.f()));
}
#[derive(Debug)]
pub struct Data{
pub x: Vec<f64>,
pub y: Vec<f64>,
n: usize
}
fn read_file_into_lines(filename: &str)-> Vec<String>{
let contents = fs::read_to_string(
filename).expect(&format!("can't read file {}", filename));
let rows: Vec<String> = contents.split('\n').map(|s| s.to_string()).collect();
rows
}
fn bound_in_axis(x: f32, y: f32) -> bool{
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
x > x_low as f32 && x < x_high as f32 && y > y_low as f32 && y < y_high as f32
}
fn plot_parabola(data: &Data, params: &rgsl::VectorF64, additional_points: Vec<Vec<f64>>, fig_path: &String, image_path: &String){
let root = BitMapBackend::new(fig_path, (1024, 1024)).into_drawing_area();
root.fill(&WHITE).unwrap();
let mut chart = ChartBuilder::on(&root)
.caption("Optical Distortion Correction", ("sans-serif", 10).into_font())
.margin(5)
.x_label_area_size(30)
.y_label_area_size(30)
.build_ranged(8f32..-8f32, 16f32..0f32).unwrap();
//chart.configure_mesh().draw().unwrap();
// Plot background image
//let (w, h) = chart.plotting_area().dim_in_pixel();
//println!("plotting area: {}, {}", w, h);
//let image = image::load(
//BufReader::new(File::open(image_path).unwrap()),
//ImageFormat::Bmp,
//).unwrap()
//.resize_exact(16, 16, FilterType::Nearest);
//let elem: BitMapElement<_> = ((-8.0, 8.0), image).into();
//chart.draw_series(std::iter::once(elem)).unwrap();
// Draw Overlay points
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
// Draw parabola
chart.draw_series(LineSeries::new(
(x_low..=x_high).map(|x| {x as f32}).map(|x| (x, x.powi(2) * a as f32 + x * b as f32 + c as f32)),
&RED,
)).unwrap()
.label(format!("y = {:.3}x^2 + {:.3}x + {:.3}", a, b, c)).legend(|(x, y)| PathElement::new(vec![(x, y), (x + 2, y)], &RED));
// Draw cornea points
chart.draw_series(PointSeries::of_element(data.x.iter().zip(data.y.iter()).map(|(x, y)| (*x as f32, *y as f32)), 2, ShapeStyle::from(&RED).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Plot pupil margins
chart.draw_series(PointSeries::of_element(additional_points.iter().map(|v| (v[0] as f32, v[1] as f32)), 3, ShapeStyle::from(&BLUE).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Draw inbound ray
let pm0_x = additional_points[0][0];
let pm0_boundary = vec![pm0_x, a * pm0_x.powi(2) + b * pm0_x + c];
let pm1_x = additional_points[1][0];
let pm1_boundary = vec![pm1_x, a * pm1_x.powi(2) + b * pm1_x + c];
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm0_x as f32, y)),
&BLUE)).unwrap();
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm1_x as f32, y)),
&BLUE)).unwrap();
// Draw tangent line
let tangent_line_0_k = 2f64 * a * pm0_x + b;
let tangent_line_0_b = pm0_boundary[1] - tangent_line_0_k * pm0_x;
let mut tangent_line_low = ((pm0_x - 2f64) * 10f64) as i32;
let mut tangent_line_high = ((pm0_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k as f32 * x + tangent_line_0_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_0_k_vert = -1f64 / tangent_line_0_k;
let tangent_line_0_b_vert = pm0_boundary[1] - (-1f64 / tangent_line_0_k) * pm0_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k_vert as f32 * x + tangent_line_0_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Draw tangent line
let tangent_line_1_k = 2f64 * a * pm1_x + b;
let tangent_line_1_b = pm1_boundary[1] - tangent_line_1_k * pm1_x;
tangent_line_low = ((pm1_x - 2f64) * 10f64) as i32;
tangent_line_high = ((pm1_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k as f32 * x + tangent_line_1_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_1_k_vert = -1f64 / tangent_line_1_k;
let tangent_line_1_b_vert = pm1_boundary[1] - (-1f64 / tangent_line_1_k) * pm1_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k_vert as f32 * x + tangent_line_1_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
chart
.configure_series_labels()
.background_style(&WHITE.mix(0.8))
.border_style(&BLACK)
.draw().unwrap();
}
fn curve_fitting(data: Rc<Data>) -> Result<rgsl::MultiFitFdfSolver, Error>{
//Initialize Parameters
let num_params = 3;
let num_instances = data.n;
let mut status = rgsl::Value::Success;
let mut J = match rgsl::MatrixF64::new(num_params, num_params){
Some(mat) => mat,
None => return Err(Error::new(ErrorKind::Other, "Can't create Jacobian matrix"))
};
let mut params_init = [0f64, -1f64, 100f64];
let mut params = rgsl::VectorView::from_array(&mut params_init);
rgsl::RngType::env_setup();
let rng_type = rgsl::rng::default();
let mut rng = match rgsl::Rng::new(&rng_type){
Some(r) => r,
None => return Err(Error::new(ErrorKind::Other, "Can't create rng"))
};
let solver_type = rgsl::MultiFitFdfSolverType::lmsder();
let mut func = rgsl::MultiFitFunctionFdf::new(num_instances, num_params);
let expb_f = clone!(data => move |x, f| {
exp_f(&x, &mut f, &*data)
});
func.f = Some(Box::new(expb_f));
let expb_df = clone!(data => move |x, J| {
exp_df(&x, &mut J, &*data)
});
func.df = Some(Box::new(expb_df));
let expb_fdf = clone!(data => move |x, f, J| {
exp_f(&x, &mut f, &*data);
exp_df(&x, &mut J, &*data);
rgsl::Value::Success
});
func.fdf = Some(Box::new(expb_fdf));
// Create a solver
let mut solver = match rgsl::MultiFitFdfSolver::new(&solver_type, num_instances, num_params){
Some(s) => s,
None => return Err(Error::new(ErrorKind::Other, "can't create solver"))
};
solver.set(&mut func, ¶ms.vector());
let mut iter = 0;
loop {
iter += 1;
status = solver.iterate();
println!("status = {}", rgsl::error::str_error(status));
print_state(iter, &solver);
if status != rgsl::Value::Success {
//return Err(Error::new(ErrorKind::TimedOut, "Reconstruction failed"));
break;
}
//println!("dx: {:?}", &solver.dx());
//println!("position: {:?}", &s.position());
status = rgsl::multifit::test_delta(&solver.dx(), &solver.x(), 1e-4, 1e-4);
if status != rgsl::Value::Continue || iter >= 500 {
break;
}
}
println!("Done");
println!("params: {:?}", &solver.x());
Ok(solver)
}
fn apply_optical_distortion_correction(params: &rgsl::VectorF64, points: &Vec<Vec<f64>>) -> Vec<Vec<f64>>{
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
let mut new_points: Vec<Vec<f64>> = Vec::new();
for point in points.iter(){
let k1 = 2f64 * a * point[0] + b;
let theta_1 = k1.atan();
let mut theta_2 = 0f64;
let mut theta = 0f64;
let mut new_point: Vec<f64> = Vec::new();
if theta_1 > 0f64{
theta_2 = theta_1 * *AIR_REFRACTIVE_INDEX/ *CORNEA_REFRACTIVE_INDEX;
theta = theta_1 - theta_2;
}else{
theta_2 = theta_1.abs() / *CORNEA_REFRACTIVE_INDEX;
theta = -1f64 * (theta_1.abs() - theta_2);
}
println!("theta: {}", theta);
let boundary_point = vec![point[0], a * point[0].powi(2) + b * point[0] + c];
let new_length = (boundary_point[1] - point[1]) / *CORNEA_REFRACTIVE_INDEX;
new_point.push(boundary_point[0] + new_length * theta.sin());
new_point.push(boundary_point[1] - new_length * theta.cos());
println!("old: {:?}, new: {:?}", point, new_point);
new_points.push(new_point);
}
new_points
}
/*
fn run_on_folder(folder: &str){
let mut total_time_ms = 0;
let mut iter = 0;
for entry in fs::read_dir(folder).unwrap(){
let entry = entry.unwrap();
let file_path = entry.path();
if !file_path.is_dir(){
let lines = read_file_into_lines(file_path.to_str().unwrap());
println!("process: {:?}", file_path);
// Create Data
let mut data = Rc::new(RefCell::new(Data{
x: Vec::<f64>::new(),
y: Vec::<f64>::new(),
n: 0
}));
for (i, line) in lines.iter().enumerate(){
let mut data = *data.borrow_mut();
let numbers: Vec<f64> = line.split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
//println!("data: {:?}", numbers);
if numbers.len() > 1{
data.x.push(*numbers.get(0).unwrap());
data.y.push(*numbers.get(1).unwrap());
data.n += 1;
}
}
//println!("Data: {:?}", data);
if data.borrow().n >= 3{
let fig_path: String = format!("./figs/{}.png", file_path.file_name().unwrap().to_str().unwrap());
let now = Instant::now();
let curve_params = curve_fitting(data); | println!("run {} iterations", iter);
println!("average time: {} microsecond", total_time_ms/ iter);
}
*/
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
if path.is_dir(){
//run_on_folder(&path.to_str().unwrap());
println!("TODO: fix bug in run folder");
}else{
let lines = read_file_into_lines(path.to_str().unwrap());
let mut total_time_micros = 0;
let mut iter = 0;
let mut file = File::create("cross_corrected.keypoint").unwrap();
file.write_all(b"scanId\teyeId\tbscanId\tpm0xmm\tpm0ymm\tpm0zmm\tpm1xmm\tpm1ymm\tpm1zmm\n");
for line in lines.iter().skip(1){
let elements: Vec<&str> = line.split(",").collect();
if elements.len() > 8{
let pm0: Vec<f64> = vec![elements[3], elements[4]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let pm1: Vec<f64> = vec![elements[5], elements[6]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let cp_x: Vec<f64> = elements[7].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let cp_y: Vec<f64> = elements[8].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let num_kp = cp_y.len();
let mut data = Rc::new(Data{
x: cp_x,
y: cp_y,
n: num_kp
});
//println!("{:?}", data.x);
if data.n >= 3{
let now = Instant::now();
let solver = match curve_fitting(Rc::clone(&data)){
Ok(p) => p,
Err(e) => panic!("can't reconstruct curve")
};
println!("{:?}", solver.x());
let mut add_points = vec![pm0, pm1];
let corrected_points = apply_optical_distortion_correction(&solver.x(), &add_points);
if elements[2] == "x"{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
corrected_points[0][0], 0, corrected_points[0][1],
corrected_points[1][0], 0, corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}else{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
-1.53, corrected_points[0][0], corrected_points[0][1],
-1.53, corrected_points[1][0], corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}
//println!("{:?}", corrected_points);
total_time_micros += now.elapsed().as_micros();
iter += 1;
add_points.extend(corrected_points);
let image_path = format!("./resource/TestMotility/OD/{}/0{}.bmp", elements[2], elements[0]);
println!("image: {}", image_path);
let fig_path: String = format!("./figs_mm/{}_{}.png", elements[0], elements[2]);
//plot_parabola(&data, &solver.x(), add_points, &fig_path, &image_path);
}
}else{
println!("total elements: {}", elements.len());
println!("Can't process {}", line);
}
}
println!("Total iteration: {}", iter);
println!("Average time: {}", total_time_micros / iter);
}
} | total_time_ms += now.elapsed().as_micros();
iter += 1;
}
}
} | random_line_split |
main.rs | use std::env;
use std::fs;
extern crate rgsl;
use std::rc::Rc;
use std::cell::RefCell;
use plotters::prelude::*;
use std::time::Instant;
use std::path::Path;
use std::io::{Error, ErrorKind};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use image::{imageops::FilterType, ImageFormat};
#[macro_use]
extern crate lazy_static;
lazy_static!{
static ref AIR_REFRACTIVE_INDEX:f64 = 1f64;
static ref CORNEA_REFRACTIVE_INDEX:f64 = 1.37;
}
macro_rules! clone {
(@param _) => ( _ );
(@param $x:ident) => ( mut $x );
($($n:ident),+ => move || $body:expr) => (
{
$( let $n = $n.clone(); )+
move || $body
}
);
($($n:ident),+ => move |$($p:tt),+| $body:expr) => (
{
$( let $n = $n.clone(); )+
move |$(clone!(@param $p),)+| $body
}
);
}
fn exp_f(x: &rgsl::VectorF64, f: &mut rgsl::VectorF64, data: &Data) -> rgsl::Value {
let a = x.get(0);
let b = x.get(1);
let c = x.get(2);
for (i, (x, y)) in data.x.iter().zip(data.y.iter()).enumerate(){
/* Model Yi = a * x^2 + b * x + c*/
let yi = a * x.powi(2) + b * x + c;
f.set(i, yi - y);
}
rgsl::Value::Success
}
fn exp_df(x: &rgsl::VectorF64, J: &mut rgsl::MatrixF64, data: &Data) -> rgsl::Value {
for (i, x) in data.x.iter().enumerate(){
/* Jacobian matrix J(i,j) = dfi / dxj, */
/* where fi = (Yi - yi)/sigma[i], */
/* Yi = A * exp(-lambda * i) + b */
/* and the xj are the parameters (A,lambda,b) */
J.set(i, 0, x.powi(2));
J.set(i, 1, *x);
J.set(i, 2, 1f64);
}
rgsl::Value::Success
}
fn print_state(iter: usize, s: &rgsl::MultiFitFdfSolver) {
println!("iter: {} x = {} {} {} |f(x)| = {}", iter,
s.x().get(0), s.x().get(1), s.x().get(2), rgsl::blas::level1::dnrm2(&s.f()));
}
#[derive(Debug)]
pub struct Data{
pub x: Vec<f64>,
pub y: Vec<f64>,
n: usize
}
fn read_file_into_lines(filename: &str)-> Vec<String>{
let contents = fs::read_to_string(
filename).expect(&format!("can't read file {}", filename));
let rows: Vec<String> = contents.split('\n').map(|s| s.to_string()).collect();
rows
}
fn bound_in_axis(x: f32, y: f32) -> bool{
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
x > x_low as f32 && x < x_high as f32 && y > y_low as f32 && y < y_high as f32
}
fn plot_parabola(data: &Data, params: &rgsl::VectorF64, additional_points: Vec<Vec<f64>>, fig_path: &String, image_path: &String){
let root = BitMapBackend::new(fig_path, (1024, 1024)).into_drawing_area();
root.fill(&WHITE).unwrap();
let mut chart = ChartBuilder::on(&root)
.caption("Optical Distortion Correction", ("sans-serif", 10).into_font())
.margin(5)
.x_label_area_size(30)
.y_label_area_size(30)
.build_ranged(8f32..-8f32, 16f32..0f32).unwrap();
//chart.configure_mesh().draw().unwrap();
// Plot background image
//let (w, h) = chart.plotting_area().dim_in_pixel();
//println!("plotting area: {}, {}", w, h);
//let image = image::load(
//BufReader::new(File::open(image_path).unwrap()),
//ImageFormat::Bmp,
//).unwrap()
//.resize_exact(16, 16, FilterType::Nearest);
//let elem: BitMapElement<_> = ((-8.0, 8.0), image).into();
//chart.draw_series(std::iter::once(elem)).unwrap();
// Draw Overlay points
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
// Draw parabola
chart.draw_series(LineSeries::new(
(x_low..=x_high).map(|x| {x as f32}).map(|x| (x, x.powi(2) * a as f32 + x * b as f32 + c as f32)),
&RED,
)).unwrap()
.label(format!("y = {:.3}x^2 + {:.3}x + {:.3}", a, b, c)).legend(|(x, y)| PathElement::new(vec![(x, y), (x + 2, y)], &RED));
// Draw cornea points
chart.draw_series(PointSeries::of_element(data.x.iter().zip(data.y.iter()).map(|(x, y)| (*x as f32, *y as f32)), 2, ShapeStyle::from(&RED).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Plot pupil margins
chart.draw_series(PointSeries::of_element(additional_points.iter().map(|v| (v[0] as f32, v[1] as f32)), 3, ShapeStyle::from(&BLUE).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Draw inbound ray
let pm0_x = additional_points[0][0];
let pm0_boundary = vec![pm0_x, a * pm0_x.powi(2) + b * pm0_x + c];
let pm1_x = additional_points[1][0];
let pm1_boundary = vec![pm1_x, a * pm1_x.powi(2) + b * pm1_x + c];
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm0_x as f32, y)),
&BLUE)).unwrap();
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm1_x as f32, y)),
&BLUE)).unwrap();
// Draw tangent line
let tangent_line_0_k = 2f64 * a * pm0_x + b;
let tangent_line_0_b = pm0_boundary[1] - tangent_line_0_k * pm0_x;
let mut tangent_line_low = ((pm0_x - 2f64) * 10f64) as i32;
let mut tangent_line_high = ((pm0_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k as f32 * x + tangent_line_0_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_0_k_vert = -1f64 / tangent_line_0_k;
let tangent_line_0_b_vert = pm0_boundary[1] - (-1f64 / tangent_line_0_k) * pm0_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k_vert as f32 * x + tangent_line_0_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Draw tangent line
let tangent_line_1_k = 2f64 * a * pm1_x + b;
let tangent_line_1_b = pm1_boundary[1] - tangent_line_1_k * pm1_x;
tangent_line_low = ((pm1_x - 2f64) * 10f64) as i32;
tangent_line_high = ((pm1_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k as f32 * x + tangent_line_1_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_1_k_vert = -1f64 / tangent_line_1_k;
let tangent_line_1_b_vert = pm1_boundary[1] - (-1f64 / tangent_line_1_k) * pm1_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k_vert as f32 * x + tangent_line_1_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
chart
.configure_series_labels()
.background_style(&WHITE.mix(0.8))
.border_style(&BLACK)
.draw().unwrap();
}
fn curve_fitting(data: Rc<Data>) -> Result<rgsl::MultiFitFdfSolver, Error>{
//Initialize Parameters
let num_params = 3;
let num_instances = data.n;
let mut status = rgsl::Value::Success;
let mut J = match rgsl::MatrixF64::new(num_params, num_params){
Some(mat) => mat,
None => return Err(Error::new(ErrorKind::Other, "Can't create Jacobian matrix"))
};
let mut params_init = [0f64, -1f64, 100f64];
let mut params = rgsl::VectorView::from_array(&mut params_init);
rgsl::RngType::env_setup();
let rng_type = rgsl::rng::default();
let mut rng = match rgsl::Rng::new(&rng_type){
Some(r) => r,
None => return Err(Error::new(ErrorKind::Other, "Can't create rng"))
};
let solver_type = rgsl::MultiFitFdfSolverType::lmsder();
let mut func = rgsl::MultiFitFunctionFdf::new(num_instances, num_params);
let expb_f = clone!(data => move |x, f| {
exp_f(&x, &mut f, &*data)
});
func.f = Some(Box::new(expb_f));
let expb_df = clone!(data => move |x, J| {
exp_df(&x, &mut J, &*data)
});
func.df = Some(Box::new(expb_df));
let expb_fdf = clone!(data => move |x, f, J| {
exp_f(&x, &mut f, &*data);
exp_df(&x, &mut J, &*data);
rgsl::Value::Success
});
func.fdf = Some(Box::new(expb_fdf));
// Create a solver
let mut solver = match rgsl::MultiFitFdfSolver::new(&solver_type, num_instances, num_params){
Some(s) => s,
None => return Err(Error::new(ErrorKind::Other, "can't create solver"))
};
solver.set(&mut func, ¶ms.vector());
let mut iter = 0;
loop {
iter += 1;
status = solver.iterate();
println!("status = {}", rgsl::error::str_error(status));
print_state(iter, &solver);
if status != rgsl::Value::Success {
//return Err(Error::new(ErrorKind::TimedOut, "Reconstruction failed"));
break;
}
//println!("dx: {:?}", &solver.dx());
//println!("position: {:?}", &s.position());
status = rgsl::multifit::test_delta(&solver.dx(), &solver.x(), 1e-4, 1e-4);
if status != rgsl::Value::Continue || iter >= 500 {
break;
}
}
println!("Done");
println!("params: {:?}", &solver.x());
Ok(solver)
}
fn apply_optical_distortion_correction(params: &rgsl::VectorF64, points: &Vec<Vec<f64>>) -> Vec<Vec<f64>> |
/*
fn run_on_folder(folder: &str){
let mut total_time_ms = 0;
let mut iter = 0;
for entry in fs::read_dir(folder).unwrap(){
let entry = entry.unwrap();
let file_path = entry.path();
if !file_path.is_dir(){
let lines = read_file_into_lines(file_path.to_str().unwrap());
println!("process: {:?}", file_path);
// Create Data
let mut data = Rc::new(RefCell::new(Data{
x: Vec::<f64>::new(),
y: Vec::<f64>::new(),
n: 0
}));
for (i, line) in lines.iter().enumerate(){
let mut data = *data.borrow_mut();
let numbers: Vec<f64> = line.split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
//println!("data: {:?}", numbers);
if numbers.len() > 1{
data.x.push(*numbers.get(0).unwrap());
data.y.push(*numbers.get(1).unwrap());
data.n += 1;
}
}
//println!("Data: {:?}", data);
if data.borrow().n >= 3{
let fig_path: String = format!("./figs/{}.png", file_path.file_name().unwrap().to_str().unwrap());
let now = Instant::now();
let curve_params = curve_fitting(data);
total_time_ms += now.elapsed().as_micros();
iter += 1;
}
}
}
println!("run {} iterations", iter);
println!("average time: {} microsecond", total_time_ms/ iter);
}
*/
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
if path.is_dir(){
//run_on_folder(&path.to_str().unwrap());
println!("TODO: fix bug in run folder");
}else{
let lines = read_file_into_lines(path.to_str().unwrap());
let mut total_time_micros = 0;
let mut iter = 0;
let mut file = File::create("cross_corrected.keypoint").unwrap();
file.write_all(b"scanId\teyeId\tbscanId\tpm0xmm\tpm0ymm\tpm0zmm\tpm1xmm\tpm1ymm\tpm1zmm\n");
for line in lines.iter().skip(1){
let elements: Vec<&str> = line.split(",").collect();
if elements.len() > 8{
let pm0: Vec<f64> = vec![elements[3], elements[4]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let pm1: Vec<f64> = vec![elements[5], elements[6]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let cp_x: Vec<f64> = elements[7].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let cp_y: Vec<f64> = elements[8].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let num_kp = cp_y.len();
let mut data = Rc::new(Data{
x: cp_x,
y: cp_y,
n: num_kp
});
//println!("{:?}", data.x);
if data.n >= 3{
let now = Instant::now();
let solver = match curve_fitting(Rc::clone(&data)){
Ok(p) => p,
Err(e) => panic!("can't reconstruct curve")
};
println!("{:?}", solver.x());
let mut add_points = vec![pm0, pm1];
let corrected_points = apply_optical_distortion_correction(&solver.x(), &add_points);
if elements[2] == "x"{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
corrected_points[0][0], 0, corrected_points[0][1],
corrected_points[1][0], 0, corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}else{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
-1.53, corrected_points[0][0], corrected_points[0][1],
-1.53, corrected_points[1][0], corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}
//println!("{:?}", corrected_points);
total_time_micros += now.elapsed().as_micros();
iter += 1;
add_points.extend(corrected_points);
let image_path = format!("./resource/TestMotility/OD/{}/0{}.bmp", elements[2], elements[0]);
println!("image: {}", image_path);
let fig_path: String = format!("./figs_mm/{}_{}.png", elements[0], elements[2]);
//plot_parabola(&data, &solver.x(), add_points, &fig_path, &image_path);
}
}else{
println!("total elements: {}", elements.len());
println!("Can't process {}", line);
}
}
println!("Total iteration: {}", iter);
println!("Average time: {}", total_time_micros / iter);
}
}
| {
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
let mut new_points: Vec<Vec<f64>> = Vec::new();
for point in points.iter(){
let k1 = 2f64 * a * point[0] + b;
let theta_1 = k1.atan();
let mut theta_2 = 0f64;
let mut theta = 0f64;
let mut new_point: Vec<f64> = Vec::new();
if theta_1 > 0f64{
theta_2 = theta_1 * *AIR_REFRACTIVE_INDEX/ *CORNEA_REFRACTIVE_INDEX;
theta = theta_1 - theta_2;
}else{
theta_2 = theta_1.abs() / *CORNEA_REFRACTIVE_INDEX;
theta = -1f64 * (theta_1.abs() - theta_2);
}
println!("theta: {}", theta);
let boundary_point = vec![point[0], a * point[0].powi(2) + b * point[0] + c];
let new_length = (boundary_point[1] - point[1]) / *CORNEA_REFRACTIVE_INDEX;
new_point.push(boundary_point[0] + new_length * theta.sin());
new_point.push(boundary_point[1] - new_length * theta.cos());
println!("old: {:?}, new: {:?}", point, new_point);
new_points.push(new_point);
}
new_points
} | identifier_body |
main.rs | use std::env;
use std::fs;
extern crate rgsl;
use std::rc::Rc;
use std::cell::RefCell;
use plotters::prelude::*;
use std::time::Instant;
use std::path::Path;
use std::io::{Error, ErrorKind};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use image::{imageops::FilterType, ImageFormat};
#[macro_use]
extern crate lazy_static;
lazy_static!{
static ref AIR_REFRACTIVE_INDEX:f64 = 1f64;
static ref CORNEA_REFRACTIVE_INDEX:f64 = 1.37;
}
macro_rules! clone {
(@param _) => ( _ );
(@param $x:ident) => ( mut $x );
($($n:ident),+ => move || $body:expr) => (
{
$( let $n = $n.clone(); )+
move || $body
}
);
($($n:ident),+ => move |$($p:tt),+| $body:expr) => (
{
$( let $n = $n.clone(); )+
move |$(clone!(@param $p),)+| $body
}
);
}
fn exp_f(x: &rgsl::VectorF64, f: &mut rgsl::VectorF64, data: &Data) -> rgsl::Value {
let a = x.get(0);
let b = x.get(1);
let c = x.get(2);
for (i, (x, y)) in data.x.iter().zip(data.y.iter()).enumerate(){
/* Model Yi = a * x^2 + b * x + c*/
let yi = a * x.powi(2) + b * x + c;
f.set(i, yi - y);
}
rgsl::Value::Success
}
fn exp_df(x: &rgsl::VectorF64, J: &mut rgsl::MatrixF64, data: &Data) -> rgsl::Value {
for (i, x) in data.x.iter().enumerate(){
/* Jacobian matrix J(i,j) = dfi / dxj, */
/* where fi = (Yi - yi)/sigma[i], */
/* Yi = A * exp(-lambda * i) + b */
/* and the xj are the parameters (A,lambda,b) */
J.set(i, 0, x.powi(2));
J.set(i, 1, *x);
J.set(i, 2, 1f64);
}
rgsl::Value::Success
}
fn print_state(iter: usize, s: &rgsl::MultiFitFdfSolver) {
println!("iter: {} x = {} {} {} |f(x)| = {}", iter,
s.x().get(0), s.x().get(1), s.x().get(2), rgsl::blas::level1::dnrm2(&s.f()));
}
#[derive(Debug)]
pub struct Data{
pub x: Vec<f64>,
pub y: Vec<f64>,
n: usize
}
fn read_file_into_lines(filename: &str)-> Vec<String>{
let contents = fs::read_to_string(
filename).expect(&format!("can't read file {}", filename));
let rows: Vec<String> = contents.split('\n').map(|s| s.to_string()).collect();
rows
}
fn bound_in_axis(x: f32, y: f32) -> bool{
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
x > x_low as f32 && x < x_high as f32 && y > y_low as f32 && y < y_high as f32
}
fn plot_parabola(data: &Data, params: &rgsl::VectorF64, additional_points: Vec<Vec<f64>>, fig_path: &String, image_path: &String){
let root = BitMapBackend::new(fig_path, (1024, 1024)).into_drawing_area();
root.fill(&WHITE).unwrap();
let mut chart = ChartBuilder::on(&root)
.caption("Optical Distortion Correction", ("sans-serif", 10).into_font())
.margin(5)
.x_label_area_size(30)
.y_label_area_size(30)
.build_ranged(8f32..-8f32, 16f32..0f32).unwrap();
//chart.configure_mesh().draw().unwrap();
// Plot background image
//let (w, h) = chart.plotting_area().dim_in_pixel();
//println!("plotting area: {}, {}", w, h);
//let image = image::load(
//BufReader::new(File::open(image_path).unwrap()),
//ImageFormat::Bmp,
//).unwrap()
//.resize_exact(16, 16, FilterType::Nearest);
//let elem: BitMapElement<_> = ((-8.0, 8.0), image).into();
//chart.draw_series(std::iter::once(elem)).unwrap();
// Draw Overlay points
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
// Draw parabola
chart.draw_series(LineSeries::new(
(x_low..=x_high).map(|x| {x as f32}).map(|x| (x, x.powi(2) * a as f32 + x * b as f32 + c as f32)),
&RED,
)).unwrap()
.label(format!("y = {:.3}x^2 + {:.3}x + {:.3}", a, b, c)).legend(|(x, y)| PathElement::new(vec![(x, y), (x + 2, y)], &RED));
// Draw cornea points
chart.draw_series(PointSeries::of_element(data.x.iter().zip(data.y.iter()).map(|(x, y)| (*x as f32, *y as f32)), 2, ShapeStyle::from(&RED).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Plot pupil margins
chart.draw_series(PointSeries::of_element(additional_points.iter().map(|v| (v[0] as f32, v[1] as f32)), 3, ShapeStyle::from(&BLUE).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Draw inbound ray
let pm0_x = additional_points[0][0];
let pm0_boundary = vec![pm0_x, a * pm0_x.powi(2) + b * pm0_x + c];
let pm1_x = additional_points[1][0];
let pm1_boundary = vec![pm1_x, a * pm1_x.powi(2) + b * pm1_x + c];
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm0_x as f32, y)),
&BLUE)).unwrap();
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm1_x as f32, y)),
&BLUE)).unwrap();
// Draw tangent line
let tangent_line_0_k = 2f64 * a * pm0_x + b;
let tangent_line_0_b = pm0_boundary[1] - tangent_line_0_k * pm0_x;
let mut tangent_line_low = ((pm0_x - 2f64) * 10f64) as i32;
let mut tangent_line_high = ((pm0_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k as f32 * x + tangent_line_0_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_0_k_vert = -1f64 / tangent_line_0_k;
let tangent_line_0_b_vert = pm0_boundary[1] - (-1f64 / tangent_line_0_k) * pm0_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k_vert as f32 * x + tangent_line_0_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Draw tangent line
let tangent_line_1_k = 2f64 * a * pm1_x + b;
let tangent_line_1_b = pm1_boundary[1] - tangent_line_1_k * pm1_x;
tangent_line_low = ((pm1_x - 2f64) * 10f64) as i32;
tangent_line_high = ((pm1_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k as f32 * x + tangent_line_1_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_1_k_vert = -1f64 / tangent_line_1_k;
let tangent_line_1_b_vert = pm1_boundary[1] - (-1f64 / tangent_line_1_k) * pm1_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k_vert as f32 * x + tangent_line_1_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
chart
.configure_series_labels()
.background_style(&WHITE.mix(0.8))
.border_style(&BLACK)
.draw().unwrap();
}
fn curve_fitting(data: Rc<Data>) -> Result<rgsl::MultiFitFdfSolver, Error>{
//Initialize Parameters
let num_params = 3;
let num_instances = data.n;
let mut status = rgsl::Value::Success;
let mut J = match rgsl::MatrixF64::new(num_params, num_params){
Some(mat) => mat,
None => return Err(Error::new(ErrorKind::Other, "Can't create Jacobian matrix"))
};
let mut params_init = [0f64, -1f64, 100f64];
let mut params = rgsl::VectorView::from_array(&mut params_init);
rgsl::RngType::env_setup();
let rng_type = rgsl::rng::default();
let mut rng = match rgsl::Rng::new(&rng_type){
Some(r) => r,
None => return Err(Error::new(ErrorKind::Other, "Can't create rng"))
};
let solver_type = rgsl::MultiFitFdfSolverType::lmsder();
let mut func = rgsl::MultiFitFunctionFdf::new(num_instances, num_params);
let expb_f = clone!(data => move |x, f| {
exp_f(&x, &mut f, &*data)
});
func.f = Some(Box::new(expb_f));
let expb_df = clone!(data => move |x, J| {
exp_df(&x, &mut J, &*data)
});
func.df = Some(Box::new(expb_df));
let expb_fdf = clone!(data => move |x, f, J| {
exp_f(&x, &mut f, &*data);
exp_df(&x, &mut J, &*data);
rgsl::Value::Success
});
func.fdf = Some(Box::new(expb_fdf));
// Create a solver
let mut solver = match rgsl::MultiFitFdfSolver::new(&solver_type, num_instances, num_params){
Some(s) => s,
None => return Err(Error::new(ErrorKind::Other, "can't create solver"))
};
solver.set(&mut func, ¶ms.vector());
let mut iter = 0;
loop {
iter += 1;
status = solver.iterate();
println!("status = {}", rgsl::error::str_error(status));
print_state(iter, &solver);
if status != rgsl::Value::Success {
//return Err(Error::new(ErrorKind::TimedOut, "Reconstruction failed"));
break;
}
//println!("dx: {:?}", &solver.dx());
//println!("position: {:?}", &s.position());
status = rgsl::multifit::test_delta(&solver.dx(), &solver.x(), 1e-4, 1e-4);
if status != rgsl::Value::Continue || iter >= 500 {
break;
}
}
println!("Done");
println!("params: {:?}", &solver.x());
Ok(solver)
}
fn apply_optical_distortion_correction(params: &rgsl::VectorF64, points: &Vec<Vec<f64>>) -> Vec<Vec<f64>>{
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
let mut new_points: Vec<Vec<f64>> = Vec::new();
for point in points.iter(){
let k1 = 2f64 * a * point[0] + b;
let theta_1 = k1.atan();
let mut theta_2 = 0f64;
let mut theta = 0f64;
let mut new_point: Vec<f64> = Vec::new();
if theta_1 > 0f64{
theta_2 = theta_1 * *AIR_REFRACTIVE_INDEX/ *CORNEA_REFRACTIVE_INDEX;
theta = theta_1 - theta_2;
}else{
theta_2 = theta_1.abs() / *CORNEA_REFRACTIVE_INDEX;
theta = -1f64 * (theta_1.abs() - theta_2);
}
println!("theta: {}", theta);
let boundary_point = vec![point[0], a * point[0].powi(2) + b * point[0] + c];
let new_length = (boundary_point[1] - point[1]) / *CORNEA_REFRACTIVE_INDEX;
new_point.push(boundary_point[0] + new_length * theta.sin());
new_point.push(boundary_point[1] - new_length * theta.cos());
println!("old: {:?}, new: {:?}", point, new_point);
new_points.push(new_point);
}
new_points
}
/*
fn run_on_folder(folder: &str){
let mut total_time_ms = 0;
let mut iter = 0;
for entry in fs::read_dir(folder).unwrap(){
let entry = entry.unwrap();
let file_path = entry.path();
if !file_path.is_dir(){
let lines = read_file_into_lines(file_path.to_str().unwrap());
println!("process: {:?}", file_path);
// Create Data
let mut data = Rc::new(RefCell::new(Data{
x: Vec::<f64>::new(),
y: Vec::<f64>::new(),
n: 0
}));
for (i, line) in lines.iter().enumerate(){
let mut data = *data.borrow_mut();
let numbers: Vec<f64> = line.split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
//println!("data: {:?}", numbers);
if numbers.len() > 1{
data.x.push(*numbers.get(0).unwrap());
data.y.push(*numbers.get(1).unwrap());
data.n += 1;
}
}
//println!("Data: {:?}", data);
if data.borrow().n >= 3{
let fig_path: String = format!("./figs/{}.png", file_path.file_name().unwrap().to_str().unwrap());
let now = Instant::now();
let curve_params = curve_fitting(data);
total_time_ms += now.elapsed().as_micros();
iter += 1;
}
}
}
println!("run {} iterations", iter);
println!("average time: {} microsecond", total_time_ms/ iter);
}
*/
fn | () {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
if path.is_dir(){
//run_on_folder(&path.to_str().unwrap());
println!("TODO: fix bug in run folder");
}else{
let lines = read_file_into_lines(path.to_str().unwrap());
let mut total_time_micros = 0;
let mut iter = 0;
let mut file = File::create("cross_corrected.keypoint").unwrap();
file.write_all(b"scanId\teyeId\tbscanId\tpm0xmm\tpm0ymm\tpm0zmm\tpm1xmm\tpm1ymm\tpm1zmm\n");
for line in lines.iter().skip(1){
let elements: Vec<&str> = line.split(",").collect();
if elements.len() > 8{
let pm0: Vec<f64> = vec![elements[3], elements[4]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let pm1: Vec<f64> = vec![elements[5], elements[6]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let cp_x: Vec<f64> = elements[7].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let cp_y: Vec<f64> = elements[8].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let num_kp = cp_y.len();
let mut data = Rc::new(Data{
x: cp_x,
y: cp_y,
n: num_kp
});
//println!("{:?}", data.x);
if data.n >= 3{
let now = Instant::now();
let solver = match curve_fitting(Rc::clone(&data)){
Ok(p) => p,
Err(e) => panic!("can't reconstruct curve")
};
println!("{:?}", solver.x());
let mut add_points = vec![pm0, pm1];
let corrected_points = apply_optical_distortion_correction(&solver.x(), &add_points);
if elements[2] == "x"{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
corrected_points[0][0], 0, corrected_points[0][1],
corrected_points[1][0], 0, corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}else{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
-1.53, corrected_points[0][0], corrected_points[0][1],
-1.53, corrected_points[1][0], corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}
//println!("{:?}", corrected_points);
total_time_micros += now.elapsed().as_micros();
iter += 1;
add_points.extend(corrected_points);
let image_path = format!("./resource/TestMotility/OD/{}/0{}.bmp", elements[2], elements[0]);
println!("image: {}", image_path);
let fig_path: String = format!("./figs_mm/{}_{}.png", elements[0], elements[2]);
//plot_parabola(&data, &solver.x(), add_points, &fig_path, &image_path);
}
}else{
println!("total elements: {}", elements.len());
println!("Can't process {}", line);
}
}
println!("Total iteration: {}", iter);
println!("Average time: {}", total_time_micros / iter);
}
}
| main | identifier_name |
py4_promoter_DCI_scatter.py | import sys,argparse
import os,glob
import numpy as np
import pandas as pd
import re,bisect
from scipy import stats
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=11
import seaborn as sns
sns.set(font_scale=1.1)
sns.set_style("whitegrid", {'axes.grid' : False})
sns.set_style("ticks",{'ytick.color': 'k','axes.edgecolor': 'k'})
matplotlib.rcParams["font.sans-serif"] = ["Arial"]
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams["mathtext.rm"] = "Arial"
# def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
# dci_file = '{}/{}/{}_{}{}.bed'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
# dci_df = pd.read_csv(dci_file,sep='\t',header=None)
# dci_df.columns=['chr','start','end','DCI']
# dci_df.index = ['_'.join(ii) for ii in dci_df[['chr','start','end']].values.astype(str)]
# return dci_df
def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
if os.path.isfile(dci_file):
dci_df = pd.read_csv(dci_file,sep='\t',index_col=4)
dci_df.columns=['chr','start','end','IfOverlap','score','strand','DCI']
return dci_df
else:
return None
def scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre):
compr_x = compr_type[0]
compr_y = compr_type[1]
test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_y,suffix)
# print(test_file)
if os.path.isfile(test_file):
dci_df_wt_over_vector = return_dci_df(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']>dci_thre].index
dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']<-1*dci_thre].index
dci_df_x = return_dci_df(DCI_dir,subdir,hm_mark,compr_x,suffix)
dci_df_y = return_dci_df(DCI_dir,subdir,hm_mark,compr_y,suffix)
# scatter plot
plt.figure(figsize=(2.1,2.1))
plt.scatter(dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI'],c='tab:grey',s=3,alpha=1,rasterized=True,label='All genes')
plt.scatter(dci_df_x.loc[up_bins,'DCI'],dci_df_y.loc[up_bins,'DCI'],c='tab:red',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
plt.scatter(dci_df_x.loc[dn_bins,'DCI'],dci_df_y.loc[dn_bins,'DCI'],c='tab:blue',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$<{}$ in WT/Vector'.format(-1*dci_thre))
# save and plot the correlation
x,y = dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI']
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
output_prename = '{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre)
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_s'] = r_value
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_p'] = p_value
x_sort = np.sort(x)
plt.plot(x_sort,x_sort*slope+intercept,c = 'k',ls='--',lw=.8)
plt.text(.97,.97,'$r={:.2f}$ '.format(r_value),fontsize=10,transform=plt.axes().transAxes,ha='right',va='top')
plt.axhline(y=0,c='k',lw=1)
plt.axvline(x=0,c='k',lw=1)
# # plt.title('{} over {}'.format(cellType_labels[treatment],cellType_labels[control]))
plt.legend(fontsize=10.5,borderaxespad=0.1,labelspacing=.1,handletextpad=0.1,\
handlelength=1,loc="upper left",markerscale=3,bbox_to_anchor=[-0.12,1.36],frameon=False)
xa,xb = cellType_labels[compr_x.split('_')[0]],cellType_labels[compr_x.split('_')[-1]]
ya,yb = cellType_labels[compr_y.split('_')[0]],cellType_labels[compr_y.split('_')[-1]]
plt.xlabel('DCI score ({} over {})'.format(xa,xb),fontsize=12)
plt.ylabel('DCI score ({} over {})'.format(ya,yb),fontsize=12)
plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,subdir,hm_mark,compr_x,compr_y,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close()
return up_bins,dn_bins
return [],[]
def plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,flag):
test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
if os.path.isfile(test_file):
box_vals = []
xticklabels = []
sig_vals,sig_colors = [],[]
for compr_col in ['WT_over_Vector','DEL_over_WT','EIF_over_DEL','TPR_over_WT']:
dci_df = return_dci_df(DCI_dir,subdir,hm_mark,compr_col,suffix)
if dci_df is not None:
box_val = dci_df.loc[selected_bins]['DCI'].values
# save the values in box plots
dci_df.loc[selected_bins].to_csv('{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre,compr_col))
s,p = stats.ttest_1samp(box_val,0)
sig_vals.append('*' if p<0.05 else '')
sig_colors.append('b' if s<0 else 'r')
box_vals.append(box_val)
xa,xb = cellType_labels[compr_col.split('_')[0]],cellType_labels[compr_col.split('_')[-1]]
xticklabels.append('{} over {}'.format(xa,xb))
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} s'.format(title.split()[2],compr_col)] = '{:.2f}'.format(s)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} p'.format(title.split()[2],compr_col)] = '{:.2e}'.format(p)
#print(box_vals)
positions = np.arange(len(box_vals))
fig = plt.figure(figsize=(.46*len(box_vals),2.2))
g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\
boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\
medianprops=dict(color='k'),showfliers=False)
# g = plt.violinplot(box_vals)
# for position_id in np.arange(len(positions)):
# scatter_x = np.random.normal(positions[position_id],0.06,len(box_vals[position_id]))
# plt.scatter(scatter_x,box_vals[position_id],color=color,s=5,zorder=0,alpha=0.6,rasterized=True)
# for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t']]:
# mark_pvalue(compr_pos,positions,box_vals)
plt.axes().set_xticklabels(xticklabels,rotation=30,ha='right',fontsize=12)
plt.ylabel('DCI score'.format(hm_mark),fontsize=13)
# plt.ylim([-1,2])
for ii in positions:
plt.scatter(ii,np.median(box_vals[ii]),marker=sig_vals[ii],color='red',s=77)
# plt.axes().text(ii,0,sig_vals[ii-1],fontsize=28,va='top',ha='center',color='red')
plt.axhline(y=0,c='k',lw=1)
plt.title(title,fontsize=12)
# plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc="upper right",frameon=False)
plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close()
# ==== main()
cellType_labels= {'Vector':'Vector',\
'WT':'WT',\
'DEL':'$\Delta$cIDR',\
'EIF':'UTX-eIF$_{IDR}$',\
'TPR':'$\Delta$TPR',\
'MT2':'MT2',\
'FUS':'UTX-FUS$_{IDR}$'}
outdir = 'f4_promoter_DCI_scatter'
os.makedirs(outdir,exist_ok=True)
# project_dir="/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
project_dir="/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f2_DEG_promoter_DCI_non_normalized/f1_promoter_DCI_rename'.format(project_dir)
DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'.format(project_dir)
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f0_run_bart3d_new/bart3d_DCI_rename'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/f1_deseq2_out'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/fz_deseq2_out_combined'.format(project_dir)
# deg_df = pd.read_csv('{}/deseq2_combined.csv'.format(expr_dir),index_col=0)
subdirs=['bart3d_dis200k_data_1st_submit','bart3d_dis200k_data202008',
'bart3d_dis500k_data_1st_submit','bart3d_dis500k_data202008'] | suffixes=['_promoter_DCI']
dci_thres = [2,5]
num_DCI_bins_df = pd.DataFrame()
for subdir in subdirs[1:2]:
outdir_tmp='{}/{}'.format(outdir,subdir)
os.makedirs(outdir_tmp,exist_ok=True)
for hm_mark in hm_marks[:]:
for suffix in suffixes[:]:
for dci_thre in dci_thres[1:]:
for compr_type in compr_types[:]:
up_bins,dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre)
# the box plot are exactly the same
if compr_type[1]=='DEL_over_WT':
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# up genes'] = len(up_bins)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# dn genes'] = len(dn_bins)
##### box plot
selected_bins = up_bins
color = 'tab:red'
title = 'Genes w/ DCI$>{}$ \n in WT over Vector'.format(dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'increased')
selected_bins = dn_bins
color = 'tab:blue'
title = 'Genes w/ DCI$<{}$ \n in WT over Vector'.format(-1*dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'decreased')
num_DCI_bins_df.to_csv(outdir+os.sep+'num_DCI_promoter_summary.csv') |
compr_types = [['WT_over_Vector','DEL_over_WT'],['DEL_over_WT','EIF_over_DEL'],['WT_over_Vector','TPR_over_WT']]
hm_marks = ['H3K4me3','H3K27ac'] | random_line_split |
py4_promoter_DCI_scatter.py | import sys,argparse
import os,glob
import numpy as np
import pandas as pd
import re,bisect
from scipy import stats
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=11
import seaborn as sns
sns.set(font_scale=1.1)
sns.set_style("whitegrid", {'axes.grid' : False})
sns.set_style("ticks",{'ytick.color': 'k','axes.edgecolor': 'k'})
matplotlib.rcParams["font.sans-serif"] = ["Arial"]
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams["mathtext.rm"] = "Arial"
# def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
# dci_file = '{}/{}/{}_{}{}.bed'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
# dci_df = pd.read_csv(dci_file,sep='\t',header=None)
# dci_df.columns=['chr','start','end','DCI']
# dci_df.index = ['_'.join(ii) for ii in dci_df[['chr','start','end']].values.astype(str)]
# return dci_df
def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
if os.path.isfile(dci_file):
dci_df = pd.read_csv(dci_file,sep='\t',index_col=4)
dci_df.columns=['chr','start','end','IfOverlap','score','strand','DCI']
return dci_df
else:
return None
def scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre):
compr_x = compr_type[0]
compr_y = compr_type[1]
test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_y,suffix)
# print(test_file)
if os.path.isfile(test_file):
dci_df_wt_over_vector = return_dci_df(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']>dci_thre].index
dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']<-1*dci_thre].index
dci_df_x = return_dci_df(DCI_dir,subdir,hm_mark,compr_x,suffix)
dci_df_y = return_dci_df(DCI_dir,subdir,hm_mark,compr_y,suffix)
# scatter plot
plt.figure(figsize=(2.1,2.1))
plt.scatter(dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI'],c='tab:grey',s=3,alpha=1,rasterized=True,label='All genes')
plt.scatter(dci_df_x.loc[up_bins,'DCI'],dci_df_y.loc[up_bins,'DCI'],c='tab:red',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
plt.scatter(dci_df_x.loc[dn_bins,'DCI'],dci_df_y.loc[dn_bins,'DCI'],c='tab:blue',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$<{}$ in WT/Vector'.format(-1*dci_thre))
# save and plot the correlation
x,y = dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI']
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
output_prename = '{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre)
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_s'] = r_value
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_p'] = p_value
x_sort = np.sort(x)
plt.plot(x_sort,x_sort*slope+intercept,c = 'k',ls='--',lw=.8)
plt.text(.97,.97,'$r={:.2f}$ '.format(r_value),fontsize=10,transform=plt.axes().transAxes,ha='right',va='top')
plt.axhline(y=0,c='k',lw=1)
plt.axvline(x=0,c='k',lw=1)
# # plt.title('{} over {}'.format(cellType_labels[treatment],cellType_labels[control]))
plt.legend(fontsize=10.5,borderaxespad=0.1,labelspacing=.1,handletextpad=0.1,\
handlelength=1,loc="upper left",markerscale=3,bbox_to_anchor=[-0.12,1.36],frameon=False)
xa,xb = cellType_labels[compr_x.split('_')[0]],cellType_labels[compr_x.split('_')[-1]]
ya,yb = cellType_labels[compr_y.split('_')[0]],cellType_labels[compr_y.split('_')[-1]]
plt.xlabel('DCI score ({} over {})'.format(xa,xb),fontsize=12)
plt.ylabel('DCI score ({} over {})'.format(ya,yb),fontsize=12)
plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,subdir,hm_mark,compr_x,compr_y,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close()
return up_bins,dn_bins
return [],[]
def plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,flag):
|
# ==== main()
cellType_labels= {'Vector':'Vector',\
'WT':'WT',\
'DEL':'$\Delta$cIDR',\
'EIF':'UTX-eIF$_{IDR}$',\
'TPR':'$\Delta$TPR',\
'MT2':'MT2',\
'FUS':'UTX-FUS$_{IDR}$'}
outdir = 'f4_promoter_DCI_scatter'
os.makedirs(outdir,exist_ok=True)
# project_dir="/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
project_dir="/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f2_DEG_promoter_DCI_non_normalized/f1_promoter_DCI_rename'.format(project_dir)
DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'.format(project_dir)
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f0_run_bart3d_new/bart3d_DCI_rename'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/f1_deseq2_out'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/fz_deseq2_out_combined'.format(project_dir)
# deg_df = pd.read_csv('{}/deseq2_combined.csv'.format(expr_dir),index_col=0)
subdirs=['bart3d_dis200k_data_1st_submit','bart3d_dis200k_data202008',
'bart3d_dis500k_data_1st_submit','bart3d_dis500k_data202008']
compr_types = [['WT_over_Vector','DEL_over_WT'],['DEL_over_WT','EIF_over_DEL'],['WT_over_Vector','TPR_over_WT']]
hm_marks = ['H3K4me3','H3K27ac']
suffixes=['_promoter_DCI']
dci_thres = [2,5]
num_DCI_bins_df = pd.DataFrame()
for subdir in subdirs[1:2]:
outdir_tmp='{}/{}'.format(outdir,subdir)
os.makedirs(outdir_tmp,exist_ok=True)
for hm_mark in hm_marks[:]:
for suffix in suffixes[:]:
for dci_thre in dci_thres[1:]:
for compr_type in compr_types[:]:
up_bins,dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre)
# the box plot are exactly the same
if compr_type[1]=='DEL_over_WT':
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# up genes'] = len(up_bins)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# dn genes'] = len(dn_bins)
##### box plot
selected_bins = up_bins
color = 'tab:red'
title = 'Genes w/ DCI$>{}$ \n in WT over Vector'.format(dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'increased')
selected_bins = dn_bins
color = 'tab:blue'
title = 'Genes w/ DCI$<{}$ \n in WT over Vector'.format(-1*dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'decreased')
num_DCI_bins_df.to_csv(outdir+os.sep+'num_DCI_promoter_summary.csv')
| test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
if os.path.isfile(test_file):
box_vals = []
xticklabels = []
sig_vals,sig_colors = [],[]
for compr_col in ['WT_over_Vector','DEL_over_WT','EIF_over_DEL','TPR_over_WT']:
dci_df = return_dci_df(DCI_dir,subdir,hm_mark,compr_col,suffix)
if dci_df is not None:
box_val = dci_df.loc[selected_bins]['DCI'].values
# save the values in box plots
dci_df.loc[selected_bins].to_csv('{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre,compr_col))
s,p = stats.ttest_1samp(box_val,0)
sig_vals.append('*' if p<0.05 else '')
sig_colors.append('b' if s<0 else 'r')
box_vals.append(box_val)
xa,xb = cellType_labels[compr_col.split('_')[0]],cellType_labels[compr_col.split('_')[-1]]
xticklabels.append('{} over {}'.format(xa,xb))
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} s'.format(title.split()[2],compr_col)] = '{:.2f}'.format(s)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} p'.format(title.split()[2],compr_col)] = '{:.2e}'.format(p)
#print(box_vals)
positions = np.arange(len(box_vals))
fig = plt.figure(figsize=(.46*len(box_vals),2.2))
g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\
boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\
medianprops=dict(color='k'),showfliers=False)
# g = plt.violinplot(box_vals)
# for position_id in np.arange(len(positions)):
# scatter_x = np.random.normal(positions[position_id],0.06,len(box_vals[position_id]))
# plt.scatter(scatter_x,box_vals[position_id],color=color,s=5,zorder=0,alpha=0.6,rasterized=True)
# for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t']]:
# mark_pvalue(compr_pos,positions,box_vals)
plt.axes().set_xticklabels(xticklabels,rotation=30,ha='right',fontsize=12)
plt.ylabel('DCI score'.format(hm_mark),fontsize=13)
# plt.ylim([-1,2])
for ii in positions:
plt.scatter(ii,np.median(box_vals[ii]),marker=sig_vals[ii],color='red',s=77)
# plt.axes().text(ii,0,sig_vals[ii-1],fontsize=28,va='top',ha='center',color='red')
plt.axhline(y=0,c='k',lw=1)
plt.title(title,fontsize=12)
# plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc="upper right",frameon=False)
plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close() | identifier_body |
py4_promoter_DCI_scatter.py | import sys,argparse
import os,glob
import numpy as np
import pandas as pd
import re,bisect
from scipy import stats
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=11
import seaborn as sns
sns.set(font_scale=1.1)
sns.set_style("whitegrid", {'axes.grid' : False})
sns.set_style("ticks",{'ytick.color': 'k','axes.edgecolor': 'k'})
matplotlib.rcParams["font.sans-serif"] = ["Arial"]
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams["mathtext.rm"] = "Arial"
# def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
# dci_file = '{}/{}/{}_{}{}.bed'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
# dci_df = pd.read_csv(dci_file,sep='\t',header=None)
# dci_df.columns=['chr','start','end','DCI']
# dci_df.index = ['_'.join(ii) for ii in dci_df[['chr','start','end']].values.astype(str)]
# return dci_df
def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
if os.path.isfile(dci_file):
dci_df = pd.read_csv(dci_file,sep='\t',index_col=4)
dci_df.columns=['chr','start','end','IfOverlap','score','strand','DCI']
return dci_df
else:
return None
def scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre):
compr_x = compr_type[0]
compr_y = compr_type[1]
test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_y,suffix)
# print(test_file)
if os.path.isfile(test_file):
dci_df_wt_over_vector = return_dci_df(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']>dci_thre].index
dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']<-1*dci_thre].index
dci_df_x = return_dci_df(DCI_dir,subdir,hm_mark,compr_x,suffix)
dci_df_y = return_dci_df(DCI_dir,subdir,hm_mark,compr_y,suffix)
# scatter plot
plt.figure(figsize=(2.1,2.1))
plt.scatter(dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI'],c='tab:grey',s=3,alpha=1,rasterized=True,label='All genes')
plt.scatter(dci_df_x.loc[up_bins,'DCI'],dci_df_y.loc[up_bins,'DCI'],c='tab:red',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
plt.scatter(dci_df_x.loc[dn_bins,'DCI'],dci_df_y.loc[dn_bins,'DCI'],c='tab:blue',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$<{}$ in WT/Vector'.format(-1*dci_thre))
# save and plot the correlation
x,y = dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI']
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
output_prename = '{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre)
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_s'] = r_value
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_p'] = p_value
x_sort = np.sort(x)
plt.plot(x_sort,x_sort*slope+intercept,c = 'k',ls='--',lw=.8)
plt.text(.97,.97,'$r={:.2f}$ '.format(r_value),fontsize=10,transform=plt.axes().transAxes,ha='right',va='top')
plt.axhline(y=0,c='k',lw=1)
plt.axvline(x=0,c='k',lw=1)
# # plt.title('{} over {}'.format(cellType_labels[treatment],cellType_labels[control]))
plt.legend(fontsize=10.5,borderaxespad=0.1,labelspacing=.1,handletextpad=0.1,\
handlelength=1,loc="upper left",markerscale=3,bbox_to_anchor=[-0.12,1.36],frameon=False)
xa,xb = cellType_labels[compr_x.split('_')[0]],cellType_labels[compr_x.split('_')[-1]]
ya,yb = cellType_labels[compr_y.split('_')[0]],cellType_labels[compr_y.split('_')[-1]]
plt.xlabel('DCI score ({} over {})'.format(xa,xb),fontsize=12)
plt.ylabel('DCI score ({} over {})'.format(ya,yb),fontsize=12)
plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,subdir,hm_mark,compr_x,compr_y,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close()
return up_bins,dn_bins
return [],[]
def plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,flag):
test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
if os.path.isfile(test_file):
box_vals = []
xticklabels = []
sig_vals,sig_colors = [],[]
for compr_col in ['WT_over_Vector','DEL_over_WT','EIF_over_DEL','TPR_over_WT']:
dci_df = return_dci_df(DCI_dir,subdir,hm_mark,compr_col,suffix)
if dci_df is not None:
box_val = dci_df.loc[selected_bins]['DCI'].values
# save the values in box plots
dci_df.loc[selected_bins].to_csv('{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre,compr_col))
s,p = stats.ttest_1samp(box_val,0)
sig_vals.append('*' if p<0.05 else '')
sig_colors.append('b' if s<0 else 'r')
box_vals.append(box_val)
xa,xb = cellType_labels[compr_col.split('_')[0]],cellType_labels[compr_col.split('_')[-1]]
xticklabels.append('{} over {}'.format(xa,xb))
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} s'.format(title.split()[2],compr_col)] = '{:.2f}'.format(s)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} p'.format(title.split()[2],compr_col)] = '{:.2e}'.format(p)
#print(box_vals)
positions = np.arange(len(box_vals))
fig = plt.figure(figsize=(.46*len(box_vals),2.2))
g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\
boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\
medianprops=dict(color='k'),showfliers=False)
# g = plt.violinplot(box_vals)
# for position_id in np.arange(len(positions)):
# scatter_x = np.random.normal(positions[position_id],0.06,len(box_vals[position_id]))
# plt.scatter(scatter_x,box_vals[position_id],color=color,s=5,zorder=0,alpha=0.6,rasterized=True)
# for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t']]:
# mark_pvalue(compr_pos,positions,box_vals)
plt.axes().set_xticklabels(xticklabels,rotation=30,ha='right',fontsize=12)
plt.ylabel('DCI score'.format(hm_mark),fontsize=13)
# plt.ylim([-1,2])
for ii in positions:
plt.scatter(ii,np.median(box_vals[ii]),marker=sig_vals[ii],color='red',s=77)
# plt.axes().text(ii,0,sig_vals[ii-1],fontsize=28,va='top',ha='center',color='red')
plt.axhline(y=0,c='k',lw=1)
plt.title(title,fontsize=12)
# plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc="upper right",frameon=False)
plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close()
# ==== main()
cellType_labels= {'Vector':'Vector',\
'WT':'WT',\
'DEL':'$\Delta$cIDR',\
'EIF':'UTX-eIF$_{IDR}$',\
'TPR':'$\Delta$TPR',\
'MT2':'MT2',\
'FUS':'UTX-FUS$_{IDR}$'}
outdir = 'f4_promoter_DCI_scatter'
os.makedirs(outdir,exist_ok=True)
# project_dir="/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
project_dir="/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f2_DEG_promoter_DCI_non_normalized/f1_promoter_DCI_rename'.format(project_dir)
DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'.format(project_dir)
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f0_run_bart3d_new/bart3d_DCI_rename'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/f1_deseq2_out'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/fz_deseq2_out_combined'.format(project_dir)
# deg_df = pd.read_csv('{}/deseq2_combined.csv'.format(expr_dir),index_col=0)
subdirs=['bart3d_dis200k_data_1st_submit','bart3d_dis200k_data202008',
'bart3d_dis500k_data_1st_submit','bart3d_dis500k_data202008']
compr_types = [['WT_over_Vector','DEL_over_WT'],['DEL_over_WT','EIF_over_DEL'],['WT_over_Vector','TPR_over_WT']]
hm_marks = ['H3K4me3','H3K27ac']
suffixes=['_promoter_DCI']
dci_thres = [2,5]
num_DCI_bins_df = pd.DataFrame()
for subdir in subdirs[1:2]:
outdir_tmp='{}/{}'.format(outdir,subdir)
os.makedirs(outdir_tmp,exist_ok=True)
for hm_mark in hm_marks[:]:
|
num_DCI_bins_df.to_csv(outdir+os.sep+'num_DCI_promoter_summary.csv')
| for suffix in suffixes[:]:
for dci_thre in dci_thres[1:]:
for compr_type in compr_types[:]:
up_bins,dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre)
# the box plot are exactly the same
if compr_type[1]=='DEL_over_WT':
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# up genes'] = len(up_bins)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# dn genes'] = len(dn_bins)
##### box plot
selected_bins = up_bins
color = 'tab:red'
title = 'Genes w/ DCI$>{}$ \n in WT over Vector'.format(dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'increased')
selected_bins = dn_bins
color = 'tab:blue'
title = 'Genes w/ DCI$<{}$ \n in WT over Vector'.format(-1*dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'decreased') | conditional_block |
py4_promoter_DCI_scatter.py | import sys,argparse
import os,glob
import numpy as np
import pandas as pd
import re,bisect
from scipy import stats
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=11
import seaborn as sns
sns.set(font_scale=1.1)
sns.set_style("whitegrid", {'axes.grid' : False})
sns.set_style("ticks",{'ytick.color': 'k','axes.edgecolor': 'k'})
matplotlib.rcParams["font.sans-serif"] = ["Arial"]
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams["mathtext.rm"] = "Arial"
# def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
# dci_file = '{}/{}/{}_{}{}.bed'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
# dci_df = pd.read_csv(dci_file,sep='\t',header=None)
# dci_df.columns=['chr','start','end','DCI']
# dci_df.index = ['_'.join(ii) for ii in dci_df[['chr','start','end']].values.astype(str)]
# return dci_df
def | (DCI_dir,subdir,hm_mark,compr_type,suffix):
dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
if os.path.isfile(dci_file):
dci_df = pd.read_csv(dci_file,sep='\t',index_col=4)
dci_df.columns=['chr','start','end','IfOverlap','score','strand','DCI']
return dci_df
else:
return None
def scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre):
compr_x = compr_type[0]
compr_y = compr_type[1]
test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_y,suffix)
# print(test_file)
if os.path.isfile(test_file):
dci_df_wt_over_vector = return_dci_df(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']>dci_thre].index
dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']<-1*dci_thre].index
dci_df_x = return_dci_df(DCI_dir,subdir,hm_mark,compr_x,suffix)
dci_df_y = return_dci_df(DCI_dir,subdir,hm_mark,compr_y,suffix)
# scatter plot
plt.figure(figsize=(2.1,2.1))
plt.scatter(dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI'],c='tab:grey',s=3,alpha=1,rasterized=True,label='All genes')
plt.scatter(dci_df_x.loc[up_bins,'DCI'],dci_df_y.loc[up_bins,'DCI'],c='tab:red',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
plt.scatter(dci_df_x.loc[dn_bins,'DCI'],dci_df_y.loc[dn_bins,'DCI'],c='tab:blue',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$<{}$ in WT/Vector'.format(-1*dci_thre))
# save and plot the correlation
x,y = dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI']
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
output_prename = '{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre)
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_s'] = r_value
num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_p'] = p_value
x_sort = np.sort(x)
plt.plot(x_sort,x_sort*slope+intercept,c = 'k',ls='--',lw=.8)
plt.text(.97,.97,'$r={:.2f}$ '.format(r_value),fontsize=10,transform=plt.axes().transAxes,ha='right',va='top')
plt.axhline(y=0,c='k',lw=1)
plt.axvline(x=0,c='k',lw=1)
# # plt.title('{} over {}'.format(cellType_labels[treatment],cellType_labels[control]))
plt.legend(fontsize=10.5,borderaxespad=0.1,labelspacing=.1,handletextpad=0.1,\
handlelength=1,loc="upper left",markerscale=3,bbox_to_anchor=[-0.12,1.36],frameon=False)
xa,xb = cellType_labels[compr_x.split('_')[0]],cellType_labels[compr_x.split('_')[-1]]
ya,yb = cellType_labels[compr_y.split('_')[0]],cellType_labels[compr_y.split('_')[-1]]
plt.xlabel('DCI score ({} over {})'.format(xa,xb),fontsize=12)
plt.ylabel('DCI score ({} over {})'.format(ya,yb),fontsize=12)
plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,subdir,hm_mark,compr_x,compr_y,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close()
return up_bins,dn_bins
return [],[]
def plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,flag):
test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
if os.path.isfile(test_file):
box_vals = []
xticklabels = []
sig_vals,sig_colors = [],[]
for compr_col in ['WT_over_Vector','DEL_over_WT','EIF_over_DEL','TPR_over_WT']:
dci_df = return_dci_df(DCI_dir,subdir,hm_mark,compr_col,suffix)
if dci_df is not None:
box_val = dci_df.loc[selected_bins]['DCI'].values
# save the values in box plots
dci_df.loc[selected_bins].to_csv('{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre,compr_col))
s,p = stats.ttest_1samp(box_val,0)
sig_vals.append('*' if p<0.05 else '')
sig_colors.append('b' if s<0 else 'r')
box_vals.append(box_val)
xa,xb = cellType_labels[compr_col.split('_')[0]],cellType_labels[compr_col.split('_')[-1]]
xticklabels.append('{} over {}'.format(xa,xb))
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} s'.format(title.split()[2],compr_col)] = '{:.2f}'.format(s)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} p'.format(title.split()[2],compr_col)] = '{:.2e}'.format(p)
#print(box_vals)
positions = np.arange(len(box_vals))
fig = plt.figure(figsize=(.46*len(box_vals),2.2))
g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\
boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\
medianprops=dict(color='k'),showfliers=False)
# g = plt.violinplot(box_vals)
# for position_id in np.arange(len(positions)):
# scatter_x = np.random.normal(positions[position_id],0.06,len(box_vals[position_id]))
# plt.scatter(scatter_x,box_vals[position_id],color=color,s=5,zorder=0,alpha=0.6,rasterized=True)
# for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t']]:
# mark_pvalue(compr_pos,positions,box_vals)
plt.axes().set_xticklabels(xticklabels,rotation=30,ha='right',fontsize=12)
plt.ylabel('DCI score'.format(hm_mark),fontsize=13)
# plt.ylim([-1,2])
for ii in positions:
plt.scatter(ii,np.median(box_vals[ii]),marker=sig_vals[ii],color='red',s=77)
# plt.axes().text(ii,0,sig_vals[ii-1],fontsize=28,va='top',ha='center',color='red')
plt.axhline(y=0,c='k',lw=1)
plt.title(title,fontsize=12)
# plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc="upper right",frameon=False)
plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre),\
bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
plt.show()
plt.close()
# ==== main()
cellType_labels= {'Vector':'Vector',\
'WT':'WT',\
'DEL':'$\Delta$cIDR',\
'EIF':'UTX-eIF$_{IDR}$',\
'TPR':'$\Delta$TPR',\
'MT2':'MT2',\
'FUS':'UTX-FUS$_{IDR}$'}
outdir = 'f4_promoter_DCI_scatter'
os.makedirs(outdir,exist_ok=True)
# project_dir="/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
project_dir="/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f2_DEG_promoter_DCI_non_normalized/f1_promoter_DCI_rename'.format(project_dir)
DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'.format(project_dir)
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f0_run_bart3d_new/bart3d_DCI_rename'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/f1_deseq2_out'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/fz_deseq2_out_combined'.format(project_dir)
# deg_df = pd.read_csv('{}/deseq2_combined.csv'.format(expr_dir),index_col=0)
subdirs=['bart3d_dis200k_data_1st_submit','bart3d_dis200k_data202008',
'bart3d_dis500k_data_1st_submit','bart3d_dis500k_data202008']
compr_types = [['WT_over_Vector','DEL_over_WT'],['DEL_over_WT','EIF_over_DEL'],['WT_over_Vector','TPR_over_WT']]
hm_marks = ['H3K4me3','H3K27ac']
suffixes=['_promoter_DCI']
dci_thres = [2,5]
num_DCI_bins_df = pd.DataFrame()
for subdir in subdirs[1:2]:
outdir_tmp='{}/{}'.format(outdir,subdir)
os.makedirs(outdir_tmp,exist_ok=True)
for hm_mark in hm_marks[:]:
for suffix in suffixes[:]:
for dci_thre in dci_thres[1:]:
for compr_type in compr_types[:]:
up_bins,dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre)
# the box plot are exactly the same
if compr_type[1]=='DEL_over_WT':
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# up genes'] = len(up_bins)
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# dn genes'] = len(dn_bins)
##### box plot
selected_bins = up_bins
color = 'tab:red'
title = 'Genes w/ DCI$>{}$ \n in WT over Vector'.format(dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'increased')
selected_bins = dn_bins
color = 'tab:blue'
title = 'Genes w/ DCI$<{}$ \n in WT over Vector'.format(-1*dci_thre)
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'decreased')
num_DCI_bins_df.to_csv(outdir+os.sep+'num_DCI_promoter_summary.csv')
| return_dci_df | identifier_name |
module.js | // requires
const path = require("path")
const { withDb } = Msa.require("db")
const { Sheet } = require('./model')
const { SheetPerm } = require("./perm")
const { SheetParamDict } = require("./params")
const { MsaParamsLocalAdminModule } = Msa.require("params")
//var msaDbFiles = Msa.require("msa-db", "files.js")
//const msaFs = Msa.require("fs")
const { formatHtml } = Msa.require("utils")
const { userMdw } = Msa.require("user")
// class
class MsaSheet extends Msa.Module {
constructor() {
super()
this.initDeps()
this.initApp()
this.initParams()
}
initDeps() {
this.Sheet = Sheet
}
getId(ctx, reqId) {
return reqId
}
getDefaultContent() {
return {
tag: "msa-sheet-boxes",
content: {
tag: "msa-sheet-text"
}
}
}
getUserId(ctx) {
const user = ctx.user
return user ? user.name : ctx.connection.remoteAddress
}
checkPerm(ctx, sheet, expVal, prevVal) {
const perm = deepGet(sheet, "params", "perm").get()
return perm.check(ctx.user, expVal, prevVal)
}
canRead(ctx, sheet) {
return this.checkPerm(ctx, sheet, SheetPerm.READ)
}
canWrite(ctx, sheet) {
return this.checkPerm(ctx, sheet, SheetPerm.WRITE)
}
initApp() {
this.app.get('/:id', userMdw, (req, res, next) => {
const reqId = req.params.id
if (reqId.indexOf('.') >= 0 || reqId[0] === '_')
return next()
withDb(async db => {
const ctx = newCtx(req, { db })
const id = this.getId(ctx, reqId)
const sheet = await this.getSheet(ctx, id)
res.json(sheet)
}).catch(next)
})
this.app.post('/:id', userMdw, (req, res, next) => {
withDb(async db => {
const ctx = newCtx(req, { db })
const id = this.getId(ctx, req.params.id)
const { content } = req.body
const sheet = await this.getSheet(ctx, id)
sheet.content = formatHtml({ body: content })
await this.upsertSheetInDb(ctx, sheet)
res.sendStatus(200)
}).catch(next)
})
this.app.get('/_templates', (req, res, next) => {
res.json(Templates)
})
this.app.use('/:id/_box',
(req, res, next) => {
req.msaSheetArgs = { id: this.getId(null, req.params.id) }
next()
},
TemplatesRouter)
}
async getSheet(ctx, id) {
const dbSheet = await ctx.db.getOne("SELECT id, contentBody, contentHead, createdBy, updatedBy, params FROM msa_sheets WHERE id=:id",
{ id })
const sheet = this.Sheet.newFromDb(id, dbSheet)
if (!dbSheet) sheet.content = formatHtml(this.getDefaultContent())
if (!this.canRead(ctx, sheet))
throw Msa.FORBIDDEN
sheet.editable = this.canWrite(ctx, sheet)
return sheet
}
async upsertSheetInDb(ctx, sheet) {
if (!(await this.updateSheetInDb(ctx, sheet)))
await this.createSheetInDb(ctx, sheet)
}
async createSheetInDb(ctx, sheet) {
if (!this.canWrite(ctx, sheet))
throw Msa.FORBIDDEN
const user = this.getUserId(ctx)
sheet.createdBy = user
sheet.updatedBy = user
await ctx.db.run("INSERT INTO msa_sheets (id, contentBody, contentHead, createdBy, updatedBy) VALUES (:id, :contentBody, :contentHead, :createdBy, :updatedBy)",
sheet.formatForDb(["id", "contentHead", "contentBody", "createdBy", "updatedBy"]))
}
async updateSheetInDb(ctx, sheet) {
if (!this.canWrite(ctx, sheet))
throw Msa.FORBIDDEN
const user = this.getUserId(ctx)
sheet.updatedBy = user
const res = await ctx.db.run("UPDATE msa_sheets SET contentHead=:contentHead, contentBody=:contentBody, updatedBy=:updatedBy WHERE id=:id",
sheet.formatForDb(["id", "contentHead", "contentBody", "updatedBy"]))
return res.nbChanges > 0
}
// params
initParams() {
const Sheet = this.Sheet
this.params = new class extends MsaParamsLocalAdminModule {
async getRootParam(ctx) {
const id = ctx.sheetParamsArgs.id
const dbRow = await ctx.db.getOne("SELECT params FROM msa_sheets WHERE id=:id",
{ id })
return Sheet.newFromDb(id, dbRow).params
}
async updateRootParam(ctx, rootParam) {
const vals = {
id: ctx.sheetParamsArgs.id,
params: rootParam.getAsDbStr()
}
const res = await ctx.db.run("UPDATE msa_sheets SET params=:params WHERE id=:id", vals)
if (res.nbChanges === 0)
await ctx.db.run("INSERT INTO msa_sheets (id, params) VALUES (:id, :params)", vals)
}
}
this.app.use("/:id/_params",
userMdw,
(req, _res, next) => {
req.sheetParamsArgs = {
id: this.getId(req, req.params.id)
}
next()
},
this.params.app)
}
}
// get sheet //////////////////////////////////////////////////////////////////
// get a sheet from DB
/*
MsaSheetPt.getSheet = async function(req, id) {
const dbId = this.getId(req, id)
const dbSheet = await SheetsDb.findOne({ where:{ id:dbId }})
const sheet = (dbSheet !== null) ? {
content: {
head: dbSheet.contentHead,
body: dbSheet.contentBody
},
params: dbSheet.params
} : {
content: formatHtml(this.getDefaultContent()), | if(!this.canRead(req, id, sheet))
throw Msa.FORBIDDEN
sheet.editable = this.canWrite(req, id, sheet)
return sheet
}
*/
/*
MsaSheetPt.getSheet = function(key, args1, args2) {
// args
if(args2===undefined) var args = {}, next = args1
else var args = args1, next = args2
defArg(args, "checkUserPerms", args.hasOwnProperty("user"))
defArg(args, "user", null)
if(!next) next = emptyFun
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// select in DB
const dbKey = this.getDbKey(key)
SheetsDb.find({ where:{ key:dbKey }}).then(
sheet => _getSheet1(this, key, sheet, args, next),
err => next(err))
}
var _getSheet1 = function(self, key, sheet, args, next) {
if(sheet) return _getSheet2(sheet, args, next)
// sheet does not exist: use default content
const defaultContent = self.getDefaultContent()
if(defaultContent===null) return next(null, null)
self.createSheet(key, args, (err, sheet) => {
// convert "unauthorized" (to create sheet) to "page not found"
if(err===401 || err===403) err=404
next(err, sheet)
})
}
var _getSheet2 = function(sheet, args, next) {
// prepare and return sheet
readSheetModel(sheet, args, err => {
next(err, sheet)
})
}
*/
// create sheet //////////////////////////////////////////////////////////////////
/*
// create a sheet (in DB or not)
MsaSheetPt.createSheet = function(key, args1, args2) {
// determine args & next
if(args2===undefined) var args = {}, next = args1
else var args = args1, next = args2
// default args
defArg(args, "checkUserPerms", args.hasOwnProperty("user"))
defArg(args, "user", null)
defArg(args, "ifExist", "get")
defArg(args, "insertInDb", true)
if(!next) next = emptyFun
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// check if sheet already exists
const dbKey = this.getDbKey(key)
SheetsDb.find({ where:{ key:dbKey }}).then(
sheet => _createSheet1(this, dbKey, sheet, args, next),
err => next(err))
}
var _createSheet1 = function(self, dbKey, sheet, args, next) {
// if sheet exists: apply args.ifExist action
if(sheet) {
var ifExist = args.ifExist
if(ifExist=="null") return next(null, null)
else if(ifExist=="error") return next(409) // CONFLICT
else if(ifExist=="get") return _createSheet3(sheet, args, next)
else if(ifExist!="overwrite") return next("Unknown ifExist ["+ifExist+"].")
else if(typeof ifExist==="function") return ifExist(sheet)
else return next("Unknown ifExist ["+ifExist+"].")
}
// check if user has permission to create this sheetType
if(args.checkUserPerms)
if(!self.getCreatePerm().check(args.user))
return next(403)
// create base sheet model
createSheetModel(self, dbKey, args, (err, sheet) => {
if(err) return next(err)
_createSheet2(self, sheet, args, next)
})
}
var _createSheet2 = function(self, sheet, args, next) {
// check if sheet has to be inserted in db
if(!args.insertInDb) return _createSheet3(sheet, args, next)
// insert sheet in db
SheetsDb.upsert(
{ key:sheet.key, content: sheet.content },
{ where: { key:sheet.key }}).then(
() => _createSheet3(sheet, args, next),
err => next(err))
}
var _createSheet3 = function(sheet, args, next) {
// prepare and return sheet
readSheetModel(sheet, args, function(err){
next(err, sheet)
})
}
*/
// update sheet //////////////////////////////////////////////////////////////////
// update a sheet in DB with updates
/*
MsaSheetPt.updateSheet = function(key, update, args1, args2) {
// determine args
if(args2===undefined) var args = {}, next = args1
else var args = args1, next = args2
// default args
defArg(args, "checkUserPerms", args.hasOwnProperty("user"))
defArg(args, "user", null)
defArg(args, "ifNotExist", "create")
defArg(args, "insertInDb", true)
if(!next) next = emptyFun
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// select sheet to update in DB
const dbKey = this.getDbKey(key)
SheetsDb.find({ where:{ key:dbKey }}).then(
sheet => _updateSheet1(this, key, update, sheet, args, next),
err => next(err))
}
var _updateSheet1 = function(self, key, update, sheet, args, next) {
if(sheet) return _updateSheet2(self, update, sheet, args, next)
// sheet does not exist: apply args.ifNotExist action
var ifNotExist = args.ifNotExist
if(ifNotExist=="null") return next(null, null)
else if(ifNotExist=="error") return next(404) // PAGE NOT FOUND
else if(ifNotExist=="create") {
// create sheet
return self.createSheet(key, args, (err, sheet) => {
if(err) return next(err)
_updateSheet2(self, update, sheet, args, next)
})
} else if(typeof ifNotExist==="function") return ifNotExist()
else return next("Unknown ifNotExist ["+ifNotExist+"].")
}
var _updateSheet2 = function(self, update, sheet, args, next) {
// update sheet model
updateSheetModel(sheet, update, args, (err, atLeastOneUpdate) => {
if(err) return next(err)
_updateSheet3(self, sheet, atLeastOneUpdate, args, next)
})
}
var _updateSheet3 = function(self, sheet, atLeastOneUpdate, args, next) {
// insert in DB (if requested & needed)
if(!atLeastOneUpdate || !args.insertInDb) return _updateSheet4(sheet, args, next)
SheetsDb.upsert(
{ key:sheet.key, content:sheet.content },
{ where:{ key:sheet.key }}).then(
() => _updateSheet4(sheet, args, next),
err => next(err))
}
var _updateSheet4 = function(sheet, args, next) {
// prepare and return sheet
readSheetModel(sheet, args, function(err){
next(err, sheet)
})
}
*/
// readSheetModel //////////////////////////////////////////////////////////////////
/*
var readSheetModel = function(sheet, args, next) {
// read callbacks
var readCallbacks = sheet.readCallbacks
if(!readCallbacks) return _readSheetModel2(sheet, args, next)
_readSheetModel1(sheet, readCallbacks, 0, readCallbacks.length, args, next)
}
var _readSheetModel1 = function(sheet, readCallbacks, i, len, args, next) {
// TODO: read callbacks
_readSheetModel2(sheet, args, next)
}
var _readSheetModel2 = function(sheet, args, next) {
// set editable
sheet.editable = checkEditable(sheet, args.user)
// remove mongoDB id
delete sheet._id
next()
}
var checkEditable = function(sheet, user) {
// check sheet owner
if(perm.exprCheck(sheet.owner, user)) return true
// TODO: check content edit perms
return false
}
// createSheetModel //////////////////////////////////////////////////////////////////
var createSheetModel = function(mod, dbKey, args, next) {
// create sheet object
var user = args.user
var sheet = {
key: dbKey,
owner: user ? {or: [{name: user.name}, {group: "admin"}]} : {group: "admin"}
}
// apply sheet type content
// var typeObj = SheetTypes[type]
// if(!typeObj) return next('Unknown sheet type ['+type+'].')
var content = args.content || mod.getDefaultContent()
if(typeof content==="string")
content = parseHtml(content).body[0]
sheet.content = content
// apply write callbacks (if exist)
var writeCallbacks = getSheetWriteCallbacks(content, sheet)
if(!writeCallbacks) return _createSheetModel1(this, sheet, args, next)
applySheetCallbacks(writeCallbacks, err => {
if(err) return next(err)
_createSheetModel1(mod, sheet, args, next)
})
}
var _createSheetModel1 = function(mod, sheet, args, next) {
// call sheetType onCreate callback (if exist)
var onCreate = mod.onCreate
if(!onCreate) return _createSheetModel2(sheet, next)
onCreate(sheet, args, err => {
if(err) return next(err)
_createSheetModel2(sheet, next)
})
}
var _createSheetModel2 = function(sheet, next) {
// prepare sheet
prepareSheetForWrite(sheet)
next(null, sheet)
}
var prepareSheetForWrite = function(sheet) {
// format html
var content = sheet.content
var formattedContent = formatHtml(content)
// add heads
formattedContent.head += getHeads(content)
// update content
sheet.content = formattedContent
}
// updateSheetModel //////////////////////////////////////////////////////////////////
var updateSheetModel = function(sheet, update, args, next) {
// parse sheet & new html
var newContent = update.content
if(!newContent) return next(null, false)
var sheetContent = parseHtml(sheet.content).body
if(typeof newContent==="string")
newContent = parseHtml(newContent).body
// check edit permission
var updKey = (newContent.length===1 && newContent.attrs) ? newContent.attrs['msa-sheet-key'] : null
if(args.checkUserPerms)
if(!checkEditSheetPerm(args.user, sheet, updKey, next))
return
// update all content
if(!updKey) var updatedHtml = sheet.content = newContent
else {
// find content to content
var htmlByKey = getHtmlByKey(sheetContent)
var updatedHtml = htmlByKey[updKey]
if(updatedHtml===undefined)
return next("Box key ["+ updKey +"] does not exist in sheet key ["+sheet.key+"].")
// update content object
for(var a in updatedHtml) delete updatedHtml[a]
for(var a in newContent) updatedHtml[a] = newContent[a]
sheet.content = sheetContent
}
// update new keys
determineNewKeys(updatedHtml)
// call write callbacks (if exist)
var writeCallbacks = getSheetWriteCallbacks(updatedHtml)
if(!writeCallbacks) return _updateSheetModel1(sheet, next)
applySheetCallbacks(writeCallbacks, err => {
if(err) return next(err)
_updateSheetModel1(sheet, next)
})
}
var _updateSheetModel1 = function(sheet, next) {
// prepare
prepareSheetForWrite(sheet)
next(null, sheet)
}
var checkEditSheetPerm = function(user, sheet, updKey) {
return perm.exprCheck(sheet.owner, user)
// TODO: check sheet edit perm from updKey
// TODO: harmonize code with readSheet
}
*/
// renderSheetAsHtml //////////////////////////////////////////////////////////////////
var sheetHead = formatHtml({ wel: "/sheet/msa-sheet.js" }).head
function renderSheetAsHtml(sheet, baseUrl, sheetId) {
const content = sheet.content
return {
head: sheetHead + content.head,
body: `<msa-sheet base-url='${baseUrl}' sheet-id='${sheetId}' editable='${sheet.editable}'>${content.body}</msa-sheet>`
}
}
// Read & Write callbacks ////////////////////////////////////
/*
var ReadCallbacks = []
var getSheetReadCallbacks = function(html, sheet) {
var readCallbacks = []
getSheetCallbacks(html, sheet, ReadCallbacks, readCallbacks)
return readCallbacks
}
var WriteCallbacks = []
var getSheetWriteCallbacks = function(html, sheet) {
var writeCallbacks = []
getSheetCallbacks(html, sheet, WriteCallbacks, writeCallbacks)
return writeCallbacks
}
var getSheetCallbacks = function(html, sheet, Callbacks, sheetCallbacks) {
var type = typeof html
if(type==="object") {
// case array: recursive call on array elements
var len = html.length
if(len!==undefined) {
for(var i=0; i<len; ++i)
getSheetCallbacks(html[i], sheet, Callbacks, sheetCallbacks)
return
}
// case object: check that a callback exists for this tag
var tag = html.tag
if(tag) {
var callback = Callbacks[tag]
if(callback) {
// if so, push callback in result list
sheetCallbacks.push({
fun: callback.fun,
args: [ html, { sheet:sheet } ]
})
}
}
// recursive call on content
getSheetCallbacks(html.content, sheet, Callbacks, sheetCallbacks)
}
}
var applySheetCallbacks = function(callbacks, next) {
_applySheetCallbacks1(callbacks, 0, callbacks.length, next)
}
var _applySheetCallbacks1 = function(callbacks, i, len, next) {
if(i>=len) return next()
var callback = callbacks[i]
var fun = callback.fun, args = callback.args
args.push(function(err){
if(err) return next(err)
_applySheetCallbacks1(callbacks, i+1, len, next)
})
fun.apply(null, args)
}
*/
// perms /////////////////////////////////////////////////
/*
var checkSheetWritePerm = function(type, key, user, next){
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// select in DB
SheetsDb.find({ type:type, key:key }).then(
sheet => next(checkEditable(sheet, user) ? undefined : 403),
err => next(err))
}
var checkSheetWritePermMdw = function(req, res, next){
var params = req.params
checkSheetWritePerm(params.type, params.key, req.session.user, next)
}
*/
// register ///////////////////////////////////////////////////////////////////
// sheet
/*
var SheetTypes = {}
var registerType = MsaSheetPt.registerType = function(type, args) {
if(!type) return
var typeObj = {}
// clone args into typeObj
if(args) for(var a in args) typeObj[a] = args[a]
// default values
defArg(typeObj, "perms", {})
defArg(typeObj.perms, "create", { group:"admin" })
if(typeObj.perms.create instanceof Perm === false)
typeObj.perms.create = new Perm(typeObj.perms.create)
// default content
defArg(typeObj, "content", {
tag: "msa-sheet-boxes",
content: {
tag: "msa-sheet-text"
}
})
// db collection
// defArg(typeObj, "dbCollection", type+"s")
// if(typeof typeObj.dbCollection==="string") {
// typeObj.dbCollection = msaDb.collection(typeObj.dbCollection)
// }
// insert in SheetTypes map
SheetTypes[type] = typeObj
}
*/
// templates
const Templates = {}
const TemplatesRouter = Msa.express.Router()
const registerSheetBoxTemplate = function (tag, template) {
if (!template) template = {}
template.tag = tag
if (template.html)
template.html = formatHtml(template.html)
if (!template.title) template.title = tag
// default args
defArg(template, "img", defaultTemplateImg)
// insert in global map
Templates[tag] = template
// add template module in router (if any)
if (template.mods)
for (let route in template.mods)
TemplatesRouter.use(route, template.mods[route].app)
// register head (if some, or if html is webelement)
var wel = (typeof html === "object") && (html.webelement || html.wel)
if (wel) {
var head = template.head || html
var tag = path.basename(wel, '.html')
registerHead(tag, head)
}
}
var defaultTemplateImg = "<img src='data:image/svg+xml;utf8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20fill%3D%22%23999%22%20viewBox%3D%220%200%201024%201024%22%3E%3Cpath%20d%3D%22M896%200h-768c-70.4%200-128%2057.6-128%20128v768c0%2070.4%2057.6%20128%20128%20128h768c70.4%200%20128-57.6%20128-128v-768c0-70.4-57.6-128-128-128zM896%20896h-768v-768h768v768z%22%2F%3E%3C%2Fsvg%3E'>"
// head
const Heads = {}
function registerHead(tag, head) {
Heads[tag] = formatHtml({ head: head }).head
}
// browse html to determine associated heads
function getHeads(htmlObj) {
var heads = ""
var type = typeof htmlObj
if (type === "object") {
// array
var len = htmlObj.length
if (len !== undefined) {
for (var i = 0; i < len; ++i)
heads += getHeads(htmlObj[i])
return heads
}
// object
var tag = htmlObj.tag
if (tag) {
var head = Heads[tag]
if (head) heads += head
}
// recursive call on content
heads += getHeads(htmlObj.content)
}
return heads
}
// routes ////////////////////////////////////////////////////////////
// attachs
/*
sheetApp.get('/:type/:key/attach/*', function(req, res, next){
var params=req.params, type=params.type, key=params.key, path=params[0]
// build fullPath & update req
var fullPath = getAttachPath(type, key, path)
params[0] = fullPath
// get file
msaDbFiles.getMdw(req, res, function(err){
// if not found, try to find it in drafts
if(err==404) _getDraftAttach(type, key, path, req, res, next)
else if(err) next(err)
})
})
var _getDraftAttach = function(type, key, path, req, res, next){
var params=req.params
// build draftPath & update req
var draftPath = getAttachPath(type, key, 'drafts/'+path)
params[0] = draftPath
// get file
msaDbFiles.getMdw(req, res, next)
}
sheetApp.post('/:type/:key/attach/*', checkSheetWritePermMdw, function(req, res, next){
var params=req.params, type=params.type, key=params.key, path=params[0]
// build fullPath & update req
var fullPath = getAttachPath(type, key, path)
params[0] = fullPath
// post file
msaDbFiles.postMdw(req, res, next)
})
sheetApp.delete('/:type/:key/attach/*', checkSheetWritePermMdw, function(req, res, next){
var params=req.params, type=params.type, key=params.key, path=params[0]
// build fullPath & update req
var fullPath = getAttachPath(type, key, path)
params[0] = fullPath
// delete file
msaDbFiles.deleteMdw(req, res, next)
})
var getAttachPath = function(type, key, file){
return '/sheet_attachs/'+type+'/'+key+'/'+file
}
*/
// common //////////////////////////////////////////
// get arg, with default value
const getArg = function (args, attr, defaultVal) {
var val = args[attr]
return (val === undefined) ? val : defaultVal
}
// set arg if not already defined
const defArg = function (args, attr, val) {
if (args[attr] === undefined) args[attr] = val
}
// check if args are defined
const checkArgs = function (args, mandatoryArgs, next) {
for (var i = 0, len = mandatoryArgs.length; i < len; ++i) {
var key = mandatoryArgs[i]
if (args[key] === undefined) {
var err = 'Missing mandatory argument "' + key + '"'
if (next) next(err)
else throw new Error(err)
return false
}
}
return true
}
const emptyFun = function () { }
const replyJson = function (res, next) {
return function (err, data) {
if (err) return next(err)
res.json(data)
}
}
/*
const getHtmlByKey = function(html) {
var keys = {}
_getHtmlByKey1(html, keys)
return keys
}
const _getHtmlByKey1 = function(html, keys) {
var type = typeof html
if(type==="object") {
// array
var len = html.length
if(len!==undefined) {
for(var i=0; i<len; ++i)
_getHtmlByKey1(html[i], keys)
return
}
// object
var key = html.attrs && html.attrs['msa-sheet-key']
if(key) keys[key] = html
// content
_getHtmlByKey1(html.content, keys)
}
}
*/
// transform keys starting with "new" by true value
/*
const determineNewKeys = function(html) {
var idx = 0
var htmlByKey = getHtmlByKey(html)
for(var key in htmlByKey) {
if(key.substring(0, 3)!=="new") continue
var htmlWithKey = htmlByKey[key]
while(htmlByKey[idx.toString()]!==undefined)
++idx
if(!newBox.attrs) newBox.attrs = {}
newBox.attrs['msa-sheet-key'] = idx.toString()
}
}
*/
function newCtx(req, kwargs) {
const ctx = Object.create(req)
Object.assign(ctx, kwargs)
return ctx
}
function deepGet(obj, key, ...args) {
const obj2 = obj[key]
if (obj2 === undefined) return
if (args.length === 0) return obj2
return deepGet(obj2, ...args)
}
// default templates
// no need to register the head of these web elements, as they are imported directly in msa-sheet.html
registerSheetBoxTemplate("msa-sheet-text", {
title: "Text",
html: { tag: "msa-sheet-text" },
editionSrc: "/sheet/msa-sheet-edition.js:MsaSheetTextEdition",
img: "<img src='data:image/svg+xml;utf8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20fill%3D%22%23999%22%20viewBox%3D%220%200%201024%201024%22%3E%3Cpath%20class%3D%22path1%22%20d%3D%22M896%200h-768c-17.664%200-32%2014.336-32%2032v192c0%2017.664%2014.336%2032%2032%2032h32c17.664%200%2032-14.336%2032-32l64-96h192v768l-160%2064c-17.664%200-32%2014.304-32%2032s14.336%2032%2032%2032h448c17.696%200%2032-14.304%2032-32s-14.304-32-32-32l-160-64v-768h192l64%2096c0%2017.664%2014.304%2032%2032%2032h32c17.696%200%2032-14.336%2032-32v-192c0-17.664-14.304-32-32-32z%22%3E%3C%2Fpath%3E%0A%3C%2Fsvg%3E'>"
})
registerSheetBoxTemplate("msa-sheet-boxes", {
title: "Boxes",
html: { tag: "msa-sheet-boxes" },
editionSrc: "/sheet/msa-sheet-edition.js:MsaSheetBoxesEdition",
img: "<img src='data:image/svg+xml;utf8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20fill%3D%22%23999%22%20viewBox%3D%220%200%201024%201024%22%3E%3Cpath%20d%3D%22M896%200h-768c-70.4%200-128%2057.6-128%20128v768c0%2070.4%2057.6%20128%20128%20128h768c70.4%200%20128-57.6%20128-128v-768c0-70.4-57.6-128-128-128zM896%20896h-768v-768h768v768z%22%2F%3E%3C%2Fsvg%3E'>"
})
// export
module.exports = {
MsaSheet,
renderSheetAsHtml,
registerSheetBoxTemplate,
registerHead
} | params: new SheetParamDict()
} | random_line_split |
module.js | // requires
const path = require("path")
const { withDb } = Msa.require("db")
const { Sheet } = require('./model')
const { SheetPerm } = require("./perm")
const { SheetParamDict } = require("./params")
const { MsaParamsLocalAdminModule } = Msa.require("params")
//var msaDbFiles = Msa.require("msa-db", "files.js")
//const msaFs = Msa.require("fs")
const { formatHtml } = Msa.require("utils")
const { userMdw } = Msa.require("user")
// class
class MsaSheet extends Msa.Module {
constructor() {
super()
this.initDeps()
this.initApp()
this.initParams()
}
initDeps() {
this.Sheet = Sheet
}
getId(ctx, reqId) {
return reqId
}
getDefaultContent() {
return {
tag: "msa-sheet-boxes",
content: {
tag: "msa-sheet-text"
}
}
}
getUserId(ctx) {
const user = ctx.user
return user ? user.name : ctx.connection.remoteAddress
}
checkPerm(ctx, sheet, expVal, prevVal) {
const perm = deepGet(sheet, "params", "perm").get()
return perm.check(ctx.user, expVal, prevVal)
}
canRead(ctx, sheet) {
return this.checkPerm(ctx, sheet, SheetPerm.READ)
}
canWrite(ctx, sheet) {
return this.checkPerm(ctx, sheet, SheetPerm.WRITE)
}
initApp() {
this.app.get('/:id', userMdw, (req, res, next) => {
const reqId = req.params.id
if (reqId.indexOf('.') >= 0 || reqId[0] === '_')
return next()
withDb(async db => {
const ctx = newCtx(req, { db })
const id = this.getId(ctx, reqId)
const sheet = await this.getSheet(ctx, id)
res.json(sheet)
}).catch(next)
})
this.app.post('/:id', userMdw, (req, res, next) => {
withDb(async db => {
const ctx = newCtx(req, { db })
const id = this.getId(ctx, req.params.id)
const { content } = req.body
const sheet = await this.getSheet(ctx, id)
sheet.content = formatHtml({ body: content })
await this.upsertSheetInDb(ctx, sheet)
res.sendStatus(200)
}).catch(next)
})
this.app.get('/_templates', (req, res, next) => {
res.json(Templates)
})
this.app.use('/:id/_box',
(req, res, next) => {
req.msaSheetArgs = { id: this.getId(null, req.params.id) }
next()
},
TemplatesRouter)
}
async getSheet(ctx, id) {
const dbSheet = await ctx.db.getOne("SELECT id, contentBody, contentHead, createdBy, updatedBy, params FROM msa_sheets WHERE id=:id",
{ id })
const sheet = this.Sheet.newFromDb(id, dbSheet)
if (!dbSheet) sheet.content = formatHtml(this.getDefaultContent())
if (!this.canRead(ctx, sheet))
throw Msa.FORBIDDEN
sheet.editable = this.canWrite(ctx, sheet)
return sheet
}
async upsertSheetInDb(ctx, sheet) {
if (!(await this.updateSheetInDb(ctx, sheet)))
await this.createSheetInDb(ctx, sheet)
}
async createSheetInDb(ctx, sheet) {
if (!this.canWrite(ctx, sheet))
throw Msa.FORBIDDEN
const user = this.getUserId(ctx)
sheet.createdBy = user
sheet.updatedBy = user
await ctx.db.run("INSERT INTO msa_sheets (id, contentBody, contentHead, createdBy, updatedBy) VALUES (:id, :contentBody, :contentHead, :createdBy, :updatedBy)",
sheet.formatForDb(["id", "contentHead", "contentBody", "createdBy", "updatedBy"]))
}
async updateSheetInDb(ctx, sheet) {
if (!this.canWrite(ctx, sheet))
throw Msa.FORBIDDEN
const user = this.getUserId(ctx)
sheet.updatedBy = user
const res = await ctx.db.run("UPDATE msa_sheets SET contentHead=:contentHead, contentBody=:contentBody, updatedBy=:updatedBy WHERE id=:id",
sheet.formatForDb(["id", "contentHead", "contentBody", "updatedBy"]))
return res.nbChanges > 0
}
// params
initParams() {
const Sheet = this.Sheet
this.params = new class extends MsaParamsLocalAdminModule {
async | (ctx) {
const id = ctx.sheetParamsArgs.id
const dbRow = await ctx.db.getOne("SELECT params FROM msa_sheets WHERE id=:id",
{ id })
return Sheet.newFromDb(id, dbRow).params
}
async updateRootParam(ctx, rootParam) {
const vals = {
id: ctx.sheetParamsArgs.id,
params: rootParam.getAsDbStr()
}
const res = await ctx.db.run("UPDATE msa_sheets SET params=:params WHERE id=:id", vals)
if (res.nbChanges === 0)
await ctx.db.run("INSERT INTO msa_sheets (id, params) VALUES (:id, :params)", vals)
}
}
this.app.use("/:id/_params",
userMdw,
(req, _res, next) => {
req.sheetParamsArgs = {
id: this.getId(req, req.params.id)
}
next()
},
this.params.app)
}
}
// get sheet //////////////////////////////////////////////////////////////////
// get a sheet from DB
/*
MsaSheetPt.getSheet = async function(req, id) {
const dbId = this.getId(req, id)
const dbSheet = await SheetsDb.findOne({ where:{ id:dbId }})
const sheet = (dbSheet !== null) ? {
content: {
head: dbSheet.contentHead,
body: dbSheet.contentBody
},
params: dbSheet.params
} : {
content: formatHtml(this.getDefaultContent()),
params: new SheetParamDict()
}
if(!this.canRead(req, id, sheet))
throw Msa.FORBIDDEN
sheet.editable = this.canWrite(req, id, sheet)
return sheet
}
*/
/*
MsaSheetPt.getSheet = function(key, args1, args2) {
// args
if(args2===undefined) var args = {}, next = args1
else var args = args1, next = args2
defArg(args, "checkUserPerms", args.hasOwnProperty("user"))
defArg(args, "user", null)
if(!next) next = emptyFun
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// select in DB
const dbKey = this.getDbKey(key)
SheetsDb.find({ where:{ key:dbKey }}).then(
sheet => _getSheet1(this, key, sheet, args, next),
err => next(err))
}
var _getSheet1 = function(self, key, sheet, args, next) {
if(sheet) return _getSheet2(sheet, args, next)
// sheet does not exist: use default content
const defaultContent = self.getDefaultContent()
if(defaultContent===null) return next(null, null)
self.createSheet(key, args, (err, sheet) => {
// convert "unauthorized" (to create sheet) to "page not found"
if(err===401 || err===403) err=404
next(err, sheet)
})
}
var _getSheet2 = function(sheet, args, next) {
// prepare and return sheet
readSheetModel(sheet, args, err => {
next(err, sheet)
})
}
*/
// create sheet //////////////////////////////////////////////////////////////////
/*
// create a sheet (in DB or not)
MsaSheetPt.createSheet = function(key, args1, args2) {
// determine args & next
if(args2===undefined) var args = {}, next = args1
else var args = args1, next = args2
// default args
defArg(args, "checkUserPerms", args.hasOwnProperty("user"))
defArg(args, "user", null)
defArg(args, "ifExist", "get")
defArg(args, "insertInDb", true)
if(!next) next = emptyFun
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// check if sheet already exists
const dbKey = this.getDbKey(key)
SheetsDb.find({ where:{ key:dbKey }}).then(
sheet => _createSheet1(this, dbKey, sheet, args, next),
err => next(err))
}
var _createSheet1 = function(self, dbKey, sheet, args, next) {
// if sheet exists: apply args.ifExist action
if(sheet) {
var ifExist = args.ifExist
if(ifExist=="null") return next(null, null)
else if(ifExist=="error") return next(409) // CONFLICT
else if(ifExist=="get") return _createSheet3(sheet, args, next)
else if(ifExist!="overwrite") return next("Unknown ifExist ["+ifExist+"].")
else if(typeof ifExist==="function") return ifExist(sheet)
else return next("Unknown ifExist ["+ifExist+"].")
}
// check if user has permission to create this sheetType
if(args.checkUserPerms)
if(!self.getCreatePerm().check(args.user))
return next(403)
// create base sheet model
createSheetModel(self, dbKey, args, (err, sheet) => {
if(err) return next(err)
_createSheet2(self, sheet, args, next)
})
}
var _createSheet2 = function(self, sheet, args, next) {
// check if sheet has to be inserted in db
if(!args.insertInDb) return _createSheet3(sheet, args, next)
// insert sheet in db
SheetsDb.upsert(
{ key:sheet.key, content: sheet.content },
{ where: { key:sheet.key }}).then(
() => _createSheet3(sheet, args, next),
err => next(err))
}
var _createSheet3 = function(sheet, args, next) {
// prepare and return sheet
readSheetModel(sheet, args, function(err){
next(err, sheet)
})
}
*/
// update sheet //////////////////////////////////////////////////////////////////
// update a sheet in DB with updates
/*
MsaSheetPt.updateSheet = function(key, update, args1, args2) {
// determine args
if(args2===undefined) var args = {}, next = args1
else var args = args1, next = args2
// default args
defArg(args, "checkUserPerms", args.hasOwnProperty("user"))
defArg(args, "user", null)
defArg(args, "ifNotExist", "create")
defArg(args, "insertInDb", true)
if(!next) next = emptyFun
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// select sheet to update in DB
const dbKey = this.getDbKey(key)
SheetsDb.find({ where:{ key:dbKey }}).then(
sheet => _updateSheet1(this, key, update, sheet, args, next),
err => next(err))
}
var _updateSheet1 = function(self, key, update, sheet, args, next) {
if(sheet) return _updateSheet2(self, update, sheet, args, next)
// sheet does not exist: apply args.ifNotExist action
var ifNotExist = args.ifNotExist
if(ifNotExist=="null") return next(null, null)
else if(ifNotExist=="error") return next(404) // PAGE NOT FOUND
else if(ifNotExist=="create") {
// create sheet
return self.createSheet(key, args, (err, sheet) => {
if(err) return next(err)
_updateSheet2(self, update, sheet, args, next)
})
} else if(typeof ifNotExist==="function") return ifNotExist()
else return next("Unknown ifNotExist ["+ifNotExist+"].")
}
var _updateSheet2 = function(self, update, sheet, args, next) {
// update sheet model
updateSheetModel(sheet, update, args, (err, atLeastOneUpdate) => {
if(err) return next(err)
_updateSheet3(self, sheet, atLeastOneUpdate, args, next)
})
}
var _updateSheet3 = function(self, sheet, atLeastOneUpdate, args, next) {
// insert in DB (if requested & needed)
if(!atLeastOneUpdate || !args.insertInDb) return _updateSheet4(sheet, args, next)
SheetsDb.upsert(
{ key:sheet.key, content:sheet.content },
{ where:{ key:sheet.key }}).then(
() => _updateSheet4(sheet, args, next),
err => next(err))
}
var _updateSheet4 = function(sheet, args, next) {
// prepare and return sheet
readSheetModel(sheet, args, function(err){
next(err, sheet)
})
}
*/
// readSheetModel //////////////////////////////////////////////////////////////////
/*
var readSheetModel = function(sheet, args, next) {
// read callbacks
var readCallbacks = sheet.readCallbacks
if(!readCallbacks) return _readSheetModel2(sheet, args, next)
_readSheetModel1(sheet, readCallbacks, 0, readCallbacks.length, args, next)
}
var _readSheetModel1 = function(sheet, readCallbacks, i, len, args, next) {
// TODO: read callbacks
_readSheetModel2(sheet, args, next)
}
var _readSheetModel2 = function(sheet, args, next) {
// set editable
sheet.editable = checkEditable(sheet, args.user)
// remove mongoDB id
delete sheet._id
next()
}
var checkEditable = function(sheet, user) {
// check sheet owner
if(perm.exprCheck(sheet.owner, user)) return true
// TODO: check content edit perms
return false
}
// createSheetModel //////////////////////////////////////////////////////////////////
var createSheetModel = function(mod, dbKey, args, next) {
// create sheet object
var user = args.user
var sheet = {
key: dbKey,
owner: user ? {or: [{name: user.name}, {group: "admin"}]} : {group: "admin"}
}
// apply sheet type content
// var typeObj = SheetTypes[type]
// if(!typeObj) return next('Unknown sheet type ['+type+'].')
var content = args.content || mod.getDefaultContent()
if(typeof content==="string")
content = parseHtml(content).body[0]
sheet.content = content
// apply write callbacks (if exist)
var writeCallbacks = getSheetWriteCallbacks(content, sheet)
if(!writeCallbacks) return _createSheetModel1(this, sheet, args, next)
applySheetCallbacks(writeCallbacks, err => {
if(err) return next(err)
_createSheetModel1(mod, sheet, args, next)
})
}
var _createSheetModel1 = function(mod, sheet, args, next) {
// call sheetType onCreate callback (if exist)
var onCreate = mod.onCreate
if(!onCreate) return _createSheetModel2(sheet, next)
onCreate(sheet, args, err => {
if(err) return next(err)
_createSheetModel2(sheet, next)
})
}
var _createSheetModel2 = function(sheet, next) {
// prepare sheet
prepareSheetForWrite(sheet)
next(null, sheet)
}
var prepareSheetForWrite = function(sheet) {
// format html
var content = sheet.content
var formattedContent = formatHtml(content)
// add heads
formattedContent.head += getHeads(content)
// update content
sheet.content = formattedContent
}
// updateSheetModel //////////////////////////////////////////////////////////////////
var updateSheetModel = function(sheet, update, args, next) {
// parse sheet & new html
var newContent = update.content
if(!newContent) return next(null, false)
var sheetContent = parseHtml(sheet.content).body
if(typeof newContent==="string")
newContent = parseHtml(newContent).body
// check edit permission
var updKey = (newContent.length===1 && newContent.attrs) ? newContent.attrs['msa-sheet-key'] : null
if(args.checkUserPerms)
if(!checkEditSheetPerm(args.user, sheet, updKey, next))
return
// update all content
if(!updKey) var updatedHtml = sheet.content = newContent
else {
// find content to content
var htmlByKey = getHtmlByKey(sheetContent)
var updatedHtml = htmlByKey[updKey]
if(updatedHtml===undefined)
return next("Box key ["+ updKey +"] does not exist in sheet key ["+sheet.key+"].")
// update content object
for(var a in updatedHtml) delete updatedHtml[a]
for(var a in newContent) updatedHtml[a] = newContent[a]
sheet.content = sheetContent
}
// update new keys
determineNewKeys(updatedHtml)
// call write callbacks (if exist)
var writeCallbacks = getSheetWriteCallbacks(updatedHtml)
if(!writeCallbacks) return _updateSheetModel1(sheet, next)
applySheetCallbacks(writeCallbacks, err => {
if(err) return next(err)
_updateSheetModel1(sheet, next)
})
}
var _updateSheetModel1 = function(sheet, next) {
// prepare
prepareSheetForWrite(sheet)
next(null, sheet)
}
var checkEditSheetPerm = function(user, sheet, updKey) {
return perm.exprCheck(sheet.owner, user)
// TODO: check sheet edit perm from updKey
// TODO: harmonize code with readSheet
}
*/
// renderSheetAsHtml //////////////////////////////////////////////////////////////////
// Head snippet that loads the msa-sheet web element script.
var sheetHead = formatHtml({ wel: "/sheet/msa-sheet.js" }).head
// Render a sheet as an { head, body } HTML fragment, embedding its
// content inside a <msa-sheet> custom element.
function renderSheetAsHtml(sheet, baseUrl, sheetId) {
	const { head, body } = sheet.content
	return {
		head: sheetHead + head,
		body: `<msa-sheet base-url='${baseUrl}' sheet-id='${sheetId}' editable='${sheet.editable}'>${body}</msa-sheet>`
	}
}
// Read & Write callbacks ////////////////////////////////////
/*
var ReadCallbacks = []
var getSheetReadCallbacks = function(html, sheet) {
var readCallbacks = []
getSheetCallbacks(html, sheet, ReadCallbacks, readCallbacks)
return readCallbacks
}
var WriteCallbacks = []
var getSheetWriteCallbacks = function(html, sheet) {
var writeCallbacks = []
getSheetCallbacks(html, sheet, WriteCallbacks, writeCallbacks)
return writeCallbacks
}
var getSheetCallbacks = function(html, sheet, Callbacks, sheetCallbacks) {
var type = typeof html
if(type==="object") {
// case array: recursive call on array elements
var len = html.length
if(len!==undefined) {
for(var i=0; i<len; ++i)
getSheetCallbacks(html[i], sheet, Callbacks, sheetCallbacks)
return
}
// case object: check that a callback exists for this tag
var tag = html.tag
if(tag) {
var callback = Callbacks[tag]
if(callback) {
// if so, push callback in result list
sheetCallbacks.push({
fun: callback.fun,
args: [ html, { sheet:sheet } ]
})
}
}
// recursive call on content
getSheetCallbacks(html.content, sheet, Callbacks, sheetCallbacks)
}
}
var applySheetCallbacks = function(callbacks, next) {
_applySheetCallbacks1(callbacks, 0, callbacks.length, next)
}
var _applySheetCallbacks1 = function(callbacks, i, len, next) {
if(i>=len) return next()
var callback = callbacks[i]
var fun = callback.fun, args = callback.args
args.push(function(err){
if(err) return next(err)
_applySheetCallbacks1(callbacks, i+1, len, next)
})
fun.apply(null, args)
}
*/
// perms /////////////////////////////////////////////////
/*
var checkSheetWritePerm = function(type, key, user, next){
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// select in DB
SheetsDb.find({ type:type, key:key }).then(
sheet => next(checkEditable(sheet, user) ? undefined : 403),
err => next(err))
}
var checkSheetWritePermMdw = function(req, res, next){
var params = req.params
checkSheetWritePerm(params.type, params.key, req.session.user, next)
}
*/
// register ///////////////////////////////////////////////////////////////////
// sheet
/*
var SheetTypes = {}
var registerType = MsaSheetPt.registerType = function(type, args) {
if(!type) return
var typeObj = {}
// clone args into typeObj
if(args) for(var a in args) typeObj[a] = args[a]
// default values
defArg(typeObj, "perms", {})
defArg(typeObj.perms, "create", { group:"admin" })
if(typeObj.perms.create instanceof Perm === false)
typeObj.perms.create = new Perm(typeObj.perms.create)
// default content
defArg(typeObj, "content", {
tag: "msa-sheet-boxes",
content: {
tag: "msa-sheet-text"
}
})
// db collection
// defArg(typeObj, "dbCollection", type+"s")
// if(typeof typeObj.dbCollection==="string") {
// typeObj.dbCollection = msaDb.collection(typeObj.dbCollection)
// }
// insert in SheetTypes map
SheetTypes[type] = typeObj
}
*/
// templates
const Templates = {}
const TemplatesRouter = Msa.express.Router()
// Register a sheet-box template under the given tag: format its html,
// fill in default title/img, mount its sub-modules on TemplatesRouter
// and register the head of its web element (if any).
const registerSheetBoxTemplate = function (tag, template) {
	if (!template) template = {}
	template.tag = tag
	// keep the raw html: the formatted version is a { head, body }
	// object and loses the webelement/wel attributes needed below
	const rawHtml = template.html
	if (template.html)
		template.html = formatHtml(template.html)
	if (!template.title) template.title = tag
	// default args
	defArg(template, "img", defaultTemplateImg)
	// insert in global map
	Templates[tag] = template
	// add template module in router (if any)
	if (template.mods)
		for (let route in template.mods)
			TemplatesRouter.use(route, template.mods[route].app)
	// register head (if some, or if html is webelement)
	// BUGFIX: this branch previously tested the undefined global `html`
	// (so it was dead code); test the template's raw html instead.
	const wel = (typeof rawHtml === "object") && (rawHtml.webelement || rawHtml.wel)
	if (wel) {
		const head = template.head || rawHtml
		// use a distinct name: the original `var tag` re-declared and
		// clobbered the function parameter
		const welTag = path.basename(wel, '.html')
		registerHead(welTag, head)
	}
}
var defaultTemplateImg = "<img src='data:image/svg+xml;utf8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20fill%3D%22%23999%22%20viewBox%3D%220%200%201024%201024%22%3E%3Cpath%20d%3D%22M896%200h-768c-70.4%200-128%2057.6-128%20128v768c0%2070.4%2057.6%20128%20128%20128h768c70.4%200%20128-57.6%20128-128v-768c0-70.4-57.6-128-128-128zM896%20896h-768v-768h768v768z%22%2F%3E%3C%2Fsvg%3E'>"
// head
// Per-tag HTML head snippets, keyed by custom-element tag name.
const Heads = {}
// Register the formatted head snippet associated with a tag.
function registerHead(tag, head) {
	const formatted = formatHtml({ head: head })
	Heads[tag] = formatted.head
}
// browse html to determine associated heads
// Recursively collect the registered head snippets of every tagged node
// found in an html-object tree (array-likes, nodes, nested content).
function getHeads(htmlObj) {
	if (typeof htmlObj !== "object") return ""
	// array-like value: concatenate the heads of each element
	if (htmlObj.length !== undefined) {
		var acc = ""
		for (var j = 0; j < htmlObj.length; ++j)
			acc += getHeads(htmlObj[j])
		return acc
	}
	// single node: add its own head (if registered), then recurse on content
	var res = ""
	if (htmlObj.tag) {
		var h = Heads[htmlObj.tag]
		if (h) res += h
	}
	return res + getHeads(htmlObj.content)
}
// routes ////////////////////////////////////////////////////////////
// attachs
/*
sheetApp.get('/:type/:key/attach/*', function(req, res, next){
var params=req.params, type=params.type, key=params.key, path=params[0]
// build fullPath & update req
var fullPath = getAttachPath(type, key, path)
params[0] = fullPath
// get file
msaDbFiles.getMdw(req, res, function(err){
// if not found, try to find it in drafts
if(err==404) _getDraftAttach(type, key, path, req, res, next)
else if(err) next(err)
})
})
var _getDraftAttach = function(type, key, path, req, res, next){
var params=req.params
// build draftPath & update req
var draftPath = getAttachPath(type, key, 'drafts/'+path)
params[0] = draftPath
// get file
msaDbFiles.getMdw(req, res, next)
}
sheetApp.post('/:type/:key/attach/*', checkSheetWritePermMdw, function(req, res, next){
var params=req.params, type=params.type, key=params.key, path=params[0]
// build fullPath & update req
var fullPath = getAttachPath(type, key, path)
params[0] = fullPath
// post file
msaDbFiles.postMdw(req, res, next)
})
sheetApp.delete('/:type/:key/attach/*', checkSheetWritePermMdw, function(req, res, next){
var params=req.params, type=params.type, key=params.key, path=params[0]
// build fullPath & update req
var fullPath = getAttachPath(type, key, path)
params[0] = fullPath
// delete file
msaDbFiles.deleteMdw(req, res, next)
})
var getAttachPath = function(type, key, file){
return '/sheet_attachs/'+type+'/'+key+'/'+file
}
*/
// common //////////////////////////////////////////
// get arg, with default value
const getArg = function (args, attr, defaultVal) {
var val = args[attr]
return (val === undefined) ? val : defaultVal
}
// set arg if not already defined
const defArg = function (args, attr, val) {
if (args[attr] === undefined) args[attr] = val
}
// check if args are defined
const checkArgs = function (args, mandatoryArgs, next) {
for (var i = 0, len = mandatoryArgs.length; i < len; ++i) {
var key = mandatoryArgs[i]
if (args[key] === undefined) {
var err = 'Missing mandatory argument "' + key + '"'
if (next) next(err)
else throw new Error(err)
return false
}
}
return true
}
const emptyFun = function () { }
const replyJson = function (res, next) {
return function (err, data) {
if (err) return next(err)
res.json(data)
}
}
/*
const getHtmlByKey = function(html) {
var keys = {}
_getHtmlByKey1(html, keys)
return keys
}
const _getHtmlByKey1 = function(html, keys) {
var type = typeof html
if(type==="object") {
// array
var len = html.length
if(len!==undefined) {
for(var i=0; i<len; ++i)
_getHtmlByKey1(html[i], keys)
return
}
// object
var key = html.attrs && html.attrs['msa-sheet-key']
if(key) keys[key] = html
// content
_getHtmlByKey1(html.content, keys)
}
}
*/
// transform keys starting with "new" by true value
/*
const determineNewKeys = function(html) {
var idx = 0
var htmlByKey = getHtmlByKey(html)
for(var key in htmlByKey) {
if(key.substring(0, 3)!=="new") continue
var htmlWithKey = htmlByKey[key]
while(htmlByKey[idx.toString()]!==undefined)
++idx
if(!newBox.attrs) newBox.attrs = {}
newBox.attrs['msa-sheet-key'] = idx.toString()
}
}
*/
function newCtx(req, kwargs) {
const ctx = Object.create(req)
Object.assign(ctx, kwargs)
return ctx
}
function deepGet(obj, key, ...args) {
const obj2 = obj[key]
if (obj2 === undefined) return
if (args.length === 0) return obj2
return deepGet(obj2, ...args)
}
// Built-in box templates.
// Their web-element heads are not registered here because those elements
// are imported directly by msa-sheet.html.
registerSheetBoxTemplate("msa-sheet-text", {
	title: "Text",
	html: { tag: "msa-sheet-text" },
	editionSrc: "/sheet/msa-sheet-edition.js:MsaSheetTextEdition",
	img: "<img src='data:image/svg+xml;utf8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20fill%3D%22%23999%22%20viewBox%3D%220%200%201024%201024%22%3E%3Cpath%20class%3D%22path1%22%20d%3D%22M896%200h-768c-17.664%200-32%2014.336-32%2032v192c0%2017.664%2014.336%2032%2032%2032h32c17.664%200%2032-14.336%2032-32l64-96h192v768l-160%2064c-17.664%200-32%2014.304-32%2032s14.336%2032%2032%2032h448c17.696%200%2032-14.304%2032-32s-14.304-32-32-32l-160-64v-768h192l64%2096c0%2017.664%2014.304%2032%2032%2032h32c17.696%200%2032-14.336%2032-32v-192c0-17.664-14.304-32-32-32z%22%3E%3C%2Fpath%3E%0A%3C%2Fsvg%3E'>"
})
registerSheetBoxTemplate("msa-sheet-boxes", {
	title: "Boxes",
	html: { tag: "msa-sheet-boxes" },
	editionSrc: "/sheet/msa-sheet-edition.js:MsaSheetBoxesEdition",
	img: "<img src='data:image/svg+xml;utf8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20fill%3D%22%23999%22%20viewBox%3D%220%200%201024%201024%22%3E%3Cpath%20d%3D%22M896%200h-768c-70.4%200-128%2057.6-128%20128v768c0%2070.4%2057.6%20128%20128%20128h768c70.4%200%20128-57.6%20128-128v-768c0-70.4-57.6-128-128-128zM896%20896h-768v-768h768v768z%22%2F%3E%3C%2Fsvg%3E'>"
})
// export
// Public API of the sheet module.
module.exports = {
	MsaSheet,
	renderSheetAsHtml,
	registerSheetBoxTemplate,
	registerHead
} | getRootParam | identifier_name |
module.js | // requires
const path = require("path")
const { withDb } = Msa.require("db")
const { Sheet } = require('./model')
const { SheetPerm } = require("./perm")
const { SheetParamDict } = require("./params")
const { MsaParamsLocalAdminModule } = Msa.require("params")
//var msaDbFiles = Msa.require("msa-db", "files.js")
//const msaFs = Msa.require("fs")
const { formatHtml } = Msa.require("utils")
const { userMdw } = Msa.require("user")
// class
// MsaSheet: Msa module serving editable "sheets" (HTML content blocks)
// stored in the msa_sheets DB table, with per-sheet permission params.
class MsaSheet extends Msa.Module {
	constructor() {
		super()
		this.initDeps()
		this.initApp()
		this.initParams()
	}
	// Bind the model class; subclasses may override to plug another Sheet.
	initDeps() {
		this.Sheet = Sheet
	}
	// Map the id from the request to the DB id (identity by default).
	getId(ctx, reqId) {
		return reqId
	}
	// Content used when a sheet does not exist yet in DB.
	getDefaultContent() {
		return {
			tag: "msa-sheet-boxes",
			content: {
				tag: "msa-sheet-text"
			}
		}
	}
	// Identifier stored in createdBy/updatedBy: the user name, or the
	// remote address for anonymous visitors.
	getUserId(ctx) {
		const user = ctx.user
		return user ? user.name : ctx.connection.remoteAddress
	}
	// Check the sheet "perm" param for the expected access level.
	// NOTE(review): assumes sheet.params.perm is always set by
	// Sheet.newFromDb — confirm in ./model.
	checkPerm(ctx, sheet, expVal, prevVal) {
		const perm = deepGet(sheet, "params", "perm").get()
		return perm.check(ctx.user, expVal, prevVal)
	}
	canRead(ctx, sheet) {
		return this.checkPerm(ctx, sheet, SheetPerm.READ)
	}
	canWrite(ctx, sheet) {
		return this.checkPerm(ctx, sheet, SheetPerm.WRITE)
	}
	// HTTP routes.
	initApp() {
		// GET /:id : return the sheet as JSON.
		// Ids containing '.' or starting with '_' fall through, so that
		// internal routes such as "/_templates" below remain reachable.
		this.app.get('/:id', userMdw, (req, res, next) => {
			const reqId = req.params.id
			if (reqId.indexOf('.') >= 0 || reqId[0] === '_')
				return next()
			withDb(async db => {
				const ctx = newCtx(req, { db })
				const id = this.getId(ctx, reqId)
				const sheet = await this.getSheet(ctx, id)
				res.json(sheet)
			}).catch(next)
		})
		// POST /:id : replace the sheet content, then upsert it in DB.
		this.app.post('/:id', userMdw, (req, res, next) => {
			withDb(async db => {
				const ctx = newCtx(req, { db })
				const id = this.getId(ctx, req.params.id)
				const { content } = req.body
				const sheet = await this.getSheet(ctx, id)
				sheet.content = formatHtml({ body: content })
				await this.upsertSheetInDb(ctx, sheet)
				res.sendStatus(200)
			}).catch(next)
		})
		// GET /_templates : the registered sheet-box templates.
		this.app.get('/_templates', (req, res, next) => {
			res.json(Templates)
		})
		// /:id/_box : delegate to the templates sub-router, passing the
		// sheet id along through req.msaSheetArgs.
		this.app.use('/:id/_box',
			(req, res, next) => {
				req.msaSheetArgs = { id: this.getId(null, req.params.id) }
				next()
			},
			TemplatesRouter)
	}
	// Load a sheet by id; falls back to the default content when it is
	// not in DB. Throws Msa.FORBIDDEN when the user cannot read it.
	async getSheet(ctx, id) {
		const dbSheet = await ctx.db.getOne("SELECT id, contentBody, contentHead, createdBy, updatedBy, params FROM msa_sheets WHERE id=:id",
			{ id })
		// NOTE(review): newFromDb is expected to tolerate a null dbSheet
		// (new sheet case) — confirm in ./model.
		const sheet = this.Sheet.newFromDb(id, dbSheet)
		if (!dbSheet) sheet.content = formatHtml(this.getDefaultContent())
		if (!this.canRead(ctx, sheet))
			throw Msa.FORBIDDEN
		// let the client know whether to enable edition
		sheet.editable = this.canWrite(ctx, sheet)
		return sheet
	}
	// Update the sheet row, creating it when the update matched nothing.
	async upsertSheetInDb(ctx, sheet) {
		if (!(await this.updateSheetInDb(ctx, sheet)))
			await this.createSheetInDb(ctx, sheet)
	}
	// Insert a new sheet row. Throws Msa.FORBIDDEN without write perm.
	async createSheetInDb(ctx, sheet) {
		if (!this.canWrite(ctx, sheet))
			throw Msa.FORBIDDEN
		const user = this.getUserId(ctx)
		sheet.createdBy = user
		sheet.updatedBy = user
		await ctx.db.run("INSERT INTO msa_sheets (id, contentBody, contentHead, createdBy, updatedBy) VALUES (:id, :contentBody, :contentHead, :createdBy, :updatedBy)",
			sheet.formatForDb(["id", "contentHead", "contentBody", "createdBy", "updatedBy"]))
	}
	// Update an existing sheet row; returns true when a row was changed.
	async updateSheetInDb(ctx, sheet) {
		if (!this.canWrite(ctx, sheet))
			throw Msa.FORBIDDEN
		const user = this.getUserId(ctx)
		sheet.updatedBy = user
		const res = await ctx.db.run("UPDATE msa_sheets SET contentHead=:contentHead, contentBody=:contentBody, updatedBy=:updatedBy WHERE id=:id",
			sheet.formatForDb(["id", "contentHead", "contentBody", "updatedBy"]))
		return res.nbChanges > 0
	}
	// params
	// Admin sub-module editing the sheet params column, mounted on
	// /:id/_params.
	initParams() {
		const Sheet = this.Sheet
		this.params = new class extends MsaParamsLocalAdminModule {
			// Load the params of the sheet identified by ctx.sheetParamsArgs.id.
			async getRootParam(ctx) {
				const id = ctx.sheetParamsArgs.id
				const dbRow = await ctx.db.getOne("SELECT params FROM msa_sheets WHERE id=:id",
					{ id })
				return Sheet.newFromDb(id, dbRow).params
			}
			// Persist the params, inserting the row when it does not exist yet.
			async updateRootParam(ctx, rootParam) {
				const vals = {
					id: ctx.sheetParamsArgs.id,
					params: rootParam.getAsDbStr()
				}
				const res = await ctx.db.run("UPDATE msa_sheets SET params=:params WHERE id=:id", vals)
				if (res.nbChanges === 0)
					await ctx.db.run("INSERT INTO msa_sheets (id, params) VALUES (:id, :params)", vals)
			}
		}
		this.app.use("/:id/_params",
			userMdw,
			(req, _res, next) => {
				req.sheetParamsArgs = {
					id: this.getId(req, req.params.id)
				}
				next()
			},
			this.params.app)
	}
}
// get sheet //////////////////////////////////////////////////////////////////
// get a sheet from DB
/*
MsaSheetPt.getSheet = async function(req, id) {
const dbId = this.getId(req, id)
const dbSheet = await SheetsDb.findOne({ where:{ id:dbId }})
const sheet = (dbSheet !== null) ? {
content: {
head: dbSheet.contentHead,
body: dbSheet.contentBody
},
params: dbSheet.params
} : {
content: formatHtml(this.getDefaultContent()),
params: new SheetParamDict()
}
if(!this.canRead(req, id, sheet))
throw Msa.FORBIDDEN
sheet.editable = this.canWrite(req, id, sheet)
return sheet
}
*/
/*
MsaSheetPt.getSheet = function(key, args1, args2) {
// args
if(args2===undefined) var args = {}, next = args1
else var args = args1, next = args2
defArg(args, "checkUserPerms", args.hasOwnProperty("user"))
defArg(args, "user", null)
if(!next) next = emptyFun
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// select in DB
const dbKey = this.getDbKey(key)
SheetsDb.find({ where:{ key:dbKey }}).then(
sheet => _getSheet1(this, key, sheet, args, next),
err => next(err))
}
var _getSheet1 = function(self, key, sheet, args, next) {
if(sheet) return _getSheet2(sheet, args, next)
// sheet does not exist: use default content
const defaultContent = self.getDefaultContent()
if(defaultContent===null) return next(null, null)
self.createSheet(key, args, (err, sheet) => {
// convert "unauthorized" (to create sheet) to "page not found"
if(err===401 || err===403) err=404
next(err, sheet)
})
}
var _getSheet2 = function(sheet, args, next) {
// prepare and return sheet
readSheetModel(sheet, args, err => {
next(err, sheet)
})
}
*/
// create sheet //////////////////////////////////////////////////////////////////
/*
// create a sheet (in DB or not)
MsaSheetPt.createSheet = function(key, args1, args2) {
// determine args & next
if(args2===undefined) var args = {}, next = args1
else var args = args1, next = args2
// default args
defArg(args, "checkUserPerms", args.hasOwnProperty("user"))
defArg(args, "user", null)
defArg(args, "ifExist", "get")
defArg(args, "insertInDb", true)
if(!next) next = emptyFun
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// check if sheet already exists
const dbKey = this.getDbKey(key)
SheetsDb.find({ where:{ key:dbKey }}).then(
sheet => _createSheet1(this, dbKey, sheet, args, next),
err => next(err))
}
var _createSheet1 = function(self, dbKey, sheet, args, next) {
// if sheet exists: apply args.ifExist action
if(sheet) {
var ifExist = args.ifExist
if(ifExist=="null") return next(null, null)
else if(ifExist=="error") return next(409) // CONFLICT
else if(ifExist=="get") return _createSheet3(sheet, args, next)
else if(ifExist!="overwrite") return next("Unknown ifExist ["+ifExist+"].")
else if(typeof ifExist==="function") return ifExist(sheet)
else return next("Unknown ifExist ["+ifExist+"].")
}
// check if user has permission to create this sheetType
if(args.checkUserPerms)
if(!self.getCreatePerm().check(args.user))
return next(403)
// create base sheet model
createSheetModel(self, dbKey, args, (err, sheet) => {
if(err) return next(err)
_createSheet2(self, sheet, args, next)
})
}
var _createSheet2 = function(self, sheet, args, next) {
// check if sheet has to be inserted in db
if(!args.insertInDb) return _createSheet3(sheet, args, next)
// insert sheet in db
SheetsDb.upsert(
{ key:sheet.key, content: sheet.content },
{ where: { key:sheet.key }}).then(
() => _createSheet3(sheet, args, next),
err => next(err))
}
var _createSheet3 = function(sheet, args, next) {
// prepare and return sheet
readSheetModel(sheet, args, function(err){
next(err, sheet)
})
}
*/
// update sheet //////////////////////////////////////////////////////////////////
// update a sheet in DB with updates
/*
MsaSheetPt.updateSheet = function(key, update, args1, args2) {
// determine args
if(args2===undefined) var args = {}, next = args1
else var args = args1, next = args2
// default args
defArg(args, "checkUserPerms", args.hasOwnProperty("user"))
defArg(args, "user", null)
defArg(args, "ifNotExist", "create")
defArg(args, "insertInDb", true)
if(!next) next = emptyFun
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// select sheet to update in DB
const dbKey = this.getDbKey(key)
SheetsDb.find({ where:{ key:dbKey }}).then(
sheet => _updateSheet1(this, key, update, sheet, args, next),
err => next(err))
}
var _updateSheet1 = function(self, key, update, sheet, args, next) {
if(sheet) return _updateSheet2(self, update, sheet, args, next)
// sheet does not exist: apply args.ifNotExist action
var ifNotExist = args.ifNotExist
if(ifNotExist=="null") return next(null, null)
else if(ifNotExist=="error") return next(404) // PAGE NOT FOUND
else if(ifNotExist=="create") {
// create sheet
return self.createSheet(key, args, (err, sheet) => {
if(err) return next(err)
_updateSheet2(self, update, sheet, args, next)
})
} else if(typeof ifNotExist==="function") return ifNotExist()
else return next("Unknown ifNotExist ["+ifNotExist+"].")
}
var _updateSheet2 = function(self, update, sheet, args, next) {
// update sheet model
updateSheetModel(sheet, update, args, (err, atLeastOneUpdate) => {
if(err) return next(err)
_updateSheet3(self, sheet, atLeastOneUpdate, args, next)
})
}
var _updateSheet3 = function(self, sheet, atLeastOneUpdate, args, next) {
// insert in DB (if requested & needed)
if(!atLeastOneUpdate || !args.insertInDb) return _updateSheet4(sheet, args, next)
SheetsDb.upsert(
{ key:sheet.key, content:sheet.content },
{ where:{ key:sheet.key }}).then(
() => _updateSheet4(sheet, args, next),
err => next(err))
}
var _updateSheet4 = function(sheet, args, next) {
// prepare and return sheet
readSheetModel(sheet, args, function(err){
next(err, sheet)
})
}
*/
// readSheetModel //////////////////////////////////////////////////////////////////
/*
var readSheetModel = function(sheet, args, next) {
// read callbacks
var readCallbacks = sheet.readCallbacks
if(!readCallbacks) return _readSheetModel2(sheet, args, next)
_readSheetModel1(sheet, readCallbacks, 0, readCallbacks.length, args, next)
}
var _readSheetModel1 = function(sheet, readCallbacks, i, len, args, next) {
// TODO: read callbacks
_readSheetModel2(sheet, args, next)
}
var _readSheetModel2 = function(sheet, args, next) {
// set editable
sheet.editable = checkEditable(sheet, args.user)
// remove mongoDB id
delete sheet._id
next()
}
var checkEditable = function(sheet, user) {
// check sheet owner
if(perm.exprCheck(sheet.owner, user)) return true
// TODO: check content edit perms
return false
}
// createSheetModel //////////////////////////////////////////////////////////////////
var createSheetModel = function(mod, dbKey, args, next) {
// create sheet object
var user = args.user
var sheet = {
key: dbKey,
owner: user ? {or: [{name: user.name}, {group: "admin"}]} : {group: "admin"}
}
// apply sheet type content
// var typeObj = SheetTypes[type]
// if(!typeObj) return next('Unknown sheet type ['+type+'].')
var content = args.content || mod.getDefaultContent()
if(typeof content==="string")
content = parseHtml(content).body[0]
sheet.content = content
// apply write callbacks (if exist)
var writeCallbacks = getSheetWriteCallbacks(content, sheet)
if(!writeCallbacks) return _createSheetModel1(this, sheet, args, next)
applySheetCallbacks(writeCallbacks, err => {
if(err) return next(err)
_createSheetModel1(mod, sheet, args, next)
})
}
var _createSheetModel1 = function(mod, sheet, args, next) {
// call sheetType onCreate callback (if exist)
var onCreate = mod.onCreate
if(!onCreate) return _createSheetModel2(sheet, next)
onCreate(sheet, args, err => {
if(err) return next(err)
_createSheetModel2(sheet, next)
})
}
var _createSheetModel2 = function(sheet, next) {
// prepare sheet
prepareSheetForWrite(sheet)
next(null, sheet)
}
var prepareSheetForWrite = function(sheet) {
// format html
var content = sheet.content
var formattedContent = formatHtml(content)
// add heads
formattedContent.head += getHeads(content)
// update content
sheet.content = formattedContent
}
// updateSheetModel //////////////////////////////////////////////////////////////////
var updateSheetModel = function(sheet, update, args, next) {
// parse sheet & new html
var newContent = update.content
if(!newContent) return next(null, false)
var sheetContent = parseHtml(sheet.content).body
if(typeof newContent==="string")
newContent = parseHtml(newContent).body
// check edit permission
var updKey = (newContent.length===1 && newContent.attrs) ? newContent.attrs['msa-sheet-key'] : null
if(args.checkUserPerms)
if(!checkEditSheetPerm(args.user, sheet, updKey, next))
return
// update all content
if(!updKey) var updatedHtml = sheet.content = newContent
else {
// find content to content
var htmlByKey = getHtmlByKey(sheetContent)
var updatedHtml = htmlByKey[updKey]
if(updatedHtml===undefined)
return next("Box key ["+ updKey +"] does not exist in sheet key ["+sheet.key+"].")
// update content object
for(var a in updatedHtml) delete updatedHtml[a]
for(var a in newContent) updatedHtml[a] = newContent[a]
sheet.content = sheetContent
}
// update new keys
determineNewKeys(updatedHtml)
// call write callbacks (if exist)
var writeCallbacks = getSheetWriteCallbacks(updatedHtml)
if(!writeCallbacks) return _updateSheetModel1(sheet, next)
applySheetCallbacks(writeCallbacks, err => {
if(err) return next(err)
_updateSheetModel1(sheet, next)
})
}
var _updateSheetModel1 = function(sheet, next) {
// prepare
prepareSheetForWrite(sheet)
next(null, sheet)
}
var checkEditSheetPerm = function(user, sheet, updKey) {
return perm.exprCheck(sheet.owner, user)
// TODO: check sheet edit perm from updKey
// TODO: harmonize code with readSheet
}
*/
// renderSheetAsHtml //////////////////////////////////////////////////////////////////
// Head snippet that loads the msa-sheet web element script.
var sheetHead = formatHtml({ wel: "/sheet/msa-sheet.js" }).head
// Render a sheet as an { head, body } HTML fragment, wrapping its
// content in a <msa-sheet> custom element.
function renderSheetAsHtml(sheet, baseUrl, sheetId) {
	const content = sheet.content
	const body = `<msa-sheet base-url='${baseUrl}' sheet-id='${sheetId}' editable='${sheet.editable}'>${content.body}</msa-sheet>`
	return { head: sheetHead + content.head, body: body }
}
// Read & Write callbacks ////////////////////////////////////
/*
var ReadCallbacks = []
var getSheetReadCallbacks = function(html, sheet) {
var readCallbacks = []
getSheetCallbacks(html, sheet, ReadCallbacks, readCallbacks)
return readCallbacks
}
var WriteCallbacks = []
var getSheetWriteCallbacks = function(html, sheet) {
var writeCallbacks = []
getSheetCallbacks(html, sheet, WriteCallbacks, writeCallbacks)
return writeCallbacks
}
var getSheetCallbacks = function(html, sheet, Callbacks, sheetCallbacks) {
var type = typeof html
if(type==="object") {
// case array: recursive call on array elements
var len = html.length
if(len!==undefined) {
for(var i=0; i<len; ++i)
getSheetCallbacks(html[i], sheet, Callbacks, sheetCallbacks)
return
}
// case object: check that a callback exists for this tag
var tag = html.tag
if(tag) {
var callback = Callbacks[tag]
if(callback) {
// if so, push callback in result list
sheetCallbacks.push({
fun: callback.fun,
args: [ html, { sheet:sheet } ]
})
}
}
// recursive call on content
getSheetCallbacks(html.content, sheet, Callbacks, sheetCallbacks)
}
}
var applySheetCallbacks = function(callbacks, next) {
_applySheetCallbacks1(callbacks, 0, callbacks.length, next)
}
var _applySheetCallbacks1 = function(callbacks, i, len, next) {
if(i>=len) return next()
var callback = callbacks[i]
var fun = callback.fun, args = callback.args
args.push(function(err){
if(err) return next(err)
_applySheetCallbacks1(callbacks, i+1, len, next)
})
fun.apply(null, args)
}
*/
// perms /////////////////////////////////////////////////
/*
var checkSheetWritePerm = function(type, key, user, next){
// get sheetType
// var typeObj = SheetTypes[type]
// if(!typeObj) return next("Unknown sheet type ["+type+"].")
// select in DB
SheetsDb.find({ type:type, key:key }).then(
sheet => next(checkEditable(sheet, user) ? undefined : 403),
err => next(err))
}
var checkSheetWritePermMdw = function(req, res, next){
var params = req.params
checkSheetWritePerm(params.type, params.key, req.session.user, next)
}
*/
// register ///////////////////////////////////////////////////////////////////
// sheet
/*
var SheetTypes = {}
var registerType = MsaSheetPt.registerType = function(type, args) {
if(!type) return
var typeObj = {}
// clone args into typeObj
if(args) for(var a in args) typeObj[a] = args[a]
// default values
defArg(typeObj, "perms", {})
defArg(typeObj.perms, "create", { group:"admin" })
if(typeObj.perms.create instanceof Perm === false)
typeObj.perms.create = new Perm(typeObj.perms.create)
// default content
defArg(typeObj, "content", {
tag: "msa-sheet-boxes",
content: {
tag: "msa-sheet-text"
}
})
// db collection
// defArg(typeObj, "dbCollection", type+"s")
// if(typeof typeObj.dbCollection==="string") {
// typeObj.dbCollection = msaDb.collection(typeObj.dbCollection)
// }
// insert in SheetTypes map
SheetTypes[type] = typeObj
}
*/
// templates
const Templates = {}
const TemplatesRouter = Msa.express.Router()
const registerSheetBoxTemplate = function (tag, template) {
if (!template) template = {}
template.tag = tag
if (template.html)
template.html = formatHtml(template.html)
if (!template.title) template.title = tag
// default args
defArg(template, "img", defaultTemplateImg)
// insert in global map
Templates[tag] = template
// add template module in router (if any)
if (template.mods)
for (let route in template.mods)
TemplatesRouter.use(route, template.mods[route].app)
// register head (if some, or if html is webelement)
var wel = (typeof html === "object") && (html.webelement || html.wel)
if (wel) {
var head = template.head || html
var tag = path.basename(wel, '.html')
registerHead(tag, head)
}
}
var defaultTemplateImg = "<img src='data:image/svg+xml;utf8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20fill%3D%22%23999%22%20viewBox%3D%220%200%201024%201024%22%3E%3Cpath%20d%3D%22M896%200h-768c-70.4%200-128%2057.6-128%20128v768c0%2070.4%2057.6%20128%20128%20128h768c70.4%200%20128-57.6%20128-128v-768c0-70.4-57.6-128-128-128zM896%20896h-768v-768h768v768z%22%2F%3E%3C%2Fsvg%3E'>"
// head
const Heads = {}
function registerHead(tag, head) {
Heads[tag] = formatHtml({ head: head }).head
}
// browse html to determine associated heads
function getHeads(htmlObj) {
var heads = ""
var type = typeof htmlObj
if (type === "object") {
// array
var len = htmlObj.length
if (len !== undefined) {
for (var i = 0; i < len; ++i)
heads += getHeads(htmlObj[i])
return heads
}
// object
var tag = htmlObj.tag
if (tag) {
var head = Heads[tag]
if (head) heads += head
}
// recursive call on content
heads += getHeads(htmlObj.content)
}
return heads
}
// routes ////////////////////////////////////////////////////////////
// attachs
/*
sheetApp.get('/:type/:key/attach/*', function(req, res, next){
var params=req.params, type=params.type, key=params.key, path=params[0]
// build fullPath & update req
var fullPath = getAttachPath(type, key, path)
params[0] = fullPath
// get file
msaDbFiles.getMdw(req, res, function(err){
// if not found, try to find it in drafts
if(err==404) _getDraftAttach(type, key, path, req, res, next)
else if(err) next(err)
})
})
var _getDraftAttach = function(type, key, path, req, res, next){
var params=req.params
// build draftPath & update req
var draftPath = getAttachPath(type, key, 'drafts/'+path)
params[0] = draftPath
// get file
msaDbFiles.getMdw(req, res, next)
}
sheetApp.post('/:type/:key/attach/*', checkSheetWritePermMdw, function(req, res, next){
var params=req.params, type=params.type, key=params.key, path=params[0]
// build fullPath & update req
var fullPath = getAttachPath(type, key, path)
params[0] = fullPath
// post file
msaDbFiles.postMdw(req, res, next)
})
sheetApp.delete('/:type/:key/attach/*', checkSheetWritePermMdw, function(req, res, next){
var params=req.params, type=params.type, key=params.key, path=params[0]
// build fullPath & update req
var fullPath = getAttachPath(type, key, path)
params[0] = fullPath
// delete file
msaDbFiles.deleteMdw(req, res, next)
})
var getAttachPath = function(type, key, file){
return '/sheet_attachs/'+type+'/'+key+'/'+file
}
*/
// common //////////////////////////////////////////
// get arg, with default value
const getArg = function (args, attr, defaultVal) {
var val = args[attr]
return (val === undefined) ? val : defaultVal
}
// set arg if not already defined
const defArg = function (args, attr, val) {
if (args[attr] === undefined) args[attr] = val
}
// check if args are defined
const checkArgs = function (args, mandatoryArgs, next) {
for (var i = 0, len = mandatoryArgs.length; i < len; ++i) {
var key = mandatoryArgs[i]
if (args[key] === undefined) {
var err = 'Missing mandatory argument "' + key + '"'
if (next) next(err)
else throw new Error(err)
return false
}
}
return true
}
const emptyFun = function () { }
const replyJson = function (res, next) {
return function (err, data) {
if (err) return next(err)
res.json(data)
}
}
/*
const getHtmlByKey = function(html) {
var keys = {}
_getHtmlByKey1(html, keys)
return keys
}
const _getHtmlByKey1 = function(html, keys) {
var type = typeof html
if(type==="object") {
// array
var len = html.length
if(len!==undefined) {
for(var i=0; i<len; ++i)
_getHtmlByKey1(html[i], keys)
return
}
// object
var key = html.attrs && html.attrs['msa-sheet-key']
if(key) keys[key] = html
// content
_getHtmlByKey1(html.content, keys)
}
}
*/
// transform keys starting with "new" by true value
/*
const determineNewKeys = function(html) {
var idx = 0
var htmlByKey = getHtmlByKey(html)
for(var key in htmlByKey) {
if(key.substring(0, 3)!=="new") continue
var htmlWithKey = htmlByKey[key]
while(htmlByKey[idx.toString()]!==undefined)
++idx
if(!newBox.attrs) newBox.attrs = {}
newBox.attrs['msa-sheet-key'] = idx.toString()
}
}
*/
function newCtx(req, kwargs) |
function deepGet(obj, key, ...args) {
const obj2 = obj[key]
if (obj2 === undefined) return
if (args.length === 0) return obj2
return deepGet(obj2, ...args)
}
// default templates
// no need to register the head of these web elements, as they are imported directly in msa-sheet.html
registerSheetBoxTemplate("msa-sheet-text", {
title: "Text",
html: { tag: "msa-sheet-text" },
editionSrc: "/sheet/msa-sheet-edition.js:MsaSheetTextEdition",
img: "<img src='data:image/svg+xml;utf8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20fill%3D%22%23999%22%20viewBox%3D%220%200%201024%201024%22%3E%3Cpath%20class%3D%22path1%22%20d%3D%22M896%200h-768c-17.664%200-32%2014.336-32%2032v192c0%2017.664%2014.336%2032%2032%2032h32c17.664%200%2032-14.336%2032-32l64-96h192v768l-160%2064c-17.664%200-32%2014.304-32%2032s14.336%2032%2032%2032h448c17.696%200%2032-14.304%2032-32s-14.304-32-32-32l-160-64v-768h192l64%2096c0%2017.664%2014.304%2032%2032%2032h32c17.696%200%2032-14.336%2032-32v-192c0-17.664-14.304-32-32-32z%22%3E%3C%2Fpath%3E%0A%3C%2Fsvg%3E'>"
})
registerSheetBoxTemplate("msa-sheet-boxes", {
title: "Boxes",
html: { tag: "msa-sheet-boxes" },
editionSrc: "/sheet/msa-sheet-edition.js:MsaSheetBoxesEdition",
img: "<img src='data:image/svg+xml;utf8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20fill%3D%22%23999%22%20viewBox%3D%220%200%201024%201024%22%3E%3Cpath%20d%3D%22M896%200h-768c-70.4%200-128%2057.6-128%20128v768c0%2070.4%2057.6%20128%20128%20128h768c70.4%200%20128-57.6%20128-128v-768c0-70.4-57.6-128-128-128zM896%20896h-768v-768h768v768z%22%2F%3E%3C%2Fsvg%3E'>"
})
// export
module.exports = {
MsaSheet,
renderSheetAsHtml,
registerSheetBoxTemplate,
registerHead
} | {
const ctx = Object.create(req)
Object.assign(ctx, kwargs)
return ctx
} | identifier_body |
remote_build.go | package main
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
ggit "gg-scm.io/pkg/git"
"github.com/gobwas/ws"
"github.com/gobwas/ws/wsutil"
"github.com/johnewart/archiver"
"github.com/spf13/cobra"
"github.com/ulikunitz/xz"
"github.com/yourbase/commons/http/headers"
"github.com/yourbase/yb"
"github.com/yourbase/yb/internal/config"
"gopkg.in/src-d/go-git.v4"
gitplumbing "gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"zombiezen.com/go/log"
)
type remoteCmd struct {
cfg config.Getter
target string
baseCommit string
branch string
patchData []byte
patchPath string
repoDir string
noAcceleration bool
disableCache bool
disableSkipper bool
dryRun bool
committed bool
publicRepo bool
backupWorktree bool
remotes []*url.URL
}
func newRemoteCmd(cfg config.Getter) *cobra.Command {
p := &remoteCmd{
cfg: cfg,
}
c := &cobra.Command{
Use: "remotebuild [options] [TARGET]",
Short: "Build a target remotely",
Long: `Builds a target using YourBase infrastructure. If no argument is given, ` +
`uses the target named "` + yb.DefaultTarget + `", if there is one.` +
"\n\n" +
`yb remotebuild will search for the .yourbase.yml file in the current ` +
`directory and its parent directories. The target's commands will be run ` +
`in the directory the .yourbase.yml file appears in.`,
Args: cobra.MaximumNArgs(1),
DisableFlagsInUseLine: true,
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
p.target = yb.DefaultTarget
if len(args) > 0 {
p.target = args[0]
}
return p.run(cmd.Context())
},
ValidArgsFunction: func(cc *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) > 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return autocompleteTargetName(toComplete)
},
}
c.Flags().StringVar(&p.baseCommit, "base-commit", "", "Base commit hash as common ancestor")
c.Flags().StringVar(&p.branch, "branch", "", "Branch name")
c.Flags().StringVar(&p.patchPath, "patch-path", "", "Path to save the patch")
c.Flags().BoolVar(&p.noAcceleration, "no-accel", false, "Disable acceleration")
c.Flags().BoolVar(&p.disableCache, "disable-cache", false, "Disable cache acceleration")
c.Flags().BoolVar(&p.disableSkipper, "disable-skipper", false, "Disable skipping steps acceleration")
c.Flags().BoolVarP(&p.dryRun, "dry-run", "n", false, "Pretend to remote build")
c.Flags().BoolVar(&p.committed, "committed", false, "Only remote build committed changes")
c.Flags().BoolVar(&p.backupWorktree, "backup-worktree", false, "Saves uncommitted work into a tarball")
return c
}
func (p *remoteCmd) run(ctx context.Context) error {
targetPackage, _, err := findPackage()
if err != nil {
return err
}
target := targetPackage.Targets[p.target]
if target == nil {
return fmt.Errorf("%s: no such target (found: %s)", p.target, strings.Join(listTargetNames(targetPackage.Targets), ", "))
}
p.repoDir = targetPackage.Path
workRepo, err := git.PlainOpen(p.repoDir)
if err != nil {
return fmt.Errorf("opening repository %s: %w", p.repoDir, err)
}
g, err := ggit.New(ggit.Options{
Dir: targetPackage.Path,
LogHook: func(ctx context.Context, args []string) {
log.Debugf(ctx, "running git %s", strings.Join(args, " "))
},
})
if err != nil {
return err
}
// Show timing feedback and start tracking spent time
startTime := time.Now()
log.Infof(ctx, "Bootstrapping...")
list, err := workRepo.Remotes()
if err != nil {
return fmt.Errorf("getting remotes for %s: %w", p.repoDir, err)
}
var repoUrls []string
for _, r := range list {
c := r.Config()
repoUrls = append(repoUrls, c.URLs...)
}
project, err := p.fetchProject(ctx, repoUrls)
if err != nil {
return err
}
if project.Repository == "" {
projectURL, err := config.UIURL(p.cfg, fmt.Sprintf("%s/%s", project.OrgSlug, project.Label))
if err != nil {
return err
}
return fmt.Errorf("empty repository for project %s. Please check your project settings at %s", project.Label, projectURL)
}
// First things first:
// 1. Define correct branch name
// 2. Define common ancestor commit
// 3. Generate patch file
// 3.1. Comparing every local commits with the one upstream
// 3.2. Comparing every unstaged/untracked changes with the one upstream
// 3.3. Save the patch and compress it
// 4. Submit build!
ancestorRef, commitCount, branch, err := fastFindAncestor(ctx, workRepo)
if err != nil { // Error
return err
}
p.branch = branch
p.baseCommit = ancestorRef.String()
head, err := workRepo.Head()
if err != nil {
return fmt.Errorf("couldn't find HEAD commit: %w", err)
}
headCommit, err := workRepo.CommitObject(head.Hash())
if err != nil {
return fmt.Errorf("couldn't find HEAD commit: %w", err)
}
ancestorCommit, err := workRepo.CommitObject(ancestorRef)
if err != nil {
return fmt.Errorf("couldn't find merge-base commit: %w", err)
}
// Show feedback: end of bootstrap
endTime := time.Now()
bootTime := endTime.Sub(startTime)
log.Infof(ctx, "Bootstrap finished at %s, taking %s", endTime.Format(longTimeFormat), bootTime.Truncate(time.Millisecond))
// Process patches
startTime = time.Now()
pGenerationChan := make(chan bool)
if p.committed && headCommit.Hash.String() != p.baseCommit {
log.Infof(ctx, "Generating patch for %d commits...", commitCount)
patch, err := ancestorCommit.Patch(headCommit)
if err != nil {
return fmt.Errorf("patch generation failed: %w", err)
}
// This is where the patch is actually generated see #278
go func(ch chan<- bool) {
log.Debugf(ctx, "Starting the actual patch generation...")
p.patchData = []byte(patch.String())
log.Debugf(ctx, "Patch generation finished, only committed changes")
ch <- true
}(pGenerationChan)
} else if !p.committed {
// Apply changes that weren't committed yet
worktree, err := workRepo.Worktree() // current worktree
if err != nil {
return fmt.Errorf("couldn't get current worktree: %w", err)
}
log.Infof(ctx, "Generating patch for local changes...")
// Save files before committing.
log.Debugf(ctx, "Start backing up the worktree-save")
saver, err := newWorktreeSave(targetPackage.Path, ggit.Hash(headCommit.Hash), p.backupWorktree)
if err != nil {
return err
}
if err := p.traverseChanges(ctx, g, saver); err != nil {
return err
}
resetDone := false
if err := saver.save(ctx); err != nil {
return err
}
defer func() {
if !resetDone {
log.Debugf(ctx, "Reset failed, restoring...")
if err := saver.restore(ctx); err != nil {
log.Errorf(ctx,
"Unable to restore kept files at %s: %v\n"+
" Please consider unarchiving that package",
saver.saveFilePath(),
err)
}
}
}()
log.Debugf(ctx, "Committing temporary changes")
latest, err := commitTempChanges(worktree, headCommit)
if err != nil {
return fmt.Errorf("commit to temporary cloned repository failed: %w", err)
}
tempCommit, err := workRepo.CommitObject(latest)
if err != nil {
return fmt.Errorf("can't find commit %q: %w", latest, err)
}
log.Debugf(ctx, "Starting the actual patch generation...")
patch, err := ancestorCommit.Patch(tempCommit)
if err != nil {
return fmt.Errorf("patch generation failed: %w", err)
}
// This is where the patch is actually generated see #278
p.patchData = []byte(patch.String())
log.Debugf(ctx, "Actual patch generation finished")
log.Debugf(ctx, "Reseting worktree to previous state...")
// Reset back to HEAD
if err := worktree.Reset(&git.ResetOptions{
Commit: headCommit.Hash,
}); err != nil {
log.Errorf(ctx, "Unable to reset temporary commit: %v\n Please try `git reset --hard HEAD~1`", err)
} else {
resetDone = true
}
log.Debugf(ctx, "Worktree reset done.")
}
// Show feedback: end of patch generation
endTime = time.Now()
patchTime := endTime.Sub(startTime)
log.Infof(ctx, "Patch finished at %s, taking %s", endTime.Format(longTimeFormat), patchTime.Truncate(time.Millisecond))
if len(p.patchPath) > 0 && len(p.patchData) > 0 {
if err := p.savePatch(); err != nil {
log.Warnf(ctx, "Unable to save copy of generated patch: %v", err)
}
}
if p.dryRun {
log.Infof(ctx, "Dry run ended, build not submitted")
return nil
}
if err := p.submitBuild(ctx, project, target.Tags); err != nil {
return fmt.Errorf("unable to submit build: %w", err)
}
return nil
}
func commitTempChanges(w *git.Worktree, c *object.Commit) (latest gitplumbing.Hash, err error) {
if w == nil || c == nil {
err = fmt.Errorf("Needs a worktree and a commit object")
return
}
latest, err = w.Commit(
"YourBase remote build",
&git.CommitOptions{
Author: &object.Signature{
Name: c.Author.Name,
Email: c.Author.Email,
When: time.Now(),
},
},
)
return
}
func (p *remoteCmd) traverseChanges(ctx context.Context, g *ggit.Git, saver *worktreeSave) error {
workTree, err := g.WorkTree(ctx)
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
status, err := g.Status(ctx, ggit.StatusOptions{
DisableRenames: true,
})
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
var addList []ggit.Pathspec
for _, ent := range status {
if ent.Code[1] == ' ' {
// If file is already staged, then skip.
continue
}
var err error
addList, err = findFilesToAdd(ctx, g, workTree, addList, ent.Name)
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
if !ent.Code.IsMissing() { // No need to add deletion to the saver, right?
if err = saver.add(ctx, filepath.FromSlash(string(ent.Name))); err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
}
}
err = g.Add(ctx, addList, ggit.AddOptions{
IncludeIgnored: true,
})
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
return nil
}
// findFilesToAdd finds files to stage in Git, recursing into directories and
// ignoring any non-text files.
func findFilesToAdd(ctx context.Context, g *ggit.Git, workTree string, dst []ggit.Pathspec, file ggit.TopPath) ([]ggit.Pathspec, error) {
realPath := filepath.Join(workTree, filepath.FromSlash(string(file)))
fi, err := os.Stat(realPath)
if os.IsNotExist(err) {
return dst, nil
}
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
if !fi.IsDir() {
binary, err := isBinary(realPath)
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
log.Debugf(ctx, "%s is binary = %t", file, binary)
if binary {
log.Infof(ctx, "Skipping binary file %s", realPath)
return dst, nil
}
return append(dst, file.Pathspec()), nil
}
log.Debugf(ctx, "Added a dir, checking its contents: %s", file)
dir, err := ioutil.ReadDir(realPath)
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
for _, f := range dir {
var err error
dst, err = findFilesToAdd(ctx, g, workTree, dst, ggit.TopPath(path.Join(string(file), f.Name())))
if err != nil {
return dst, err
}
}
return dst, nil
}
// isBinary returns whether a file contains a NUL byte near the beginning of the file.
func isBinary(filePath string) (bool, error) {
r, err := os.Open(filePath)
if err != nil {
return false, err
}
defer r.Close()
buf := make([]byte, 8000)
n, err := io.ReadFull(r, buf)
if err != nil {
// Ignore EOF, since it's fine for the file to be shorter than the buffer size.
// Otherwise, wrap the error. We don't fully stop the control flow here because
// we may still have read enough data to make a determination.
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
err = nil
} else {
err = fmt.Errorf("check for binary: %w", err)
}
}
for _, b := range buf[:n] {
if b == 0 {
return true, err
}
}
return false, err
}
func postToAPI(cfg config.Getter, path string, formData url.Values) (*http.Response, error) {
userToken, err := config.UserToken(cfg)
if err != nil {
return nil, fmt.Errorf("Couldn't get user token: %v", err)
}
apiURL, err := config.APIURL(cfg, path)
if err != nil {
return nil, fmt.Errorf("Couldn't determine API URL: %v", err)
}
req := &http.Request{
Method: http.MethodPost,
URL: apiURL,
Header: http.Header{
http.CanonicalHeaderKey("YB_API_TOKEN"): {userToken},
headers.ContentType: {"application/x-www-form-urlencoded"},
},
GetBody: func() (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader(formData.Encode())), nil
},
}
req.Body, _ = req.GetBody()
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
return res, nil
}
// buildIDFromLogURL returns the build ID in a build log WebSocket URL.
//
// TODO(ch2570): This should come from the API.
func buildIDFromLogURL(u *url.URL) (string, error) {
// Pattern is /builds/ID/progress
const prefix = "/builds/"
const suffix = "/progress"
if !strings.HasPrefix(u.Path, prefix) || !strings.HasSuffix(u.Path, suffix) {
return "", fmt.Errorf("build ID for %v: unrecognized path", u)
}
id := u.Path[len(prefix) : len(u.Path)-len(suffix)]
if strings.ContainsRune(id, '/') {
return "", fmt.Errorf("build ID for %v: unrecognized path", u)
}
return id, nil
}
// An apiProject is a YourBase project as returned by the API.
type apiProject struct {
ID int `json:"id"`
Label string `json:"label"`
Description string `json:"description"`
Repository string `json:"repository"`
OrgSlug string `json:"organization_slug"`
}
func (p *remoteCmd) fetchProject(ctx context.Context, urls []string) (*apiProject, error) {
v := url.Values{}
fmt.Println()
log.Infof(ctx, "URLs used to search: %s", urls)
for _, u := range urls {
rem, err := ggit.ParseURL(u)
if err != nil {
log.Warnf(ctx, "Invalid remote %s (%v), ignoring", u, err)
continue
}
// We only support GitHub by now
// TODO create something more generic
if rem.Host != "github.com" {
log.Warnf(ctx, "Ignoring remote %s (only github.com supported)", u)
continue
}
p.remotes = append(p.remotes, rem)
v.Add("urls[]", u)
}
resp, err := postToAPI(p.cfg, "search/projects", v)
if err != nil {
return nil, fmt.Errorf("Couldn't lookup project on api server: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Debugf(ctx, "Build server returned HTTP Status %d", resp.StatusCode)
switch resp.StatusCode {
case http.StatusNonAuthoritativeInfo:
p.publicRepo = true
case http.StatusUnauthorized:
return nil, fmt.Errorf("Unauthorized, authentication failed.\nPlease `yb login` again.")
case http.StatusPreconditionFailed, http.StatusNotFound:
return nil, fmt.Errorf("Please verify if this private repository has %s installed.", config.GitHubAppURL())
default:
return nil, fmt.Errorf("This is us, not you, please try again in a few minutes.")
}
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
project := new(apiProject)
err = json.Unmarshal(body, project)
if err != nil {
return nil, err
}
return project, nil
}
func (cmd *remoteCmd) savePatch() error {
err := ioutil.WriteFile(cmd.patchPath, cmd.patchData, 0644)
if err != nil {
return fmt.Errorf("Couldn't save a local patch file at: %s, because: %v", cmd.patchPath, err)
}
return nil
}
func (cmd *remoteCmd) | (ctx context.Context, project *apiProject, tagMap map[string]string) error {
startTime := time.Now()
userToken, err := config.UserToken(cmd.cfg)
if err != nil {
return err
}
patchBuffer := new(bytes.Buffer)
xzWriter, err := xz.NewWriter(patchBuffer)
if err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
if _, err := xzWriter.Write(cmd.patchData); err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
if err := xzWriter.Close(); err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
patchEncoded := base64.StdEncoding.EncodeToString(patchBuffer.Bytes())
formData := url.Values{
"project_id": {strconv.Itoa(project.ID)},
"repository": {project.Repository},
"api_key": {userToken},
"target": {cmd.target},
"patch_data": {patchEncoded},
"commit": {cmd.baseCommit},
"branch": {cmd.branch},
}
tags := make([]string, 0)
for k, v := range tagMap {
tags = append(tags, fmt.Sprintf("%s:%s", k, v))
}
for _, tag := range tags {
formData.Add("tags[]", tag)
}
if cmd.noAcceleration {
formData.Add("no-accel", "True")
}
if cmd.disableCache {
formData.Add("disable-cache", "True")
}
if cmd.disableSkipper {
formData.Add("disable-skipper", "True")
}
resp, err := postToAPI(cmd.cfg, "builds/cli", formData)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("Couldn't read response body: %s", err)
}
switch resp.StatusCode {
case 401:
return fmt.Errorf("Unauthorized, authentication failed.\nPlease `yb login` again.")
case 403:
if cmd.publicRepo {
return fmt.Errorf("This should not happen, please open a support inquery with YB")
} else {
return fmt.Errorf("Tried to build a private repository of a organization of which you're not part of.")
}
case 412:
// TODO Show helpful message with App URL to fix GH App installation issue
return fmt.Errorf("Please verify if this specific repo has %s installed", config.GitHubAppURL())
case 500:
return fmt.Errorf("Internal server error")
}
//Process simple response from the API
body = bytes.ReplaceAll(body, []byte(`"`), nil)
if i := bytes.IndexByte(body, '\n'); i != -1 {
body = body[:i]
}
logURL, err := url.Parse(string(body))
if err != nil {
return fmt.Errorf("server response: parse log URL: %w", err)
}
if logURL.Scheme != "ws" && logURL.Scheme != "wss" {
return fmt.Errorf("server response: parse log URL: unhandled scheme %q", logURL.Scheme)
}
// Construct UI URL to present to the user.
// Fine to proceed in the face of errors: this is displayed as a fallback if
// other things fail.
var uiURL *url.URL
if id, err := buildIDFromLogURL(logURL); err != nil {
log.Warnf(ctx, "Could not construct build link: %v", err)
} else {
uiURL, err = config.UIURL(cmd.cfg, "/"+project.OrgSlug+"/"+project.Label+"/builds/"+id)
if err != nil {
log.Warnf(ctx, "Could not construct build link: %v", err)
}
}
endTime := time.Now()
submitTime := endTime.Sub(startTime)
log.Infof(ctx, "Submission finished at %s, taking %s", endTime.Format(longTimeFormat), submitTime.Truncate(time.Millisecond))
startTime = time.Now()
conn, _, _, err := ws.DefaultDialer.Dial(context.Background(), logURL.String())
if err != nil {
return fmt.Errorf("Cannot connect: %v", err)
}
defer func() {
if err := conn.Close(); err != nil {
log.Debugf(ctx, "Cannot close: %v", err)
}
}()
buildSuccess := false
buildSetupFinished := false
for {
msg, control, err := wsutil.ReadServerData(conn)
if err != nil {
if err != io.EOF {
log.Debugf(ctx, "Unstable connection: %v", err)
} else {
if buildSuccess {
log.Infof(ctx, "Build Completed!")
} else {
log.Errorf(ctx, "Build failed or the connection was interrupted!")
}
if uiURL != nil {
log.Infof(ctx, "Build Log: %v", uiURL)
}
return nil
}
} else {
// TODO This depends on build agent output, try to structure this better
if control.IsData() && strings.Count(string(msg), "Streaming results from build") > 0 {
fmt.Println()
} else if control.IsData() && !buildSetupFinished && len(msg) > 0 {
buildSetupFinished = true
endTime := time.Now()
setupTime := endTime.Sub(startTime)
log.Infof(ctx, "Set up finished at %s, taking %s", endTime.Format(longTimeFormat), setupTime.Truncate(time.Millisecond))
if cmd.publicRepo {
log.Infof(ctx, "Building a public repository: '%s'", project.Repository)
}
if uiURL != nil {
log.Infof(ctx, "Build Log: %v", uiURL)
}
}
if !buildSuccess {
buildSuccess = strings.Count(string(msg), "-- BUILD SUCCEEDED --") > 0
}
os.Stdout.Write(msg)
}
}
}
type worktreeSave struct {
path string
hash ggit.Hash
files []string
}
func newWorktreeSave(path string, hash ggit.Hash, enabled bool) (*worktreeSave, error) {
if !enabled {
return nil, nil
}
if _, err := os.Lstat(path); os.IsNotExist(err) {
return nil, fmt.Errorf("save worktree state: %w", err)
}
return &worktreeSave{
path: path,
hash: hash,
}, nil
}
func (w *worktreeSave) hasFiles() bool {
return w != nil && len(w.files) > 0
}
func (w *worktreeSave) add(ctx context.Context, file string) error {
if w == nil {
return nil
}
fullPath := filepath.Join(w.path, file)
if _, err := os.Lstat(fullPath); os.IsNotExist(err) {
return fmt.Errorf("save worktree state: %w", err)
}
log.Debugf(ctx, "Saving %s to the tarball", file)
w.files = append(w.files, file)
return nil
}
func (w *worktreeSave) saveFilePath() string {
return filepath.Join(w.path, fmt.Sprintf(".yb-worktreesave-%v.tar", w.hash))
}
func (w *worktreeSave) save(ctx context.Context) error {
if !w.hasFiles() {
return nil
}
log.Debugf(ctx, "Saving a tarball with all the worktree changes made")
tar := archiver.Tar{
MkdirAll: true,
}
if err := tar.Archive(w.files, w.saveFilePath()); err != nil {
return fmt.Errorf("save worktree state: %w", err)
}
return nil
}
func (w *worktreeSave) restore(ctx context.Context) error {
if !w.hasFiles() {
return nil
}
log.Debugf(ctx, "Restoring the worktree tarball")
pkgFile := w.saveFilePath()
if _, err := os.Lstat(pkgFile); os.IsNotExist(err) {
return fmt.Errorf("restore worktree state: %w", err)
}
tar := archiver.Tar{OverwriteExisting: true}
if err := tar.Unarchive(pkgFile, w.path); err != nil {
return fmt.Errorf("restore worktree state: %w", err)
}
if err := os.Remove(pkgFile); err != nil {
log.Warnf(ctx, "Failed to clean up temporary worktree save: %v", err)
}
return nil
}
| submitBuild | identifier_name |
remote_build.go | package main
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
ggit "gg-scm.io/pkg/git"
"github.com/gobwas/ws"
"github.com/gobwas/ws/wsutil"
"github.com/johnewart/archiver"
"github.com/spf13/cobra"
"github.com/ulikunitz/xz"
"github.com/yourbase/commons/http/headers"
"github.com/yourbase/yb"
"github.com/yourbase/yb/internal/config"
"gopkg.in/src-d/go-git.v4"
gitplumbing "gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"zombiezen.com/go/log"
)
type remoteCmd struct {
cfg config.Getter
target string
baseCommit string
branch string
patchData []byte
patchPath string
repoDir string
noAcceleration bool
disableCache bool
disableSkipper bool
dryRun bool
committed bool
publicRepo bool
backupWorktree bool
remotes []*url.URL
}
func newRemoteCmd(cfg config.Getter) *cobra.Command {
p := &remoteCmd{
cfg: cfg,
}
c := &cobra.Command{
Use: "remotebuild [options] [TARGET]",
Short: "Build a target remotely",
Long: `Builds a target using YourBase infrastructure. If no argument is given, ` +
`uses the target named "` + yb.DefaultTarget + `", if there is one.` +
"\n\n" +
`yb remotebuild will search for the .yourbase.yml file in the current ` +
`directory and its parent directories. The target's commands will be run ` +
`in the directory the .yourbase.yml file appears in.`,
Args: cobra.MaximumNArgs(1),
DisableFlagsInUseLine: true,
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
p.target = yb.DefaultTarget
if len(args) > 0 {
p.target = args[0]
}
return p.run(cmd.Context())
},
ValidArgsFunction: func(cc *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) > 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return autocompleteTargetName(toComplete)
},
}
c.Flags().StringVar(&p.baseCommit, "base-commit", "", "Base commit hash as common ancestor")
c.Flags().StringVar(&p.branch, "branch", "", "Branch name")
c.Flags().StringVar(&p.patchPath, "patch-path", "", "Path to save the patch")
c.Flags().BoolVar(&p.noAcceleration, "no-accel", false, "Disable acceleration")
c.Flags().BoolVar(&p.disableCache, "disable-cache", false, "Disable cache acceleration")
c.Flags().BoolVar(&p.disableSkipper, "disable-skipper", false, "Disable skipping steps acceleration")
c.Flags().BoolVarP(&p.dryRun, "dry-run", "n", false, "Pretend to remote build")
c.Flags().BoolVar(&p.committed, "committed", false, "Only remote build committed changes")
c.Flags().BoolVar(&p.backupWorktree, "backup-worktree", false, "Saves uncommitted work into a tarball")
return c
}
func (p *remoteCmd) run(ctx context.Context) error {
targetPackage, _, err := findPackage()
if err != nil {
return err
}
target := targetPackage.Targets[p.target]
if target == nil {
return fmt.Errorf("%s: no such target (found: %s)", p.target, strings.Join(listTargetNames(targetPackage.Targets), ", "))
}
p.repoDir = targetPackage.Path
workRepo, err := git.PlainOpen(p.repoDir)
if err != nil {
return fmt.Errorf("opening repository %s: %w", p.repoDir, err)
}
g, err := ggit.New(ggit.Options{
Dir: targetPackage.Path,
LogHook: func(ctx context.Context, args []string) {
log.Debugf(ctx, "running git %s", strings.Join(args, " "))
},
})
if err != nil {
return err
}
// Show timing feedback and start tracking spent time
startTime := time.Now()
log.Infof(ctx, "Bootstrapping...")
list, err := workRepo.Remotes()
if err != nil {
return fmt.Errorf("getting remotes for %s: %w", p.repoDir, err)
}
var repoUrls []string
for _, r := range list {
c := r.Config()
repoUrls = append(repoUrls, c.URLs...)
}
project, err := p.fetchProject(ctx, repoUrls)
if err != nil {
return err
}
if project.Repository == "" {
projectURL, err := config.UIURL(p.cfg, fmt.Sprintf("%s/%s", project.OrgSlug, project.Label))
if err != nil {
return err
}
return fmt.Errorf("empty repository for project %s. Please check your project settings at %s", project.Label, projectURL)
}
// First things first:
// 1. Define correct branch name
// 2. Define common ancestor commit
// 3. Generate patch file
// 3.1. Comparing every local commits with the one upstream
// 3.2. Comparing every unstaged/untracked changes with the one upstream
// 3.3. Save the patch and compress it
// 4. Submit build!
ancestorRef, commitCount, branch, err := fastFindAncestor(ctx, workRepo)
if err != nil { // Error
return err
}
p.branch = branch
p.baseCommit = ancestorRef.String()
head, err := workRepo.Head()
if err != nil {
return fmt.Errorf("couldn't find HEAD commit: %w", err)
}
headCommit, err := workRepo.CommitObject(head.Hash())
if err != nil {
return fmt.Errorf("couldn't find HEAD commit: %w", err)
}
ancestorCommit, err := workRepo.CommitObject(ancestorRef)
if err != nil {
return fmt.Errorf("couldn't find merge-base commit: %w", err)
}
// Show feedback: end of bootstrap
endTime := time.Now()
bootTime := endTime.Sub(startTime)
log.Infof(ctx, "Bootstrap finished at %s, taking %s", endTime.Format(longTimeFormat), bootTime.Truncate(time.Millisecond))
// Process patches
startTime = time.Now()
pGenerationChan := make(chan bool)
if p.committed && headCommit.Hash.String() != p.baseCommit {
log.Infof(ctx, "Generating patch for %d commits...", commitCount)
patch, err := ancestorCommit.Patch(headCommit)
if err != nil {
return fmt.Errorf("patch generation failed: %w", err)
}
// This is where the patch is actually generated see #278
go func(ch chan<- bool) {
log.Debugf(ctx, "Starting the actual patch generation...")
p.patchData = []byte(patch.String())
log.Debugf(ctx, "Patch generation finished, only committed changes")
ch <- true
}(pGenerationChan)
} else if !p.committed {
// Apply changes that weren't committed yet
worktree, err := workRepo.Worktree() // current worktree
if err != nil {
return fmt.Errorf("couldn't get current worktree: %w", err)
}
log.Infof(ctx, "Generating patch for local changes...")
// Save files before committing.
log.Debugf(ctx, "Start backing up the worktree-save")
saver, err := newWorktreeSave(targetPackage.Path, ggit.Hash(headCommit.Hash), p.backupWorktree)
if err != nil {
return err
}
if err := p.traverseChanges(ctx, g, saver); err != nil {
return err
}
resetDone := false
if err := saver.save(ctx); err != nil {
return err
}
defer func() {
if !resetDone {
log.Debugf(ctx, "Reset failed, restoring...")
if err := saver.restore(ctx); err != nil {
log.Errorf(ctx,
"Unable to restore kept files at %s: %v\n"+
" Please consider unarchiving that package",
saver.saveFilePath(),
err)
}
}
}()
log.Debugf(ctx, "Committing temporary changes")
latest, err := commitTempChanges(worktree, headCommit)
if err != nil {
return fmt.Errorf("commit to temporary cloned repository failed: %w", err)
}
tempCommit, err := workRepo.CommitObject(latest)
if err != nil {
return fmt.Errorf("can't find commit %q: %w", latest, err)
}
log.Debugf(ctx, "Starting the actual patch generation...")
patch, err := ancestorCommit.Patch(tempCommit)
if err != nil |
// This is where the patch is actually generated see #278
p.patchData = []byte(patch.String())
log.Debugf(ctx, "Actual patch generation finished")
log.Debugf(ctx, "Reseting worktree to previous state...")
// Reset back to HEAD
if err := worktree.Reset(&git.ResetOptions{
Commit: headCommit.Hash,
}); err != nil {
log.Errorf(ctx, "Unable to reset temporary commit: %v\n Please try `git reset --hard HEAD~1`", err)
} else {
resetDone = true
}
log.Debugf(ctx, "Worktree reset done.")
}
// Show feedback: end of patch generation
endTime = time.Now()
patchTime := endTime.Sub(startTime)
log.Infof(ctx, "Patch finished at %s, taking %s", endTime.Format(longTimeFormat), patchTime.Truncate(time.Millisecond))
if len(p.patchPath) > 0 && len(p.patchData) > 0 {
if err := p.savePatch(); err != nil {
log.Warnf(ctx, "Unable to save copy of generated patch: %v", err)
}
}
if p.dryRun {
log.Infof(ctx, "Dry run ended, build not submitted")
return nil
}
if err := p.submitBuild(ctx, project, target.Tags); err != nil {
return fmt.Errorf("unable to submit build: %w", err)
}
return nil
}
func commitTempChanges(w *git.Worktree, c *object.Commit) (latest gitplumbing.Hash, err error) {
if w == nil || c == nil {
err = fmt.Errorf("Needs a worktree and a commit object")
return
}
latest, err = w.Commit(
"YourBase remote build",
&git.CommitOptions{
Author: &object.Signature{
Name: c.Author.Name,
Email: c.Author.Email,
When: time.Now(),
},
},
)
return
}
func (p *remoteCmd) traverseChanges(ctx context.Context, g *ggit.Git, saver *worktreeSave) error {
workTree, err := g.WorkTree(ctx)
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
status, err := g.Status(ctx, ggit.StatusOptions{
DisableRenames: true,
})
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
var addList []ggit.Pathspec
for _, ent := range status {
if ent.Code[1] == ' ' {
// If file is already staged, then skip.
continue
}
var err error
addList, err = findFilesToAdd(ctx, g, workTree, addList, ent.Name)
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
if !ent.Code.IsMissing() { // No need to add deletion to the saver, right?
if err = saver.add(ctx, filepath.FromSlash(string(ent.Name))); err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
}
}
err = g.Add(ctx, addList, ggit.AddOptions{
IncludeIgnored: true,
})
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
return nil
}
// findFilesToAdd finds files to stage in Git, recursing into directories and
// ignoring any non-text files.
func findFilesToAdd(ctx context.Context, g *ggit.Git, workTree string, dst []ggit.Pathspec, file ggit.TopPath) ([]ggit.Pathspec, error) {
realPath := filepath.Join(workTree, filepath.FromSlash(string(file)))
fi, err := os.Stat(realPath)
if os.IsNotExist(err) {
return dst, nil
}
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
if !fi.IsDir() {
binary, err := isBinary(realPath)
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
log.Debugf(ctx, "%s is binary = %t", file, binary)
if binary {
log.Infof(ctx, "Skipping binary file %s", realPath)
return dst, nil
}
return append(dst, file.Pathspec()), nil
}
log.Debugf(ctx, "Added a dir, checking its contents: %s", file)
dir, err := ioutil.ReadDir(realPath)
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
for _, f := range dir {
var err error
dst, err = findFilesToAdd(ctx, g, workTree, dst, ggit.TopPath(path.Join(string(file), f.Name())))
if err != nil {
return dst, err
}
}
return dst, nil
}
// isBinary returns whether a file contains a NUL byte near the beginning of the file.
func isBinary(filePath string) (bool, error) {
r, err := os.Open(filePath)
if err != nil {
return false, err
}
defer r.Close()
buf := make([]byte, 8000)
n, err := io.ReadFull(r, buf)
if err != nil {
// Ignore EOF, since it's fine for the file to be shorter than the buffer size.
// Otherwise, wrap the error. We don't fully stop the control flow here because
// we may still have read enough data to make a determination.
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
err = nil
} else {
err = fmt.Errorf("check for binary: %w", err)
}
}
for _, b := range buf[:n] {
if b == 0 {
return true, err
}
}
return false, err
}
func postToAPI(cfg config.Getter, path string, formData url.Values) (*http.Response, error) {
userToken, err := config.UserToken(cfg)
if err != nil {
return nil, fmt.Errorf("Couldn't get user token: %v", err)
}
apiURL, err := config.APIURL(cfg, path)
if err != nil {
return nil, fmt.Errorf("Couldn't determine API URL: %v", err)
}
req := &http.Request{
Method: http.MethodPost,
URL: apiURL,
Header: http.Header{
http.CanonicalHeaderKey("YB_API_TOKEN"): {userToken},
headers.ContentType: {"application/x-www-form-urlencoded"},
},
GetBody: func() (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader(formData.Encode())), nil
},
}
req.Body, _ = req.GetBody()
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
return res, nil
}
// buildIDFromLogURL returns the build ID in a build log WebSocket URL.
//
// TODO(ch2570): This should come from the API.
func buildIDFromLogURL(u *url.URL) (string, error) {
// Pattern is /builds/ID/progress
const prefix = "/builds/"
const suffix = "/progress"
if !strings.HasPrefix(u.Path, prefix) || !strings.HasSuffix(u.Path, suffix) {
return "", fmt.Errorf("build ID for %v: unrecognized path", u)
}
id := u.Path[len(prefix) : len(u.Path)-len(suffix)]
if strings.ContainsRune(id, '/') {
return "", fmt.Errorf("build ID for %v: unrecognized path", u)
}
return id, nil
}
// An apiProject is a YourBase project as returned by the API.
type apiProject struct {
ID int `json:"id"`
Label string `json:"label"`
Description string `json:"description"`
Repository string `json:"repository"`
OrgSlug string `json:"organization_slug"`
}
func (p *remoteCmd) fetchProject(ctx context.Context, urls []string) (*apiProject, error) {
v := url.Values{}
fmt.Println()
log.Infof(ctx, "URLs used to search: %s", urls)
for _, u := range urls {
rem, err := ggit.ParseURL(u)
if err != nil {
log.Warnf(ctx, "Invalid remote %s (%v), ignoring", u, err)
continue
}
// We only support GitHub by now
// TODO create something more generic
if rem.Host != "github.com" {
log.Warnf(ctx, "Ignoring remote %s (only github.com supported)", u)
continue
}
p.remotes = append(p.remotes, rem)
v.Add("urls[]", u)
}
resp, err := postToAPI(p.cfg, "search/projects", v)
if err != nil {
return nil, fmt.Errorf("Couldn't lookup project on api server: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Debugf(ctx, "Build server returned HTTP Status %d", resp.StatusCode)
switch resp.StatusCode {
case http.StatusNonAuthoritativeInfo:
p.publicRepo = true
case http.StatusUnauthorized:
return nil, fmt.Errorf("Unauthorized, authentication failed.\nPlease `yb login` again.")
case http.StatusPreconditionFailed, http.StatusNotFound:
return nil, fmt.Errorf("Please verify if this private repository has %s installed.", config.GitHubAppURL())
default:
return nil, fmt.Errorf("This is us, not you, please try again in a few minutes.")
}
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
project := new(apiProject)
err = json.Unmarshal(body, project)
if err != nil {
return nil, err
}
return project, nil
}
func (cmd *remoteCmd) savePatch() error {
err := ioutil.WriteFile(cmd.patchPath, cmd.patchData, 0644)
if err != nil {
return fmt.Errorf("Couldn't save a local patch file at: %s, because: %v", cmd.patchPath, err)
}
return nil
}
func (cmd *remoteCmd) submitBuild(ctx context.Context, project *apiProject, tagMap map[string]string) error {
startTime := time.Now()
userToken, err := config.UserToken(cmd.cfg)
if err != nil {
return err
}
patchBuffer := new(bytes.Buffer)
xzWriter, err := xz.NewWriter(patchBuffer)
if err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
if _, err := xzWriter.Write(cmd.patchData); err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
if err := xzWriter.Close(); err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
patchEncoded := base64.StdEncoding.EncodeToString(patchBuffer.Bytes())
formData := url.Values{
"project_id": {strconv.Itoa(project.ID)},
"repository": {project.Repository},
"api_key": {userToken},
"target": {cmd.target},
"patch_data": {patchEncoded},
"commit": {cmd.baseCommit},
"branch": {cmd.branch},
}
tags := make([]string, 0)
for k, v := range tagMap {
tags = append(tags, fmt.Sprintf("%s:%s", k, v))
}
for _, tag := range tags {
formData.Add("tags[]", tag)
}
if cmd.noAcceleration {
formData.Add("no-accel", "True")
}
if cmd.disableCache {
formData.Add("disable-cache", "True")
}
if cmd.disableSkipper {
formData.Add("disable-skipper", "True")
}
resp, err := postToAPI(cmd.cfg, "builds/cli", formData)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("Couldn't read response body: %s", err)
}
switch resp.StatusCode {
case 401:
return fmt.Errorf("Unauthorized, authentication failed.\nPlease `yb login` again.")
case 403:
if cmd.publicRepo {
return fmt.Errorf("This should not happen, please open a support inquery with YB")
} else {
return fmt.Errorf("Tried to build a private repository of a organization of which you're not part of.")
}
case 412:
// TODO Show helpful message with App URL to fix GH App installation issue
return fmt.Errorf("Please verify if this specific repo has %s installed", config.GitHubAppURL())
case 500:
return fmt.Errorf("Internal server error")
}
//Process simple response from the API
body = bytes.ReplaceAll(body, []byte(`"`), nil)
if i := bytes.IndexByte(body, '\n'); i != -1 {
body = body[:i]
}
logURL, err := url.Parse(string(body))
if err != nil {
return fmt.Errorf("server response: parse log URL: %w", err)
}
if logURL.Scheme != "ws" && logURL.Scheme != "wss" {
return fmt.Errorf("server response: parse log URL: unhandled scheme %q", logURL.Scheme)
}
// Construct UI URL to present to the user.
// Fine to proceed in the face of errors: this is displayed as a fallback if
// other things fail.
var uiURL *url.URL
if id, err := buildIDFromLogURL(logURL); err != nil {
log.Warnf(ctx, "Could not construct build link: %v", err)
} else {
uiURL, err = config.UIURL(cmd.cfg, "/"+project.OrgSlug+"/"+project.Label+"/builds/"+id)
if err != nil {
log.Warnf(ctx, "Could not construct build link: %v", err)
}
}
endTime := time.Now()
submitTime := endTime.Sub(startTime)
log.Infof(ctx, "Submission finished at %s, taking %s", endTime.Format(longTimeFormat), submitTime.Truncate(time.Millisecond))
startTime = time.Now()
conn, _, _, err := ws.DefaultDialer.Dial(context.Background(), logURL.String())
if err != nil {
return fmt.Errorf("Cannot connect: %v", err)
}
defer func() {
if err := conn.Close(); err != nil {
log.Debugf(ctx, "Cannot close: %v", err)
}
}()
buildSuccess := false
buildSetupFinished := false
for {
msg, control, err := wsutil.ReadServerData(conn)
if err != nil {
if err != io.EOF {
log.Debugf(ctx, "Unstable connection: %v", err)
} else {
if buildSuccess {
log.Infof(ctx, "Build Completed!")
} else {
log.Errorf(ctx, "Build failed or the connection was interrupted!")
}
if uiURL != nil {
log.Infof(ctx, "Build Log: %v", uiURL)
}
return nil
}
} else {
// TODO This depends on build agent output, try to structure this better
if control.IsData() && strings.Count(string(msg), "Streaming results from build") > 0 {
fmt.Println()
} else if control.IsData() && !buildSetupFinished && len(msg) > 0 {
buildSetupFinished = true
endTime := time.Now()
setupTime := endTime.Sub(startTime)
log.Infof(ctx, "Set up finished at %s, taking %s", endTime.Format(longTimeFormat), setupTime.Truncate(time.Millisecond))
if cmd.publicRepo {
log.Infof(ctx, "Building a public repository: '%s'", project.Repository)
}
if uiURL != nil {
log.Infof(ctx, "Build Log: %v", uiURL)
}
}
if !buildSuccess {
buildSuccess = strings.Count(string(msg), "-- BUILD SUCCEEDED --") > 0
}
os.Stdout.Write(msg)
}
}
}
type worktreeSave struct {
path string
hash ggit.Hash
files []string
}
func newWorktreeSave(path string, hash ggit.Hash, enabled bool) (*worktreeSave, error) {
if !enabled {
return nil, nil
}
if _, err := os.Lstat(path); os.IsNotExist(err) {
return nil, fmt.Errorf("save worktree state: %w", err)
}
return &worktreeSave{
path: path,
hash: hash,
}, nil
}
func (w *worktreeSave) hasFiles() bool {
return w != nil && len(w.files) > 0
}
func (w *worktreeSave) add(ctx context.Context, file string) error {
if w == nil {
return nil
}
fullPath := filepath.Join(w.path, file)
if _, err := os.Lstat(fullPath); os.IsNotExist(err) {
return fmt.Errorf("save worktree state: %w", err)
}
log.Debugf(ctx, "Saving %s to the tarball", file)
w.files = append(w.files, file)
return nil
}
func (w *worktreeSave) saveFilePath() string {
return filepath.Join(w.path, fmt.Sprintf(".yb-worktreesave-%v.tar", w.hash))
}
func (w *worktreeSave) save(ctx context.Context) error {
if !w.hasFiles() {
return nil
}
log.Debugf(ctx, "Saving a tarball with all the worktree changes made")
tar := archiver.Tar{
MkdirAll: true,
}
if err := tar.Archive(w.files, w.saveFilePath()); err != nil {
return fmt.Errorf("save worktree state: %w", err)
}
return nil
}
func (w *worktreeSave) restore(ctx context.Context) error {
if !w.hasFiles() {
return nil
}
log.Debugf(ctx, "Restoring the worktree tarball")
pkgFile := w.saveFilePath()
if _, err := os.Lstat(pkgFile); os.IsNotExist(err) {
return fmt.Errorf("restore worktree state: %w", err)
}
tar := archiver.Tar{OverwriteExisting: true}
if err := tar.Unarchive(pkgFile, w.path); err != nil {
return fmt.Errorf("restore worktree state: %w", err)
}
if err := os.Remove(pkgFile); err != nil {
log.Warnf(ctx, "Failed to clean up temporary worktree save: %v", err)
}
return nil
}
| {
return fmt.Errorf("patch generation failed: %w", err)
} | conditional_block |
remote_build.go | package main
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
ggit "gg-scm.io/pkg/git"
"github.com/gobwas/ws"
"github.com/gobwas/ws/wsutil"
"github.com/johnewart/archiver"
"github.com/spf13/cobra"
"github.com/ulikunitz/xz"
"github.com/yourbase/commons/http/headers"
"github.com/yourbase/yb"
"github.com/yourbase/yb/internal/config"
"gopkg.in/src-d/go-git.v4"
gitplumbing "gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"zombiezen.com/go/log"
)
type remoteCmd struct {
cfg config.Getter
target string
baseCommit string
branch string
patchData []byte
patchPath string
repoDir string
noAcceleration bool
disableCache bool
disableSkipper bool
dryRun bool
committed bool
publicRepo bool
backupWorktree bool
remotes []*url.URL
}
func newRemoteCmd(cfg config.Getter) *cobra.Command {
p := &remoteCmd{
cfg: cfg,
}
c := &cobra.Command{
Use: "remotebuild [options] [TARGET]",
Short: "Build a target remotely",
Long: `Builds a target using YourBase infrastructure. If no argument is given, ` +
`uses the target named "` + yb.DefaultTarget + `", if there is one.` +
"\n\n" +
`yb remotebuild will search for the .yourbase.yml file in the current ` +
`directory and its parent directories. The target's commands will be run ` +
`in the directory the .yourbase.yml file appears in.`,
Args: cobra.MaximumNArgs(1),
DisableFlagsInUseLine: true,
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
p.target = yb.DefaultTarget
if len(args) > 0 {
p.target = args[0]
}
return p.run(cmd.Context())
},
ValidArgsFunction: func(cc *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) > 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return autocompleteTargetName(toComplete)
},
}
c.Flags().StringVar(&p.baseCommit, "base-commit", "", "Base commit hash as common ancestor")
c.Flags().StringVar(&p.branch, "branch", "", "Branch name")
c.Flags().StringVar(&p.patchPath, "patch-path", "", "Path to save the patch")
c.Flags().BoolVar(&p.noAcceleration, "no-accel", false, "Disable acceleration")
c.Flags().BoolVar(&p.disableCache, "disable-cache", false, "Disable cache acceleration")
c.Flags().BoolVar(&p.disableSkipper, "disable-skipper", false, "Disable skipping steps acceleration")
c.Flags().BoolVarP(&p.dryRun, "dry-run", "n", false, "Pretend to remote build")
c.Flags().BoolVar(&p.committed, "committed", false, "Only remote build committed changes")
c.Flags().BoolVar(&p.backupWorktree, "backup-worktree", false, "Saves uncommitted work into a tarball")
return c
}
func (p *remoteCmd) run(ctx context.Context) error {
targetPackage, _, err := findPackage()
if err != nil {
return err
}
target := targetPackage.Targets[p.target]
if target == nil {
return fmt.Errorf("%s: no such target (found: %s)", p.target, strings.Join(listTargetNames(targetPackage.Targets), ", "))
}
p.repoDir = targetPackage.Path
workRepo, err := git.PlainOpen(p.repoDir)
if err != nil {
return fmt.Errorf("opening repository %s: %w", p.repoDir, err)
}
g, err := ggit.New(ggit.Options{
Dir: targetPackage.Path,
LogHook: func(ctx context.Context, args []string) {
log.Debugf(ctx, "running git %s", strings.Join(args, " "))
},
})
if err != nil {
return err
}
// Show timing feedback and start tracking spent time
startTime := time.Now()
log.Infof(ctx, "Bootstrapping...")
list, err := workRepo.Remotes()
if err != nil {
return fmt.Errorf("getting remotes for %s: %w", p.repoDir, err)
}
var repoUrls []string
for _, r := range list {
c := r.Config()
repoUrls = append(repoUrls, c.URLs...)
}
project, err := p.fetchProject(ctx, repoUrls)
if err != nil {
return err
}
if project.Repository == "" {
projectURL, err := config.UIURL(p.cfg, fmt.Sprintf("%s/%s", project.OrgSlug, project.Label))
if err != nil {
return err
}
return fmt.Errorf("empty repository for project %s. Please check your project settings at %s", project.Label, projectURL)
}
// First things first:
// 1. Define correct branch name
// 2. Define common ancestor commit
// 3. Generate patch file
// 3.1. Comparing every local commits with the one upstream
// 3.2. Comparing every unstaged/untracked changes with the one upstream
// 3.3. Save the patch and compress it
// 4. Submit build!
ancestorRef, commitCount, branch, err := fastFindAncestor(ctx, workRepo)
if err != nil { // Error
return err
}
p.branch = branch
p.baseCommit = ancestorRef.String()
head, err := workRepo.Head()
if err != nil {
return fmt.Errorf("couldn't find HEAD commit: %w", err)
}
headCommit, err := workRepo.CommitObject(head.Hash())
if err != nil {
return fmt.Errorf("couldn't find HEAD commit: %w", err)
}
ancestorCommit, err := workRepo.CommitObject(ancestorRef)
if err != nil {
return fmt.Errorf("couldn't find merge-base commit: %w", err)
}
// Show feedback: end of bootstrap
endTime := time.Now()
bootTime := endTime.Sub(startTime)
log.Infof(ctx, "Bootstrap finished at %s, taking %s", endTime.Format(longTimeFormat), bootTime.Truncate(time.Millisecond))
// Process patches
startTime = time.Now()
pGenerationChan := make(chan bool)
if p.committed && headCommit.Hash.String() != p.baseCommit {
log.Infof(ctx, "Generating patch for %d commits...", commitCount)
patch, err := ancestorCommit.Patch(headCommit)
if err != nil {
return fmt.Errorf("patch generation failed: %w", err)
}
// This is where the patch is actually generated see #278
go func(ch chan<- bool) {
log.Debugf(ctx, "Starting the actual patch generation...")
p.patchData = []byte(patch.String())
log.Debugf(ctx, "Patch generation finished, only committed changes")
ch <- true
}(pGenerationChan)
} else if !p.committed {
// Apply changes that weren't committed yet
worktree, err := workRepo.Worktree() // current worktree
if err != nil {
return fmt.Errorf("couldn't get current worktree: %w", err)
}
log.Infof(ctx, "Generating patch for local changes...")
// Save files before committing.
log.Debugf(ctx, "Start backing up the worktree-save")
saver, err := newWorktreeSave(targetPackage.Path, ggit.Hash(headCommit.Hash), p.backupWorktree)
if err != nil {
return err
}
if err := p.traverseChanges(ctx, g, saver); err != nil {
return err
}
resetDone := false
if err := saver.save(ctx); err != nil {
return err
}
defer func() {
if !resetDone {
log.Debugf(ctx, "Reset failed, restoring...")
if err := saver.restore(ctx); err != nil {
log.Errorf(ctx,
"Unable to restore kept files at %s: %v\n"+
" Please consider unarchiving that package",
saver.saveFilePath(),
err)
}
}
}()
log.Debugf(ctx, "Committing temporary changes")
latest, err := commitTempChanges(worktree, headCommit)
if err != nil {
return fmt.Errorf("commit to temporary cloned repository failed: %w", err)
}
tempCommit, err := workRepo.CommitObject(latest)
if err != nil {
return fmt.Errorf("can't find commit %q: %w", latest, err)
}
log.Debugf(ctx, "Starting the actual patch generation...")
patch, err := ancestorCommit.Patch(tempCommit)
if err != nil {
return fmt.Errorf("patch generation failed: %w", err)
}
// This is where the patch is actually generated see #278
p.patchData = []byte(patch.String())
log.Debugf(ctx, "Actual patch generation finished")
log.Debugf(ctx, "Reseting worktree to previous state...")
// Reset back to HEAD
if err := worktree.Reset(&git.ResetOptions{
Commit: headCommit.Hash,
}); err != nil {
log.Errorf(ctx, "Unable to reset temporary commit: %v\n Please try `git reset --hard HEAD~1`", err)
} else {
resetDone = true
}
log.Debugf(ctx, "Worktree reset done.")
}
// Show feedback: end of patch generation
endTime = time.Now()
patchTime := endTime.Sub(startTime)
log.Infof(ctx, "Patch finished at %s, taking %s", endTime.Format(longTimeFormat), patchTime.Truncate(time.Millisecond))
if len(p.patchPath) > 0 && len(p.patchData) > 0 {
if err := p.savePatch(); err != nil {
log.Warnf(ctx, "Unable to save copy of generated patch: %v", err)
}
}
if p.dryRun {
log.Infof(ctx, "Dry run ended, build not submitted")
return nil
}
if err := p.submitBuild(ctx, project, target.Tags); err != nil {
return fmt.Errorf("unable to submit build: %w", err)
}
return nil
}
func commitTempChanges(w *git.Worktree, c *object.Commit) (latest gitplumbing.Hash, err error) {
if w == nil || c == nil {
err = fmt.Errorf("Needs a worktree and a commit object")
return
}
latest, err = w.Commit(
"YourBase remote build",
&git.CommitOptions{
Author: &object.Signature{
Name: c.Author.Name,
Email: c.Author.Email,
When: time.Now(),
},
},
)
return
}
func (p *remoteCmd) traverseChanges(ctx context.Context, g *ggit.Git, saver *worktreeSave) error {
workTree, err := g.WorkTree(ctx)
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
status, err := g.Status(ctx, ggit.StatusOptions{
DisableRenames: true,
})
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
var addList []ggit.Pathspec
for _, ent := range status {
if ent.Code[1] == ' ' {
// If file is already staged, then skip.
continue
}
var err error
addList, err = findFilesToAdd(ctx, g, workTree, addList, ent.Name)
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
if !ent.Code.IsMissing() { // No need to add deletion to the saver, right?
if err = saver.add(ctx, filepath.FromSlash(string(ent.Name))); err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
}
}
err = g.Add(ctx, addList, ggit.AddOptions{
IncludeIgnored: true,
})
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
return nil
}
// findFilesToAdd finds files to stage in Git, recursing into directories and
// ignoring any non-text files.
func findFilesToAdd(ctx context.Context, g *ggit.Git, workTree string, dst []ggit.Pathspec, file ggit.TopPath) ([]ggit.Pathspec, error) {
realPath := filepath.Join(workTree, filepath.FromSlash(string(file)))
fi, err := os.Stat(realPath)
if os.IsNotExist(err) {
return dst, nil
}
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
if !fi.IsDir() {
binary, err := isBinary(realPath)
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
log.Debugf(ctx, "%s is binary = %t", file, binary)
if binary {
log.Infof(ctx, "Skipping binary file %s", realPath)
return dst, nil
}
return append(dst, file.Pathspec()), nil
}
log.Debugf(ctx, "Added a dir, checking its contents: %s", file)
dir, err := ioutil.ReadDir(realPath)
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
for _, f := range dir {
var err error
dst, err = findFilesToAdd(ctx, g, workTree, dst, ggit.TopPath(path.Join(string(file), f.Name())))
if err != nil {
return dst, err
}
}
return dst, nil
}
// isBinary returns whether a file contains a NUL byte near the beginning of the file.
func isBinary(filePath string) (bool, error) {
r, err := os.Open(filePath)
if err != nil {
return false, err
}
defer r.Close()
buf := make([]byte, 8000)
n, err := io.ReadFull(r, buf)
if err != nil {
// Ignore EOF, since it's fine for the file to be shorter than the buffer size.
// Otherwise, wrap the error. We don't fully stop the control flow here because
// we may still have read enough data to make a determination.
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
err = nil
} else {
err = fmt.Errorf("check for binary: %w", err)
}
}
for _, b := range buf[:n] {
if b == 0 {
return true, err
}
}
return false, err
}
func postToAPI(cfg config.Getter, path string, formData url.Values) (*http.Response, error) {
userToken, err := config.UserToken(cfg)
if err != nil {
return nil, fmt.Errorf("Couldn't get user token: %v", err)
}
apiURL, err := config.APIURL(cfg, path)
if err != nil {
return nil, fmt.Errorf("Couldn't determine API URL: %v", err)
}
req := &http.Request{
Method: http.MethodPost,
URL: apiURL,
Header: http.Header{
http.CanonicalHeaderKey("YB_API_TOKEN"): {userToken},
headers.ContentType: {"application/x-www-form-urlencoded"},
},
GetBody: func() (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader(formData.Encode())), nil
},
}
req.Body, _ = req.GetBody()
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
return res, nil
}
// buildIDFromLogURL returns the build ID in a build log WebSocket URL.
//
// TODO(ch2570): This should come from the API.
func buildIDFromLogURL(u *url.URL) (string, error) {
// Pattern is /builds/ID/progress
const prefix = "/builds/"
const suffix = "/progress"
if !strings.HasPrefix(u.Path, prefix) || !strings.HasSuffix(u.Path, suffix) {
return "", fmt.Errorf("build ID for %v: unrecognized path", u)
}
id := u.Path[len(prefix) : len(u.Path)-len(suffix)]
if strings.ContainsRune(id, '/') {
return "", fmt.Errorf("build ID for %v: unrecognized path", u)
}
return id, nil
}
// An apiProject is a YourBase project as returned by the API.
type apiProject struct {
ID int `json:"id"`
Label string `json:"label"`
Description string `json:"description"`
Repository string `json:"repository"`
OrgSlug string `json:"organization_slug"`
}
|
for _, u := range urls {
rem, err := ggit.ParseURL(u)
if err != nil {
log.Warnf(ctx, "Invalid remote %s (%v), ignoring", u, err)
continue
}
// We only support GitHub by now
// TODO create something more generic
if rem.Host != "github.com" {
log.Warnf(ctx, "Ignoring remote %s (only github.com supported)", u)
continue
}
p.remotes = append(p.remotes, rem)
v.Add("urls[]", u)
}
resp, err := postToAPI(p.cfg, "search/projects", v)
if err != nil {
return nil, fmt.Errorf("Couldn't lookup project on api server: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Debugf(ctx, "Build server returned HTTP Status %d", resp.StatusCode)
switch resp.StatusCode {
case http.StatusNonAuthoritativeInfo:
p.publicRepo = true
case http.StatusUnauthorized:
return nil, fmt.Errorf("Unauthorized, authentication failed.\nPlease `yb login` again.")
case http.StatusPreconditionFailed, http.StatusNotFound:
return nil, fmt.Errorf("Please verify if this private repository has %s installed.", config.GitHubAppURL())
default:
return nil, fmt.Errorf("This is us, not you, please try again in a few minutes.")
}
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
project := new(apiProject)
err = json.Unmarshal(body, project)
if err != nil {
return nil, err
}
return project, nil
}
func (cmd *remoteCmd) savePatch() error {
err := ioutil.WriteFile(cmd.patchPath, cmd.patchData, 0644)
if err != nil {
return fmt.Errorf("Couldn't save a local patch file at: %s, because: %v", cmd.patchPath, err)
}
return nil
}
func (cmd *remoteCmd) submitBuild(ctx context.Context, project *apiProject, tagMap map[string]string) error {
startTime := time.Now()
userToken, err := config.UserToken(cmd.cfg)
if err != nil {
return err
}
patchBuffer := new(bytes.Buffer)
xzWriter, err := xz.NewWriter(patchBuffer)
if err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
if _, err := xzWriter.Write(cmd.patchData); err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
if err := xzWriter.Close(); err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
patchEncoded := base64.StdEncoding.EncodeToString(patchBuffer.Bytes())
formData := url.Values{
"project_id": {strconv.Itoa(project.ID)},
"repository": {project.Repository},
"api_key": {userToken},
"target": {cmd.target},
"patch_data": {patchEncoded},
"commit": {cmd.baseCommit},
"branch": {cmd.branch},
}
tags := make([]string, 0)
for k, v := range tagMap {
tags = append(tags, fmt.Sprintf("%s:%s", k, v))
}
for _, tag := range tags {
formData.Add("tags[]", tag)
}
if cmd.noAcceleration {
formData.Add("no-accel", "True")
}
if cmd.disableCache {
formData.Add("disable-cache", "True")
}
if cmd.disableSkipper {
formData.Add("disable-skipper", "True")
}
resp, err := postToAPI(cmd.cfg, "builds/cli", formData)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("Couldn't read response body: %s", err)
}
switch resp.StatusCode {
case 401:
return fmt.Errorf("Unauthorized, authentication failed.\nPlease `yb login` again.")
case 403:
if cmd.publicRepo {
return fmt.Errorf("This should not happen, please open a support inquery with YB")
} else {
return fmt.Errorf("Tried to build a private repository of a organization of which you're not part of.")
}
case 412:
// TODO Show helpful message with App URL to fix GH App installation issue
return fmt.Errorf("Please verify if this specific repo has %s installed", config.GitHubAppURL())
case 500:
return fmt.Errorf("Internal server error")
}
//Process simple response from the API
body = bytes.ReplaceAll(body, []byte(`"`), nil)
if i := bytes.IndexByte(body, '\n'); i != -1 {
body = body[:i]
}
logURL, err := url.Parse(string(body))
if err != nil {
return fmt.Errorf("server response: parse log URL: %w", err)
}
if logURL.Scheme != "ws" && logURL.Scheme != "wss" {
return fmt.Errorf("server response: parse log URL: unhandled scheme %q", logURL.Scheme)
}
// Construct UI URL to present to the user.
// Fine to proceed in the face of errors: this is displayed as a fallback if
// other things fail.
var uiURL *url.URL
if id, err := buildIDFromLogURL(logURL); err != nil {
log.Warnf(ctx, "Could not construct build link: %v", err)
} else {
uiURL, err = config.UIURL(cmd.cfg, "/"+project.OrgSlug+"/"+project.Label+"/builds/"+id)
if err != nil {
log.Warnf(ctx, "Could not construct build link: %v", err)
}
}
endTime := time.Now()
submitTime := endTime.Sub(startTime)
log.Infof(ctx, "Submission finished at %s, taking %s", endTime.Format(longTimeFormat), submitTime.Truncate(time.Millisecond))
startTime = time.Now()
conn, _, _, err := ws.DefaultDialer.Dial(context.Background(), logURL.String())
if err != nil {
return fmt.Errorf("Cannot connect: %v", err)
}
defer func() {
if err := conn.Close(); err != nil {
log.Debugf(ctx, "Cannot close: %v", err)
}
}()
buildSuccess := false
buildSetupFinished := false
for {
msg, control, err := wsutil.ReadServerData(conn)
if err != nil {
if err != io.EOF {
log.Debugf(ctx, "Unstable connection: %v", err)
} else {
if buildSuccess {
log.Infof(ctx, "Build Completed!")
} else {
log.Errorf(ctx, "Build failed or the connection was interrupted!")
}
if uiURL != nil {
log.Infof(ctx, "Build Log: %v", uiURL)
}
return nil
}
} else {
// TODO This depends on build agent output, try to structure this better
if control.IsData() && strings.Count(string(msg), "Streaming results from build") > 0 {
fmt.Println()
} else if control.IsData() && !buildSetupFinished && len(msg) > 0 {
buildSetupFinished = true
endTime := time.Now()
setupTime := endTime.Sub(startTime)
log.Infof(ctx, "Set up finished at %s, taking %s", endTime.Format(longTimeFormat), setupTime.Truncate(time.Millisecond))
if cmd.publicRepo {
log.Infof(ctx, "Building a public repository: '%s'", project.Repository)
}
if uiURL != nil {
log.Infof(ctx, "Build Log: %v", uiURL)
}
}
if !buildSuccess {
buildSuccess = strings.Count(string(msg), "-- BUILD SUCCEEDED --") > 0
}
os.Stdout.Write(msg)
}
}
}
type worktreeSave struct {
path string
hash ggit.Hash
files []string
}
func newWorktreeSave(path string, hash ggit.Hash, enabled bool) (*worktreeSave, error) {
if !enabled {
return nil, nil
}
if _, err := os.Lstat(path); os.IsNotExist(err) {
return nil, fmt.Errorf("save worktree state: %w", err)
}
return &worktreeSave{
path: path,
hash: hash,
}, nil
}
func (w *worktreeSave) hasFiles() bool {
return w != nil && len(w.files) > 0
}
func (w *worktreeSave) add(ctx context.Context, file string) error {
if w == nil {
return nil
}
fullPath := filepath.Join(w.path, file)
if _, err := os.Lstat(fullPath); os.IsNotExist(err) {
return fmt.Errorf("save worktree state: %w", err)
}
log.Debugf(ctx, "Saving %s to the tarball", file)
w.files = append(w.files, file)
return nil
}
func (w *worktreeSave) saveFilePath() string {
return filepath.Join(w.path, fmt.Sprintf(".yb-worktreesave-%v.tar", w.hash))
}
func (w *worktreeSave) save(ctx context.Context) error {
if !w.hasFiles() {
return nil
}
log.Debugf(ctx, "Saving a tarball with all the worktree changes made")
tar := archiver.Tar{
MkdirAll: true,
}
if err := tar.Archive(w.files, w.saveFilePath()); err != nil {
return fmt.Errorf("save worktree state: %w", err)
}
return nil
}
func (w *worktreeSave) restore(ctx context.Context) error {
if !w.hasFiles() {
return nil
}
log.Debugf(ctx, "Restoring the worktree tarball")
pkgFile := w.saveFilePath()
if _, err := os.Lstat(pkgFile); os.IsNotExist(err) {
return fmt.Errorf("restore worktree state: %w", err)
}
tar := archiver.Tar{OverwriteExisting: true}
if err := tar.Unarchive(pkgFile, w.path); err != nil {
return fmt.Errorf("restore worktree state: %w", err)
}
if err := os.Remove(pkgFile); err != nil {
log.Warnf(ctx, "Failed to clean up temporary worktree save: %v", err)
}
return nil
} | func (p *remoteCmd) fetchProject(ctx context.Context, urls []string) (*apiProject, error) {
v := url.Values{}
fmt.Println()
log.Infof(ctx, "URLs used to search: %s", urls) | random_line_split |
remote_build.go | package main
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
ggit "gg-scm.io/pkg/git"
"github.com/gobwas/ws"
"github.com/gobwas/ws/wsutil"
"github.com/johnewart/archiver"
"github.com/spf13/cobra"
"github.com/ulikunitz/xz"
"github.com/yourbase/commons/http/headers"
"github.com/yourbase/yb"
"github.com/yourbase/yb/internal/config"
"gopkg.in/src-d/go-git.v4"
gitplumbing "gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"zombiezen.com/go/log"
)
type remoteCmd struct {
cfg config.Getter
target string
baseCommit string
branch string
patchData []byte
patchPath string
repoDir string
noAcceleration bool
disableCache bool
disableSkipper bool
dryRun bool
committed bool
publicRepo bool
backupWorktree bool
remotes []*url.URL
}
func newRemoteCmd(cfg config.Getter) *cobra.Command {
p := &remoteCmd{
cfg: cfg,
}
c := &cobra.Command{
Use: "remotebuild [options] [TARGET]",
Short: "Build a target remotely",
Long: `Builds a target using YourBase infrastructure. If no argument is given, ` +
`uses the target named "` + yb.DefaultTarget + `", if there is one.` +
"\n\n" +
`yb remotebuild will search for the .yourbase.yml file in the current ` +
`directory and its parent directories. The target's commands will be run ` +
`in the directory the .yourbase.yml file appears in.`,
Args: cobra.MaximumNArgs(1),
DisableFlagsInUseLine: true,
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
p.target = yb.DefaultTarget
if len(args) > 0 {
p.target = args[0]
}
return p.run(cmd.Context())
},
ValidArgsFunction: func(cc *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) > 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return autocompleteTargetName(toComplete)
},
}
c.Flags().StringVar(&p.baseCommit, "base-commit", "", "Base commit hash as common ancestor")
c.Flags().StringVar(&p.branch, "branch", "", "Branch name")
c.Flags().StringVar(&p.patchPath, "patch-path", "", "Path to save the patch")
c.Flags().BoolVar(&p.noAcceleration, "no-accel", false, "Disable acceleration")
c.Flags().BoolVar(&p.disableCache, "disable-cache", false, "Disable cache acceleration")
c.Flags().BoolVar(&p.disableSkipper, "disable-skipper", false, "Disable skipping steps acceleration")
c.Flags().BoolVarP(&p.dryRun, "dry-run", "n", false, "Pretend to remote build")
c.Flags().BoolVar(&p.committed, "committed", false, "Only remote build committed changes")
c.Flags().BoolVar(&p.backupWorktree, "backup-worktree", false, "Saves uncommitted work into a tarball")
return c
}
func (p *remoteCmd) run(ctx context.Context) error {
targetPackage, _, err := findPackage()
if err != nil {
return err
}
target := targetPackage.Targets[p.target]
if target == nil {
return fmt.Errorf("%s: no such target (found: %s)", p.target, strings.Join(listTargetNames(targetPackage.Targets), ", "))
}
p.repoDir = targetPackage.Path
workRepo, err := git.PlainOpen(p.repoDir)
if err != nil {
return fmt.Errorf("opening repository %s: %w", p.repoDir, err)
}
g, err := ggit.New(ggit.Options{
Dir: targetPackage.Path,
LogHook: func(ctx context.Context, args []string) {
log.Debugf(ctx, "running git %s", strings.Join(args, " "))
},
})
if err != nil {
return err
}
// Show timing feedback and start tracking spent time
startTime := time.Now()
log.Infof(ctx, "Bootstrapping...")
list, err := workRepo.Remotes()
if err != nil {
return fmt.Errorf("getting remotes for %s: %w", p.repoDir, err)
}
var repoUrls []string
for _, r := range list {
c := r.Config()
repoUrls = append(repoUrls, c.URLs...)
}
project, err := p.fetchProject(ctx, repoUrls)
if err != nil {
return err
}
if project.Repository == "" {
projectURL, err := config.UIURL(p.cfg, fmt.Sprintf("%s/%s", project.OrgSlug, project.Label))
if err != nil {
return err
}
return fmt.Errorf("empty repository for project %s. Please check your project settings at %s", project.Label, projectURL)
}
// First things first:
// 1. Define correct branch name
// 2. Define common ancestor commit
// 3. Generate patch file
// 3.1. Comparing every local commits with the one upstream
// 3.2. Comparing every unstaged/untracked changes with the one upstream
// 3.3. Save the patch and compress it
// 4. Submit build!
ancestorRef, commitCount, branch, err := fastFindAncestor(ctx, workRepo)
if err != nil { // Error
return err
}
p.branch = branch
p.baseCommit = ancestorRef.String()
head, err := workRepo.Head()
if err != nil {
return fmt.Errorf("couldn't find HEAD commit: %w", err)
}
headCommit, err := workRepo.CommitObject(head.Hash())
if err != nil {
return fmt.Errorf("couldn't find HEAD commit: %w", err)
}
ancestorCommit, err := workRepo.CommitObject(ancestorRef)
if err != nil {
return fmt.Errorf("couldn't find merge-base commit: %w", err)
}
// Show feedback: end of bootstrap
endTime := time.Now()
bootTime := endTime.Sub(startTime)
log.Infof(ctx, "Bootstrap finished at %s, taking %s", endTime.Format(longTimeFormat), bootTime.Truncate(time.Millisecond))
// Process patches
startTime = time.Now()
pGenerationChan := make(chan bool)
if p.committed && headCommit.Hash.String() != p.baseCommit {
log.Infof(ctx, "Generating patch for %d commits...", commitCount)
patch, err := ancestorCommit.Patch(headCommit)
if err != nil {
return fmt.Errorf("patch generation failed: %w", err)
}
// This is where the patch is actually generated see #278
go func(ch chan<- bool) {
log.Debugf(ctx, "Starting the actual patch generation...")
p.patchData = []byte(patch.String())
log.Debugf(ctx, "Patch generation finished, only committed changes")
ch <- true
}(pGenerationChan)
} else if !p.committed {
// Apply changes that weren't committed yet
worktree, err := workRepo.Worktree() // current worktree
if err != nil {
return fmt.Errorf("couldn't get current worktree: %w", err)
}
log.Infof(ctx, "Generating patch for local changes...")
// Save files before committing.
log.Debugf(ctx, "Start backing up the worktree-save")
saver, err := newWorktreeSave(targetPackage.Path, ggit.Hash(headCommit.Hash), p.backupWorktree)
if err != nil {
return err
}
if err := p.traverseChanges(ctx, g, saver); err != nil {
return err
}
resetDone := false
if err := saver.save(ctx); err != nil {
return err
}
defer func() {
if !resetDone {
log.Debugf(ctx, "Reset failed, restoring...")
if err := saver.restore(ctx); err != nil {
log.Errorf(ctx,
"Unable to restore kept files at %s: %v\n"+
" Please consider unarchiving that package",
saver.saveFilePath(),
err)
}
}
}()
log.Debugf(ctx, "Committing temporary changes")
latest, err := commitTempChanges(worktree, headCommit)
if err != nil {
return fmt.Errorf("commit to temporary cloned repository failed: %w", err)
}
tempCommit, err := workRepo.CommitObject(latest)
if err != nil {
return fmt.Errorf("can't find commit %q: %w", latest, err)
}
log.Debugf(ctx, "Starting the actual patch generation...")
patch, err := ancestorCommit.Patch(tempCommit)
if err != nil {
return fmt.Errorf("patch generation failed: %w", err)
}
// This is where the patch is actually generated see #278
p.patchData = []byte(patch.String())
log.Debugf(ctx, "Actual patch generation finished")
log.Debugf(ctx, "Reseting worktree to previous state...")
// Reset back to HEAD
if err := worktree.Reset(&git.ResetOptions{
Commit: headCommit.Hash,
}); err != nil {
log.Errorf(ctx, "Unable to reset temporary commit: %v\n Please try `git reset --hard HEAD~1`", err)
} else {
resetDone = true
}
log.Debugf(ctx, "Worktree reset done.")
}
// Show feedback: end of patch generation
endTime = time.Now()
patchTime := endTime.Sub(startTime)
log.Infof(ctx, "Patch finished at %s, taking %s", endTime.Format(longTimeFormat), patchTime.Truncate(time.Millisecond))
if len(p.patchPath) > 0 && len(p.patchData) > 0 {
if err := p.savePatch(); err != nil {
log.Warnf(ctx, "Unable to save copy of generated patch: %v", err)
}
}
if p.dryRun {
log.Infof(ctx, "Dry run ended, build not submitted")
return nil
}
if err := p.submitBuild(ctx, project, target.Tags); err != nil {
return fmt.Errorf("unable to submit build: %w", err)
}
return nil
}
func commitTempChanges(w *git.Worktree, c *object.Commit) (latest gitplumbing.Hash, err error) {
if w == nil || c == nil {
err = fmt.Errorf("Needs a worktree and a commit object")
return
}
latest, err = w.Commit(
"YourBase remote build",
&git.CommitOptions{
Author: &object.Signature{
Name: c.Author.Name,
Email: c.Author.Email,
When: time.Now(),
},
},
)
return
}
func (p *remoteCmd) traverseChanges(ctx context.Context, g *ggit.Git, saver *worktreeSave) error |
// findFilesToAdd finds files to stage in Git, recursing into directories and
// ignoring any non-text files.
func findFilesToAdd(ctx context.Context, g *ggit.Git, workTree string, dst []ggit.Pathspec, file ggit.TopPath) ([]ggit.Pathspec, error) {
realPath := filepath.Join(workTree, filepath.FromSlash(string(file)))
fi, err := os.Stat(realPath)
if os.IsNotExist(err) {
return dst, nil
}
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
if !fi.IsDir() {
binary, err := isBinary(realPath)
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
log.Debugf(ctx, "%s is binary = %t", file, binary)
if binary {
log.Infof(ctx, "Skipping binary file %s", realPath)
return dst, nil
}
return append(dst, file.Pathspec()), nil
}
log.Debugf(ctx, "Added a dir, checking its contents: %s", file)
dir, err := ioutil.ReadDir(realPath)
if err != nil {
return dst, fmt.Errorf("find files to git add: %w", err)
}
for _, f := range dir {
var err error
dst, err = findFilesToAdd(ctx, g, workTree, dst, ggit.TopPath(path.Join(string(file), f.Name())))
if err != nil {
return dst, err
}
}
return dst, nil
}
// isBinary returns whether a file contains a NUL byte near the beginning of the file.
func isBinary(filePath string) (bool, error) {
r, err := os.Open(filePath)
if err != nil {
return false, err
}
defer r.Close()
buf := make([]byte, 8000)
n, err := io.ReadFull(r, buf)
if err != nil {
// Ignore EOF, since it's fine for the file to be shorter than the buffer size.
// Otherwise, wrap the error. We don't fully stop the control flow here because
// we may still have read enough data to make a determination.
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
err = nil
} else {
err = fmt.Errorf("check for binary: %w", err)
}
}
for _, b := range buf[:n] {
if b == 0 {
return true, err
}
}
return false, err
}
func postToAPI(cfg config.Getter, path string, formData url.Values) (*http.Response, error) {
userToken, err := config.UserToken(cfg)
if err != nil {
return nil, fmt.Errorf("Couldn't get user token: %v", err)
}
apiURL, err := config.APIURL(cfg, path)
if err != nil {
return nil, fmt.Errorf("Couldn't determine API URL: %v", err)
}
req := &http.Request{
Method: http.MethodPost,
URL: apiURL,
Header: http.Header{
http.CanonicalHeaderKey("YB_API_TOKEN"): {userToken},
headers.ContentType: {"application/x-www-form-urlencoded"},
},
GetBody: func() (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader(formData.Encode())), nil
},
}
req.Body, _ = req.GetBody()
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
return res, nil
}
// buildIDFromLogURL returns the build ID in a build log WebSocket URL.
//
// TODO(ch2570): This should come from the API.
func buildIDFromLogURL(u *url.URL) (string, error) {
// Pattern is /builds/ID/progress
const prefix = "/builds/"
const suffix = "/progress"
if !strings.HasPrefix(u.Path, prefix) || !strings.HasSuffix(u.Path, suffix) {
return "", fmt.Errorf("build ID for %v: unrecognized path", u)
}
id := u.Path[len(prefix) : len(u.Path)-len(suffix)]
if strings.ContainsRune(id, '/') {
return "", fmt.Errorf("build ID for %v: unrecognized path", u)
}
return id, nil
}
// An apiProject is a YourBase project as returned by the API.
type apiProject struct {
ID int `json:"id"`
Label string `json:"label"`
Description string `json:"description"`
Repository string `json:"repository"`
OrgSlug string `json:"organization_slug"`
}
func (p *remoteCmd) fetchProject(ctx context.Context, urls []string) (*apiProject, error) {
v := url.Values{}
fmt.Println()
log.Infof(ctx, "URLs used to search: %s", urls)
for _, u := range urls {
rem, err := ggit.ParseURL(u)
if err != nil {
log.Warnf(ctx, "Invalid remote %s (%v), ignoring", u, err)
continue
}
// We only support GitHub by now
// TODO create something more generic
if rem.Host != "github.com" {
log.Warnf(ctx, "Ignoring remote %s (only github.com supported)", u)
continue
}
p.remotes = append(p.remotes, rem)
v.Add("urls[]", u)
}
resp, err := postToAPI(p.cfg, "search/projects", v)
if err != nil {
return nil, fmt.Errorf("Couldn't lookup project on api server: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Debugf(ctx, "Build server returned HTTP Status %d", resp.StatusCode)
switch resp.StatusCode {
case http.StatusNonAuthoritativeInfo:
p.publicRepo = true
case http.StatusUnauthorized:
return nil, fmt.Errorf("Unauthorized, authentication failed.\nPlease `yb login` again.")
case http.StatusPreconditionFailed, http.StatusNotFound:
return nil, fmt.Errorf("Please verify if this private repository has %s installed.", config.GitHubAppURL())
default:
return nil, fmt.Errorf("This is us, not you, please try again in a few minutes.")
}
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
project := new(apiProject)
err = json.Unmarshal(body, project)
if err != nil {
return nil, err
}
return project, nil
}
func (cmd *remoteCmd) savePatch() error {
err := ioutil.WriteFile(cmd.patchPath, cmd.patchData, 0644)
if err != nil {
return fmt.Errorf("Couldn't save a local patch file at: %s, because: %v", cmd.patchPath, err)
}
return nil
}
func (cmd *remoteCmd) submitBuild(ctx context.Context, project *apiProject, tagMap map[string]string) error {
startTime := time.Now()
userToken, err := config.UserToken(cmd.cfg)
if err != nil {
return err
}
patchBuffer := new(bytes.Buffer)
xzWriter, err := xz.NewWriter(patchBuffer)
if err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
if _, err := xzWriter.Write(cmd.patchData); err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
if err := xzWriter.Close(); err != nil {
return fmt.Errorf("submit build: compress patch: %w", err)
}
patchEncoded := base64.StdEncoding.EncodeToString(patchBuffer.Bytes())
formData := url.Values{
"project_id": {strconv.Itoa(project.ID)},
"repository": {project.Repository},
"api_key": {userToken},
"target": {cmd.target},
"patch_data": {patchEncoded},
"commit": {cmd.baseCommit},
"branch": {cmd.branch},
}
tags := make([]string, 0)
for k, v := range tagMap {
tags = append(tags, fmt.Sprintf("%s:%s", k, v))
}
for _, tag := range tags {
formData.Add("tags[]", tag)
}
if cmd.noAcceleration {
formData.Add("no-accel", "True")
}
if cmd.disableCache {
formData.Add("disable-cache", "True")
}
if cmd.disableSkipper {
formData.Add("disable-skipper", "True")
}
resp, err := postToAPI(cmd.cfg, "builds/cli", formData)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("Couldn't read response body: %s", err)
}
switch resp.StatusCode {
case 401:
return fmt.Errorf("Unauthorized, authentication failed.\nPlease `yb login` again.")
case 403:
if cmd.publicRepo {
return fmt.Errorf("This should not happen, please open a support inquery with YB")
} else {
return fmt.Errorf("Tried to build a private repository of a organization of which you're not part of.")
}
case 412:
// TODO Show helpful message with App URL to fix GH App installation issue
return fmt.Errorf("Please verify if this specific repo has %s installed", config.GitHubAppURL())
case 500:
return fmt.Errorf("Internal server error")
}
//Process simple response from the API
body = bytes.ReplaceAll(body, []byte(`"`), nil)
if i := bytes.IndexByte(body, '\n'); i != -1 {
body = body[:i]
}
logURL, err := url.Parse(string(body))
if err != nil {
return fmt.Errorf("server response: parse log URL: %w", err)
}
if logURL.Scheme != "ws" && logURL.Scheme != "wss" {
return fmt.Errorf("server response: parse log URL: unhandled scheme %q", logURL.Scheme)
}
// Construct UI URL to present to the user.
// Fine to proceed in the face of errors: this is displayed as a fallback if
// other things fail.
var uiURL *url.URL
if id, err := buildIDFromLogURL(logURL); err != nil {
log.Warnf(ctx, "Could not construct build link: %v", err)
} else {
uiURL, err = config.UIURL(cmd.cfg, "/"+project.OrgSlug+"/"+project.Label+"/builds/"+id)
if err != nil {
log.Warnf(ctx, "Could not construct build link: %v", err)
}
}
endTime := time.Now()
submitTime := endTime.Sub(startTime)
log.Infof(ctx, "Submission finished at %s, taking %s", endTime.Format(longTimeFormat), submitTime.Truncate(time.Millisecond))
startTime = time.Now()
conn, _, _, err := ws.DefaultDialer.Dial(context.Background(), logURL.String())
if err != nil {
return fmt.Errorf("Cannot connect: %v", err)
}
defer func() {
if err := conn.Close(); err != nil {
log.Debugf(ctx, "Cannot close: %v", err)
}
}()
buildSuccess := false
buildSetupFinished := false
for {
msg, control, err := wsutil.ReadServerData(conn)
if err != nil {
if err != io.EOF {
log.Debugf(ctx, "Unstable connection: %v", err)
} else {
if buildSuccess {
log.Infof(ctx, "Build Completed!")
} else {
log.Errorf(ctx, "Build failed or the connection was interrupted!")
}
if uiURL != nil {
log.Infof(ctx, "Build Log: %v", uiURL)
}
return nil
}
} else {
// TODO This depends on build agent output, try to structure this better
if control.IsData() && strings.Count(string(msg), "Streaming results from build") > 0 {
fmt.Println()
} else if control.IsData() && !buildSetupFinished && len(msg) > 0 {
buildSetupFinished = true
endTime := time.Now()
setupTime := endTime.Sub(startTime)
log.Infof(ctx, "Set up finished at %s, taking %s", endTime.Format(longTimeFormat), setupTime.Truncate(time.Millisecond))
if cmd.publicRepo {
log.Infof(ctx, "Building a public repository: '%s'", project.Repository)
}
if uiURL != nil {
log.Infof(ctx, "Build Log: %v", uiURL)
}
}
if !buildSuccess {
buildSuccess = strings.Count(string(msg), "-- BUILD SUCCEEDED --") > 0
}
os.Stdout.Write(msg)
}
}
}
type worktreeSave struct {
path string
hash ggit.Hash
files []string
}
func newWorktreeSave(path string, hash ggit.Hash, enabled bool) (*worktreeSave, error) {
if !enabled {
return nil, nil
}
if _, err := os.Lstat(path); os.IsNotExist(err) {
return nil, fmt.Errorf("save worktree state: %w", err)
}
return &worktreeSave{
path: path,
hash: hash,
}, nil
}
func (w *worktreeSave) hasFiles() bool {
return w != nil && len(w.files) > 0
}
func (w *worktreeSave) add(ctx context.Context, file string) error {
if w == nil {
return nil
}
fullPath := filepath.Join(w.path, file)
if _, err := os.Lstat(fullPath); os.IsNotExist(err) {
return fmt.Errorf("save worktree state: %w", err)
}
log.Debugf(ctx, "Saving %s to the tarball", file)
w.files = append(w.files, file)
return nil
}
func (w *worktreeSave) saveFilePath() string {
return filepath.Join(w.path, fmt.Sprintf(".yb-worktreesave-%v.tar", w.hash))
}
func (w *worktreeSave) save(ctx context.Context) error {
if !w.hasFiles() {
return nil
}
log.Debugf(ctx, "Saving a tarball with all the worktree changes made")
tar := archiver.Tar{
MkdirAll: true,
}
if err := tar.Archive(w.files, w.saveFilePath()); err != nil {
return fmt.Errorf("save worktree state: %w", err)
}
return nil
}
func (w *worktreeSave) restore(ctx context.Context) error {
if !w.hasFiles() {
return nil
}
log.Debugf(ctx, "Restoring the worktree tarball")
pkgFile := w.saveFilePath()
if _, err := os.Lstat(pkgFile); os.IsNotExist(err) {
return fmt.Errorf("restore worktree state: %w", err)
}
tar := archiver.Tar{OverwriteExisting: true}
if err := tar.Unarchive(pkgFile, w.path); err != nil {
return fmt.Errorf("restore worktree state: %w", err)
}
if err := os.Remove(pkgFile); err != nil {
log.Warnf(ctx, "Failed to clean up temporary worktree save: %v", err)
}
return nil
}
| {
workTree, err := g.WorkTree(ctx)
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
status, err := g.Status(ctx, ggit.StatusOptions{
DisableRenames: true,
})
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
var addList []ggit.Pathspec
for _, ent := range status {
if ent.Code[1] == ' ' {
// If file is already staged, then skip.
continue
}
var err error
addList, err = findFilesToAdd(ctx, g, workTree, addList, ent.Name)
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
if !ent.Code.IsMissing() { // No need to add deletion to the saver, right?
if err = saver.add(ctx, filepath.FromSlash(string(ent.Name))); err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
}
}
err = g.Add(ctx, addList, ggit.AddOptions{
IncludeIgnored: true,
})
if err != nil {
return fmt.Errorf("traverse changes: %w", err)
}
return nil
} | identifier_body |
tf_worker.py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
import os.path
import re
import sys
import tarfile
#import argparse
from collections import namedtuple
import cStringIO as StringIO
import logging
import cPickle as pickle
import os
import tempfile
from contextlib import contextmanager
import time
# pylint: disable=unused-import,g-bad-import-order
import tensorflow.python.platform
from six.moves import urllib
import numpy as np
import tensorflow as tf
import redis
import requests
from wand.image import Image
# pylint: enable=unused-import,g-bad-import-order
from ast import literal_eval as make_tuple
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
# this is the same as namedtuple
tf.app.flags.DEFINE_string(
'model_dir', '/tmp/imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string('image_file', '',
"""Absolute path to image file.""")
tf.app.flags.DEFINE_integer('num_top_predictions', 5,
"""Display this many predictions.""")
tf.app.flags.DEFINE_string('redis_server', '',
"""Redis server address""")
tf.app.flags.DEFINE_integer('redis_port', 6379,
"""Redis server port""")
tf.app.flags.DEFINE_string('redis_queue', 'classify',
"""Redis queue to read images from""")
Task = namedtuple('Task', 'queue value')
Specs = namedtuple('Specs', 'group path ad_id')
Result = namedtuple('Result', 'OK predictions computation_time ad_id path')
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)s %(message)s')
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.iteritems():
if val not in uid_to_human:
|
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph():
""""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'r') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
@contextmanager
def convert_to_jpg(data):
tmp = tempfile.NamedTemporaryFile(delete=False)
with Image(file=StringIO.StringIO(data)) as img:
if img.format != 'JPEG':
logging.info('Converting {} to JPEG.'.format(img.format))
img.format = 'JPEG'
img.save(tmp)
tmp.close()
yield tmp.name
os.remove(tmp.name)
def classify_images():
create_graph()
node_lookup = NodeLookup()
# 4 instances running in parallel on g2.2xlarge seems to be the magic number.
# If running more instances, memcpy errors will be thrown after some time.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1./4)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
r_server = redis.StrictRedis(FLAGS.redis_server, FLAGS.redis_port)
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
while True:
task = Task(*r_server.brpop(FLAGS.redis_queue))
specs = Specs(**pickle.loads(task.value))
logging.info(specs)
try:
result_key = 'archive:{}:{}'.format(specs.group, specs.path)
kaidee_result_key = ''
full_url = specs.path.split('//')
url_path = len(full_url)>1 and full_url[1] or full_url[0]
kaidee_result_key = url_path.split('/', 1)[1]
response = requests.get(specs.path, timeout=10)
with convert_to_jpg(response.content) as jpg:
image_data = gfile.FastGFile(jpg).read()
starttime = time.time()
predictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})
endtime = time.time()
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
result = Result(True,
[ (node_lookup.id_to_string(node_id), predictions[node_id]) for node_id in top_k ],
endtime - starttime,
specs.ad_id, specs.path)
r_server.hmset(result_key, result._asdict())
r_server.hmset(kaidee_result_key, result._asdict())
r_server.zadd('archive:{}:category:{}'.format(specs.group, result.predictions[0][0]),
result.predictions[0][1], specs.path)
# The publishing was only added since AWS ElastiCache does not support subscribing to keyspace notifications.
r_server.publish('latest', pickle.dumps({'path': specs.path, 'group': specs.group,
'category': result.predictions[0][0], 'value': float(result.predictions[0][1])}))
# Kaidee channel
predictions_dict = dict((x, y) for x, y in result.predictions)
r_server.publish('classify', pickle.dumps({'path': specs.path, 'group': specs.group,
'predictions': predictions_dict, 'ad_id': specs.ad_id}))
logging.info(result)
except Exception as e:
logging.error('Something went wrong when classifying the image: {}'.format(e))
r_server.hmset(result_key, {'OK': False})
def maybe_download_and_extract():
"""Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
reporthook=_progress)
print()
statinfo = os.stat(filepath)
print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def main(_):
maybe_download_and_extract()
classify_images()
if __name__ == '__main__':
tf.app.run()
| tf.logging.fatal('Failed to locate: %s', val) | conditional_block |
tf_worker.py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
import os.path
import re
import sys
import tarfile
#import argparse
from collections import namedtuple
import cStringIO as StringIO
import logging
import cPickle as pickle
import os
import tempfile
from contextlib import contextmanager
import time
# pylint: disable=unused-import,g-bad-import-order
import tensorflow.python.platform
from six.moves import urllib
import numpy as np
import tensorflow as tf
import redis
import requests
from wand.image import Image
# pylint: enable=unused-import,g-bad-import-order
from ast import literal_eval as make_tuple
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
# this is the same as namedtuple
tf.app.flags.DEFINE_string(
'model_dir', '/tmp/imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string('image_file', '',
"""Absolute path to image file.""")
tf.app.flags.DEFINE_integer('num_top_predictions', 5,
"""Display this many predictions.""")
tf.app.flags.DEFINE_string('redis_server', '',
"""Redis server address""")
tf.app.flags.DEFINE_integer('redis_port', 6379,
"""Redis server port""")
tf.app.flags.DEFINE_string('redis_queue', 'classify',
"""Redis queue to read images from""")
Task = namedtuple('Task', 'queue value')
Specs = namedtuple('Specs', 'group path ad_id')
Result = namedtuple('Result', 'OK predictions computation_time ad_id path')
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)s %(message)s')
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.iteritems():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph():
""""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'r') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
@contextmanager
def convert_to_jpg(data):
tmp = tempfile.NamedTemporaryFile(delete=False)
with Image(file=StringIO.StringIO(data)) as img:
if img.format != 'JPEG':
logging.info('Converting {} to JPEG.'.format(img.format))
img.format = 'JPEG'
img.save(tmp)
tmp.close()
yield tmp.name
os.remove(tmp.name)
def classify_images():
create_graph()
node_lookup = NodeLookup()
# 4 instances running in parallel on g2.2xlarge seems to be the magic number.
# If running more instances, memcpy errors will be thrown after some time.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1./4)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
r_server = redis.StrictRedis(FLAGS.redis_server, FLAGS.redis_port)
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
while True:
task = Task(*r_server.brpop(FLAGS.redis_queue))
specs = Specs(**pickle.loads(task.value))
logging.info(specs)
try:
result_key = 'archive:{}:{}'.format(specs.group, specs.path)
kaidee_result_key = ''
full_url = specs.path.split('//')
url_path = len(full_url)>1 and full_url[1] or full_url[0]
kaidee_result_key = url_path.split('/', 1)[1]
response = requests.get(specs.path, timeout=10)
with convert_to_jpg(response.content) as jpg:
image_data = gfile.FastGFile(jpg).read()
starttime = time.time()
predictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})
endtime = time.time()
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
result = Result(True,
[ (node_lookup.id_to_string(node_id), predictions[node_id]) for node_id in top_k ],
endtime - starttime,
specs.ad_id, specs.path)
r_server.hmset(result_key, result._asdict())
r_server.hmset(kaidee_result_key, result._asdict())
r_server.zadd('archive:{}:category:{}'.format(specs.group, result.predictions[0][0]),
result.predictions[0][1], specs.path)
# The publishing was only added since AWS ElastiCache does not support subscribing to keyspace notifications.
r_server.publish('latest', pickle.dumps({'path': specs.path, 'group': specs.group,
'category': result.predictions[0][0], 'value': float(result.predictions[0][1])}))
# Kaidee channel
predictions_dict = dict((x, y) for x, y in result.predictions)
r_server.publish('classify', pickle.dumps({'path': specs.path, 'group': specs.group,
'predictions': predictions_dict, 'ad_id': specs.ad_id}))
logging.info(result)
except Exception as e:
logging.error('Something went wrong when classifying the image: {}'.format(e))
r_server.hmset(result_key, {'OK': False})
def maybe_download_and_extract():
|
def main(_):
maybe_download_and_extract()
classify_images()
if __name__ == '__main__':
tf.app.run()
| """Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
reporthook=_progress)
print()
statinfo = os.stat(filepath)
print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory) | identifier_body |
tf_worker.py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
import os.path
import re
import sys
import tarfile
#import argparse
from collections import namedtuple
import cStringIO as StringIO
import logging
import cPickle as pickle
import os
import tempfile
from contextlib import contextmanager
import time
# pylint: disable=unused-import,g-bad-import-order
import tensorflow.python.platform
from six.moves import urllib
import numpy as np
import tensorflow as tf
import redis
import requests
from wand.image import Image
# pylint: enable=unused-import,g-bad-import-order
from ast import literal_eval as make_tuple
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
# this is the same as namedtuple
tf.app.flags.DEFINE_string(
'model_dir', '/tmp/imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string('image_file', '',
"""Absolute path to image file.""")
tf.app.flags.DEFINE_integer('num_top_predictions', 5,
"""Display this many predictions.""")
tf.app.flags.DEFINE_string('redis_server', '',
"""Redis server address""")
tf.app.flags.DEFINE_integer('redis_port', 6379,
"""Redis server port""")
tf.app.flags.DEFINE_string('redis_queue', 'classify',
"""Redis queue to read images from""")
Task = namedtuple('Task', 'queue value')
Specs = namedtuple('Specs', 'group path ad_id')
Result = namedtuple('Result', 'OK predictions computation_time ad_id path')
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)s %(message)s')
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.iteritems():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph():
""""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'r') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
@contextmanager
def convert_to_jpg(data):
tmp = tempfile.NamedTemporaryFile(delete=False)
with Image(file=StringIO.StringIO(data)) as img:
if img.format != 'JPEG':
logging.info('Converting {} to JPEG.'.format(img.format))
img.format = 'JPEG'
img.save(tmp)
tmp.close()
yield tmp.name
os.remove(tmp.name)
def classify_images():
create_graph()
node_lookup = NodeLookup()
# 4 instances running in parallel on g2.2xlarge seems to be the magic number.
# If running more instances, memcpy errors will be thrown after some time.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1./4)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
r_server = redis.StrictRedis(FLAGS.redis_server, FLAGS.redis_port)
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
while True:
task = Task(*r_server.brpop(FLAGS.redis_queue))
specs = Specs(**pickle.loads(task.value))
logging.info(specs)
try:
result_key = 'archive:{}:{}'.format(specs.group, specs.path)
kaidee_result_key = ''
full_url = specs.path.split('//')
url_path = len(full_url)>1 and full_url[1] or full_url[0]
kaidee_result_key = url_path.split('/', 1)[1]
response = requests.get(specs.path, timeout=10)
with convert_to_jpg(response.content) as jpg:
image_data = gfile.FastGFile(jpg).read()
starttime = time.time()
predictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})
endtime = time.time()
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
result = Result(True,
[ (node_lookup.id_to_string(node_id), predictions[node_id]) for node_id in top_k ],
endtime - starttime,
specs.ad_id, specs.path)
r_server.hmset(result_key, result._asdict())
r_server.hmset(kaidee_result_key, result._asdict())
r_server.zadd('archive:{}:category:{}'.format(specs.group, result.predictions[0][0]),
result.predictions[0][1], specs.path)
# The publishing was only added since AWS ElastiCache does not support subscribing to keyspace notifications.
r_server.publish('latest', pickle.dumps({'path': specs.path, 'group': specs.group,
'category': result.predictions[0][0], 'value': float(result.predictions[0][1])}))
# Kaidee channel
predictions_dict = dict((x, y) for x, y in result.predictions)
r_server.publish('classify', pickle.dumps({'path': specs.path, 'group': specs.group,
'predictions': predictions_dict, 'ad_id': specs.ad_id}))
logging.info(result)
except Exception as e:
logging.error('Something went wrong when classifying the image: {}'.format(e))
r_server.hmset(result_key, {'OK': False})
def maybe_download_and_extract():
"""Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
reporthook=_progress)
print()
statinfo = os.stat(filepath)
print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def main(_):
maybe_download_and_extract()
classify_images()
if __name__ == '__main__':
tf.app.run() | # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, | random_line_split |
tf_worker.py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
import os.path
import re
import sys
import tarfile
#import argparse
from collections import namedtuple
import cStringIO as StringIO
import logging
import cPickle as pickle
import os
import tempfile
from contextlib import contextmanager
import time
# pylint: disable=unused-import,g-bad-import-order
import tensorflow.python.platform
from six.moves import urllib
import numpy as np
import tensorflow as tf
import redis
import requests
from wand.image import Image
# pylint: enable=unused-import,g-bad-import-order
from ast import literal_eval as make_tuple
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
# this is the same as namedtuple
tf.app.flags.DEFINE_string(
'model_dir', '/tmp/imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string('image_file', '',
"""Absolute path to image file.""")
tf.app.flags.DEFINE_integer('num_top_predictions', 5,
"""Display this many predictions.""")
tf.app.flags.DEFINE_string('redis_server', '',
"""Redis server address""")
tf.app.flags.DEFINE_integer('redis_port', 6379,
"""Redis server port""")
tf.app.flags.DEFINE_string('redis_queue', 'classify',
"""Redis queue to read images from""")
Task = namedtuple('Task', 'queue value')
Specs = namedtuple('Specs', 'group path ad_id')
Result = namedtuple('Result', 'OK predictions computation_time ad_id path')
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)s %(message)s')
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.iteritems():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph():
""""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'r') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
@contextmanager
def convert_to_jpg(data):
tmp = tempfile.NamedTemporaryFile(delete=False)
with Image(file=StringIO.StringIO(data)) as img:
if img.format != 'JPEG':
logging.info('Converting {} to JPEG.'.format(img.format))
img.format = 'JPEG'
img.save(tmp)
tmp.close()
yield tmp.name
os.remove(tmp.name)
def classify_images():
create_graph()
node_lookup = NodeLookup()
# 4 instances running in parallel on g2.2xlarge seems to be the magic number.
# If running more instances, memcpy errors will be thrown after some time.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1./4)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
r_server = redis.StrictRedis(FLAGS.redis_server, FLAGS.redis_port)
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
while True:
task = Task(*r_server.brpop(FLAGS.redis_queue))
specs = Specs(**pickle.loads(task.value))
logging.info(specs)
try:
result_key = 'archive:{}:{}'.format(specs.group, specs.path)
kaidee_result_key = ''
full_url = specs.path.split('//')
url_path = len(full_url)>1 and full_url[1] or full_url[0]
kaidee_result_key = url_path.split('/', 1)[1]
response = requests.get(specs.path, timeout=10)
with convert_to_jpg(response.content) as jpg:
image_data = gfile.FastGFile(jpg).read()
starttime = time.time()
predictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})
endtime = time.time()
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
result = Result(True,
[ (node_lookup.id_to_string(node_id), predictions[node_id]) for node_id in top_k ],
endtime - starttime,
specs.ad_id, specs.path)
r_server.hmset(result_key, result._asdict())
r_server.hmset(kaidee_result_key, result._asdict())
r_server.zadd('archive:{}:category:{}'.format(specs.group, result.predictions[0][0]),
result.predictions[0][1], specs.path)
# The publishing was only added since AWS ElastiCache does not support subscribing to keyspace notifications.
r_server.publish('latest', pickle.dumps({'path': specs.path, 'group': specs.group,
'category': result.predictions[0][0], 'value': float(result.predictions[0][1])}))
# Kaidee channel
predictions_dict = dict((x, y) for x, y in result.predictions)
r_server.publish('classify', pickle.dumps({'path': specs.path, 'group': specs.group,
'predictions': predictions_dict, 'ad_id': specs.ad_id}))
logging.info(result)
except Exception as e:
logging.error('Something went wrong when classifying the image: {}'.format(e))
r_server.hmset(result_key, {'OK': False})
def | ():
"""Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
reporthook=_progress)
print()
statinfo = os.stat(filepath)
print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def main(_):
maybe_download_and_extract()
classify_images()
if __name__ == '__main__':
tf.app.run()
| maybe_download_and_extract | identifier_name |
blt_engine.py | from bearlibterminal import terminal
from mapping.game_map import GameMap
from gameplay.dialog_tree import DialogTree
from message_log import MessageLog
from game_object import GameObject
from render_functions import draw_all, draw_map
from input_handler import handle_keys
from utils import *
from gameplay.npc import NPC
from gameplay.inventory import InventoryItem
import re
game_title = "StrangeHack"
screen_width = 120
screen_height = 40
map_width=65
map_height = 40
dialog_width = 50
dialog_height = 35
dialog_pos_x = 68
dialog_pos_y = 1
##Todo: This is starting to turn into spaghetti code. Probably need to refactor
##soon.
def updateui():
##Switch to layer 4 to update UI
terminal.layer(3)
terminal.clear_area(dialog_pos_x,dialog_pos_y,dialog_width,dialog_height)
terminal.printf(dialog_pos_x,dialog_pos_y,ml.get_scroll_back())
terminal.refresh()
def selection_to_int(selection):
##This seems like an incredibly hacky way to do this but I do not see this
##functionality built into the bearlibterm for some reason.. :(
##TODO, ENUMERATE through "terminal" and get all properties whose key starts
##"TK_" Then lop off the end and return it as a char.
if selection == terminal.TK_1:
return 1
elif selection == terminal.TK_2:
return 2
elif selection == terminal.TK_3:
return 3
elif selection == terminal.TK_4:
return 4
elif selection == terminal.TK_5:
return 5
elif selection == terminal.TK_6:
return 6
elif selection == terminal.TK_7:
return 7
elif selection == terminal.TK_8:
return 8
elif selection == terminal.TK_9:
return 9
else:
return None
def dialog_condition_check(condition_code_string, char1, char2):
code_string_stack = condition_code_string.split(" ")
##OK, so here's the deal.
##conditions will be specified in dialog tree as a stringself.
##the string will have 3 positions to start, separated by spaces,
##except the third position in some cases but we'll get to that.
##Returns a dang bool holmes!
##First position is the trigger variable, which has to be a property
##on the player object or a keyword like item
try:
trigger_var_str = code_string_stack.pop(0)
trigger_var_str = trigger_var_str.lower()
#second pos is the comparison operator
operator_str = code_string_stack.pop(0)
##the third pos is whatever bits are remaining (to be split again later maybe)
##join it
condition_str = str.join(" ", code_string_stack)
##print(trigger_var_str)
##print(operator_str)
##print(condition_str)
except:
print("Couldn't parse condition string.")
return False
##Special case to check inventory items...
if trigger_var_str == 'item':
inventory = getattr(player, 'inventory', None)
quote_check_regex_arr = re.findall(r"\"(.*?)\"", condition_str, re.DOTALL)
if len(quote_check_regex_arr) > 0:
item_name = quote_check_regex_arr.pop()
##there might be an integer left at the end to specify quantity...
try:
quantity_str = condition_str.replace('"'+item_name+'"','').split(' ').pop()
quantity_int = int(quantity_str)
except:
quantity_int = 1
##print(quantity_int)
##print(item_name)
if not inventory is None:
return check_inventory_for_item(inventory, item_name, quantity_int)
else:
return False
# for item in inventory:
# print(item)
# if condition_str in inventory:
# return True
# else:
# return False
##Need to add extra conditions to check item quantity.
try:
trigger = getattr(player, trigger_var_str)
except:
print("Couldn't get player attribute " + trigger_var_str)
return False
return get_truth(trigger, operator_str, condition_str)
def | (npc, player, dialog_name):
dialog_tree = DialogTree(npc.dialog_dict)
text = dialog_tree.get_say(dialog_name)
conditions = dialog_tree.get_conditions(dialog_name)
responses = dialog_tree.get_responses(dialog_name)
target = dialog_tree.get_target_dialog(dialog_name)
if not conditions is None:
exit = False
for condition in conditions:
exit = not dialog_condition_check(condition['condition_string'], player, npc)
if exit:
return False
text = text
ml.log_message(text, npc.color)
if not responses is None:
response_count = 1
for response in responses:
ml.log_message(str(response_count) + ".) " + response['say'] + " (" + response['target_dialog'] + ")")
response_count += 1
updateui()
selected_response = None
while selected_response is None:
if terminal.has_input():
selection = terminal.read()
if selection == terminal.TK_ESCAPE:
selected_response = 99
else:
selected_response = selection_to_int(selection)
if not selected_response is None and selected_response >= 0 and selected_response < response_count:
##Subtract one from selection because count starts at 1 not 0
target = responses[selected_response - 1]["target_dialog"]
else:
ml.log_message("Select a response from 1 to " + str(response_count - 1 ))
else:
pass
updateui()
if dialog_tree.dialog_exists(target):
npc_dialog(npc, player, target)
def load_map(terminal, player, objects, map, new_map_index=0, dx=0, dy=0):
map.switch_map(new_map_index)
draw_map(terminal, map)
game_map.unblock(player.x, player.y)
player.move(dx , dy )
objects.clear()
objects.append(player)
if map.map_name in map_npc_db:
load_objects = map_npc_db[map.map_name]
for key in load_objects.keys():
objects.append(init_object(load_objects[key], key))
def add_to_inventory(inventory, item_to_add):
item_in_inventory = inventory.get(item_to_add.name, None)
if item_in_inventory is None:
inventory[item_to_add.name] = item_to_add
else:
item_in_inventory.quantity += item_to_add.quantity
def check_inventory_for_item(inventory, item_name, minimum_quantity = 1):
if item_name in inventory:
print(item_name + "qty:")
print(inventory[item_name].quantity)
if inventory[item_name].quantity >= minimum_quantity:
return True
else:
return False
else:
return False
def init_object(o, name):
if not 'x' in o:
o['x'] = 0
if not 'y' in o:
o['y'] = 0
if not 'char' in o:
o['char'] = '@'
if not 'color' in o:
o['color'] = 'black'
if not 'type' in o:
return GameObject(o['x'], o['y'], o['char'], o['color'], name)
elif o.get('type') == 'npc':
if 'dialog' in o:
dialog = o['dialog']
else:
dialog = 'default'
return NPC(o['x'], o['y'], o['char'], o['color'], name, dialog)
##TODO: abstract, automate init
terminal.open()
terminal.printf(1, 1, 'Hello, world!')
terminal.refresh()
terminal.set("window: size="+str(screen_width)+"x"+str(screen_height)+";")
run = True
ml = MessageLog(dialog_width, dialog_height)
test_count = 0
game_objects = []
dialog_entities = []
player = GameObject(3, 3, '@', 'red', "Hero", True)
player.inventory = {}
##Keep track of which direction player is pointing, start up.
player.last_dx = 0
player.last_dy = -1
game_objects.append(player)
add_to_inventory(player.inventory, InventoryItem("Goblet"))
add_to_inventory(player.inventory, InventoryItem("Replacement Plugs", 11))
game_map = GameMap(map_width,map_height)
load_map(terminal, player, game_objects, game_map)
draw_map(terminal, game_map)
draw_all(terminal, game_objects, map_width, map_height)
terminal.refresh()
while run:
action = None
if terminal.has_input():
action = terminal.read()
##1202 AM TODO:
###implement map system in blt engine✅
###implement NPC and fold in dialog system✅
##by adding a 'dialog' property to NPC object.
###implement item class and item description
##0118 # TODO:
##implement conditionals✅ 0119
if action == terminal.TK_CLOSE:
run = False
##BS test functions for the moment.
### TODO: remove da bs
elif action == terminal.TK_A:
get_object = game_map.get_game_object(player.x + player.last_dx, player.y + player.last_dy, game_objects)
##print(str(player.x + player.last_dx) +" "+ str(player.y + player.last_dy))
if not get_object is None:
##print(str(get_object))
if isinstance(get_object, NPC):
if not get_object.dialog_dict is None:
npc_dialog(get_object, player, "main")
elif action == terminal.TK_S:
ml.log_message(lorem + " " + str(test_count))
elif action == terminal.TK_M:
dialog_tree = DialogTree()
npc_dialog(dialog_tree, 'main')
control = handle_keys(action)
move = control.get('move')
if move:
dx,dy = move
new_x = player.x + dx
new_y = player.y + dy
if game_map.is_transport(new_x, new_y):
transport = game_map.spaces[new_x][new_y].transport
load_map(terminal, player, game_objects, game_map,
transport.new_map_index, dx + transport.dx, dy + transport.dy)
elif not game_map.is_blocked(new_x,new_y):
game_map.unblock(player.x, player.y)
player.move(dx,dy)
player.last_dx = dx
player.last_dy = dy
test_count += 1
draw_all(terminal, game_objects, map_width, map_height)
updateui()
terminal.close()
##layers:
##background 0
##terrain 1
##characters 2
##ui 3
| npc_dialog | identifier_name |
blt_engine.py | from bearlibterminal import terminal
from mapping.game_map import GameMap
from gameplay.dialog_tree import DialogTree
from message_log import MessageLog
from game_object import GameObject
from render_functions import draw_all, draw_map
from input_handler import handle_keys
from utils import *
from gameplay.npc import NPC
from gameplay.inventory import InventoryItem
import re
game_title = "StrangeHack"
screen_width = 120
screen_height = 40
map_width=65
map_height = 40
dialog_width = 50
dialog_height = 35
dialog_pos_x = 68
dialog_pos_y = 1
##Todo: This is starting to turn into spaghetti code. Probably need to refactor
##soon.
def updateui():
##Switch to layer 4 to update UI
terminal.layer(3)
terminal.clear_area(dialog_pos_x,dialog_pos_y,dialog_width,dialog_height)
terminal.printf(dialog_pos_x,dialog_pos_y,ml.get_scroll_back())
terminal.refresh()
def selection_to_int(selection):
##This seems like an incredibly hacky way to do this but I do not see this
##functionality built into the bearlibterm for some reason.. :(
##TODO, ENUMERATE through "terminal" and get all properties whose key starts
##"TK_" Then lop off the end and return it as a char.
if selection == terminal.TK_1:
return 1
elif selection == terminal.TK_2:
return 2
elif selection == terminal.TK_3:
return 3
elif selection == terminal.TK_4:
return 4
elif selection == terminal.TK_5:
return 5
elif selection == terminal.TK_6:
return 6
elif selection == terminal.TK_7:
return 7
elif selection == terminal.TK_8:
return 8
elif selection == terminal.TK_9:
return 9
else:
return None
def dialog_condition_check(condition_code_string, char1, char2):
code_string_stack = condition_code_string.split(" ")
##OK, so here's the deal.
##conditions will be specified in dialog tree as a stringself.
##the string will have 3 positions to start, separated by spaces,
##except the third position in some cases but we'll get to that.
##Returns a dang bool holmes!
##First position is the trigger variable, which has to be a property
##on the player object or a keyword like item
try:
trigger_var_str = code_string_stack.pop(0)
trigger_var_str = trigger_var_str.lower()
#second pos is the comparison operator
operator_str = code_string_stack.pop(0)
##the third pos is whatever bits are remaining (to be split again later maybe)
##join it
condition_str = str.join(" ", code_string_stack)
##print(trigger_var_str)
##print(operator_str)
##print(condition_str)
except:
print("Couldn't parse condition string.")
return False
##Special case to check inventory items...
if trigger_var_str == 'item':
inventory = getattr(player, 'inventory', None)
quote_check_regex_arr = re.findall(r"\"(.*?)\"", condition_str, re.DOTALL)
if len(quote_check_regex_arr) > 0:
item_name = quote_check_regex_arr.pop()
##there might be an integer left at the end to specify quantity...
try:
quantity_str = condition_str.replace('"'+item_name+'"','').split(' ').pop()
quantity_int = int(quantity_str)
except:
quantity_int = 1
##print(quantity_int)
##print(item_name)
if not inventory is None:
return check_inventory_for_item(inventory, item_name, quantity_int)
else:
return False
# for item in inventory:
# print(item)
# if condition_str in inventory:
# return True
# else:
# return False
##Need to add extra conditions to check item quantity.
try:
trigger = getattr(player, trigger_var_str)
except:
print("Couldn't get player attribute " + trigger_var_str)
return False
return get_truth(trigger, operator_str, condition_str)
def npc_dialog(npc, player, dialog_name):
dialog_tree = DialogTree(npc.dialog_dict)
text = dialog_tree.get_say(dialog_name)
conditions = dialog_tree.get_conditions(dialog_name)
responses = dialog_tree.get_responses(dialog_name)
target = dialog_tree.get_target_dialog(dialog_name)
if not conditions is None:
exit = False
for condition in conditions:
exit = not dialog_condition_check(condition['condition_string'], player, npc)
if exit:
return False
text = text
ml.log_message(text, npc.color)
if not responses is None:
response_count = 1
for response in responses:
ml.log_message(str(response_count) + ".) " + response['say'] + " (" + response['target_dialog'] + ")")
response_count += 1
updateui()
selected_response = None
while selected_response is None:
if terminal.has_input():
selection = terminal.read()
if selection == terminal.TK_ESCAPE:
selected_response = 99
else:
selected_response = selection_to_int(selection)
if not selected_response is None and selected_response >= 0 and selected_response < response_count:
##Subtract one from selection because count starts at 1 not 0
target = responses[selected_response - 1]["target_dialog"]
else:
ml.log_message("Select a response from 1 to " + str(response_count - 1 ))
else:
pass
updateui()
if dialog_tree.dialog_exists(target):
npc_dialog(npc, player, target)
def load_map(terminal, player, objects, map, new_map_index=0, dx=0, dy=0):
map.switch_map(new_map_index)
draw_map(terminal, map)
game_map.unblock(player.x, player.y)
player.move(dx , dy )
objects.clear()
objects.append(player)
if map.map_name in map_npc_db:
load_objects = map_npc_db[map.map_name]
for key in load_objects.keys():
objects.append(init_object(load_objects[key], key))
def add_to_inventory(inventory, item_to_add):
item_in_inventory = inventory.get(item_to_add.name, None)
if item_in_inventory is None:
inventory[item_to_add.name] = item_to_add
else:
item_in_inventory.quantity += item_to_add.quantity
def check_inventory_for_item(inventory, item_name, minimum_quantity = 1):
if item_name in inventory:
print(item_name + "qty:")
print(inventory[item_name].quantity)
if inventory[item_name].quantity >= minimum_quantity:
return True
else:
return False
else:
return False
def init_object(o, name):
if not 'x' in o:
o['x'] = 0
if not 'y' in o:
o['y'] = 0
if not 'char' in o:
o['char'] = '@'
if not 'color' in o:
o['color'] = 'black'
if not 'type' in o:
return GameObject(o['x'], o['y'], o['char'], o['color'], name)
elif o.get('type') == 'npc':
if 'dialog' in o:
dialog = o['dialog']
else:
dialog = 'default'
return NPC(o['x'], o['y'], o['char'], o['color'], name, dialog)
##TODO: abstract, automate init
terminal.open()
terminal.printf(1, 1, 'Hello, world!')
terminal.refresh()
terminal.set("window: size="+str(screen_width)+"x"+str(screen_height)+";")
run = True
ml = MessageLog(dialog_width, dialog_height)
test_count = 0
game_objects = []
dialog_entities = []
player = GameObject(3, 3, '@', 'red', "Hero", True)
player.inventory = {}
##Keep track of which direction player is pointing, start up.
player.last_dx = 0
player.last_dy = -1
game_objects.append(player)
add_to_inventory(player.inventory, InventoryItem("Goblet"))
add_to_inventory(player.inventory, InventoryItem("Replacement Plugs", 11))
game_map = GameMap(map_width,map_height)
load_map(terminal, player, game_objects, game_map)
draw_map(terminal, game_map)
draw_all(terminal, game_objects, map_width, map_height)
terminal.refresh()
while run:
action = None
if terminal.has_input():
| inal.close()
##layers:
##background 0
##terrain 1
##characters 2
##ui 3
| action = terminal.read()
##1202 AM TODO:
###implement map system in blt engine✅
###implement NPC and fold in dialog system✅
##by adding a 'dialog' property to NPC object.
###implement item class and item description
##0118 # TODO:
##implement conditionals✅ 0119
if action == terminal.TK_CLOSE:
run = False
##BS test functions for the moment.
### TODO: remove da bs
elif action == terminal.TK_A:
get_object = game_map.get_game_object(player.x + player.last_dx, player.y + player.last_dy, game_objects)
##print(str(player.x + player.last_dx) +" "+ str(player.y + player.last_dy))
if not get_object is None:
##print(str(get_object))
if isinstance(get_object, NPC):
if not get_object.dialog_dict is None:
npc_dialog(get_object, player, "main")
elif action == terminal.TK_S:
ml.log_message(lorem + " " + str(test_count))
elif action == terminal.TK_M:
dialog_tree = DialogTree()
npc_dialog(dialog_tree, 'main')
control = handle_keys(action)
move = control.get('move')
if move:
dx,dy = move
new_x = player.x + dx
new_y = player.y + dy
if game_map.is_transport(new_x, new_y):
transport = game_map.spaces[new_x][new_y].transport
load_map(terminal, player, game_objects, game_map,
transport.new_map_index, dx + transport.dx, dy + transport.dy)
elif not game_map.is_blocked(new_x,new_y):
game_map.unblock(player.x, player.y)
player.move(dx,dy)
player.last_dx = dx
player.last_dy = dy
test_count += 1
draw_all(terminal, game_objects, map_width, map_height)
updateui()
term | conditional_block |
blt_engine.py | from bearlibterminal import terminal
from mapping.game_map import GameMap
from gameplay.dialog_tree import DialogTree
from message_log import MessageLog
from game_object import GameObject
from render_functions import draw_all, draw_map
from input_handler import handle_keys
from utils import *
from gameplay.npc import NPC
from gameplay.inventory import InventoryItem
import re
game_title = "StrangeHack"
screen_width = 120
screen_height = 40
map_width=65
map_height = 40
dialog_width = 50
dialog_height = 35
dialog_pos_x = 68
dialog_pos_y = 1
##Todo: This is starting to turn into spaghetti code. Probably need to refactor
##soon.
def updateui():
##Switch to layer 4 to update UI
terminal.layer(3)
terminal.clear_area(dialog_pos_x,dialog_pos_y,dialog_width,dialog_height)
terminal.printf(dialog_pos_x,dialog_pos_y,ml.get_scroll_back())
terminal.refresh()
def selection_to_int(selection):
##This seems like an incredibly hacky way to do this but I do not see this
##functionality built into the bearlibterm for some reason.. :(
##TODO, ENUMERATE through "terminal" and get all properties whose key starts
##"TK_" Then lop off the end and return it as a char.
if selection == terminal.TK_1:
return 1
elif selection == terminal.TK_2:
return 2
elif selection == terminal.TK_3:
return 3
elif selection == terminal.TK_4:
return 4
elif selection == terminal.TK_5:
return 5
elif selection == terminal.TK_6:
return 6
elif selection == terminal.TK_7:
return 7
elif selection == terminal.TK_8:
return 8
elif selection == terminal.TK_9:
return 9
else:
return None
def dialog_condition_check(condition_code_string, char1, char2):
code_string_stack = condition_code_string.split(" ")
##OK, so here's the deal.
##conditions will be specified in dialog tree as a stringself.
##the string will have 3 positions to start, separated by spaces,
##except the third position in some cases but we'll get to that.
##Returns a dang bool holmes!
##First position is the trigger variable, which has to be a property
##on the player object or a keyword like item
try:
trigger_var_str = code_string_stack.pop(0)
trigger_var_str = trigger_var_str.lower()
#second pos is the comparison operator
operator_str = code_string_stack.pop(0)
##the third pos is whatever bits are remaining (to be split again later maybe)
##join it
condition_str = str.join(" ", code_string_stack)
##print(trigger_var_str)
##print(operator_str)
##print(condition_str)
except:
print("Couldn't parse condition string.")
return False
##Special case to check inventory items...
if trigger_var_str == 'item':
inventory = getattr(player, 'inventory', None)
quote_check_regex_arr = re.findall(r"\"(.*?)\"", condition_str, re.DOTALL)
if len(quote_check_regex_arr) > 0:
item_name = quote_check_regex_arr.pop()
##there might be an integer left at the end to specify quantity...
try:
quantity_str = condition_str.replace('"'+item_name+'"','').split(' ').pop()
quantity_int = int(quantity_str)
except:
quantity_int = 1
##print(quantity_int)
##print(item_name)
if not inventory is None:
return check_inventory_for_item(inventory, item_name, quantity_int)
else:
return False
# for item in inventory:
# print(item)
# if condition_str in inventory:
# return True
# else:
# return False
##Need to add extra conditions to check item quantity.
try:
trigger = getattr(player, trigger_var_str)
except:
print("Couldn't get player attribute " + trigger_var_str)
return False
return get_truth(trigger, operator_str, condition_str)
def npc_dialog(npc, player, dialog_name):
dialog_tree = DialogTree(npc.dialog_dict)
text = dialog_tree.get_say(dialog_name)
conditions = dialog_tree.get_conditions(dialog_name)
responses = dialog_tree.get_responses(dialog_name)
target = dialog_tree.get_target_dialog(dialog_name)
if not conditions is None:
exit = False
for condition in conditions:
exit = not dialog_condition_check(condition['condition_string'], player, npc)
if exit:
return False
text = text
ml.log_message(text, npc.color)
if not responses is None:
response_count = 1
for response in responses:
ml.log_message(str(response_count) + ".) " + response['say'] + " (" + response['target_dialog'] + ")")
response_count += 1
updateui()
selected_response = None
while selected_response is None:
if terminal.has_input():
selection = terminal.read()
if selection == terminal.TK_ESCAPE:
selected_response = 99
else:
selected_response = selection_to_int(selection)
if not selected_response is None and selected_response >= 0 and selected_response < response_count:
##Subtract one from selection because count starts at 1 not 0
target = responses[selected_response - 1]["target_dialog"]
else:
ml.log_message("Select a response from 1 to " + str(response_count - 1 ))
else:
pass
updateui()
if dialog_tree.dialog_exists(target):
npc_dialog(npc, player, target)
def load_map(terminal, player, objects, map, new_map_index=0, dx=0, dy=0):
map.switch_map(new_map_index)
draw_map(terminal, map)
game_map.unblock(player.x, player.y)
player.move(dx , dy )
objects.clear()
objects.append(player)
if map.map_name in map_npc_db:
load_objects = map_npc_db[map.map_name]
for key in load_objects.keys():
objects.append(init_object(load_objects[key], key))
def add_to_inventory(inventory, item_to_add):
item_in_inventory = inventory.get(item_to_add.name, None)
if item_in_inventory is None:
inventory[item_to_add.name] = item_to_add
else:
item_in_inventory.quantity += item_to_add.quantity
def check_inventory_for_item(inventory, item_name, minimum_quantity = 1):
if item_name in inventory:
print(item_name + "qty:")
print(inventory[item_name].quantity)
if inventory[item_name].quantity >= minimum_quantity:
return True
else:
return False
else:
return False
def init_object(o, name):
if not 'x' in o:
o['x'] = 0
if not 'y' in o:
o['y'] = 0
if not 'char' in o: | o['char'] = '@'
if not 'color' in o:
o['color'] = 'black'
if not 'type' in o:
return GameObject(o['x'], o['y'], o['char'], o['color'], name)
elif o.get('type') == 'npc':
if 'dialog' in o:
dialog = o['dialog']
else:
dialog = 'default'
return NPC(o['x'], o['y'], o['char'], o['color'], name, dialog)
##TODO: abstract, automate init
terminal.open()
terminal.printf(1, 1, 'Hello, world!')
terminal.refresh()
terminal.set("window: size="+str(screen_width)+"x"+str(screen_height)+";")
run = True
ml = MessageLog(dialog_width, dialog_height)
test_count = 0
game_objects = []
dialog_entities = []
player = GameObject(3, 3, '@', 'red', "Hero", True)
player.inventory = {}
##Keep track of which direction player is pointing, start up.
player.last_dx = 0
player.last_dy = -1
game_objects.append(player)
add_to_inventory(player.inventory, InventoryItem("Goblet"))
add_to_inventory(player.inventory, InventoryItem("Replacement Plugs", 11))
game_map = GameMap(map_width,map_height)
load_map(terminal, player, game_objects, game_map)
draw_map(terminal, game_map)
draw_all(terminal, game_objects, map_width, map_height)
terminal.refresh()
while run:
action = None
if terminal.has_input():
action = terminal.read()
##1202 AM TODO:
###implement map system in blt engine✅
###implement NPC and fold in dialog system✅
##by adding a 'dialog' property to NPC object.
###implement item class and item description
##0118 # TODO:
##implement conditionals✅ 0119
if action == terminal.TK_CLOSE:
run = False
##BS test functions for the moment.
### TODO: remove da bs
elif action == terminal.TK_A:
get_object = game_map.get_game_object(player.x + player.last_dx, player.y + player.last_dy, game_objects)
##print(str(player.x + player.last_dx) +" "+ str(player.y + player.last_dy))
if not get_object is None:
##print(str(get_object))
if isinstance(get_object, NPC):
if not get_object.dialog_dict is None:
npc_dialog(get_object, player, "main")
elif action == terminal.TK_S:
ml.log_message(lorem + " " + str(test_count))
elif action == terminal.TK_M:
dialog_tree = DialogTree()
npc_dialog(dialog_tree, 'main')
control = handle_keys(action)
move = control.get('move')
if move:
dx,dy = move
new_x = player.x + dx
new_y = player.y + dy
if game_map.is_transport(new_x, new_y):
transport = game_map.spaces[new_x][new_y].transport
load_map(terminal, player, game_objects, game_map,
transport.new_map_index, dx + transport.dx, dy + transport.dy)
elif not game_map.is_blocked(new_x,new_y):
game_map.unblock(player.x, player.y)
player.move(dx,dy)
player.last_dx = dx
player.last_dy = dy
test_count += 1
draw_all(terminal, game_objects, map_width, map_height)
updateui()
terminal.close()
##layers:
##background 0
##terrain 1
##characters 2
##ui 3 | random_line_split | |
blt_engine.py | from bearlibterminal import terminal
from mapping.game_map import GameMap
from gameplay.dialog_tree import DialogTree
from message_log import MessageLog
from game_object import GameObject
from render_functions import draw_all, draw_map
from input_handler import handle_keys
from utils import *
from gameplay.npc import NPC
from gameplay.inventory import InventoryItem
import re
game_title = "StrangeHack"
screen_width = 120
screen_height = 40
map_width=65
map_height = 40
dialog_width = 50
dialog_height = 35
dialog_pos_x = 68
dialog_pos_y = 1
##Todo: This is starting to turn into spaghetti code. Probably need to refactor
##soon.
def updateui():
##Switch to layer 4 to update UI
terminal.layer(3)
terminal.clear_area(dialog_pos_x,dialog_pos_y,dialog_width,dialog_height)
terminal.printf(dialog_pos_x,dialog_pos_y,ml.get_scroll_back())
terminal.refresh()
def selection_to_int(selection):
##This seems like an incredibly hacky way to do this but I do not see this
##functionality built into the bearlibterm for some reason.. :(
##TODO, ENUMERATE through "terminal" and get all properties whose key starts
##"TK_" Then lop off the end and return it as a char.
if selection == terminal.TK_1:
return 1
elif selection == terminal.TK_2:
return 2
elif selection == terminal.TK_3:
return 3
elif selection == terminal.TK_4:
return 4
elif selection == terminal.TK_5:
return 5
elif selection == terminal.TK_6:
return 6
elif selection == terminal.TK_7:
return 7
elif selection == terminal.TK_8:
return 8
elif selection == terminal.TK_9:
return 9
else:
return None
def dialog_condition_check(condition_code_string, char1, char2):
code_string_stack = condition_code_string.split(" ")
##OK, so here's the deal.
##conditions will be specified in dialog tree as a stringself.
##the string will have 3 positions to start, separated by spaces,
##except the third position in some cases but we'll get to that.
##Returns a dang bool holmes!
##First position is the trigger variable, which has to be a property
##on the player object or a keyword like item
try:
trigger_var_str = code_string_stack.pop(0)
trigger_var_str = trigger_var_str.lower()
#second pos is the comparison operator
operator_str = code_string_stack.pop(0)
##the third pos is whatever bits are remaining (to be split again later maybe)
##join it
condition_str = str.join(" ", code_string_stack)
##print(trigger_var_str)
##print(operator_str)
##print(condition_str)
except:
print("Couldn't parse condition string.")
return False
##Special case to check inventory items...
if trigger_var_str == 'item':
inventory = getattr(player, 'inventory', None)
quote_check_regex_arr = re.findall(r"\"(.*?)\"", condition_str, re.DOTALL)
if len(quote_check_regex_arr) > 0:
item_name = quote_check_regex_arr.pop()
##there might be an integer left at the end to specify quantity...
try:
quantity_str = condition_str.replace('"'+item_name+'"','').split(' ').pop()
quantity_int = int(quantity_str)
except:
quantity_int = 1
##print(quantity_int)
##print(item_name)
if not inventory is None:
return check_inventory_for_item(inventory, item_name, quantity_int)
else:
return False
# for item in inventory:
# print(item)
# if condition_str in inventory:
# return True
# else:
# return False
##Need to add extra conditions to check item quantity.
try:
trigger = getattr(player, trigger_var_str)
except:
print("Couldn't get player attribute " + trigger_var_str)
return False
return get_truth(trigger, operator_str, condition_str)
def npc_dialog(npc, player, dialog_name):
dialog_tree = DialogTree(npc.dialog_dict)
text = dialog_tree.get_say(dialog_name)
conditions = dialog_tree.get_conditions(dialog_name)
responses = dialog_tree.get_responses(dialog_name)
target = dialog_tree.get_target_dialog(dialog_name)
if not conditions is None:
exit = False
for condition in conditions:
exit = not dialog_condition_check(condition['condition_string'], player, npc)
if exit:
return False
text = text
ml.log_message(text, npc.color)
if not responses is None:
response_count = 1
for response in responses:
ml.log_message(str(response_count) + ".) " + response['say'] + " (" + response['target_dialog'] + ")")
response_count += 1
updateui()
selected_response = None
while selected_response is None:
if terminal.has_input():
selection = terminal.read()
if selection == terminal.TK_ESCAPE:
selected_response = 99
else:
selected_response = selection_to_int(selection)
if not selected_response is None and selected_response >= 0 and selected_response < response_count:
##Subtract one from selection because count starts at 1 not 0
target = responses[selected_response - 1]["target_dialog"]
else:
ml.log_message("Select a response from 1 to " + str(response_count - 1 ))
else:
pass
updateui()
if dialog_tree.dialog_exists(target):
npc_dialog(npc, player, target)
def load_map(terminal, player, objects, map, new_map_index=0, dx=0, dy=0):
|
def add_to_inventory(inventory, item_to_add):
item_in_inventory = inventory.get(item_to_add.name, None)
if item_in_inventory is None:
inventory[item_to_add.name] = item_to_add
else:
item_in_inventory.quantity += item_to_add.quantity
def check_inventory_for_item(inventory, item_name, minimum_quantity = 1):
if item_name in inventory:
print(item_name + "qty:")
print(inventory[item_name].quantity)
if inventory[item_name].quantity >= minimum_quantity:
return True
else:
return False
else:
return False
def init_object(o, name):
if not 'x' in o:
o['x'] = 0
if not 'y' in o:
o['y'] = 0
if not 'char' in o:
o['char'] = '@'
if not 'color' in o:
o['color'] = 'black'
if not 'type' in o:
return GameObject(o['x'], o['y'], o['char'], o['color'], name)
elif o.get('type') == 'npc':
if 'dialog' in o:
dialog = o['dialog']
else:
dialog = 'default'
return NPC(o['x'], o['y'], o['char'], o['color'], name, dialog)
##TODO: abstract, automate init
terminal.open()
terminal.printf(1, 1, 'Hello, world!')
terminal.refresh()
terminal.set("window: size="+str(screen_width)+"x"+str(screen_height)+";")
run = True
ml = MessageLog(dialog_width, dialog_height)
test_count = 0
game_objects = []
dialog_entities = []
player = GameObject(3, 3, '@', 'red', "Hero", True)
player.inventory = {}
##Keep track of which direction player is pointing, start up.
player.last_dx = 0
player.last_dy = -1
game_objects.append(player)
add_to_inventory(player.inventory, InventoryItem("Goblet"))
add_to_inventory(player.inventory, InventoryItem("Replacement Plugs", 11))
game_map = GameMap(map_width,map_height)
load_map(terminal, player, game_objects, game_map)
draw_map(terminal, game_map)
draw_all(terminal, game_objects, map_width, map_height)
terminal.refresh()
while run:
action = None
if terminal.has_input():
action = terminal.read()
##1202 AM TODO:
###implement map system in blt engine✅
###implement NPC and fold in dialog system✅
##by adding a 'dialog' property to NPC object.
###implement item class and item description
##0118 # TODO:
##implement conditionals✅ 0119
if action == terminal.TK_CLOSE:
run = False
##BS test functions for the moment.
### TODO: remove da bs
elif action == terminal.TK_A:
get_object = game_map.get_game_object(player.x + player.last_dx, player.y + player.last_dy, game_objects)
##print(str(player.x + player.last_dx) +" "+ str(player.y + player.last_dy))
if not get_object is None:
##print(str(get_object))
if isinstance(get_object, NPC):
if not get_object.dialog_dict is None:
npc_dialog(get_object, player, "main")
elif action == terminal.TK_S:
ml.log_message(lorem + " " + str(test_count))
elif action == terminal.TK_M:
dialog_tree = DialogTree()
npc_dialog(dialog_tree, 'main')
control = handle_keys(action)
move = control.get('move')
if move:
dx,dy = move
new_x = player.x + dx
new_y = player.y + dy
if game_map.is_transport(new_x, new_y):
transport = game_map.spaces[new_x][new_y].transport
load_map(terminal, player, game_objects, game_map,
transport.new_map_index, dx + transport.dx, dy + transport.dy)
elif not game_map.is_blocked(new_x,new_y):
game_map.unblock(player.x, player.y)
player.move(dx,dy)
player.last_dx = dx
player.last_dy = dy
test_count += 1
draw_all(terminal, game_objects, map_width, map_height)
updateui()
terminal.close()
##layers:
##background 0
##terrain 1
##characters 2
##ui 3
| map.switch_map(new_map_index)
draw_map(terminal, map)
game_map.unblock(player.x, player.y)
player.move(dx , dy )
objects.clear()
objects.append(player)
if map.map_name in map_npc_db:
load_objects = map_npc_db[map.map_name]
for key in load_objects.keys():
objects.append(init_object(load_objects[key], key)) | identifier_body |
main.py | import pygame
import os
import time
import random
pygame.init()
# Maximum height and width of the game surface
WIDTH, HEIGHT = (750, 750)
# To create the display surface
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
# Set the surface caption
pygame.display.set_caption("MyGame")
# Background image
BG = pygame.image.load(os.path.join("assets", "background-black.png"))
# Scaling the background image to max width and height as game surface
BG = pygame.transform.scale(BG, (WIDTH, HEIGHT))
# Enemy Load image
RED_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_red_small.png"))
GREEN_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_green_small.png"))
BLUE_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_blue_small.png"))
# Player ship image
YELLOW_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_yellow.png"))
# lasers
RED_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_red.png"))
GREEN_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_green.png"))
BLUE_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_blue.png"))
YELLOW_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_yellow.png"))
# Generalized class
class Ship:
COOLDOWN = 30
def __init__(self, x, y, health=100):
self.x = x
self.y = y
self.health = health
self.ship_img = None
self.laser_img = None
# keep track of the lasers shoot
self.lasers = []
self.cool_down_counter = 0
def draw(self, window):
window.blit(self.ship_img, (self.x, self.y))
for laser in self.lasers:
laser.draw(window)
def move_lasers(self, vel, obj):
self.cooldown()
for laser in self.lasers:
laser.move(vel)
if laser.off_screen(HEIGHT):
self.lasers.remove(laser)
elif laser.collision(obj):
obj.health -= 10
self.lasers.remove(laser)
# used to initiate time to control of the next laser shooting time
def cooldown(self):
# if cool_down_counter exceed the COOL DOWN =30 --> allow to create laser
if self.cool_down_counter >= self.COOLDOWN:
self.cool_down_counter = 0
# increment of the cool_down_counter
elif self.cool_down_counter > 0:
self.cool_down_counter += 1
# used to initiate time for new laser
def shoot(self):
if self.cool_down_counter == 0:
laser = Laser(self.x, self.y, self.laser_img)
self.lasers.append(laser)
self.cool_down_counter = 1
def get_height(self):
return self.ship_img.get_width()
def get_width(self):
return self.ship_img.get_height()
class Laser:
def __init__(self, x, y, img):
self.x = x
self.y = y
self.img = img
self.mask = pygame.mask.from_surface(self.img)
def draw(self, window):
window.blit(self.img, (self.x, self.y))
# moves the laser to the certain velocity ratio
def move(self, vel):
self.y += vel
# check if the laser is off the screen
# for player it checks laser y position > 0
# for enemy it checks laser y position < HEIGHT
def off_screen(self, height):
return not(self.y <= height and self.y >= 0)
def collision(self, obj):
return collide(self, obj)
'''
Player():
draw() --> ship.draw()
Move_laser() --> ship.cool_down()
health_bar()
Ship ---- > ship.shoot()
'''
# Player class
class Player(Ship):
# Takes the x and y position to located the player character
def __init__(self, x, y, health=100):
super().__init__(x, y)
self.ship_img = YELLOW_SPACE_SHIP
self.laser_img = YELLOW_LASER
# masking take only the weighted pixel and ignore the other pixel
self.mask = pygame.mask.from_surface(self.ship_img)
self.max_health = health
# Shoot the laser when the user press the space bar
def move_lasers(self, vel, objs):
self.cooldown()
# Loop over the laser shoot by the player
for laser in self.lasers:
# Change the x and y pos of the laser
laser.move(vel)
if laser.off_screen(HEIGHT):
# If the laser is out off the screen -- destroy the laser object
self.lasers.remove(laser)
else:
for obj in objs:
if laser.collision(obj):
objs.remove(obj)
if laser in self.lasers:
self.lasers.remove(laser)
# Render the player object to the game surface ---> responsible for the movement of the character
def draw(self, window):
super().draw(window)
self.healthbar(window)
def healthbar(self, window):
pygame.draw.rect(window, (255, 0, 0),(self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width(), 10))
pygame.draw.rect(window, (0, 255, 0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width() * (self.health / self.max_health),10))
'''
Enemy();
move()
shoot() ---> Laser()
move_laser()
Ship() ---> draw()
'''
class Enemy(Ship):
COLOR_MAP = {
"red": (RED_SPACE_SHIP, RED_LASER),
"blue": (BLUE_SPACE_SHIP, BLUE_LASER),
"green": (GREEN_SPACE_SHIP, GREEN_LASER)
}
def __init__(self, x, y, color, health=100):
super().__init__(x, y, health)
self.ship_img, self.laser_img = self.COLOR_MAP[color]
self.mask = pygame.mask.from_surface(self.ship_img)
def move(self, vel):
|
def shoot(self):
if self.cool_down_counter == 0:
laser = Laser(self.x-20, self.y, self.laser_img)
self.lasers.append(laser)
self.cool_down_counter = 1
def main():
# Flag to track the game status
run = True
# frame to be rendered per second
FPS = 60
# pygame clock initialisation
clock = pygame.time.Clock()
# Initial level of the game
level = 0
# Total lives of the player
lives = 5
# Font surface to render the level and lives
main_font = pygame.font.SysFont('comicsans', 50)
# Font surface to render the lost message
lost_font = pygame.font.SysFont('comicsans', 60)
# Player declaration
player = Player(375, 600)
# Player movement velocity
player_vel = 5
# laser movement velocity
laser_vel = 5
# Track of total enemy created
enemies = []
# Update number of enemy after a level
wave_length = 0
# Enemy spaceship velocity
enemy_vel = 1
# Flag to Tracking the game status of the player on basis of the health
lost = False
# Counting the lost
lost_count = 0
# Function to render the game objects onto the game surface
def render_window():
# Creating the font surface to render onto the game surface
# For Lives rendering
lives_label = main_font.render(f"Lives : {lives}", 1, (255, 255, 255))
# For Level rendering
level_label = main_font.render(f"Level : {level}", 1, (255, 255, 255))
# blit the background image to the game surface
WIN.blit(BG, (0, 0))
# blit the lives status to the game screen/surface
WIN.blit(lives_label, (10, 10))
# blit the level status to the game screen/surface
WIN.blit(level_label, (WIDTH - level_label.get_width() - 10, 10))
# Rendering the player character to the surface
player.draw(WIN)
# TO render the enemy onto the game surface
# This will draw the enemy if exist in the enemies list
for enemy in enemies:
# Calling the Enemy.draw function of the Enemy class
enemy.draw(WIN)
# If the lost flag is toggled ---> player lost
if lost:
# Creating the lost font surface to be rendered on the screen after the lost of the game
lost_label = lost_font.render("You Lost!!", 1, (255, 255, 255))
# Render the lost font surface to the game surface
WIN.blit(lost_label, (WIDTH/2 - lost_label.get_width()/2, 350))
# used to update the whole screen per frame
pygame.display.update()
def player_activity():
# Used to get the activity of the user/player
keys = pygame.key.get_pressed()
# <-- left movement
if keys[pygame.K_LEFT] and player.x - player_vel > 0:
player.x -= player_vel
# --> right
if keys[pygame.K_RIGHT] and player.x + player_vel + player.get_width() < WIDTH:
player.x += player_vel
# ^^^^^ up movement
if keys[pygame.K_UP] and player.y - player_vel > 0:
player.y -= player_vel
# Down movement
if keys[pygame.K_DOWN] and player.y + player_vel + player.get_height() + 10 < HEIGHT:
player.y += player_vel
# Used to fire the laser shoots
if keys[pygame.K_SPACE]:
player.shoot()
# Main Game Loop
while run:
# sets the number frame to be loaded per second and run this loop 60 time per second
clock.tick(FPS)
# used to render all the surfaces onto the screen
render_window()
# Check to track the game status as lost or win
if lives <= 0 or player.health <= 0:
# Toggle the lost flag
lost = True
# increase the lost count
lost_count += 1
# if the player lost toggle the game(run) for 3 seconds
if lost:
# to display the lost font surface for 3 seconds
if lost_count > FPS * 3:
run = False
else:
continue
# Used to get the activity of the user/player
for event in pygame.event.get():
# Trigger when the QUIT is pressed
if event.type == pygame.QUIT:
# run = False
quit()
print(event)
# To level up when NO enemy left
if len(enemies) == 0:
# Level up by 1
level += 1
# adding 5 additional enemy
wave_length += 5
# Declaration of enemy as per wave_length
for i in range(wave_length):
enemy = Enemy(random.randrange(50, WIDTH - 100),
random.randrange(-1500, -100),
random.choice(["red", "blue", "green"]))
enemies.append(enemy)
player_activity()
'''
Player():
draw() --> ship.draw()
Move_laser() --> ship.cool_down()
health_bar()
Ship() ---- > ship.shoot()
Enemy():
move()
shoot() ---> Laser()
Ship() ---> move_laser()
Ship() ---> draw()
'''
for enemy in enemies[:]:
# moving enemy itself
enemy.move(enemy_vel)
# moving enemy laser
enemy.move_lasers(laser_vel, player)
# setting the probability to shoot a laser
if random.randrange(0, 2 * 60) == 1:
enemy.shoot()
# Check for collision of the enemy and the player
if collide(enemy, player):
# if collide decrease the player health by 10
player.health -= 10
# Deleting the enemy who collide with the player
enemies.remove(enemy)
# destroying the enemy if the enemy passes the MAX_HEIGHT
elif enemy.y + enemy.get_height() > HEIGHT:
lives -= 1
enemies.remove(enemy)
# used to fire the laser and check the collision of the player laser with the enemy object
player.move_lasers(-laser_vel, enemies)
# check if the objects collide or not
def collide(obj1, obj2):
offset_x = obj2.x - obj1.x
offset_y = obj2.y - obj1.y
return obj1.mask.overlap(obj2.mask, (offset_x, offset_y)) != None
def main_menu():
# Initialisation of the font surface
title_font = pygame.font.SysFont("comicsans", 70)
# Used to show main menu after complietion of the game
run = True
while run:
# Blit the background surface to the screen surface
WIN.blit(BG, (0, 0))
# Setting the font to be rendered on the font surface
title_label = title_font.render("Press the mouse to begin...", 1, (255, 255, 255))
# blit the font on the game surface
WIN.blit(title_label, (WIDTH/2 - title_label.get_width()/2, 350))
# used to update the screen surface at every second according to the FPS
pygame.display.update()
# loop to handle the start or the close of the game
for event in pygame.event.get():
# triggered when the game screen is closed --> close the game
if event.type == pygame.QUIT:
run = False
# Triggered when the mouse is clicked --> Start game
if event.type == pygame.MOUSEBUTTONDOWN:
main()
# Quit the pygame instance
pygame.quit()
# Starting the game main menu
main_menu() | self.y += vel | identifier_body |
main.py | import pygame
import os
import time
import random
pygame.init()
# Maximum height and width of the game surface
WIDTH, HEIGHT = (750, 750)
# To create the display surface
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
# Set the surface caption
pygame.display.set_caption("MyGame")
# Background image
BG = pygame.image.load(os.path.join("assets", "background-black.png"))
# Scaling the background image to max width and height as game surface
BG = pygame.transform.scale(BG, (WIDTH, HEIGHT))
# Enemy Load image
RED_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_red_small.png"))
GREEN_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_green_small.png"))
BLUE_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_blue_small.png"))
# Player ship image
YELLOW_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_yellow.png"))
# lasers
RED_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_red.png"))
GREEN_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_green.png"))
BLUE_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_blue.png"))
YELLOW_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_yellow.png"))
# Generalized class
class Ship:
COOLDOWN = 30
def __init__(self, x, y, health=100):
self.x = x
self.y = y
self.health = health
self.ship_img = None
self.laser_img = None
# keep track of the lasers shoot
self.lasers = []
self.cool_down_counter = 0
def draw(self, window):
window.blit(self.ship_img, (self.x, self.y))
for laser in self.lasers:
laser.draw(window)
def move_lasers(self, vel, obj):
self.cooldown()
for laser in self.lasers:
laser.move(vel)
if laser.off_screen(HEIGHT):
self.lasers.remove(laser)
elif laser.collision(obj):
obj.health -= 10
self.lasers.remove(laser)
# used to initiate time to control of the next laser shooting time
def cooldown(self):
# if cool_down_counter exceed the COOL DOWN =30 --> allow to create laser
if self.cool_down_counter >= self.COOLDOWN:
self.cool_down_counter = 0
# increment of the cool_down_counter
elif self.cool_down_counter > 0:
self.cool_down_counter += 1
# used to initiate time for new laser
def shoot(self):
if self.cool_down_counter == 0:
laser = Laser(self.x, self.y, self.laser_img)
self.lasers.append(laser)
self.cool_down_counter = 1
def get_height(self):
return self.ship_img.get_width()
def get_width(self):
return self.ship_img.get_height()
class Laser:
def __init__(self, x, y, img):
self.x = x
self.y = y
self.img = img
self.mask = pygame.mask.from_surface(self.img)
def draw(self, window):
window.blit(self.img, (self.x, self.y))
# moves the laser to the certain velocity ratio
def move(self, vel):
self.y += vel
# check if the laser is off the screen
# for player it checks laser y position > 0
# for enemy it checks laser y position < HEIGHT
def off_screen(self, height):
return not(self.y <= height and self.y >= 0)
def collision(self, obj):
return collide(self, obj)
'''
Player():
draw() --> ship.draw()
Move_laser() --> ship.cool_down()
health_bar()
Ship ---- > ship.shoot()
'''
# Player class
class Player(Ship):
# Takes the x and y position to located the player character
def __init__(self, x, y, health=100):
super().__init__(x, y)
self.ship_img = YELLOW_SPACE_SHIP
self.laser_img = YELLOW_LASER
# masking take only the weighted pixel and ignore the other pixel
self.mask = pygame.mask.from_surface(self.ship_img)
self.max_health = health
# Shoot the laser when the user press the space bar
def move_lasers(self, vel, objs):
self.cooldown()
# Loop over the laser shoot by the player
for laser in self.lasers:
# Change the x and y pos of the laser
laser.move(vel)
if laser.off_screen(HEIGHT):
# If the laser is out off the screen -- destroy the laser object
self.lasers.remove(laser)
else:
for obj in objs:
if laser.collision(obj):
objs.remove(obj)
if laser in self.lasers:
self.lasers.remove(laser)
# Render the player object to the game surface ---> responsible for the movement of the character
def draw(self, window):
super().draw(window)
self.healthbar(window)
def healthbar(self, window):
pygame.draw.rect(window, (255, 0, 0),(self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width(), 10))
pygame.draw.rect(window, (0, 255, 0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width() * (self.health / self.max_health),10))
'''
Enemy();
move()
shoot() ---> Laser()
move_laser()
Ship() ---> draw()
'''
class Enemy(Ship):
COLOR_MAP = {
"red": (RED_SPACE_SHIP, RED_LASER),
"blue": (BLUE_SPACE_SHIP, BLUE_LASER),
"green": (GREEN_SPACE_SHIP, GREEN_LASER)
}
def __init__(self, x, y, color, health=100):
super().__init__(x, y, health)
self.ship_img, self.laser_img = self.COLOR_MAP[color]
self.mask = pygame.mask.from_surface(self.ship_img)
def move(self, vel):
self.y += vel
def shoot(self):
if self.cool_down_counter == 0:
laser = Laser(self.x-20, self.y, self.laser_img)
self.lasers.append(laser)
self.cool_down_counter = 1
def main():
# Flag to track the game status
run = True
# frame to be rendered per second
FPS = 60
# pygame clock initialisation
clock = pygame.time.Clock()
# Initial level of the game
level = 0
# Total lives of the player
lives = 5
# Font surface to render the level and lives
main_font = pygame.font.SysFont('comicsans', 50)
# Font surface to render the lost message
lost_font = pygame.font.SysFont('comicsans', 60)
# Player declaration
player = Player(375, 600)
# Player movement velocity
player_vel = 5
# laser movement velocity
laser_vel = 5
# Track of total enemy created
enemies = []
# Update number of enemy after a level
wave_length = 0
# Enemy spaceship velocity
enemy_vel = 1
# Flag to Tracking the game status of the player on basis of the health
lost = False
# Counting the lost
lost_count = 0
# Function to render the game objects onto the game surface
def render_window():
# Creating the font surface to render onto the game surface
# For Lives rendering
lives_label = main_font.render(f"Lives : {lives}", 1, (255, 255, 255))
# For Level rendering
level_label = main_font.render(f"Level : {level}", 1, (255, 255, 255))
# blit the background image to the game surface
WIN.blit(BG, (0, 0))
# blit the lives status to the game screen/surface
WIN.blit(lives_label, (10, 10))
# blit the level status to the game screen/surface
WIN.blit(level_label, (WIDTH - level_label.get_width() - 10, 10))
# Rendering the player character to the surface
player.draw(WIN)
# TO render the enemy onto the game surface
# This will draw the enemy if exist in the enemies list
for enemy in enemies:
# Calling the Enemy.draw function of the Enemy class
enemy.draw(WIN)
# If the lost flag is toggled ---> player lost
if lost:
# Creating the lost font surface to be rendered on the screen after the lost of the game
lost_label = lost_font.render("You Lost!!", 1, (255, 255, 255))
# Render the lost font surface to the game surface
WIN.blit(lost_label, (WIDTH/2 - lost_label.get_width()/2, 350))
# used to update the whole screen per frame
pygame.display.update()
def player_activity():
# Used to get the activity of the user/player
keys = pygame.key.get_pressed()
# <-- left movement
if keys[pygame.K_LEFT] and player.x - player_vel > 0:
player.x -= player_vel
# --> right
if keys[pygame.K_RIGHT] and player.x + player_vel + player.get_width() < WIDTH:
player.x += player_vel
# ^^^^^ up movement
if keys[pygame.K_UP] and player.y - player_vel > 0:
player.y -= player_vel
# Down movement
if keys[pygame.K_DOWN] and player.y + player_vel + player.get_height() + 10 < HEIGHT:
player.y += player_vel
# Used to fire the laser shoots
if keys[pygame.K_SPACE]:
player.shoot()
# Main Game Loop
while run:
# sets the number frame to be loaded per second and run this loop 60 time per second
clock.tick(FPS)
# used to render all the surfaces onto the screen
render_window()
# Check to track the game status as lost or win
if lives <= 0 or player.health <= 0:
# Toggle the lost flag
lost = True
# increase the lost count
lost_count += 1
# if the player lost toggle the game(run) for 3 seconds
if lost:
# to display the lost font surface for 3 seconds
if lost_count > FPS * 3:
run = False
else:
continue
# Used to get the activity of the user/player
for event in pygame.event.get():
# Trigger when the QUIT is pressed
if event.type == pygame.QUIT:
# run = False
quit()
print(event)
# To level up when NO enemy left
if len(enemies) == 0:
# Level up by 1
level += 1
# adding 5 additional enemy
wave_length += 5
# Declaration of enemy as per wave_length
for i in range(wave_length):
|
player_activity()
'''
Player():
draw() --> ship.draw()
Move_laser() --> ship.cool_down()
health_bar()
Ship() ---- > ship.shoot()
Enemy():
move()
shoot() ---> Laser()
Ship() ---> move_laser()
Ship() ---> draw()
'''
for enemy in enemies[:]:
# moving enemy itself
enemy.move(enemy_vel)
# moving enemy laser
enemy.move_lasers(laser_vel, player)
# setting the probability to shoot a laser
if random.randrange(0, 2 * 60) == 1:
enemy.shoot()
# Check for collision of the enemy and the player
if collide(enemy, player):
# if collide decrease the player health by 10
player.health -= 10
# Deleting the enemy who collide with the player
enemies.remove(enemy)
# destroying the enemy if the enemy passes the MAX_HEIGHT
elif enemy.y + enemy.get_height() > HEIGHT:
lives -= 1
enemies.remove(enemy)
# used to fire the laser and check the collision of the player laser with the enemy object
player.move_lasers(-laser_vel, enemies)
# check if the objects collide or not
def collide(obj1, obj2):
offset_x = obj2.x - obj1.x
offset_y = obj2.y - obj1.y
return obj1.mask.overlap(obj2.mask, (offset_x, offset_y)) != None
def main_menu():
# Initialisation of the font surface
title_font = pygame.font.SysFont("comicsans", 70)
# Used to show main menu after complietion of the game
run = True
while run:
# Blit the background surface to the screen surface
WIN.blit(BG, (0, 0))
# Setting the font to be rendered on the font surface
title_label = title_font.render("Press the mouse to begin...", 1, (255, 255, 255))
# blit the font on the game surface
WIN.blit(title_label, (WIDTH/2 - title_label.get_width()/2, 350))
# used to update the screen surface at every second according to the FPS
pygame.display.update()
# loop to handle the start or the close of the game
for event in pygame.event.get():
# triggered when the game screen is closed --> close the game
if event.type == pygame.QUIT:
run = False
# Triggered when the mouse is clicked --> Start game
if event.type == pygame.MOUSEBUTTONDOWN:
main()
# Quit the pygame instance
pygame.quit()
# Starting the game main menu
main_menu() | enemy = Enemy(random.randrange(50, WIDTH - 100),
random.randrange(-1500, -100),
random.choice(["red", "blue", "green"]))
enemies.append(enemy) | conditional_block |
main.py | import pygame
import os
import time
import random
pygame.init()
# Maximum height and width of the game surface
WIDTH, HEIGHT = (750, 750)
# To create the display surface
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
# Set the surface caption
pygame.display.set_caption("MyGame")
# Background image
BG = pygame.image.load(os.path.join("assets", "background-black.png"))
# Scaling the background image to max width and height as game surface
BG = pygame.transform.scale(BG, (WIDTH, HEIGHT))
# Enemy Load image
RED_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_red_small.png"))
GREEN_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_green_small.png"))
BLUE_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_blue_small.png"))
# Player ship image
YELLOW_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_yellow.png"))
# lasers
RED_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_red.png"))
GREEN_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_green.png"))
BLUE_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_blue.png"))
YELLOW_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_yellow.png"))
# Generalized class
class Ship:
COOLDOWN = 30
def __init__(self, x, y, health=100):
self.x = x
self.y = y
self.health = health
self.ship_img = None
self.laser_img = None
# keep track of the lasers shoot
self.lasers = []
self.cool_down_counter = 0
def | (self, window):
window.blit(self.ship_img, (self.x, self.y))
for laser in self.lasers:
laser.draw(window)
def move_lasers(self, vel, obj):
self.cooldown()
for laser in self.lasers:
laser.move(vel)
if laser.off_screen(HEIGHT):
self.lasers.remove(laser)
elif laser.collision(obj):
obj.health -= 10
self.lasers.remove(laser)
# used to initiate time to control of the next laser shooting time
def cooldown(self):
# if cool_down_counter exceed the COOL DOWN =30 --> allow to create laser
if self.cool_down_counter >= self.COOLDOWN:
self.cool_down_counter = 0
# increment of the cool_down_counter
elif self.cool_down_counter > 0:
self.cool_down_counter += 1
# used to initiate time for new laser
def shoot(self):
if self.cool_down_counter == 0:
laser = Laser(self.x, self.y, self.laser_img)
self.lasers.append(laser)
self.cool_down_counter = 1
def get_height(self):
return self.ship_img.get_width()
def get_width(self):
return self.ship_img.get_height()
class Laser:
def __init__(self, x, y, img):
self.x = x
self.y = y
self.img = img
self.mask = pygame.mask.from_surface(self.img)
def draw(self, window):
window.blit(self.img, (self.x, self.y))
# moves the laser to the certain velocity ratio
def move(self, vel):
self.y += vel
# check if the laser is off the screen
# for player it checks laser y position > 0
# for enemy it checks laser y position < HEIGHT
def off_screen(self, height):
return not(self.y <= height and self.y >= 0)
def collision(self, obj):
return collide(self, obj)
'''
Player():
draw() --> ship.draw()
Move_laser() --> ship.cool_down()
health_bar()
Ship ---- > ship.shoot()
'''
# Player class
class Player(Ship):
# Takes the x and y position to located the player character
def __init__(self, x, y, health=100):
super().__init__(x, y)
self.ship_img = YELLOW_SPACE_SHIP
self.laser_img = YELLOW_LASER
# masking take only the weighted pixel and ignore the other pixel
self.mask = pygame.mask.from_surface(self.ship_img)
self.max_health = health
# Shoot the laser when the user press the space bar
def move_lasers(self, vel, objs):
self.cooldown()
# Loop over the laser shoot by the player
for laser in self.lasers:
# Change the x and y pos of the laser
laser.move(vel)
if laser.off_screen(HEIGHT):
# If the laser is out off the screen -- destroy the laser object
self.lasers.remove(laser)
else:
for obj in objs:
if laser.collision(obj):
objs.remove(obj)
if laser in self.lasers:
self.lasers.remove(laser)
# Render the player object to the game surface ---> responsible for the movement of the character
def draw(self, window):
super().draw(window)
self.healthbar(window)
def healthbar(self, window):
pygame.draw.rect(window, (255, 0, 0),(self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width(), 10))
pygame.draw.rect(window, (0, 255, 0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width() * (self.health / self.max_health),10))
'''
Enemy();
move()
shoot() ---> Laser()
move_laser()
Ship() ---> draw()
'''
class Enemy(Ship):
COLOR_MAP = {
"red": (RED_SPACE_SHIP, RED_LASER),
"blue": (BLUE_SPACE_SHIP, BLUE_LASER),
"green": (GREEN_SPACE_SHIP, GREEN_LASER)
}
def __init__(self, x, y, color, health=100):
super().__init__(x, y, health)
self.ship_img, self.laser_img = self.COLOR_MAP[color]
self.mask = pygame.mask.from_surface(self.ship_img)
def move(self, vel):
self.y += vel
def shoot(self):
if self.cool_down_counter == 0:
laser = Laser(self.x-20, self.y, self.laser_img)
self.lasers.append(laser)
self.cool_down_counter = 1
def main():
# Flag to track the game status
run = True
# frame to be rendered per second
FPS = 60
# pygame clock initialisation
clock = pygame.time.Clock()
# Initial level of the game
level = 0
# Total lives of the player
lives = 5
# Font surface to render the level and lives
main_font = pygame.font.SysFont('comicsans', 50)
# Font surface to render the lost message
lost_font = pygame.font.SysFont('comicsans', 60)
# Player declaration
player = Player(375, 600)
# Player movement velocity
player_vel = 5
# laser movement velocity
laser_vel = 5
# Track of total enemy created
enemies = []
# Update number of enemy after a level
wave_length = 0
# Enemy spaceship velocity
enemy_vel = 1
# Flag to Tracking the game status of the player on basis of the health
lost = False
# Counting the lost
lost_count = 0
# Function to render the game objects onto the game surface
def render_window():
# Creating the font surface to render onto the game surface
# For Lives rendering
lives_label = main_font.render(f"Lives : {lives}", 1, (255, 255, 255))
# For Level rendering
level_label = main_font.render(f"Level : {level}", 1, (255, 255, 255))
# blit the background image to the game surface
WIN.blit(BG, (0, 0))
# blit the lives status to the game screen/surface
WIN.blit(lives_label, (10, 10))
# blit the level status to the game screen/surface
WIN.blit(level_label, (WIDTH - level_label.get_width() - 10, 10))
# Rendering the player character to the surface
player.draw(WIN)
# TO render the enemy onto the game surface
# This will draw the enemy if exist in the enemies list
for enemy in enemies:
# Calling the Enemy.draw function of the Enemy class
enemy.draw(WIN)
# If the lost flag is toggled ---> player lost
if lost:
# Creating the lost font surface to be rendered on the screen after the lost of the game
lost_label = lost_font.render("You Lost!!", 1, (255, 255, 255))
# Render the lost font surface to the game surface
WIN.blit(lost_label, (WIDTH/2 - lost_label.get_width()/2, 350))
# used to update the whole screen per frame
pygame.display.update()
def player_activity():
# Used to get the activity of the user/player
keys = pygame.key.get_pressed()
# <-- left movement
if keys[pygame.K_LEFT] and player.x - player_vel > 0:
player.x -= player_vel
# --> right
if keys[pygame.K_RIGHT] and player.x + player_vel + player.get_width() < WIDTH:
player.x += player_vel
# ^^^^^ up movement
if keys[pygame.K_UP] and player.y - player_vel > 0:
player.y -= player_vel
# Down movement
if keys[pygame.K_DOWN] and player.y + player_vel + player.get_height() + 10 < HEIGHT:
player.y += player_vel
# Used to fire the laser shoots
if keys[pygame.K_SPACE]:
player.shoot()
# Main Game Loop
while run:
# sets the number frame to be loaded per second and run this loop 60 time per second
clock.tick(FPS)
# used to render all the surfaces onto the screen
render_window()
# Check to track the game status as lost or win
if lives <= 0 or player.health <= 0:
# Toggle the lost flag
lost = True
# increase the lost count
lost_count += 1
# if the player lost toggle the game(run) for 3 seconds
if lost:
# to display the lost font surface for 3 seconds
if lost_count > FPS * 3:
run = False
else:
continue
# Used to get the activity of the user/player
for event in pygame.event.get():
# Trigger when the QUIT is pressed
if event.type == pygame.QUIT:
# run = False
quit()
print(event)
# To level up when NO enemy left
if len(enemies) == 0:
# Level up by 1
level += 1
# adding 5 additional enemy
wave_length += 5
# Declaration of enemy as per wave_length
for i in range(wave_length):
enemy = Enemy(random.randrange(50, WIDTH - 100),
random.randrange(-1500, -100),
random.choice(["red", "blue", "green"]))
enemies.append(enemy)
player_activity()
'''
Player():
draw() --> ship.draw()
Move_laser() --> ship.cool_down()
health_bar()
Ship() ---- > ship.shoot()
Enemy():
move()
shoot() ---> Laser()
Ship() ---> move_laser()
Ship() ---> draw()
'''
for enemy in enemies[:]:
# moving enemy itself
enemy.move(enemy_vel)
# moving enemy laser
enemy.move_lasers(laser_vel, player)
# setting the probability to shoot a laser
if random.randrange(0, 2 * 60) == 1:
enemy.shoot()
# Check for collision of the enemy and the player
if collide(enemy, player):
# if collide decrease the player health by 10
player.health -= 10
# Deleting the enemy who collide with the player
enemies.remove(enemy)
# destroying the enemy if the enemy passes the MAX_HEIGHT
elif enemy.y + enemy.get_height() > HEIGHT:
lives -= 1
enemies.remove(enemy)
# used to fire the laser and check the collision of the player laser with the enemy object
player.move_lasers(-laser_vel, enemies)
# check if the objects collide or not
def collide(obj1, obj2):
offset_x = obj2.x - obj1.x
offset_y = obj2.y - obj1.y
return obj1.mask.overlap(obj2.mask, (offset_x, offset_y)) != None
def main_menu():
# Initialisation of the font surface
title_font = pygame.font.SysFont("comicsans", 70)
# Used to show main menu after complietion of the game
run = True
while run:
# Blit the background surface to the screen surface
WIN.blit(BG, (0, 0))
# Setting the font to be rendered on the font surface
title_label = title_font.render("Press the mouse to begin...", 1, (255, 255, 255))
# blit the font on the game surface
WIN.blit(title_label, (WIDTH/2 - title_label.get_width()/2, 350))
# used to update the screen surface at every second according to the FPS
pygame.display.update()
# loop to handle the start or the close of the game
for event in pygame.event.get():
# triggered when the game screen is closed --> close the game
if event.type == pygame.QUIT:
run = False
# Triggered when the mouse is clicked --> Start game
if event.type == pygame.MOUSEBUTTONDOWN:
main()
# Quit the pygame instance
pygame.quit()
# Starting the game main menu
main_menu() | draw | identifier_name |
main.py | import pygame
import os
import time
import random
pygame.init()
# Maximum height and width of the game surface
WIDTH, HEIGHT = (750, 750)
# To create the display surface
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
# Set the surface caption
pygame.display.set_caption("MyGame")
# Background image
BG = pygame.image.load(os.path.join("assets", "background-black.png"))
# Scaling the background image to max width and height as game surface
BG = pygame.transform.scale(BG, (WIDTH, HEIGHT))
# Enemy Load image
RED_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_red_small.png"))
GREEN_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_green_small.png"))
BLUE_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_blue_small.png"))
# Player ship image
YELLOW_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_yellow.png"))
# lasers
RED_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_red.png"))
GREEN_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_green.png"))
BLUE_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_blue.png"))
YELLOW_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_yellow.png"))
# Generalized class
class Ship:
COOLDOWN = 30
def __init__(self, x, y, health=100):
self.x = x
self.y = y
self.health = health
self.ship_img = None
self.laser_img = None
# keep track of the lasers shoot
self.lasers = []
self.cool_down_counter = 0
def draw(self, window):
window.blit(self.ship_img, (self.x, self.y))
for laser in self.lasers:
laser.draw(window)
def move_lasers(self, vel, obj):
self.cooldown()
for laser in self.lasers:
laser.move(vel)
if laser.off_screen(HEIGHT):
self.lasers.remove(laser)
elif laser.collision(obj):
obj.health -= 10
self.lasers.remove(laser)
# used to initiate time to control of the next laser shooting time
def cooldown(self):
# if cool_down_counter exceed the COOL DOWN =30 --> allow to create laser
if self.cool_down_counter >= self.COOLDOWN:
self.cool_down_counter = 0
# increment of the cool_down_counter
elif self.cool_down_counter > 0:
self.cool_down_counter += 1
# used to initiate time for new laser
def shoot(self):
if self.cool_down_counter == 0:
laser = Laser(self.x, self.y, self.laser_img)
self.lasers.append(laser)
self.cool_down_counter = 1
def get_height(self):
return self.ship_img.get_width()
def get_width(self):
return self.ship_img.get_height()
class Laser:
def __init__(self, x, y, img):
self.x = x
self.y = y
self.img = img
self.mask = pygame.mask.from_surface(self.img)
def draw(self, window):
window.blit(self.img, (self.x, self.y))
# moves the laser to the certain velocity ratio
def move(self, vel):
self.y += vel
# check if the laser is off the screen
# for player it checks laser y position > 0
# for enemy it checks laser y position < HEIGHT
def off_screen(self, height):
return not(self.y <= height and self.y >= 0)
def collision(self, obj):
return collide(self, obj)
'''
Player():
draw() --> ship.draw()
Move_laser() --> ship.cool_down()
health_bar()
Ship ---- > ship.shoot()
'''
# Player class
class Player(Ship):
# Takes the x and y position to located the player character
def __init__(self, x, y, health=100):
super().__init__(x, y)
self.ship_img = YELLOW_SPACE_SHIP
self.laser_img = YELLOW_LASER
# masking take only the weighted pixel and ignore the other pixel
self.mask = pygame.mask.from_surface(self.ship_img)
self.max_health = health
# Shoot the laser when the user press the space bar
def move_lasers(self, vel, objs):
self.cooldown()
# Loop over the laser shoot by the player
for laser in self.lasers:
# Change the x and y pos of the laser
laser.move(vel)
if laser.off_screen(HEIGHT):
# If the laser is out off the screen -- destroy the laser object
self.lasers.remove(laser)
else:
for obj in objs:
if laser.collision(obj):
objs.remove(obj)
if laser in self.lasers:
self.lasers.remove(laser)
# Render the player object to the game surface ---> responsible for the movement of the character
def draw(self, window):
super().draw(window)
self.healthbar(window)
def healthbar(self, window):
pygame.draw.rect(window, (255, 0, 0),(self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width(), 10))
pygame.draw.rect(window, (0, 255, 0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width() * (self.health / self.max_health),10))
'''
Enemy();
move()
shoot() ---> Laser()
move_laser()
Ship() ---> draw()
'''
class Enemy(Ship):
COLOR_MAP = {
"red": (RED_SPACE_SHIP, RED_LASER),
"blue": (BLUE_SPACE_SHIP, BLUE_LASER),
"green": (GREEN_SPACE_SHIP, GREEN_LASER)
}
def __init__(self, x, y, color, health=100):
super().__init__(x, y, health)
self.ship_img, self.laser_img = self.COLOR_MAP[color]
self.mask = pygame.mask.from_surface(self.ship_img)
def move(self, vel):
self.y += vel
def shoot(self):
if self.cool_down_counter == 0:
laser = Laser(self.x-20, self.y, self.laser_img)
self.lasers.append(laser)
self.cool_down_counter = 1
def main():
# Flag to track the game status
run = True
# frame to be rendered per second
FPS = 60
# pygame clock initialisation
clock = pygame.time.Clock()
# Initial level of the game
level = 0
# Total lives of the player
lives = 5
# Font surface to render the level and lives
main_font = pygame.font.SysFont('comicsans', 50)
# Font surface to render the lost message
lost_font = pygame.font.SysFont('comicsans', 60)
# Player declaration
player = Player(375, 600)
# Player movement velocity
player_vel = 5
# laser movement velocity
laser_vel = 5
# Track of total enemy created
enemies = []
# Update number of enemy after a level
wave_length = 0
# Enemy spaceship velocity
enemy_vel = 1
# Flag to Tracking the game status of the player on basis of the health
lost = False
# Counting the lost
lost_count = 0
# Function to render the game objects onto the game surface
def render_window():
# Creating the font surface to render onto the game surface
# For Lives rendering
lives_label = main_font.render(f"Lives : {lives}", 1, (255, 255, 255))
# For Level rendering
level_label = main_font.render(f"Level : {level}", 1, (255, 255, 255))
# blit the background image to the game surface
WIN.blit(BG, (0, 0))
# blit the lives status to the game screen/surface
WIN.blit(lives_label, (10, 10))
# blit the level status to the game screen/surface
WIN.blit(level_label, (WIDTH - level_label.get_width() - 10, 10))
# Rendering the player character to the surface
player.draw(WIN)
# TO render the enemy onto the game surface
# This will draw the enemy if exist in the enemies list
for enemy in enemies:
# Calling the Enemy.draw function of the Enemy class
enemy.draw(WIN)
# If the lost flag is toggled ---> player lost
if lost:
# Creating the lost font surface to be rendered on the screen after the lost of the game
lost_label = lost_font.render("You Lost!!", 1, (255, 255, 255))
# Render the lost font surface to the game surface
WIN.blit(lost_label, (WIDTH/2 - lost_label.get_width()/2, 350))
# used to update the whole screen per frame
pygame.display.update()
def player_activity():
# Used to get the activity of the user/player
keys = pygame.key.get_pressed()
# <-- left movement
if keys[pygame.K_LEFT] and player.x - player_vel > 0:
player.x -= player_vel
# --> right
if keys[pygame.K_RIGHT] and player.x + player_vel + player.get_width() < WIDTH:
player.x += player_vel
# ^^^^^ up movement
if keys[pygame.K_UP] and player.y - player_vel > 0:
player.y -= player_vel
# Down movement
if keys[pygame.K_DOWN] and player.y + player_vel + player.get_height() + 10 < HEIGHT:
player.y += player_vel
# Used to fire the laser shoots
if keys[pygame.K_SPACE]:
player.shoot()
# Main Game Loop
while run:
# sets the number frame to be loaded per second and run this loop 60 time per second
clock.tick(FPS)
# used to render all the surfaces onto the screen
render_window()
# Check to track the game status as lost or win
if lives <= 0 or player.health <= 0:
# Toggle the lost flag
lost = True
# increase the lost count
lost_count += 1
# if the player lost toggle the game(run) for 3 seconds
if lost:
# to display the lost font surface for 3 seconds
if lost_count > FPS * 3:
run = False
else:
continue
# Used to get the activity of the user/player
for event in pygame.event.get():
# Trigger when the QUIT is pressed
if event.type == pygame.QUIT:
# run = False
quit()
print(event)
# To level up when NO enemy left
if len(enemies) == 0:
# Level up by 1
level += 1
# adding 5 additional enemy
wave_length += 5
# Declaration of enemy as per wave_length
for i in range(wave_length):
enemy = Enemy(random.randrange(50, WIDTH - 100),
random.randrange(-1500, -100),
| player_activity()
'''
Player():
draw() --> ship.draw()
Move_laser() --> ship.cool_down()
health_bar()
Ship() ---- > ship.shoot()
Enemy():
move()
shoot() ---> Laser()
Ship() ---> move_laser()
Ship() ---> draw()
'''
for enemy in enemies[:]:
# moving enemy itself
enemy.move(enemy_vel)
# moving enemy laser
enemy.move_lasers(laser_vel, player)
# setting the probability to shoot a laser
if random.randrange(0, 2 * 60) == 1:
enemy.shoot()
# Check for collision of the enemy and the player
if collide(enemy, player):
# if collide decrease the player health by 10
player.health -= 10
# Deleting the enemy who collide with the player
enemies.remove(enemy)
# destroying the enemy if the enemy passes the MAX_HEIGHT
elif enemy.y + enemy.get_height() > HEIGHT:
lives -= 1
enemies.remove(enemy)
# used to fire the laser and check the collision of the player laser with the enemy object
player.move_lasers(-laser_vel, enemies)
# check if the objects collide or not
def collide(obj1, obj2):
offset_x = obj2.x - obj1.x
offset_y = obj2.y - obj1.y
return obj1.mask.overlap(obj2.mask, (offset_x, offset_y)) != None
def main_menu():
# Initialisation of the font surface
title_font = pygame.font.SysFont("comicsans", 70)
# Used to show main menu after complietion of the game
run = True
while run:
# Blit the background surface to the screen surface
WIN.blit(BG, (0, 0))
# Setting the font to be rendered on the font surface
title_label = title_font.render("Press the mouse to begin...", 1, (255, 255, 255))
# blit the font on the game surface
WIN.blit(title_label, (WIDTH/2 - title_label.get_width()/2, 350))
# used to update the screen surface at every second according to the FPS
pygame.display.update()
# loop to handle the start or the close of the game
for event in pygame.event.get():
# triggered when the game screen is closed --> close the game
if event.type == pygame.QUIT:
run = False
# Triggered when the mouse is clicked --> Start game
if event.type == pygame.MOUSEBUTTONDOWN:
main()
# Quit the pygame instance
pygame.quit()
# Starting the game main menu
main_menu() | random.choice(["red", "blue", "green"]))
enemies.append(enemy)
| random_line_split |
store.ts | import { action, observable } from "mobx";
import {
reqIpTypeList,
upload,
getIpDetail,
getDownload,
delMaterial,
uploadBusinessData, listCompany, listMainType, listCountry, getCompanyType
} from "@utils/api";
interface IUpdateStatus {
pub: object,
sub: object,
showDate: object,
}
interface IUpdateState {
ipName: string, // IP名称
ipTypeSuperiorNumber: string, // IP分类 IP形象等一级类型guid
brokerageFirmGuid: string, // 下拉公司类型
ipDesc: string, // IP 简介
detail: string, // 图文详情
ipLocation: string, // 废弃
countryNames: string, // 国家名字
countryTypes: string, // 国家编号
ipTypeNumber: string, // IP类型 ip二级类型guid,
owner: string, // IP版权方
copyrightAgent: string, // ip版权代理方
recordCountry: string, // ip备案国
grantedType: string, // 已授权品类
authorizedType: string, // 可授权品类
intentAuthorization: string, // 意向授权品类
authorizedLocation: string, // 可授权区域
authorizedAllottedTime: string, // 可授权期限日期
isTransferable: Number, // 是否可以转授权
ipMaterialGuidList: string, // 商务资料
ipFormNumber: string,
ipPicGuid: string,
sex?: string,
height?: number,
prodect: Array<object>;
cooperationCase: Array<object>,
}
class CreateStore {
@observable
previousData: any = {};
// 记录新添加类别的状态
@observable typeListCase: object = {
selected: '',
clearditor: false,
};
@observable typeList: object[];
@observable typeListTop: object[];
@observable subTypeList: object[];
@observable locationList: object[];
@observable authorityZone: object[];
@observable modalityList: object[] = [];
// @observable updateList: {};
@observable updateList: IUpdateState = {
ipName: "",
ipTypeSuperiorNumber: '',
brokerageFirmGuid: '',
ipLocation: '1',
ipTypeNumber: '',
ipDesc: "",
detail: '',
ipFormNumber: '',
ipPicGuid: '', // 左侧背景图片
countryNames: '',
countryTypes: '',
owner: '', // IP版权方
copyrightAgent: '',
recordCountry: '',
grantedType: undefined, // 已授权品类
authorizedType: undefined, // 可授权品类
intentAuthorization: undefined, // 意向授权品类
authorizedLocation: undefined, // 可授权区域
authorizedAllottedTime: '', // 可授权期限日期
isTransferable: 0, // 是否可以转授权
ipMaterialGuidList: '', // 商务资料
prodect: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
cooperationCase: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
};
@observable businessList: [];
@observable companyData: [];
@observable brokerageFirmGuid: '';
@observable status: IUpdateStatus = {
pub: {
ipName: '',
ipTypeSuperiorNumber: '',
ipLocation: '',
ipTypeNumber: [],
ipTypeName: [], // IP类型 ip二级类型中文名
ipDesc: '',
ipFormNumber: [],
countryTypes: '',
ipPicGuid: ''
},
sub: {},
showDate: {},
};
// 切换IP分类时 仅限新增IP 清空参数值
clearSub() {
let _updateList: any = JSON.stringify(this.updateList);
// JSON.
_updateList = JSON.parse(_updateList);
delete _updateList.ipName;
delete _updateList.ipTypeSuperiorNumber;
delete _updateList.ipDesc;
for (let val in _updateList) {
if (_updateList.hasOwnProperty(val)) {
if (val === 'authorizedLocation' || val === 'authorizedType' || val === 'grantedType' || val === 'intentAuthorization') {
_updateList[val] = undefined;
} else if (val === 'prodect' || val === 'cooperationCase') {
_updateList[val] = [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
];
} else {
_updateList[val] = '';
}
}
}
this.updateList = { ...this.updateList, ..._updateList };
}
// 获取最新 IP 分类
@action
async getlistMainType() {
await this.getLocation();
await this.getAuthorityZone({ type: 9 });
const { errorCode, result }: any = await listMainType();
if (errorCode === "200") {
let typeList: object[] = [];
let _typeListTop: object[] = [];
result.forEach(element => {
let { childTypeList, mainTypeGuid, picUrl, typeName } = element;
childTypeList && childTypeList.forEach(val => {
val['mainTypeGuid'] = mainTypeGuid;
val['type'] = val.ipType;
typeList.push(val);
});
_typeListTop.push({ mainTypeGuid, picUrl, typeName });
});
this.typeList = typeList;
this.typeListTop = _typeListTop;
}
}
// 修改之前的 IP分类 (二级分类菜单)
@action
async ipTypeList() {
let { errorCode, result }: any = await reqIpTypeList();
if (errorCode === "200") {
let subTypeList: object[] = [];
let modalityList: object[] = [];
result.forEach((item: any) => {
let { ipTypeNumber, sublist } = item;
sublist.forEach((val: any) => {
let { ipType, sublist: sub } = val;
if (ipType === "类型") {
let subtype = { [ipTypeNumber]: sub };
subTypeList.push(subtype);
}
if (ipType === "形式") {
let modality = { [ipTypeNumber]: sub };
modalityList.push(modality);
}
});
});
this.subTypeList = subTypeList;
this.modalityList = modalityList;
}
}
// 设置修改 页面的三级 IP类型
async setchildType(pub, item, subTypeList, callback) {
let { ipTypeNumber } = pub;
ipTypeNumber = ipTypeNumber ? ipTypeNumber : [];
let count = false;
let index_ = 0;
ipTypeNumber.forEach((val, indx) => {
if (val === item.ipTypeNumber) {
index_ = indx;
count = true;
}
});
if (count) {
ipTypeNumber.splice(index_, 1);
} else {
ipTypeNumber.push(item.ipTypeNumber);
}
// 匹配中文名字
let ipTypeName = [];
ipTypeNumber.forEach(val => {
subTypeList.map((item: any) => {
if (val === item.ipTypeNumber) {
ipTypeName.push(item.ipType);
}
});
});
callback({ ...pub, ipTypeNumber, ipTypeName });
let _ipTypeNumber = ipTypeNumber.join(',');
let _ipTypeName = ipTypeName.join(',');
let reg = /,{1+}/g;
_ipTypeNumber.replace(reg, ",");
_ipTypeName.replace(reg, ",");
await this.setStatus({ ipTypeNumber: _ipTypeNumber, ipTypeName: _ipTypeName });
}
// 页面设置国家
async setContry(boole, item, locationList, pub, callback) {
function replaceStr(oldStr, childStr) {
let re = new RegExp(childStr, "g"); // 通过RegExp使用变量
return oldStr.replace(re, '');
}
let countryTypes = this.updateList.countryTypes;
if (boole) {
countryTypes = replaceStr(countryTypes, item.resourceKey);
} else {
countryTypes = countryTypes + ',' + item.resourceKey;
}
// 匹配中文名字
let contryName = [];
countryTypes.split(',').forEach(val => {
locationList.map((item: any) => {
if (val === item.resourceKey) {
contryName.push(item.resourceValue);
}
});
});
let countryNames = contryName.join('/');
callback({ ...pub, countryTypes, countryNames });
await this.setStatus({ countryTypes, countryNames });
}
/** | let _locationList: object[] = [];
if (errorCode === "200") {
result.forEach((item: any) => {
_locationList.push(item);
});
this.locationList = _locationList;
return _locationList;
}
}
/**
* 可授权区
* @param params
*/
@action
async getAuthorityZone(params) {
let { errorCode, result }: any = await getCompanyType(params);
let _authorityZone: object[] = [];
if (errorCode === "200") {
result.forEach((item: any) => {
_authorityZone.push(item);
});
this.authorityZone = _authorityZone;
return _authorityZone;
}
}
@action
async upload(params) {
let { errorCode }: any = await upload(params);
if (errorCode === 200) {
}
}
@action
async doRest() {
this.updateList = {
ipName: "",
ipTypeSuperiorNumber: '',
brokerageFirmGuid: '',
ipLocation: '1',
ipTypeNumber: '',
ipDesc: "",
detail: '',
ipFormNumber: '',
ipPicGuid: '',
countryNames: '',
countryTypes: '',
owner: '', // IP版权方
copyrightAgent: '',
recordCountry: '',
grantedType: undefined, // 已授权品类
authorizedType: undefined, // 可授权品类
intentAuthorization: undefined, // 意向授权品类
authorizedLocation: undefined, // 可授权区域
authorizedAllottedTime: '', // 可授权期限日期
isTransferable: 0, // 是否可以转授权
ipMaterialGuidList: '', // 商务资料
prodect: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
cooperationCase: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
};
}
@action
async getBasic(params: IUpdateStatus, param) {
await this.setStatus(params);
await this.getUpdateDetail(param);
}
// 获取编辑页的基本信息
@action
async getUpdateDetail(params) {
const { ipid, ipTypeNumber, userGuid }: { ipid: number, ipTypeNumber: number, userGuid: any } = params;
let { errorCode, result }: any = await getIpDetail({
ipid, ipTypeNumber, userGuid
});
if (errorCode === '200') {
if (result.errorCode === 200) {
for (let val in result.data) {
if (result.data.hasOwnProperty(val)) {
if (val === 'authorizedLocation' || val === 'authorizedType' || val === 'grantedType' || val === 'intentAuthorization') {
if (result.data[val] === '' || result.data[val] === undefined) result.data[val] = undefined;
}
}
}
this.updateList = result.data;
this.brokerageFirmGuid = result.data && result.data.brokerageFirmGuid;
return {
request: true,
};
} else {
return {
request: false,
message: result.errorMsg,
};
// alert(result.errorMsg)
}
}
}
@action
async setStatus(params) {
this.updateList = { ...this.updateList, ...params };
}
async setStatus2(params) {
this.updateList = { ...this.updateList, ...params };
}
// 招商资料列表
@action
async getDownload({ ipid }: { ipid: number }) {
const { errorCode, result }: any = await getDownload(ipid);
if (errorCode === "200") {
this.businessList = result;
}
}
/**
* 上传商务资料
* @param params
*/
@action
async getBusiness(params) {
const { errorCode, result }: any = await uploadBusinessData(params);
if (errorCode === '200' && result.errorCode === 200) {
} else if (result.errorCode < 0) {
return { message: result.message };
}
}
// 下载招商资料
// async downloadMaterial(params) {
// const { errorCode, result }: any = await getDownloadMaterial(params);
// if (errorCode === '200' && result.errorCode === 200) {
// } else if (result.errorCode < 0) {
// return { message: result.errorMsg };
// }
// }
/**
* 删除
* 刷新页面
* @param params
*/
@action
async deleteMaterial(params) {
const { errorCode, result }: any = await delMaterial(params);
if (errorCode === '200' && result.errorCode === 200) {
} else if (result.errorCode < 0) {
return { message: result.errorMsg };
}
}
/**
* 经济公司 Ip版权代理方 版权方
* @param dataURI
*/
@action
async companyList({ companyName, currentPage, pageSize, companyType }) {
const { errorCode, result }: any = await listCompany({ companyName, currentPage, pageSize, companyType });
if (errorCode === '200' && companyName !== "") {
this.companyData = result;
return result;
} else {
this.companyData = [];
}
}
// 清空
@action
async setCompanyNull() {
this.companyData = [];
}
// base64 转二进制文件
@action
async dataURItoBlob(dataURI: any) {
let byteString = atob(dataURI.split(',')[1]);
let mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
let ab = new ArrayBuffer(byteString.length);
let ia = new Uint8Array(ab);
for (let i = 0; i < byteString.length; i++) {
ia[i] = byteString.charCodeAt(i);
}
return new Blob([ab], { type: mimeString });
}
}
export default new CreateStore(); | * 国家地区
*/
@action
async getLocation() {
let { errorCode, result }: any = await listCountry(); | random_line_split |
store.ts | import { action, observable } from "mobx";
import {
reqIpTypeList,
upload,
getIpDetail,
getDownload,
delMaterial,
uploadBusinessData, listCompany, listMainType, listCountry, getCompanyType
} from "@utils/api";
interface IUpdateStatus {
pub: object,
sub: object,
showDate: object,
}
interface IUpdateState {
ipName: string, // IP名称
ipTypeSuperiorNumber: string, // IP分类 IP形象等一级类型guid
brokerageFirmGuid: string, // 下拉公司类型
ipDesc: string, // IP 简介
detail: string, // 图文详情
ipLocation: string, // 废弃
countryNames: string, // 国家名字
countryTypes: string, // 国家编号
ipTypeNumber: string, // IP类型 ip二级类型guid,
owner: string, // IP版权方
copyrightAgent: string, // ip版权代理方
recordCountry: string, // ip备案国
grantedType: string, // 已授权品类
authorizedType: string, // 可授权品类
intentAuthorization: string, // 意向授权品类
authorizedLocation: string, // 可授权区域
authorizedAllottedTime: string, // 可授权期限日期
isTransferable: Number, // 是否可以转授权
ipMaterialGuidList: string, // 商务资料
ipFormNumber: string,
ipPicGuid: string,
sex?: string,
height?: number,
prodect: Array<object>;
cooperationCase: Array<object>,
}
class CreateStore {
@observable
previousData: any = {};
// 记录新添加类别的状态
@observable typeListCase: object = {
selected: '',
clearditor: false,
};
@observable typeList: object[];
@observable typeListTop: object[];
@observable subTypeList: object[];
@observable locationList: object[];
@observable authorityZone: object[];
@observable modalityList: object[] = [];
// @observable updateList: {};
@observable updateList: IUpdateState = {
ipName: "",
ipTypeSuperiorNumber: '',
brokerageFirmGuid: '',
ipLocation: '1',
ipTypeNumber: '',
ipDesc: "",
detail: '',
ipFormNumber: '',
ipPicGuid: '', // 左侧背景图片
countryNames: '',
countryTypes: '',
owner: '', // IP版权方
copyrightAgent: '',
recordCountry: '',
grantedType: undefined, // 已授权品类
authorizedType: undefined, // 可授权品类
intentAuthorization: undefined, // 意向授权品类
authorizedLocation: undefined, // 可授权区域
authorizedAllottedTime: '', // 可授权期限日期
isTransferable: 0, // 是否可以转授权
ipMaterialGuidList: '', // 商务资料
prodect: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
cooperationCase: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
};
@observable businessList: [];
@observable companyData: [];
@observable brokerageFirmGuid: '';
@observable status: IUpdateStatus = {
pub: {
ipName: '',
ipTypeSuperiorNumber: '',
ipLocation: '',
ipTypeNumber: [],
ipTypeName: [], // IP类型 ip二级类型中文名
ipDesc: '',
ipFormNumber: [],
countryTypes: '',
ipPicGuid: ''
},
sub: {},
showDate: {},
};
// 切换IP分类时 仅限新增IP 清空参数值
clearSub() {
let _updateList: any = JSON.stringify(this.updateList);
// JSON.
_updateList = JSON.parse(_updateList);
delete _updateList.ipName;
delete _updateList.ipTypeSuperiorNumber;
delete _updateList.ipDesc;
for (let val in _updateList) {
if (_updateList.hasOwnProperty(val)) {
if (val === 'authorizedLocation' || val === 'authorizedType' || val === 'grantedType' || val === 'intentAuthorization') {
_updateList[val] = undefined;
} else if (val === 'prodect' || val === 'cooperationCase') {
_updateList[val] = [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
];
} else {
_updateList[val] = '';
}
}
}
this.updateList = { ...this.updateList, ..._updateList };
}
// 获取最新 IP 分类
@action
async getlistMainType() {
await this.getLocation();
await this.getAuthorityZone({ type: 9 });
const { errorCode, result }: any = await listMainType();
if (errorCode === "200") {
let typeList: object[] = [];
let _typeListTop: object[] = [];
result.forEach(element => {
let { childTypeList, mainTypeGuid, picUrl, typeName } = element;
childTypeList && childTypeList.forEach(val => {
val['mainTypeGuid'] = mainTypeGuid;
val['type'] = val.ipType;
typeList.push(val);
});
_typeListTop.push({ mainTypeGuid, picUrl, typeName });
});
this.typeList = typeList;
this.typeListTop = _typeListTop;
}
}
// 修改之前的 IP分类 (二级分类菜单)
@action
async ipTypeList() {
let { errorCode, result }: any = await reqIpTypeList();
if (errorCode === "200") {
let subTypeList: object[] = [];
let modalityList: object[] = [];
result.forEach((item: any) => {
let { ipTypeNumber, sublist } = item;
sublist.forEach((val: any) => {
let { ipType, sublist: sub } = val;
if (ipType === "类型") {
let subtype = { [ipTypeNumber]: sub };
subTypeList.push(subtype);
}
if (ipType === "形式") {
let modality = { [ipTypeNumber]: sub };
modalityList.push(modality);
}
});
});
this.subTypeList = subTypeList;
this.modalityList = modalityList;
}
}
// 设置修改 页面的三级 IP类型
async setchildType(pub, item, subTypeList, callback) {
let { ipTypeNumber } = pub;
ipTypeNumber = ipTypeNumber ? ipTypeNumber : [];
let count = false;
let index_ = 0;
ipTypeNumber.forEach((val, indx) => {
if (val === item.ipTypeNumber) {
index_ = indx;
count = true;
}
});
if (count) {
ipTypeNumber.splice(index_, 1);
} else {
ipTypeNumber.push(item.ipTypeNumber);
}
// 匹配中文名字
let ipTypeName = [];
ipTypeNumber.forEach(val => {
subTypeList.map((item: any) => {
if (val === item.ipTypeNumber) {
ipTypeName.push(item.ipType);
}
});
});
callback({ ...pub, ipTypeNumber, ipTypeName });
let _ipTypeNumber = ipTypeNumber.join(',');
let _ipTypeName = ipTypeName.join(',');
let reg = /,{1+}/g;
_ipTypeNumber.replace(reg, ",");
_ipTypeName.replace(reg, ",");
await this.setStatus({ ipTypeNumber: _ipTypeNumber, ipTypeName: _ipTypeName });
}
// 页面设置国家
async setContry(boole, item, locationList, pub, callback) {
function replaceStr(oldStr, childStr) {
let re = new RegExp(childStr, "g"); // 通过RegExp使用变量
return oldStr.replace(re, '');
}
let countryTypes = this.updateList.countryTypes;
if (boole) {
countryTypes = replaceStr(countryTypes, item.resourceKey);
} else {
countryTypes = countryTypes + ',' + item.resourceKey;
}
// 匹配中文名字
let contryName = [];
countryTypes.split(',').forEach(val => {
locationList.map((item: any) => {
if (val === item.resourceKey) {
contryName.push(item.resourceValue);
}
});
});
let countryNames = contryName.join('/');
callback({ ...pub, countryTypes, countryNames });
await this.setStatus({ countryTypes, countryNames });
}
/**
* 国家地区
*/
@action
async getLocation() {
let { errorCode, result }: any = await listCountry();
let _locationList: object[] = [];
if (errorCode === "200") {
result.forEach((item: any) => {
_locationList.push(item);
});
this.locationList = _locationList;
return _locationList;
}
}
/**
* 可授权区
* @param params
*/
@action
async getAuthorityZone(params) {
let { errorCode, result }: any = await getCompanyType(params);
let _authorityZone: object[] = [];
if (errorCode === "200") {
result.forEach((item: any) => {
_authorityZone.push(item);
});
this.authorityZone = _authorityZone;
return _authorityZone;
}
}
@action
async upload(params) {
let { errorCode }: any = await upload(params);
if (errorCode === 200) {
}
}
@action
async doRest() {
this.updateList = {
ipName: "",
ipTypeSuperiorNumber: '',
brokerageFirmGuid: '',
ipLocation: '1',
ipTypeNumber: '',
ipDesc: "",
detail: '',
ipFormNumber: '',
ipPicGuid: '',
countryNames: '',
countryTypes: '',
owner: '', // IP版权方
copyrightAgent: '',
recordCountry: '',
grantedType: undefined, // 已授权品类
authorizedType: undefined, // 可授权品类
intentAuthorization: undefined, // 意向授权品类
authorizedLocation: undefined, // 可授权区域
authorizedAllottedTime: '', // 可授权期限日期
isTransferable: 0, // 是否可以转授权
ipMaterialGuidList: '', // 商务资料
prodect: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
cooperationCase: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
};
}
@action
async getBasic(params: IUpdateStatus, param) {
await this.setStatus(params);
await this.getUpdateDetail(param);
}
// 获取编辑页的基本信息
@action
async getUpdateDetail(params) {
const { ipid, ipTypeNumber, userGuid }: { ipid: number, ipTypeNumber: number, userGuid: any } = params;
let { errorCode, result }: any = await getIpDetail({
ipid, ipTypeNumber, userGuid
});
if (errorCode === '200') {
if (result.errorCode === 200) {
for (let val in result.data) {
if (result.data.hasOwnProperty(val)) {
if (val === 'authorizedLocation' || val === 'authorizedType' || val === 'grantedType' || val === 'intentAuthorization') {
if (result.data[val] === '' || result.data[val] === undefined) result.data[val] = undefined;
}
}
}
this.updateList = result.data;
this.brokerageFirmGuid = result.data && result.data.brokerageFirmGuid;
return {
request: true,
};
| rCode === '200' && result.errorCode === 200) {
} else if (result.errorCode < 0) {
return { message: result.message };
}
}
// 下载招商资料
// async downloadMaterial(params) {
// const { errorCode, result }: any = await getDownloadMaterial(params);
// if (errorCode === '200' && result.errorCode === 200) {
// } else if (result.errorCode < 0) {
// return { message: result.errorMsg };
// }
// }
/**
* 删除
* 刷新页面
* @param params
*/
@action
async deleteMaterial(params) {
const { errorCode, result }: any = await delMaterial(params);
if (errorCode === '200' && result.errorCode === 200) {
} else if (result.errorCode < 0) {
return { message: result.errorMsg };
}
}
/**
* 经济公司 Ip版权代理方 版权方
* @param dataURI
*/
@action
async companyList({ companyName, currentPage, pageSize, companyType }) {
const { errorCode, result }: any = await listCompany({ companyName, currentPage, pageSize, companyType });
if (errorCode === '200' && companyName !== "") {
this.companyData = result;
return result;
} else {
this.companyData = [];
}
}
// 清空
@action
async setCompanyNull() {
this.companyData = [];
}
// base64 转二进制文件
@action
async dataURItoBlob(dataURI: any) {
let byteString = atob(dataURI.split(',')[1]);
let mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
let ab = new ArrayBuffer(byteString.length);
let ia = new Uint8Array(ab);
for (let i = 0; i < byteString.length; i++) {
ia[i] = byteString.charCodeAt(i);
}
return new Blob([ab], { type: mimeString });
}
}
export default new CreateStore();
| } else {
return {
request: false,
message: result.errorMsg,
};
// alert(result.errorMsg)
}
}
}
@action
async setStatus(params) {
this.updateList = { ...this.updateList, ...params };
}
async setStatus2(params) {
this.updateList = { ...this.updateList, ...params };
}
// 招商资料列表
@action
async getDownload({ ipid }: { ipid: number }) {
const { errorCode, result }: any = await getDownload(ipid);
if (errorCode === "200") {
this.businessList = result;
}
}
/**
* 上传商务资料
* @param params
*/
@action
async getBusiness(params) {
const { errorCode, result }: any = await uploadBusinessData(params);
if (erro | conditional_block |
store.ts | import { action, observable } from "mobx";
import {
reqIpTypeList,
upload,
getIpDetail,
getDownload,
delMaterial,
uploadBusinessData, listCompany, listMainType, listCountry, getCompanyType
} from "@utils/api";
interface IUpdateStatus {
pub: object,
sub: object,
showDate: object,
}
interface IUpdateState {
ipName: string, // IP名称
ipTypeSuperiorNumber: string, // IP分类 IP形象等一级类型guid
brokerageFirmGuid: string, // 下拉公司类型
ipDesc: string, // IP 简介
detail: string, // 图文详情
ipLocation: string, // 废弃
countryNames: string, // 国家名字
countryTypes: string, // 国家编号
ipTypeNumber: string, // IP类型 ip二级类型guid,
owner: string, // IP版权方
copyrightAgent: string, // ip版权代理方
recordCountry: string, // ip备案国
grantedType: string, // 已授权品类
authorizedType: string, // 可授权品类
intentAuthorization: string, // 意向授权品类
authorizedLocation: string, // 可授权区域
authorizedAllottedTime: string, // 可授权期限日期
isTransferable: Number, // 是否可以转授权
ipMaterialGuidList: string, // 商务资料
ipFormNumber: string,
ipPicGuid: string,
sex?: string,
height?: number,
prodect: Array<object>;
cooperationCase: Array<object>,
}
class CreateStore {
@observable
previousData: any = {};
// 记录新添加类别的状态
@observable typeListCase: object = {
selected: '',
clearditor: false,
};
@observable typeList: object[];
@observable typeListTop: object[];
@observable subTypeList: object[];
@observable locationList: object[];
@observable authorityZone: object[];
@observable modalityList: object[] = [];
// @observable updateList: {};
@observable updateList: IUpdateState = {
ipName: "",
ipTypeSuperiorNumber: '',
brokerageFirmGuid: '',
ipLocation: '1',
ipTypeNumber: '',
ipDesc: "",
detail: '',
ipFormNumber: '',
ipPicGuid: '', // 左侧背景图片
countryNames: '',
countryTypes: '',
owner: '', // IP版权方
copyrightAgent: '',
recordCountry: '',
grantedType: undefined, // 已授权品类
authorizedType: undefined, // 可授权品类
intentAuthorization: undefined, // 意向授权品类
authorizedLocation: undefined, // 可授权区域
authorizedAllottedTime: '', // 可授权期限日期
isTransferable: 0, // 是否可以转授权
ipMaterialGuidList: '', // 商务资料
prodect: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
cooperationCase: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
};
@observable businessList: [];
@observable companyData: [];
@observable brokerageFirmGuid: '';
@observable status: IUpdateStatus = {
pub: {
ipName: '',
ipTypeSuperiorNumber: '',
ipLocation: '',
ipTypeNumber: [],
ipTypeName: [], // IP类型 ip二级类型中文名
ipDesc: '',
ipFormNumber: [],
countryTypes: '',
ipPicGuid: ''
},
sub: {},
showDate: {},
};
// 切换IP分类时 仅限新增IP 清空参数值
clearSub() {
let _updateList: any = JSON.stringify(this.updateList);
// JSON.
_updateList = JSON.parse(_updateList);
delete _updateList.ipName;
delete _updateList.ipTypeSuperiorNumber;
delete _updateList.ipDesc;
for (let val in _updateList) {
if (_updateList.hasOwnProperty(val)) {
if (val === 'authorizedLocation' || val === 'authorizedType' || val === 'grantedType' || val === 'intentAuthorization') {
_updateList[val] = undefined;
} else if (val === 'prodect' || val === 'cooperationCase') {
_updateList[val] = [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
];
} else {
_updateList[val] = '';
}
}
}
this.updateList = { ...this.updateList, ..._updateList };
}
// 获取最新 IP 分类
@action
async getlistMainType() {
await this.getLocation();
await this.getAuthorityZone({ type: 9 });
const { errorCode, result }: any = await listMainType();
if (errorCode === "200") {
let typeList: object[] = [];
let _typeListTop: object[] = [];
result.forEach(element => {
let { childTypeList, mainTypeGuid, picUrl, typeName } = element;
childTypeList && childTypeList.forEach(val => {
val['mainTypeGuid'] = mainTypeGuid;
val['type'] = val.ipType;
typeList.push(val);
});
_typeListTop.push({ mainTypeGuid, picUrl, typeName });
});
this.typeList = typeList;
this.typeListTop = _typeListTop;
}
}
// 修改之前的 IP分类 (二级分类菜单)
@action
async ipTypeList() {
let { errorCode, result }: any = await reqIpTypeList();
if (errorCode === "200") {
let subTypeList: object[] = [];
let modalityList: object[] = [];
result.forEach((item: any) => {
let { ipTypeNumber, sublist } = item;
sublist.forEach((val: any) => {
let { ipType, sublist: sub } = val;
if (ipType === "类型") {
let subtype = { [ipTypeNumber]: sub };
subTypeList.push(subtype);
}
if (ipType === "形式") {
let modality = { [ipTypeNumber]: sub };
modalityList.push(modality);
}
});
});
this.subTypeList = subTypeList;
this.modalityList = modalityList;
}
}
// 设置修改 页面的三级 IP类型
async setchildType(pub, item, subTypeList, callback) {
let { ipTypeNumber } = pub;
ipTypeNumber = ipTypeNumber ? ipTypeNumber : [];
let count = false;
let index_ = 0;
ipTypeNumber.forEach((val, indx) => {
if (val === item.ipTypeNumber) {
index_ = indx;
count = true;
}
});
if (count) {
ipTypeNumber.splice(index_, 1);
} else {
ipTypeNumber.push(item.ipTypeNumber);
}
// 匹配中文名字
let ipTypeName = [];
ipTypeNumber.forEach(val => {
subTypeList.map((item: any) => {
if (val === item.ipTypeNumber) {
ipTypeName.push(item.ipType);
}
});
});
callback({ ...pub, ipTypeNumber, ipTypeName });
let _ipTypeNumber = ipTypeNumber.join(',');
let _ipTypeName = ipTypeName.join(',');
let reg = /,{1+}/g;
_ipTypeNumber.replace(reg, ",");
_ipTypeName.replace(reg, ",");
await this.setStatus({ ipTypeNumber: _ipTypeNumber, ipTypeName: _ipTypeName });
}
// 页面设置国家
async setContry(boole, item, locationList, pub, callback) {
function replaceStr(oldStr, childStr) {
let re = new RegExp(childStr, "g"); // 通过RegExp使用变量
return oldStr.replace(re, '');
}
let countryTypes = this.updateList.countryTypes;
if (boole) {
countryTypes = replaceStr(countryTypes, item.resourceKey);
} else {
countryTypes = countryTypes + ',' + item.resourceKey;
}
// 匹配中文名字
let contryName = [];
countryTypes.split(',').forEach(val => {
locationList.map((item: any) => {
if (val === item.resourceKey) {
contryName.push(item.resourceValue);
}
});
});
let countryNames = contryName.join('/');
callback({ ...pub, countryTypes, countryNames });
await this.setStatus({ countryTypes, countryNames });
}
/**
* 国家地区
*/
@action
async getLocation() {
let { errorCode, result }: any = await listCountry();
let _locationList: object[] = [];
if (errorCode === "200") {
result.forEach((item: any) => {
_locationList.push(item);
});
this.locationList = _locationList;
return _locationList;
}
}
/**
* 可授权区
* @param params
*/
@action
async getAuthorityZone(params) {
let { errorCode, result }: any = await getCompanyType(params);
let _authorityZone: object[] = [];
if (errorCode === "200") {
result.forEach((item: any) => {
_authorityZone.push(item);
});
this.authorityZone = _authorityZone;
return _authorityZone;
}
}
@action
async upload(params) {
let { errorCode }: any = await upload(params);
if (errorCode === 200) {
}
}
@action
async doRest() {
this.updateList = {
ipName: "",
ipTypeSuperiorNumber: '',
brokerageFirmGuid: '',
ipLocation: '1',
ipTypeNumber: '',
ipDesc: "",
detail: '',
ipFormNumber: '',
ipPicGuid: '',
countryNames: '',
countryTypes: '',
owner: '', // IP版权方
copyrightAgent: '',
recordCountry: '',
grantedType: undefined, // 已授权品类
authorizedType: undefined, // 可授权品类
intentAuthorization: undefined, // 意向授权品类
authorizedLocation: undefined, // 可授权区域
authorizedAllottedTime: '', // 可授权期限日期
isTransferable: 0, // 是否可以转授权
ipMaterialGuidList: '', // 商务资料
prodect: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
cooperationCase: [
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
{ pic: '', title: '' },
],
};
}
@action
async getBasic(params: IUpdateStatus, param) {
await this.setStatus(params);
await this.getUpdateDetail(param);
}
// 获取编辑页的基本信息
@action
async getUpdateDetail(params) {
const { ipid, ipTypeNumber, userGuid }: { ipid: number, ipTypeNumber: number, userGuid: any } = params;
let { errorCode, result }: any = await getIpDetail({
ipid, ipTypeNumber, userGuid
});
if (errorCode === '200') {
if (result.errorCode === 200) {
for (let val in result.data) {
if (result.data.hasOwnProperty(val)) {
if (val === 'authorizedLocation' || val === 'authorizedType' || val === 'grantedType' || val === 'intentAuthorization') {
if (result.data[val] === '' || result.d | defined) result.data[val] = undefined;
}
}
}
this.updateList = result.data;
this.brokerageFirmGuid = result.data && result.data.brokerageFirmGuid;
return {
request: true,
};
} else {
return {
request: false,
message: result.errorMsg,
};
// alert(result.errorMsg)
}
}
}
@action
async setStatus(params) {
this.updateList = { ...this.updateList, ...params };
}
async setStatus2(params) {
this.updateList = { ...this.updateList, ...params };
}
// 招商资料列表
@action
async getDownload({ ipid }: { ipid: number }) {
const { errorCode, result }: any = await getDownload(ipid);
if (errorCode === "200") {
this.businessList = result;
}
}
/**
* 上传商务资料
* @param params
*/
@action
async getBusiness(params) {
const { errorCode, result }: any = await uploadBusinessData(params);
if (errorCode === '200' && result.errorCode === 200) {
} else if (result.errorCode < 0) {
return { message: result.message };
}
}
// 下载招商资料
// async downloadMaterial(params) {
// const { errorCode, result }: any = await getDownloadMaterial(params);
// if (errorCode === '200' && result.errorCode === 200) {
// } else if (result.errorCode < 0) {
// return { message: result.errorMsg };
// }
// }
/**
* 删除
* 刷新页面
* @param params
*/
@action
async deleteMaterial(params) {
const { errorCode, result }: any = await delMaterial(params);
if (errorCode === '200' && result.errorCode === 200) {
} else if (result.errorCode < 0) {
return { message: result.errorMsg };
}
}
/**
* 经济公司 Ip版权代理方 版权方
* @param dataURI
*/
@action
async companyList({ companyName, currentPage, pageSize, companyType }) {
const { errorCode, result }: any = await listCompany({ companyName, currentPage, pageSize, companyType });
if (errorCode === '200' && companyName !== "") {
this.companyData = result;
return result;
} else {
this.companyData = [];
}
}
// 清空
@action
async setCompanyNull() {
this.companyData = [];
}
// base64 转二进制文件
@action
async dataURItoBlob(dataURI: any) {
let byteString = atob(dataURI.split(',')[1]);
let mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
let ab = new ArrayBuffer(byteString.length);
let ia = new Uint8Array(ab);
for (let i = 0; i < byteString.length; i++) {
ia[i] = byteString.charCodeAt(i);
}
return new Blob([ab], { type: mimeString });
}
}
export default new CreateStore();
| ata[val] === un | identifier_name |
unwind.py | # Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import re
import os
import struct
import datetime
import array
import string
import bisect
import traceback
from subprocess import *
from optparse import OptionParser
from optparse import OptionGroup
from struct import unpack
from ctypes import *
from print_out import *
FP = 11
SP = 13
LR = 14
PC = 15
THREAD_SIZE = 8192
class Stackframe () :
def __init__(self, fp, sp, lr, pc) :
self.fp = fp
self.sp = sp
self.lr = lr
self.pc = pc
class UnwindCtrlBlock () :
def __init__ (self) :
self.vrs = 16*[0]
self.insn = 0
self.entries = -1
self.byte = -1
self.index = 0
class Unwinder () :
def __init__(self, ramdump) :
start = ramdump.addr_lookup("__start_unwind_idx")
end = ramdump.addr_lookup("__stop_unwind_idx")
if (start is None) or (end is None) :
print_out_str ("!!! Could not lookup unwinding information")
return None
# addresses
self.start_idx = start
self.stop_idx = end
self.unwind_table = []
self.ramdump = ramdump
i = 0
for addr in range(start,end,8) :
(a,b) = ramdump.read_string(addr,"<II")
self.unwind_table.append((a,b,start+8*i))
i+=1
ver = ramdump.version
if re.search('3.0.\d',ver) is not None :
self.search_idx = self.search_idx_3_0
else :
self.search_idx = self.search_idx_3_4
# index into the table
self.origin = self.unwind_find_origin()
def unwind_find_origin(self) :
start = 0
stop = len(self.unwind_table)
while (start < stop) :
mid = start + ((stop - start) >> 1)
if (self.unwind_table[mid][0] >= 0x40000000) :
start = mid + 1
else :
stop = mid
return stop
def unwind_frame_generic(self, frame) :
high = 0
fp = frame.fp
low = frame.sp
mask = (THREAD_SIZE) - 1
high = (low + mask) & (~mask) #ALIGN(low, THREAD_SIZE)
# /* check current frame pointer is within bounds */
if (fp < (low + 12) or fp + 4 >= high) :
return -1
fp_is_at = self.ramdump.read_word(frame.fp-12)
sp_is_at = self.ramdump.read_word(frame.fp-8)
pc_is_at = self.ramdump.read_word(frame.fp-4)
frame.fp = fp_is_at
frame.sp = sp_is_at
frame.pc = pc_is_at
return 0
def walk_stackframe_generic(self, frame) :
while True :
symname = self.ramdump.addr_to_symbol(frame.pc)
print_out_str (symname)
ret = self.unwind_frame_generic(frame)
if ret < 0 :
break
def unwind_backtrace_generic(self, sp, fp, pc) :
frame = Stackframe()
frame.fp = fp
frame.pc = pc
frame.sp = sp
walk_stackframe_generic(frame)
def search_idx_3_4(self, addr) :
start = 0
stop = len(self.unwind_table)
orig = addr
if (addr < self.start_idx) :
stop = self.origin
else :
start = self.origin
addr = (addr - self.unwind_table[start][2]) & 0x7fffffff
while (start < (stop - 1)) :
mid = start + ((stop - start) >> 1)
dif = (self.unwind_table[mid][2] - self.unwind_table[start][2])
if ((addr - dif) < self.unwind_table[mid][0]) :
stop = mid
else :
addr = addr - dif
start = mid
if self.unwind_table[start][0] <= addr :
return self.unwind_table[start]
else :
return None
def search_idx_3_0(self, addr) :
first = 0
last = len(self.unwind_table)
while (first < last - 1) :
mid = first + ((last - first + 1) >> 1)
if (addr < self.unwind_table[mid][0]) :
last = mid
else :
first = mid
return self.unwind_table[first]
def unwind_get_byte(self, ctrl) :
if (ctrl.entries <= 0) :
print_out_str("unwind: Corrupt unwind table")
return 0
val = self.ramdump.read_word(ctrl.insn)
ret = (val >> (ctrl.byte * 8)) & 0xff
if (ctrl.byte == 0) :
ctrl.insn+=4
ctrl.entries-=1
ctrl.byte = 3
else :
ctrl.byte-=1
return ret
def unwind_exec_insn(self, ctrl, trace = False) :
insn = self.unwind_get_byte(ctrl)
if ((insn & 0xc0) == 0x00) :
ctrl.vrs[SP] += ((insn & 0x3f) << 2) + 4
if trace :
print_out_str (" add {0} to stack".format(((insn & 0x3f) << 2) + 4))
elif ((insn & 0xc0) == 0x40) :
ctrl.vrs[SP] -= ((insn & 0x3f) << 2) + 4
if trace :
print_out_str (" subtract {0} from stack".format(((insn & 0x3f) << 2) + 4))
elif ((insn & 0xf0) == 0x80) :
vsp = ctrl.vrs[SP]
reg = 4
insn = (insn << 8) | self.unwind_get_byte(ctrl)
mask = insn & 0x0fff
if (mask == 0) :
print_out_str ("unwind: 'Refuse to unwind' instruction")
return -1
# pop R4-R15 according to mask */
load_sp = mask & (1 << (13 - 4))
while (mask) :
if (mask & 1) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
mask >>= 1
reg+=1
if not load_sp :
ctrl.vrs[SP] = vsp
elif ((insn & 0xf0) == 0x90 and (insn & 0x0d) != 0x0d) :
if trace :
print_out_str (" set SP with the value from {0}".format(insn & 0x0f))
ctrl.vrs[SP] = ctrl.vrs[insn & 0x0f]
elif ((insn & 0xf0) == 0xa0) :
vsp = ctrl.vrs[SP]
a = list(range(4,4 + (insn & 7)))
a.append(4 + (insn & 7))
# pop R4-R[4+bbb] */
for reg in (a) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
if (insn & 0x80) :
if trace :
print_out_str (" set LR from the stack")
ctrl.vrs[14] = self.ramdump.read_word(vsp)
if ctrl.vrs[14] is None :
return -1
vsp+=4
ctrl.vrs[SP] = vsp
elif (insn == 0xb0) :
if trace :
print_out_str (" set pc = lr")
if (ctrl.vrs[PC] == 0) :
ctrl.vrs[PC] = ctrl.vrs[LR]
ctrl.entries = 0
elif (insn == 0xb1) :
mask = self.unwind_get_byte(ctrl)
vsp = ctrl.vrs[SP]
reg = 0
if (mask == 0 or mask & 0xf0) :
print_out_str ("unwind: Spare encoding")
return -1
# pop R0-R3 according to mask
while mask :
if (mask & 1) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
mask >>= 1
reg+=1
ctrl.vrs[SP] = vsp
elif (insn == 0xb2) :
uleb128 = self.unwind_get_byte(ctrl)
if trace :
print_out_str (" Adjust sp by {0}".format(0x204 + (uleb128 << 2)))
ctrl.vrs[SP] += 0x204 + (uleb128 << 2)
else :
print_out_str ("unwind: Unhandled instruction")
return -1
return 0
def prel31_to_addr(self, addr) :
value = self.ramdump.read_word(addr)
# offset = (value << 1) >> 1
# C wants this sign extended. Python doesn't do that.
# Sign extend manually.
if (value & 0x40000000) :
offset = value | 0x80000000
else :
offset = value
# This addition relies on integer overflow
# Emulate this behavior
temp = addr + offset
return (temp & 0xffffffff) + ((temp >> 32) & 0xffffffff)
def | (self, frame, trace = False) :
low = frame.sp
high = ((low + (THREAD_SIZE - 1)) & ~(THREAD_SIZE - 1)) + THREAD_SIZE
idx = self.search_idx(frame.pc)
if (idx is None) :
if trace :
print_out_str ("can't find %x" % frame.pc)
return -1
ctrl = UnwindCtrlBlock()
ctrl.vrs[FP] = frame.fp
ctrl.vrs[SP] = frame.sp
ctrl.vrs[LR] = frame.lr
ctrl.vrs[PC] = 0
if (idx[1] == 1) :
return -1
elif ((idx[1] & 0x80000000) == 0) :
ctrl.insn = self.prel31_to_addr(idx[2]+4)
elif (idx[1] & 0xff000000) == 0x80000000 :
ctrl.insn = idx[2]+4
else :
print_out_str ("not supported")
return -1
val = self.ramdump.read_word(ctrl.insn)
if ((val & 0xff000000) == 0x80000000) :
ctrl.byte = 2
ctrl.entries = 1
elif ((val & 0xff000000) == 0x81000000) :
ctrl.byte = 1
ctrl.entries = 1 + ((val & 0x00ff0000) >> 16)
else :
return -1
while (ctrl.entries > 0) :
urc = self.unwind_exec_insn(ctrl, trace)
if (urc < 0) :
return urc
if (ctrl.vrs[SP] < low or ctrl.vrs[SP] >= high) :
return -1
if (ctrl.vrs[PC] == 0) :
ctrl.vrs[PC] = ctrl.vrs[LR]
# check for infinite loop */
if (frame.pc == ctrl.vrs[PC]) :
return -1
frame.fp = ctrl.vrs[FP]
frame.sp = ctrl.vrs[SP]
frame.lr = ctrl.vrs[LR]
frame.pc = ctrl.vrs[PC]
return 0
def unwind_backtrace(self, sp, fp, pc, lr, extra_str = "", out_file = None, trace = False) :
offset = 0
frame = Stackframe(fp, sp, lr, pc)
frame.fp = fp
frame.sp = sp
frame.lr = lr
frame.pc = pc
while True :
where = frame.pc
offset = 0
r = self.ramdump.unwind_lookup(frame.pc)
if r is None :
symname = "UNKNOWN"
offset = 0x0
else :
symname, offset = r
pstring = (extra_str+"[<{0:x}>] {1}+0x{2:x}".format(frame.pc, symname, offset))
if out_file :
out_file.write (pstring+"\n")
else :
print_out_str (pstring)
urc = self.unwind_frame(frame, trace)
if urc < 0 :
break
| unwind_frame | identifier_name |
unwind.py | # Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import re
import os
import struct
import datetime
import array
import string
import bisect
import traceback
from subprocess import *
from optparse import OptionParser
from optparse import OptionGroup
from struct import unpack
from ctypes import *
from print_out import *
FP = 11
SP = 13
LR = 14
PC = 15
THREAD_SIZE = 8192
class Stackframe () :
def __init__(self, fp, sp, lr, pc) :
self.fp = fp
self.sp = sp
self.lr = lr
self.pc = pc
class UnwindCtrlBlock () :
def __init__ (self) :
self.vrs = 16*[0]
self.insn = 0
self.entries = -1
self.byte = -1
self.index = 0
class Unwinder () :
def __init__(self, ramdump) :
start = ramdump.addr_lookup("__start_unwind_idx")
end = ramdump.addr_lookup("__stop_unwind_idx")
if (start is None) or (end is None) :
print_out_str ("!!! Could not lookup unwinding information")
return None
# addresses
self.start_idx = start
self.stop_idx = end
self.unwind_table = []
self.ramdump = ramdump
i = 0
for addr in range(start,end,8) :
(a,b) = ramdump.read_string(addr,"<II")
self.unwind_table.append((a,b,start+8*i))
i+=1
ver = ramdump.version
if re.search('3.0.\d',ver) is not None :
self.search_idx = self.search_idx_3_0
else :
self.search_idx = self.search_idx_3_4
# index into the table
self.origin = self.unwind_find_origin()
def unwind_find_origin(self) :
start = 0
stop = len(self.unwind_table)
while (start < stop) :
mid = start + ((stop - start) >> 1)
if (self.unwind_table[mid][0] >= 0x40000000) :
start = mid + 1
else :
stop = mid
return stop
def unwind_frame_generic(self, frame) :
high = 0
fp = frame.fp
low = frame.sp
mask = (THREAD_SIZE) - 1
high = (low + mask) & (~mask) #ALIGN(low, THREAD_SIZE)
# /* check current frame pointer is within bounds */
if (fp < (low + 12) or fp + 4 >= high) :
return -1
fp_is_at = self.ramdump.read_word(frame.fp-12)
sp_is_at = self.ramdump.read_word(frame.fp-8)
pc_is_at = self.ramdump.read_word(frame.fp-4)
frame.fp = fp_is_at
frame.sp = sp_is_at
frame.pc = pc_is_at
return 0
def walk_stackframe_generic(self, frame) :
while True :
symname = self.ramdump.addr_to_symbol(frame.pc)
print_out_str (symname)
ret = self.unwind_frame_generic(frame)
if ret < 0 :
break
def unwind_backtrace_generic(self, sp, fp, pc) :
frame = Stackframe()
frame.fp = fp
frame.pc = pc
frame.sp = sp
walk_stackframe_generic(frame)
def search_idx_3_4(self, addr) :
start = 0
stop = len(self.unwind_table)
orig = addr
if (addr < self.start_idx) :
stop = self.origin
else :
start = self.origin
addr = (addr - self.unwind_table[start][2]) & 0x7fffffff
while (start < (stop - 1)) :
mid = start + ((stop - start) >> 1)
dif = (self.unwind_table[mid][2] - self.unwind_table[start][2])
if ((addr - dif) < self.unwind_table[mid][0]) :
stop = mid
else :
addr = addr - dif
start = mid
if self.unwind_table[start][0] <= addr :
return self.unwind_table[start]
else :
return None
def search_idx_3_0(self, addr) :
first = 0
last = len(self.unwind_table)
while (first < last - 1) :
mid = first + ((last - first + 1) >> 1)
if (addr < self.unwind_table[mid][0]) :
last = mid
else :
first = mid
return self.unwind_table[first]
def unwind_get_byte(self, ctrl) :
|
def unwind_exec_insn(self, ctrl, trace = False) :
insn = self.unwind_get_byte(ctrl)
if ((insn & 0xc0) == 0x00) :
ctrl.vrs[SP] += ((insn & 0x3f) << 2) + 4
if trace :
print_out_str (" add {0} to stack".format(((insn & 0x3f) << 2) + 4))
elif ((insn & 0xc0) == 0x40) :
ctrl.vrs[SP] -= ((insn & 0x3f) << 2) + 4
if trace :
print_out_str (" subtract {0} from stack".format(((insn & 0x3f) << 2) + 4))
elif ((insn & 0xf0) == 0x80) :
vsp = ctrl.vrs[SP]
reg = 4
insn = (insn << 8) | self.unwind_get_byte(ctrl)
mask = insn & 0x0fff
if (mask == 0) :
print_out_str ("unwind: 'Refuse to unwind' instruction")
return -1
# pop R4-R15 according to mask */
load_sp = mask & (1 << (13 - 4))
while (mask) :
if (mask & 1) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
mask >>= 1
reg+=1
if not load_sp :
ctrl.vrs[SP] = vsp
elif ((insn & 0xf0) == 0x90 and (insn & 0x0d) != 0x0d) :
if trace :
print_out_str (" set SP with the value from {0}".format(insn & 0x0f))
ctrl.vrs[SP] = ctrl.vrs[insn & 0x0f]
elif ((insn & 0xf0) == 0xa0) :
vsp = ctrl.vrs[SP]
a = list(range(4,4 + (insn & 7)))
a.append(4 + (insn & 7))
# pop R4-R[4+bbb] */
for reg in (a) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
if (insn & 0x80) :
if trace :
print_out_str (" set LR from the stack")
ctrl.vrs[14] = self.ramdump.read_word(vsp)
if ctrl.vrs[14] is None :
return -1
vsp+=4
ctrl.vrs[SP] = vsp
elif (insn == 0xb0) :
if trace :
print_out_str (" set pc = lr")
if (ctrl.vrs[PC] == 0) :
ctrl.vrs[PC] = ctrl.vrs[LR]
ctrl.entries = 0
elif (insn == 0xb1) :
mask = self.unwind_get_byte(ctrl)
vsp = ctrl.vrs[SP]
reg = 0
if (mask == 0 or mask & 0xf0) :
print_out_str ("unwind: Spare encoding")
return -1
# pop R0-R3 according to mask
while mask :
if (mask & 1) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
mask >>= 1
reg+=1
ctrl.vrs[SP] = vsp
elif (insn == 0xb2) :
uleb128 = self.unwind_get_byte(ctrl)
if trace :
print_out_str (" Adjust sp by {0}".format(0x204 + (uleb128 << 2)))
ctrl.vrs[SP] += 0x204 + (uleb128 << 2)
else :
print_out_str ("unwind: Unhandled instruction")
return -1
return 0
def prel31_to_addr(self, addr) :
value = self.ramdump.read_word(addr)
# offset = (value << 1) >> 1
# C wants this sign extended. Python doesn't do that.
# Sign extend manually.
if (value & 0x40000000) :
offset = value | 0x80000000
else :
offset = value
# This addition relies on integer overflow
# Emulate this behavior
temp = addr + offset
return (temp & 0xffffffff) + ((temp >> 32) & 0xffffffff)
def unwind_frame(self, frame, trace = False) :
low = frame.sp
high = ((low + (THREAD_SIZE - 1)) & ~(THREAD_SIZE - 1)) + THREAD_SIZE
idx = self.search_idx(frame.pc)
if (idx is None) :
if trace :
print_out_str ("can't find %x" % frame.pc)
return -1
ctrl = UnwindCtrlBlock()
ctrl.vrs[FP] = frame.fp
ctrl.vrs[SP] = frame.sp
ctrl.vrs[LR] = frame.lr
ctrl.vrs[PC] = 0
if (idx[1] == 1) :
return -1
elif ((idx[1] & 0x80000000) == 0) :
ctrl.insn = self.prel31_to_addr(idx[2]+4)
elif (idx[1] & 0xff000000) == 0x80000000 :
ctrl.insn = idx[2]+4
else :
print_out_str ("not supported")
return -1
val = self.ramdump.read_word(ctrl.insn)
if ((val & 0xff000000) == 0x80000000) :
ctrl.byte = 2
ctrl.entries = 1
elif ((val & 0xff000000) == 0x81000000) :
ctrl.byte = 1
ctrl.entries = 1 + ((val & 0x00ff0000) >> 16)
else :
return -1
while (ctrl.entries > 0) :
urc = self.unwind_exec_insn(ctrl, trace)
if (urc < 0) :
return urc
if (ctrl.vrs[SP] < low or ctrl.vrs[SP] >= high) :
return -1
if (ctrl.vrs[PC] == 0) :
ctrl.vrs[PC] = ctrl.vrs[LR]
# check for infinite loop */
if (frame.pc == ctrl.vrs[PC]) :
return -1
frame.fp = ctrl.vrs[FP]
frame.sp = ctrl.vrs[SP]
frame.lr = ctrl.vrs[LR]
frame.pc = ctrl.vrs[PC]
return 0
def unwind_backtrace(self, sp, fp, pc, lr, extra_str = "", out_file = None, trace = False) :
offset = 0
frame = Stackframe(fp, sp, lr, pc)
frame.fp = fp
frame.sp = sp
frame.lr = lr
frame.pc = pc
while True :
where = frame.pc
offset = 0
r = self.ramdump.unwind_lookup(frame.pc)
if r is None :
symname = "UNKNOWN"
offset = 0x0
else :
symname, offset = r
pstring = (extra_str+"[<{0:x}>] {1}+0x{2:x}".format(frame.pc, symname, offset))
if out_file :
out_file.write (pstring+"\n")
else :
print_out_str (pstring)
urc = self.unwind_frame(frame, trace)
if urc < 0 :
break
| if (ctrl.entries <= 0) :
print_out_str("unwind: Corrupt unwind table")
return 0
val = self.ramdump.read_word(ctrl.insn)
ret = (val >> (ctrl.byte * 8)) & 0xff
if (ctrl.byte == 0) :
ctrl.insn+=4
ctrl.entries-=1
ctrl.byte = 3
else :
ctrl.byte-=1
return ret | identifier_body |
unwind.py | # Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import re
import os
import struct
import datetime
import array
import string
import bisect
import traceback
from subprocess import *
from optparse import OptionParser | from struct import unpack
from ctypes import *
from print_out import *
FP = 11
SP = 13
LR = 14
PC = 15
THREAD_SIZE = 8192
class Stackframe () :
def __init__(self, fp, sp, lr, pc) :
self.fp = fp
self.sp = sp
self.lr = lr
self.pc = pc
class UnwindCtrlBlock () :
def __init__ (self) :
self.vrs = 16*[0]
self.insn = 0
self.entries = -1
self.byte = -1
self.index = 0
class Unwinder () :
def __init__(self, ramdump) :
start = ramdump.addr_lookup("__start_unwind_idx")
end = ramdump.addr_lookup("__stop_unwind_idx")
if (start is None) or (end is None) :
print_out_str ("!!! Could not lookup unwinding information")
return None
# addresses
self.start_idx = start
self.stop_idx = end
self.unwind_table = []
self.ramdump = ramdump
i = 0
for addr in range(start,end,8) :
(a,b) = ramdump.read_string(addr,"<II")
self.unwind_table.append((a,b,start+8*i))
i+=1
ver = ramdump.version
if re.search('3.0.\d',ver) is not None :
self.search_idx = self.search_idx_3_0
else :
self.search_idx = self.search_idx_3_4
# index into the table
self.origin = self.unwind_find_origin()
def unwind_find_origin(self) :
start = 0
stop = len(self.unwind_table)
while (start < stop) :
mid = start + ((stop - start) >> 1)
if (self.unwind_table[mid][0] >= 0x40000000) :
start = mid + 1
else :
stop = mid
return stop
def unwind_frame_generic(self, frame) :
high = 0
fp = frame.fp
low = frame.sp
mask = (THREAD_SIZE) - 1
high = (low + mask) & (~mask) #ALIGN(low, THREAD_SIZE)
# /* check current frame pointer is within bounds */
if (fp < (low + 12) or fp + 4 >= high) :
return -1
fp_is_at = self.ramdump.read_word(frame.fp-12)
sp_is_at = self.ramdump.read_word(frame.fp-8)
pc_is_at = self.ramdump.read_word(frame.fp-4)
frame.fp = fp_is_at
frame.sp = sp_is_at
frame.pc = pc_is_at
return 0
def walk_stackframe_generic(self, frame) :
while True :
symname = self.ramdump.addr_to_symbol(frame.pc)
print_out_str (symname)
ret = self.unwind_frame_generic(frame)
if ret < 0 :
break
def unwind_backtrace_generic(self, sp, fp, pc) :
frame = Stackframe()
frame.fp = fp
frame.pc = pc
frame.sp = sp
walk_stackframe_generic(frame)
def search_idx_3_4(self, addr) :
start = 0
stop = len(self.unwind_table)
orig = addr
if (addr < self.start_idx) :
stop = self.origin
else :
start = self.origin
addr = (addr - self.unwind_table[start][2]) & 0x7fffffff
while (start < (stop - 1)) :
mid = start + ((stop - start) >> 1)
dif = (self.unwind_table[mid][2] - self.unwind_table[start][2])
if ((addr - dif) < self.unwind_table[mid][0]) :
stop = mid
else :
addr = addr - dif
start = mid
if self.unwind_table[start][0] <= addr :
return self.unwind_table[start]
else :
return None
def search_idx_3_0(self, addr) :
first = 0
last = len(self.unwind_table)
while (first < last - 1) :
mid = first + ((last - first + 1) >> 1)
if (addr < self.unwind_table[mid][0]) :
last = mid
else :
first = mid
return self.unwind_table[first]
def unwind_get_byte(self, ctrl) :
if (ctrl.entries <= 0) :
print_out_str("unwind: Corrupt unwind table")
return 0
val = self.ramdump.read_word(ctrl.insn)
ret = (val >> (ctrl.byte * 8)) & 0xff
if (ctrl.byte == 0) :
ctrl.insn+=4
ctrl.entries-=1
ctrl.byte = 3
else :
ctrl.byte-=1
return ret
def unwind_exec_insn(self, ctrl, trace = False) :
insn = self.unwind_get_byte(ctrl)
if ((insn & 0xc0) == 0x00) :
ctrl.vrs[SP] += ((insn & 0x3f) << 2) + 4
if trace :
print_out_str (" add {0} to stack".format(((insn & 0x3f) << 2) + 4))
elif ((insn & 0xc0) == 0x40) :
ctrl.vrs[SP] -= ((insn & 0x3f) << 2) + 4
if trace :
print_out_str (" subtract {0} from stack".format(((insn & 0x3f) << 2) + 4))
elif ((insn & 0xf0) == 0x80) :
vsp = ctrl.vrs[SP]
reg = 4
insn = (insn << 8) | self.unwind_get_byte(ctrl)
mask = insn & 0x0fff
if (mask == 0) :
print_out_str ("unwind: 'Refuse to unwind' instruction")
return -1
# pop R4-R15 according to mask */
load_sp = mask & (1 << (13 - 4))
while (mask) :
if (mask & 1) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
mask >>= 1
reg+=1
if not load_sp :
ctrl.vrs[SP] = vsp
elif ((insn & 0xf0) == 0x90 and (insn & 0x0d) != 0x0d) :
if trace :
print_out_str (" set SP with the value from {0}".format(insn & 0x0f))
ctrl.vrs[SP] = ctrl.vrs[insn & 0x0f]
elif ((insn & 0xf0) == 0xa0) :
vsp = ctrl.vrs[SP]
a = list(range(4,4 + (insn & 7)))
a.append(4 + (insn & 7))
# pop R4-R[4+bbb] */
for reg in (a) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
if (insn & 0x80) :
if trace :
print_out_str (" set LR from the stack")
ctrl.vrs[14] = self.ramdump.read_word(vsp)
if ctrl.vrs[14] is None :
return -1
vsp+=4
ctrl.vrs[SP] = vsp
elif (insn == 0xb0) :
if trace :
print_out_str (" set pc = lr")
if (ctrl.vrs[PC] == 0) :
ctrl.vrs[PC] = ctrl.vrs[LR]
ctrl.entries = 0
elif (insn == 0xb1) :
mask = self.unwind_get_byte(ctrl)
vsp = ctrl.vrs[SP]
reg = 0
if (mask == 0 or mask & 0xf0) :
print_out_str ("unwind: Spare encoding")
return -1
# pop R0-R3 according to mask
while mask :
if (mask & 1) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
mask >>= 1
reg+=1
ctrl.vrs[SP] = vsp
elif (insn == 0xb2) :
uleb128 = self.unwind_get_byte(ctrl)
if trace :
print_out_str (" Adjust sp by {0}".format(0x204 + (uleb128 << 2)))
ctrl.vrs[SP] += 0x204 + (uleb128 << 2)
else :
print_out_str ("unwind: Unhandled instruction")
return -1
return 0
def prel31_to_addr(self, addr) :
value = self.ramdump.read_word(addr)
# offset = (value << 1) >> 1
# C wants this sign extended. Python doesn't do that.
# Sign extend manually.
if (value & 0x40000000) :
offset = value | 0x80000000
else :
offset = value
# This addition relies on integer overflow
# Emulate this behavior
temp = addr + offset
return (temp & 0xffffffff) + ((temp >> 32) & 0xffffffff)
def unwind_frame(self, frame, trace = False) :
low = frame.sp
high = ((low + (THREAD_SIZE - 1)) & ~(THREAD_SIZE - 1)) + THREAD_SIZE
idx = self.search_idx(frame.pc)
if (idx is None) :
if trace :
print_out_str ("can't find %x" % frame.pc)
return -1
ctrl = UnwindCtrlBlock()
ctrl.vrs[FP] = frame.fp
ctrl.vrs[SP] = frame.sp
ctrl.vrs[LR] = frame.lr
ctrl.vrs[PC] = 0
if (idx[1] == 1) :
return -1
elif ((idx[1] & 0x80000000) == 0) :
ctrl.insn = self.prel31_to_addr(idx[2]+4)
elif (idx[1] & 0xff000000) == 0x80000000 :
ctrl.insn = idx[2]+4
else :
print_out_str ("not supported")
return -1
val = self.ramdump.read_word(ctrl.insn)
if ((val & 0xff000000) == 0x80000000) :
ctrl.byte = 2
ctrl.entries = 1
elif ((val & 0xff000000) == 0x81000000) :
ctrl.byte = 1
ctrl.entries = 1 + ((val & 0x00ff0000) >> 16)
else :
return -1
while (ctrl.entries > 0) :
urc = self.unwind_exec_insn(ctrl, trace)
if (urc < 0) :
return urc
if (ctrl.vrs[SP] < low or ctrl.vrs[SP] >= high) :
return -1
if (ctrl.vrs[PC] == 0) :
ctrl.vrs[PC] = ctrl.vrs[LR]
# check for infinite loop */
if (frame.pc == ctrl.vrs[PC]) :
return -1
frame.fp = ctrl.vrs[FP]
frame.sp = ctrl.vrs[SP]
frame.lr = ctrl.vrs[LR]
frame.pc = ctrl.vrs[PC]
return 0
def unwind_backtrace(self, sp, fp, pc, lr, extra_str = "", out_file = None, trace = False) :
offset = 0
frame = Stackframe(fp, sp, lr, pc)
frame.fp = fp
frame.sp = sp
frame.lr = lr
frame.pc = pc
while True :
where = frame.pc
offset = 0
r = self.ramdump.unwind_lookup(frame.pc)
if r is None :
symname = "UNKNOWN"
offset = 0x0
else :
symname, offset = r
pstring = (extra_str+"[<{0:x}>] {1}+0x{2:x}".format(frame.pc, symname, offset))
if out_file :
out_file.write (pstring+"\n")
else :
print_out_str (pstring)
urc = self.unwind_frame(frame, trace)
if urc < 0 :
break | from optparse import OptionGroup | random_line_split |
unwind.py | # Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import re
import os
import struct
import datetime
import array
import string
import bisect
import traceback
from subprocess import *
from optparse import OptionParser
from optparse import OptionGroup
from struct import unpack
from ctypes import *
from print_out import *
FP = 11
SP = 13
LR = 14
PC = 15
THREAD_SIZE = 8192
class Stackframe () :
def __init__(self, fp, sp, lr, pc) :
self.fp = fp
self.sp = sp
self.lr = lr
self.pc = pc
class UnwindCtrlBlock () :
def __init__ (self) :
self.vrs = 16*[0]
self.insn = 0
self.entries = -1
self.byte = -1
self.index = 0
class Unwinder () :
def __init__(self, ramdump) :
start = ramdump.addr_lookup("__start_unwind_idx")
end = ramdump.addr_lookup("__stop_unwind_idx")
if (start is None) or (end is None) :
print_out_str ("!!! Could not lookup unwinding information")
return None
# addresses
self.start_idx = start
self.stop_idx = end
self.unwind_table = []
self.ramdump = ramdump
i = 0
for addr in range(start,end,8) :
(a,b) = ramdump.read_string(addr,"<II")
self.unwind_table.append((a,b,start+8*i))
i+=1
ver = ramdump.version
if re.search('3.0.\d',ver) is not None :
self.search_idx = self.search_idx_3_0
else :
self.search_idx = self.search_idx_3_4
# index into the table
self.origin = self.unwind_find_origin()
def unwind_find_origin(self) :
start = 0
stop = len(self.unwind_table)
while (start < stop) :
mid = start + ((stop - start) >> 1)
if (self.unwind_table[mid][0] >= 0x40000000) :
start = mid + 1
else :
stop = mid
return stop
def unwind_frame_generic(self, frame) :
high = 0
fp = frame.fp
low = frame.sp
mask = (THREAD_SIZE) - 1
high = (low + mask) & (~mask) #ALIGN(low, THREAD_SIZE)
# /* check current frame pointer is within bounds */
if (fp < (low + 12) or fp + 4 >= high) :
return -1
fp_is_at = self.ramdump.read_word(frame.fp-12)
sp_is_at = self.ramdump.read_word(frame.fp-8)
pc_is_at = self.ramdump.read_word(frame.fp-4)
frame.fp = fp_is_at
frame.sp = sp_is_at
frame.pc = pc_is_at
return 0
def walk_stackframe_generic(self, frame) :
while True :
symname = self.ramdump.addr_to_symbol(frame.pc)
print_out_str (symname)
ret = self.unwind_frame_generic(frame)
if ret < 0 :
break
def unwind_backtrace_generic(self, sp, fp, pc) :
frame = Stackframe()
frame.fp = fp
frame.pc = pc
frame.sp = sp
walk_stackframe_generic(frame)
def search_idx_3_4(self, addr) :
start = 0
stop = len(self.unwind_table)
orig = addr
if (addr < self.start_idx) :
stop = self.origin
else :
start = self.origin
addr = (addr - self.unwind_table[start][2]) & 0x7fffffff
while (start < (stop - 1)) :
mid = start + ((stop - start) >> 1)
dif = (self.unwind_table[mid][2] - self.unwind_table[start][2])
if ((addr - dif) < self.unwind_table[mid][0]) :
stop = mid
else :
addr = addr - dif
start = mid
if self.unwind_table[start][0] <= addr :
return self.unwind_table[start]
else :
return None
def search_idx_3_0(self, addr) :
first = 0
last = len(self.unwind_table)
while (first < last - 1) :
mid = first + ((last - first + 1) >> 1)
if (addr < self.unwind_table[mid][0]) :
last = mid
else :
first = mid
return self.unwind_table[first]
def unwind_get_byte(self, ctrl) :
if (ctrl.entries <= 0) :
print_out_str("unwind: Corrupt unwind table")
return 0
val = self.ramdump.read_word(ctrl.insn)
ret = (val >> (ctrl.byte * 8)) & 0xff
if (ctrl.byte == 0) :
ctrl.insn+=4
ctrl.entries-=1
ctrl.byte = 3
else :
ctrl.byte-=1
return ret
def unwind_exec_insn(self, ctrl, trace = False) :
insn = self.unwind_get_byte(ctrl)
if ((insn & 0xc0) == 0x00) :
ctrl.vrs[SP] += ((insn & 0x3f) << 2) + 4
if trace :
print_out_str (" add {0} to stack".format(((insn & 0x3f) << 2) + 4))
elif ((insn & 0xc0) == 0x40) :
ctrl.vrs[SP] -= ((insn & 0x3f) << 2) + 4
if trace :
print_out_str (" subtract {0} from stack".format(((insn & 0x3f) << 2) + 4))
elif ((insn & 0xf0) == 0x80) :
vsp = ctrl.vrs[SP]
reg = 4
insn = (insn << 8) | self.unwind_get_byte(ctrl)
mask = insn & 0x0fff
if (mask == 0) :
print_out_str ("unwind: 'Refuse to unwind' instruction")
return -1
# pop R4-R15 according to mask */
load_sp = mask & (1 << (13 - 4))
while (mask) :
if (mask & 1) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
mask >>= 1
reg+=1
if not load_sp :
ctrl.vrs[SP] = vsp
elif ((insn & 0xf0) == 0x90 and (insn & 0x0d) != 0x0d) :
if trace :
print_out_str (" set SP with the value from {0}".format(insn & 0x0f))
ctrl.vrs[SP] = ctrl.vrs[insn & 0x0f]
elif ((insn & 0xf0) == 0xa0) :
vsp = ctrl.vrs[SP]
a = list(range(4,4 + (insn & 7)))
a.append(4 + (insn & 7))
# pop R4-R[4+bbb] */
for reg in (a) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
|
if ctrl.vrs[reg] is None :
return -1
vsp+=4
if (insn & 0x80) :
if trace :
print_out_str (" set LR from the stack")
ctrl.vrs[14] = self.ramdump.read_word(vsp)
if ctrl.vrs[14] is None :
return -1
vsp+=4
ctrl.vrs[SP] = vsp
elif (insn == 0xb0) :
if trace :
print_out_str (" set pc = lr")
if (ctrl.vrs[PC] == 0) :
ctrl.vrs[PC] = ctrl.vrs[LR]
ctrl.entries = 0
elif (insn == 0xb1) :
mask = self.unwind_get_byte(ctrl)
vsp = ctrl.vrs[SP]
reg = 0
if (mask == 0 or mask & 0xf0) :
print_out_str ("unwind: Spare encoding")
return -1
# pop R0-R3 according to mask
while mask :
if (mask & 1) :
ctrl.vrs[reg] = self.ramdump.read_word(vsp)
if trace :
print_out_str (" pop r{0} from stack".format(reg))
if ctrl.vrs[reg] is None :
return -1
vsp+=4
mask >>= 1
reg+=1
ctrl.vrs[SP] = vsp
elif (insn == 0xb2) :
uleb128 = self.unwind_get_byte(ctrl)
if trace :
print_out_str (" Adjust sp by {0}".format(0x204 + (uleb128 << 2)))
ctrl.vrs[SP] += 0x204 + (uleb128 << 2)
else :
print_out_str ("unwind: Unhandled instruction")
return -1
return 0
def prel31_to_addr(self, addr) :
value = self.ramdump.read_word(addr)
# offset = (value << 1) >> 1
# C wants this sign extended. Python doesn't do that.
# Sign extend manually.
if (value & 0x40000000) :
offset = value | 0x80000000
else :
offset = value
# This addition relies on integer overflow
# Emulate this behavior
temp = addr + offset
return (temp & 0xffffffff) + ((temp >> 32) & 0xffffffff)
def unwind_frame(self, frame, trace = False) :
low = frame.sp
high = ((low + (THREAD_SIZE - 1)) & ~(THREAD_SIZE - 1)) + THREAD_SIZE
idx = self.search_idx(frame.pc)
if (idx is None) :
if trace :
print_out_str ("can't find %x" % frame.pc)
return -1
ctrl = UnwindCtrlBlock()
ctrl.vrs[FP] = frame.fp
ctrl.vrs[SP] = frame.sp
ctrl.vrs[LR] = frame.lr
ctrl.vrs[PC] = 0
if (idx[1] == 1) :
return -1
elif ((idx[1] & 0x80000000) == 0) :
ctrl.insn = self.prel31_to_addr(idx[2]+4)
elif (idx[1] & 0xff000000) == 0x80000000 :
ctrl.insn = idx[2]+4
else :
print_out_str ("not supported")
return -1
val = self.ramdump.read_word(ctrl.insn)
if ((val & 0xff000000) == 0x80000000) :
ctrl.byte = 2
ctrl.entries = 1
elif ((val & 0xff000000) == 0x81000000) :
ctrl.byte = 1
ctrl.entries = 1 + ((val & 0x00ff0000) >> 16)
else :
return -1
while (ctrl.entries > 0) :
urc = self.unwind_exec_insn(ctrl, trace)
if (urc < 0) :
return urc
if (ctrl.vrs[SP] < low or ctrl.vrs[SP] >= high) :
return -1
if (ctrl.vrs[PC] == 0) :
ctrl.vrs[PC] = ctrl.vrs[LR]
# check for infinite loop */
if (frame.pc == ctrl.vrs[PC]) :
return -1
frame.fp = ctrl.vrs[FP]
frame.sp = ctrl.vrs[SP]
frame.lr = ctrl.vrs[LR]
frame.pc = ctrl.vrs[PC]
return 0
def unwind_backtrace(self, sp, fp, pc, lr, extra_str = "", out_file = None, trace = False) :
offset = 0
frame = Stackframe(fp, sp, lr, pc)
frame.fp = fp
frame.sp = sp
frame.lr = lr
frame.pc = pc
while True :
where = frame.pc
offset = 0
r = self.ramdump.unwind_lookup(frame.pc)
if r is None :
symname = "UNKNOWN"
offset = 0x0
else :
symname, offset = r
pstring = (extra_str+"[<{0:x}>] {1}+0x{2:x}".format(frame.pc, symname, offset))
if out_file :
out_file.write (pstring+"\n")
else :
print_out_str (pstring)
urc = self.unwind_frame(frame, trace)
if urc < 0 :
break
| print_out_str (" pop r{0} from stack".format(reg)) | conditional_block |
graph.go | package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/sets"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
// Step is a self-contained bit of work that the
// build pipeline needs to do.
// +k8s:deepcopy-gen=false
type Step interface {
Inputs() (InputDefinition, error)
// Validate checks inputs of steps that are part of the execution graph.
Validate() error
Run(ctx context.Context) error
// Name is the name of the stage, used to target it.
// If this is the empty string the stage cannot be targeted.
Name() string
// Description is a short, human readable description of this step.
Description() string
Requires() []StepLink
Creates() []StepLink
Provides() ParameterMap
// Objects returns all objects the client for this step has seen
Objects() []ctrlruntimeclient.Object
}
type InputDefinition []string
// +k8s:deepcopy-gen=false
type ParameterMap map[string]func() (string, error)
// StepLink abstracts the types of links that steps
// require and create.
// +k8s:deepcopy-gen=false
type StepLink interface {
// SatisfiedBy determines if the other link satisfies
// the requirements of this one, either partially or
// fully. If so, the other step will be executed first.
SatisfiedBy(other StepLink) bool
// UnsatisfiableError returns a human-understandable explanation
// of where exactly in the config the requirement came from and
// what needs to be done to satisfy it. It must be checked for
// emptyness and only be used when non-empty.
UnsatisfiableError() string
}
// internalImageStreamLink describes all tags in
// an ImageStream in the test's namespace
type internalImageStreamLink struct {
name string
}
func (l *internalImageStreamLink) SatisfiedBy(other StepLink) bool {
// an ImageStream in an internal namespace may only
// be provided by a literal link for that stream
switch link := other.(type) {
case *internalImageStreamLink:
return l.name == link.name
default:
return false
}
}
func (l *internalImageStreamLink) UnsatisfiableError() string {
return ""
}
// internalImageStreamTagLink describes a specific tag in
// an ImageStream in the test's namespace
type internalImageStreamTagLink struct {
name, tag, unsatisfiableError string
}
func (l *internalImageStreamTagLink) SatisfiedBy(other StepLink) bool {
// an ImageStreamTag in an internal namespace may
// either be provided by a literal link for that tag
// or by a link that provides the full stream
switch link := other.(type) {
case *internalImageStreamTagLink:
return l.name == link.name && l.tag == link.tag
case *internalImageStreamLink:
return l.name == link.name
default:
return false
}
}
func (l *internalImageStreamTagLink) UnsatisfiableError() string {
return l.unsatisfiableError
}
func AllStepsLink() StepLink {
return allStepsLink{}
}
type allStepsLink struct{}
func (_ allStepsLink) SatisfiedBy(_ StepLink) bool {
return true
}
func (_ allStepsLink) UnsatisfiableError() string {
return ""
}
func ExternalImageLink(ref ImageStreamTagReference) StepLink {
return &externalImageLink{
namespace: ref.Namespace,
name: ref.Name,
tag: ref.Tag,
}
}
type externalImageLink struct {
namespace, name, tag string
}
func (l *externalImageLink) SatisfiedBy(other StepLink) bool {
switch link := other.(type) {
case *externalImageLink:
return l.name == link.name &&
l.namespace == link.namespace &&
l.tag == link.tag
default:
return false
}
}
func (l *externalImageLink) UnsatisfiableError() string {
return ""
}
type StepLinkOptions struct {
// UnsatisfiableError holds a human-understandable explanation
// of where exactly in the config the requirement came from and
// what needs to be done to satisfy it.
UnsatisfiableError string
}
// +k8s:deepcopy-gen=false
type StepLinkOption func(*StepLinkOptions)
func StepLinkWithUnsatisfiableErrorMessage(msg string) StepLinkOption {
return func(slo *StepLinkOptions) {
slo.UnsatisfiableError = msg
}
}
// InternalImageLink describes a dependency on a tag in the pipeline stream
func InternalImageLink(tag PipelineImageStreamTagReference, o ...StepLinkOption) StepLink {
opts := StepLinkOptions{}
for _, o := range o {
o(&opts)
}
return &internalImageStreamTagLink{
name: PipelineImageStream,
tag: string(tag),
unsatisfiableError: opts.UnsatisfiableError,
}
}
func ReleasePayloadImageLink(tag string) StepLink {
return &internalImageStreamTagLink{
name: ReleaseImageStream,
tag: tag,
}
}
func ImagesReadyLink() StepLink {
return &imagesReadyLink{}
}
type imagesReadyLink struct{}
func (l *imagesReadyLink) SatisfiedBy(other StepLink) bool {
switch other.(type) {
case *imagesReadyLink:
return true
default:
return false
}
}
func (l *imagesReadyLink) UnsatisfiableError() string {
return ""
}
func RPMRepoLink() StepLink {
return &rpmRepoLink{}
}
type rpmRepoLink struct{}
func (l *rpmRepoLink) SatisfiedBy(other StepLink) bool {
switch other.(type) {
case *rpmRepoLink:
return true
default:
return false
}
}
func (l *rpmRepoLink) UnsatisfiableError() string {
return ""
}
// ReleaseImagesLink describes the content of a stable(-foo)?
// ImageStream in the test namespace.
func ReleaseImagesLink(name string) StepLink {
return &internalImageStreamLink{
name: ReleaseStreamFor(name),
}
}
// ReleaseImageTagLink describes a specific tag in a stable(-foo)?
// ImageStream in the test namespace.
func ReleaseImageTagLink(name, tag string) StepLink {
return &internalImageStreamTagLink{
name: ReleaseStreamFor(name),
tag: tag,
}
}
func Comparer() cmp.Option {
return cmp.AllowUnexported(
internalImageStreamLink{},
internalImageStreamTagLink{},
externalImageLink{},
)
}
// ReleaseStreamFor determines the ImageStream into which a named
// release will be imported or assembled.
func ReleaseStreamFor(name string) string {
if name == LatestReleaseName {
return StableImageStream
}
return fmt.Sprintf("%s-%s", StableImageStream, name)
}
// ReleaseNameFrom determines the named release that was imported
// or assembled into an ImageStream.
func ReleaseNameFrom(stream string) string {
if stream == StableImageStream {
return LatestReleaseName
}
return strings.TrimPrefix(stream, fmt.Sprintf("%s-", StableImageStream))
}
// IsReleaseStream determines if the ImageStream was created from
// an import or assembly of a release.
func IsReleaseStream(stream string) bool {
return strings.HasPrefix(stream, StableImageStream)
}
// IsReleasePayloadStream determines if the ImageStream holds
// release payload images.
func IsReleasePayloadStream(stream string) bool {
return stream == ReleaseImageStream
}
// +k8s:deepcopy-gen=false
type StepNode struct {
Step Step
Children []*StepNode
}
// GraphConfiguration contains step data used to build the execution graph.
type GraphConfiguration struct {
// Steps accumulates step configuration as the configuration is parsed.
Steps []StepConfiguration
}
func (c *GraphConfiguration) InputImages() (ret []*InputImageTagStepConfiguration) {
for _, s := range c.Steps {
if c := s.InputImageTagStepConfiguration; c != nil {
ret = append(ret, c)
}
}
return
}
// +k8s:deepcopy-gen=false
// StepGraph is a DAG of steps referenced by its roots
type StepGraph []*StepNode
// +k8s:deepcopy-gen=false
// OrderedStepList is a topologically-ordered sequence of steps
// Edges are determined based on the Creates/Requires methods.
type OrderedStepList []*StepNode
// BuildGraph returns a graph or graphs that include
// all steps given.
func BuildGraph(steps []Step) StepGraph {
var allNodes []*StepNode
for _, step := range steps {
node := StepNode{Step: step, Children: []*StepNode{}}
allNodes = append(allNodes, &node)
}
var ret StepGraph
for _, node := range allNodes {
isRoot := true
for _, other := range allNodes {
for _, nodeRequires := range node.Step.Requires() {
for _, otherCreates := range other.Step.Creates() {
if nodeRequires.SatisfiedBy(otherCreates) {
isRoot = false
addToNode(other, node)
}
}
}
}
if isRoot {
ret = append(ret, node)
}
}
return ret
}
// BuildPartialGraph returns a graph or graphs that include
// only the dependencies of the named steps.
func BuildPartialGraph(steps []Step, names []string) (StepGraph, error) {
if len(names) == 0 {
return BuildGraph(steps), nil
}
var required []StepLink
candidates := make([]bool, len(steps))
var allNames []string
for i, step := range steps {
allNames = append(allNames, step.Name())
for j, name := range names {
if name != step.Name() {
continue
}
candidates[i] = true
required = append(required, step.Requires()...)
names = append(names[:j], names[j+1:]...)
break
}
}
if len(names) > 0 {
return nil, fmt.Errorf("the following names were not found in the config or were duplicates: %s (from %s)", strings.Join(names, ", "), strings.Join(allNames, ", "))
}
// identify all other steps that provide any links required by the current set
for {
added := 0
for i, step := range steps {
if candidates[i] {
continue
}
if HasAnyLinks(required, step.Creates()) {
added++
candidates[i] = true
required = append(required, step.Requires()...)
}
}
if added == 0 {
break
}
}
var targeted []Step
for i, candidate := range candidates {
if candidate {
targeted = append(targeted, steps[i])
}
}
return BuildGraph(targeted), nil
}
// TopologicalSort validates nodes form a DAG and orders them topologically.
func (g StepGraph) TopologicalSort() (OrderedStepList, []error) {
var ret OrderedStepList
var satisfied []StepLink
if err := iterateDAG(g, nil, sets.New[string](), func(*StepNode) {}); err != nil {
return nil, err
}
seen := make(map[Step]struct{})
for len(g) > 0 {
var changed bool
var waiting []*StepNode
for _, node := range g |
if !changed && len(waiting) > 0 {
errMessages := sets.Set[string]{}
for _, node := range waiting {
missing := sets.Set[string]{}
for _, link := range node.Step.Requires() {
if !HasAllLinks([]StepLink{link}, satisfied) {
if msg := link.UnsatisfiableError(); msg != "" {
missing.Insert(msg)
} else {
missing.Insert(fmt.Sprintf("<%#v>", link))
}
}
}
// De-Duplicate errors
errMessages.Insert(fmt.Sprintf("step %s is missing dependencies: %s", node.Step.Name(), strings.Join(sets.List(missing), ", ")))
}
ret := make([]error, 0, errMessages.Len()+1)
ret = append(ret, errors.New("steps are missing dependencies"))
for _, message := range sets.List(errMessages) {
ret = append(ret, errors.New(message))
}
return nil, ret
}
g = waiting
}
return ret, nil
}
// iterateDAG applies a function to every node of a DAG, detecting cycles.
func iterateDAG(graph StepGraph, path []string, inPath sets.Set[string], f func(*StepNode)) (ret []error) {
for _, node := range graph {
name := node.Step.Name()
if inPath.Has(name) {
ret = append(ret, fmt.Errorf("cycle in graph: %s -> %s", strings.Join(path, " -> "), name))
continue
}
inPath.Insert(name)
ret = append(ret, iterateDAG(node.Children, append(path, name), inPath, f)...)
inPath.Delete(name)
f(node)
}
return ret
}
// IterateAllEdges applies an operation to every node in the graph once.
func (g StepGraph) IterateAllEdges(f func(*StepNode)) {
iterateAllEdges(g, sets.New[string](), f)
}
func iterateAllEdges(nodes []*StepNode, alreadyIterated sets.Set[string], f func(*StepNode)) {
for _, node := range nodes {
if alreadyIterated.Has(node.Step.Name()) {
continue
}
iterateAllEdges(node.Children, alreadyIterated, f)
if alreadyIterated.Has(node.Step.Name()) {
continue
}
f(node)
alreadyIterated.Insert(node.Step.Name())
}
}
func addToNode(parent, child *StepNode) bool {
for _, s := range parent.Children {
if s == child {
return false
}
}
parent.Children = append(parent.Children, child)
return true
}
func HasAnyLinks(steps, candidates []StepLink) bool {
for _, candidate := range candidates {
for _, step := range steps {
if step.SatisfiedBy(candidate) {
return true
}
}
}
return false
}
func HasAllLinks(needles, haystack []StepLink) bool {
for _, needle := range needles {
contains := false
for _, hay := range haystack {
if hay.SatisfiedBy(needle) {
contains = true
}
}
if !contains {
return false
}
}
return true
}
// +k8s:deepcopy-gen=false
type CIOperatorStepGraph []CIOperatorStepDetails
// MergeFrom merges two CIOperatorStepGraphs together using StepNames as merge keys.
// The merging logic will never ovewrwrite data and only set unset fields.
// Steps that do not exist in the first graph get appended.
func (graph *CIOperatorStepGraph) MergeFrom(from ...CIOperatorStepDetails) {
for _, step := range from {
var found bool
for idx, existing := range *graph {
if step.StepName != existing.StepName {
continue
}
found = true
(*graph)[idx] = mergeSteps(existing, step)
}
if !found {
*graph = append(*graph, step)
}
}
}
func mergeSteps(into, from CIOperatorStepDetails) CIOperatorStepDetails {
if into.Description == "" {
into.Description = from.Description
}
if into.Dependencies == nil {
into.Dependencies = from.Dependencies
}
if into.StartedAt == nil {
into.StartedAt = from.StartedAt
}
if into.StartedAt == nil {
into.StartedAt = from.StartedAt
}
if into.FinishedAt == nil {
into.FinishedAt = from.FinishedAt
}
if into.Duration == nil {
into.Duration = from.Duration
}
if into.Manifests == nil {
into.Manifests = from.Manifests
}
if into.LogURL == "" {
into.LogURL = from.LogURL
}
if into.Failed == nil {
into.Failed = from.Failed
}
if into.Substeps == nil {
into.Substeps = from.Substeps
}
return into
}
// +k8s:deepcopy-gen=false
type CIOperatorStepDetails struct {
CIOperatorStepDetailInfo `json:",inline"`
Substeps []CIOperatorStepDetailInfo `json:"substeps,omitempty"`
}
// +k8s:deepcopy-gen=false
type CIOperatorStepDetailInfo struct {
StepName string `json:"name"`
Description string `json:"description"`
Dependencies []string `json:"dependencies"`
StartedAt *time.Time `json:"started_at"`
FinishedAt *time.Time `json:"finished_at"`
Duration *time.Duration `json:"duration,omitempty"`
Manifests []ctrlruntimeclient.Object `json:"manifests,omitempty"`
LogURL string `json:"log_url,omitempty"`
Failed *bool `json:"failed,omitempty"`
}
func (c *CIOperatorStepDetailInfo) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
manifests := []*unstructured.Unstructured{}
if rawManifests, ok := raw["manifests"]; ok {
serializedManifests, err := json.Marshal(rawManifests)
if err != nil {
return err
}
if err := json.Unmarshal(serializedManifests, &manifests); err != nil {
return err
}
delete(raw, "manifests")
}
reserializedWithoutManifests, err := json.Marshal(raw)
if err != nil {
return err
}
type silbling CIOperatorStepDetailInfo
var unmarshalTo silbling
if err := json.Unmarshal(reserializedWithoutManifests, &unmarshalTo); err != nil {
return err
}
*c = CIOperatorStepDetailInfo(unmarshalTo)
c.Manifests = nil
for _, manifest := range manifests {
c.Manifests = append(c.Manifests, manifest)
}
return nil
}
const CIOperatorStepGraphJSONFilename = "ci-operator-step-graph.json"
// StepGraphJSONURL takes a base url like https://storage.googleapis.com/origin-ci-test/pr-logs/pull/openshift_ci-tools/999/pull-ci-openshift-ci-tools-master-validate-vendor/1283812971092381696
// and returns the full url for the step graph json document.
func StepGraphJSONURL(baseJobURL string) string {
return strings.Join([]string{baseJobURL, "artifacts", CIOperatorStepGraphJSONFilename}, "/")
}
// LinkForImage determines what dependent link is required
// for the user's image dependency
func LinkForImage(imageStream, tag string) StepLink {
switch {
case imageStream == PipelineImageStream:
// the user needs an image we're building
return InternalImageLink(PipelineImageStreamTagReference(tag))
case IsReleaseStream(imageStream):
// the user needs a tag that's a component of some release;
// we cant' rely on a specific tag, as they are implicit in
// the import process and won't be present in the build graph,
// so we wait for the whole import to succeed
return ReleaseImagesLink(ReleaseNameFrom(imageStream))
case IsReleasePayloadStream(imageStream):
// the user needs a release payload
return ReleasePayloadImageLink(tag)
default:
// we have no idea what the user's configured
return nil
}
}
| {
for _, child := range node.Children {
if _, ok := seen[child.Step]; !ok {
waiting = append(waiting, child)
}
}
if _, ok := seen[node.Step]; ok {
continue
}
if !HasAllLinks(node.Step.Requires(), satisfied) {
waiting = append(waiting, node)
continue
}
satisfied = append(satisfied, node.Step.Creates()...)
ret = append(ret, node)
seen[node.Step] = struct{}{}
changed = true
} | conditional_block |
graph.go | package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/sets"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
// Step is a self-contained bit of work that the
// build pipeline needs to do.
// +k8s:deepcopy-gen=false
type Step interface {
Inputs() (InputDefinition, error)
// Validate checks inputs of steps that are part of the execution graph.
Validate() error
Run(ctx context.Context) error
// Name is the name of the stage, used to target it.
// If this is the empty string the stage cannot be targeted.
Name() string
// Description is a short, human readable description of this step.
Description() string
Requires() []StepLink
Creates() []StepLink
Provides() ParameterMap
// Objects returns all objects the client for this step has seen
Objects() []ctrlruntimeclient.Object
}
type InputDefinition []string
// +k8s:deepcopy-gen=false
type ParameterMap map[string]func() (string, error)
// StepLink abstracts the types of links that steps
// require and create.
// +k8s:deepcopy-gen=false
type StepLink interface {
// SatisfiedBy determines if the other link satisfies
// the requirements of this one, either partially or
// fully. If so, the other step will be executed first.
SatisfiedBy(other StepLink) bool
// UnsatisfiableError returns a human-understandable explanation
// of where exactly in the config the requirement came from and
// what needs to be done to satisfy it. It must be checked for
// emptyness and only be used when non-empty.
UnsatisfiableError() string
}
// internalImageStreamLink describes all tags in
// an ImageStream in the test's namespace
type internalImageStreamLink struct {
name string
}
func (l *internalImageStreamLink) SatisfiedBy(other StepLink) bool {
// an ImageStream in an internal namespace may only
// be provided by a literal link for that stream
switch link := other.(type) {
case *internalImageStreamLink:
return l.name == link.name
default:
return false
}
}
func (l *internalImageStreamLink) UnsatisfiableError() string {
return ""
}
// internalImageStreamTagLink describes a specific tag in
// an ImageStream in the test's namespace
type internalImageStreamTagLink struct {
name, tag, unsatisfiableError string
}
func (l *internalImageStreamTagLink) SatisfiedBy(other StepLink) bool {
// an ImageStreamTag in an internal namespace may
// either be provided by a literal link for that tag
// or by a link that provides the full stream
switch link := other.(type) {
case *internalImageStreamTagLink:
return l.name == link.name && l.tag == link.tag
case *internalImageStreamLink:
return l.name == link.name
default:
return false
}
}
func (l *internalImageStreamTagLink) UnsatisfiableError() string {
return l.unsatisfiableError
}
func AllStepsLink() StepLink {
return allStepsLink{}
}
type allStepsLink struct{}
func (_ allStepsLink) SatisfiedBy(_ StepLink) bool {
return true
}
func (_ allStepsLink) UnsatisfiableError() string {
return ""
}
func ExternalImageLink(ref ImageStreamTagReference) StepLink {
return &externalImageLink{
namespace: ref.Namespace,
name: ref.Name,
tag: ref.Tag,
}
}
type externalImageLink struct {
namespace, name, tag string
}
func (l *externalImageLink) SatisfiedBy(other StepLink) bool {
switch link := other.(type) {
case *externalImageLink:
return l.name == link.name &&
l.namespace == link.namespace &&
l.tag == link.tag
default:
return false
}
}
func (l *externalImageLink) UnsatisfiableError() string {
return ""
}
type StepLinkOptions struct {
// UnsatisfiableError holds a human-understandable explanation
// of where exactly in the config the requirement came from and
// what needs to be done to satisfy it.
UnsatisfiableError string
}
// +k8s:deepcopy-gen=false
type StepLinkOption func(*StepLinkOptions)
func StepLinkWithUnsatisfiableErrorMessage(msg string) StepLinkOption {
return func(slo *StepLinkOptions) {
slo.UnsatisfiableError = msg
}
}
// InternalImageLink describes a dependency on a tag in the pipeline stream
func InternalImageLink(tag PipelineImageStreamTagReference, o ...StepLinkOption) StepLink {
opts := StepLinkOptions{}
for _, o := range o {
o(&opts)
}
return &internalImageStreamTagLink{
name: PipelineImageStream,
tag: string(tag),
unsatisfiableError: opts.UnsatisfiableError,
}
}
func ReleasePayloadImageLink(tag string) StepLink {
return &internalImageStreamTagLink{
name: ReleaseImageStream,
tag: tag,
}
}
func ImagesReadyLink() StepLink {
return &imagesReadyLink{}
}
type imagesReadyLink struct{}
func (l *imagesReadyLink) SatisfiedBy(other StepLink) bool {
switch other.(type) {
case *imagesReadyLink:
return true
default:
return false
}
}
func (l *imagesReadyLink) UnsatisfiableError() string {
return ""
}
func RPMRepoLink() StepLink {
return &rpmRepoLink{}
}
type rpmRepoLink struct{}
func (l *rpmRepoLink) SatisfiedBy(other StepLink) bool {
switch other.(type) {
case *rpmRepoLink:
return true
default:
return false
}
}
func (l *rpmRepoLink) UnsatisfiableError() string {
return ""
}
// ReleaseImagesLink describes the content of a stable(-foo)?
// ImageStream in the test namespace.
func ReleaseImagesLink(name string) StepLink {
return &internalImageStreamLink{
name: ReleaseStreamFor(name),
}
}
// ReleaseImageTagLink describes a specific tag in a stable(-foo)?
// ImageStream in the test namespace.
func ReleaseImageTagLink(name, tag string) StepLink {
return &internalImageStreamTagLink{
name: ReleaseStreamFor(name),
tag: tag,
}
}
func Comparer() cmp.Option {
return cmp.AllowUnexported(
internalImageStreamLink{},
internalImageStreamTagLink{},
externalImageLink{},
)
}
// ReleaseStreamFor determines the ImageStream into which a named
// release will be imported or assembled.
func ReleaseStreamFor(name string) string {
if name == LatestReleaseName {
return StableImageStream
}
return fmt.Sprintf("%s-%s", StableImageStream, name)
}
// ReleaseNameFrom determines the named release that was imported
// or assembled into an ImageStream.
func ReleaseNameFrom(stream string) string {
if stream == StableImageStream {
return LatestReleaseName
}
return strings.TrimPrefix(stream, fmt.Sprintf("%s-", StableImageStream))
}
// IsReleaseStream determines if the ImageStream was created from
// an import or assembly of a release.
func IsReleaseStream(stream string) bool {
return strings.HasPrefix(stream, StableImageStream)
}
// IsReleasePayloadStream determines if the ImageStream holds
// release payload images.
func IsReleasePayloadStream(stream string) bool {
return stream == ReleaseImageStream
}
// +k8s:deepcopy-gen=false
type StepNode struct {
Step Step
Children []*StepNode
}
// GraphConfiguration contains step data used to build the execution graph.
type GraphConfiguration struct {
// Steps accumulates step configuration as the configuration is parsed.
Steps []StepConfiguration
}
func (c *GraphConfiguration) InputImages() (ret []*InputImageTagStepConfiguration) {
for _, s := range c.Steps {
if c := s.InputImageTagStepConfiguration; c != nil {
ret = append(ret, c)
}
}
return
}
// +k8s:deepcopy-gen=false
// StepGraph is a DAG of steps referenced by its roots
type StepGraph []*StepNode
// +k8s:deepcopy-gen=false
// OrderedStepList is a topologically-ordered sequence of steps
// Edges are determined based on the Creates/Requires methods.
type OrderedStepList []*StepNode
| node := StepNode{Step: step, Children: []*StepNode{}}
allNodes = append(allNodes, &node)
}
var ret StepGraph
for _, node := range allNodes {
isRoot := true
for _, other := range allNodes {
for _, nodeRequires := range node.Step.Requires() {
for _, otherCreates := range other.Step.Creates() {
if nodeRequires.SatisfiedBy(otherCreates) {
isRoot = false
addToNode(other, node)
}
}
}
}
if isRoot {
ret = append(ret, node)
}
}
return ret
}
// BuildPartialGraph returns a graph or graphs that include
// only the dependencies of the named steps.
func BuildPartialGraph(steps []Step, names []string) (StepGraph, error) {
if len(names) == 0 {
return BuildGraph(steps), nil
}
var required []StepLink
candidates := make([]bool, len(steps))
var allNames []string
for i, step := range steps {
allNames = append(allNames, step.Name())
for j, name := range names {
if name != step.Name() {
continue
}
candidates[i] = true
required = append(required, step.Requires()...)
names = append(names[:j], names[j+1:]...)
break
}
}
if len(names) > 0 {
return nil, fmt.Errorf("the following names were not found in the config or were duplicates: %s (from %s)", strings.Join(names, ", "), strings.Join(allNames, ", "))
}
// identify all other steps that provide any links required by the current set
for {
added := 0
for i, step := range steps {
if candidates[i] {
continue
}
if HasAnyLinks(required, step.Creates()) {
added++
candidates[i] = true
required = append(required, step.Requires()...)
}
}
if added == 0 {
break
}
}
var targeted []Step
for i, candidate := range candidates {
if candidate {
targeted = append(targeted, steps[i])
}
}
return BuildGraph(targeted), nil
}
// TopologicalSort validates nodes form a DAG and orders them topologically.
func (g StepGraph) TopologicalSort() (OrderedStepList, []error) {
var ret OrderedStepList
var satisfied []StepLink
if err := iterateDAG(g, nil, sets.New[string](), func(*StepNode) {}); err != nil {
return nil, err
}
seen := make(map[Step]struct{})
for len(g) > 0 {
var changed bool
var waiting []*StepNode
for _, node := range g {
for _, child := range node.Children {
if _, ok := seen[child.Step]; !ok {
waiting = append(waiting, child)
}
}
if _, ok := seen[node.Step]; ok {
continue
}
if !HasAllLinks(node.Step.Requires(), satisfied) {
waiting = append(waiting, node)
continue
}
satisfied = append(satisfied, node.Step.Creates()...)
ret = append(ret, node)
seen[node.Step] = struct{}{}
changed = true
}
if !changed && len(waiting) > 0 {
errMessages := sets.Set[string]{}
for _, node := range waiting {
missing := sets.Set[string]{}
for _, link := range node.Step.Requires() {
if !HasAllLinks([]StepLink{link}, satisfied) {
if msg := link.UnsatisfiableError(); msg != "" {
missing.Insert(msg)
} else {
missing.Insert(fmt.Sprintf("<%#v>", link))
}
}
}
// De-Duplicate errors
errMessages.Insert(fmt.Sprintf("step %s is missing dependencies: %s", node.Step.Name(), strings.Join(sets.List(missing), ", ")))
}
ret := make([]error, 0, errMessages.Len()+1)
ret = append(ret, errors.New("steps are missing dependencies"))
for _, message := range sets.List(errMessages) {
ret = append(ret, errors.New(message))
}
return nil, ret
}
g = waiting
}
return ret, nil
}
// iterateDAG applies a function to every node of a DAG, detecting cycles.
func iterateDAG(graph StepGraph, path []string, inPath sets.Set[string], f func(*StepNode)) (ret []error) {
for _, node := range graph {
name := node.Step.Name()
if inPath.Has(name) {
ret = append(ret, fmt.Errorf("cycle in graph: %s -> %s", strings.Join(path, " -> "), name))
continue
}
inPath.Insert(name)
ret = append(ret, iterateDAG(node.Children, append(path, name), inPath, f)...)
inPath.Delete(name)
f(node)
}
return ret
}
// IterateAllEdges applies an operation to every node in the graph once.
func (g StepGraph) IterateAllEdges(f func(*StepNode)) {
iterateAllEdges(g, sets.New[string](), f)
}
func iterateAllEdges(nodes []*StepNode, alreadyIterated sets.Set[string], f func(*StepNode)) {
for _, node := range nodes {
if alreadyIterated.Has(node.Step.Name()) {
continue
}
iterateAllEdges(node.Children, alreadyIterated, f)
if alreadyIterated.Has(node.Step.Name()) {
continue
}
f(node)
alreadyIterated.Insert(node.Step.Name())
}
}
func addToNode(parent, child *StepNode) bool {
for _, s := range parent.Children {
if s == child {
return false
}
}
parent.Children = append(parent.Children, child)
return true
}
func HasAnyLinks(steps, candidates []StepLink) bool {
for _, candidate := range candidates {
for _, step := range steps {
if step.SatisfiedBy(candidate) {
return true
}
}
}
return false
}
func HasAllLinks(needles, haystack []StepLink) bool {
for _, needle := range needles {
contains := false
for _, hay := range haystack {
if hay.SatisfiedBy(needle) {
contains = true
}
}
if !contains {
return false
}
}
return true
}
// +k8s:deepcopy-gen=false
type CIOperatorStepGraph []CIOperatorStepDetails
// MergeFrom merges two CIOperatorStepGraphs together using StepNames as merge keys.
// The merging logic will never ovewrwrite data and only set unset fields.
// Steps that do not exist in the first graph get appended.
func (graph *CIOperatorStepGraph) MergeFrom(from ...CIOperatorStepDetails) {
for _, step := range from {
var found bool
for idx, existing := range *graph {
if step.StepName != existing.StepName {
continue
}
found = true
(*graph)[idx] = mergeSteps(existing, step)
}
if !found {
*graph = append(*graph, step)
}
}
}
func mergeSteps(into, from CIOperatorStepDetails) CIOperatorStepDetails {
if into.Description == "" {
into.Description = from.Description
}
if into.Dependencies == nil {
into.Dependencies = from.Dependencies
}
if into.StartedAt == nil {
into.StartedAt = from.StartedAt
}
if into.StartedAt == nil {
into.StartedAt = from.StartedAt
}
if into.FinishedAt == nil {
into.FinishedAt = from.FinishedAt
}
if into.Duration == nil {
into.Duration = from.Duration
}
if into.Manifests == nil {
into.Manifests = from.Manifests
}
if into.LogURL == "" {
into.LogURL = from.LogURL
}
if into.Failed == nil {
into.Failed = from.Failed
}
if into.Substeps == nil {
into.Substeps = from.Substeps
}
return into
}
// +k8s:deepcopy-gen=false
type CIOperatorStepDetails struct {
CIOperatorStepDetailInfo `json:",inline"`
Substeps []CIOperatorStepDetailInfo `json:"substeps,omitempty"`
}
// +k8s:deepcopy-gen=false
type CIOperatorStepDetailInfo struct {
StepName string `json:"name"`
Description string `json:"description"`
Dependencies []string `json:"dependencies"`
StartedAt *time.Time `json:"started_at"`
FinishedAt *time.Time `json:"finished_at"`
Duration *time.Duration `json:"duration,omitempty"`
Manifests []ctrlruntimeclient.Object `json:"manifests,omitempty"`
LogURL string `json:"log_url,omitempty"`
Failed *bool `json:"failed,omitempty"`
}
func (c *CIOperatorStepDetailInfo) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
manifests := []*unstructured.Unstructured{}
if rawManifests, ok := raw["manifests"]; ok {
serializedManifests, err := json.Marshal(rawManifests)
if err != nil {
return err
}
if err := json.Unmarshal(serializedManifests, &manifests); err != nil {
return err
}
delete(raw, "manifests")
}
reserializedWithoutManifests, err := json.Marshal(raw)
if err != nil {
return err
}
type silbling CIOperatorStepDetailInfo
var unmarshalTo silbling
if err := json.Unmarshal(reserializedWithoutManifests, &unmarshalTo); err != nil {
return err
}
*c = CIOperatorStepDetailInfo(unmarshalTo)
c.Manifests = nil
for _, manifest := range manifests {
c.Manifests = append(c.Manifests, manifest)
}
return nil
}
const CIOperatorStepGraphJSONFilename = "ci-operator-step-graph.json"
// StepGraphJSONURL takes a base url like https://storage.googleapis.com/origin-ci-test/pr-logs/pull/openshift_ci-tools/999/pull-ci-openshift-ci-tools-master-validate-vendor/1283812971092381696
// and returns the full url for the step graph json document.
func StepGraphJSONURL(baseJobURL string) string {
return strings.Join([]string{baseJobURL, "artifacts", CIOperatorStepGraphJSONFilename}, "/")
}
// LinkForImage determines what dependent link is required
// for the user's image dependency
func LinkForImage(imageStream, tag string) StepLink {
switch {
case imageStream == PipelineImageStream:
// the user needs an image we're building
return InternalImageLink(PipelineImageStreamTagReference(tag))
case IsReleaseStream(imageStream):
// the user needs a tag that's a component of some release;
// we cant' rely on a specific tag, as they are implicit in
// the import process and won't be present in the build graph,
// so we wait for the whole import to succeed
return ReleaseImagesLink(ReleaseNameFrom(imageStream))
case IsReleasePayloadStream(imageStream):
// the user needs a release payload
return ReleasePayloadImageLink(tag)
default:
// we have no idea what the user's configured
return nil
}
} | // BuildGraph returns a graph or graphs that include
// all steps given.
func BuildGraph(steps []Step) StepGraph {
var allNodes []*StepNode
for _, step := range steps { | random_line_split |
graph.go | package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/sets"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
// Step is a self-contained bit of work that the
// build pipeline needs to do.
// +k8s:deepcopy-gen=false
type Step interface {
Inputs() (InputDefinition, error)
// Validate checks inputs of steps that are part of the execution graph.
Validate() error
Run(ctx context.Context) error
// Name is the name of the stage, used to target it.
// If this is the empty string the stage cannot be targeted.
Name() string
// Description is a short, human readable description of this step.
Description() string
Requires() []StepLink
Creates() []StepLink
Provides() ParameterMap
// Objects returns all objects the client for this step has seen
Objects() []ctrlruntimeclient.Object
}
type InputDefinition []string
// +k8s:deepcopy-gen=false
type ParameterMap map[string]func() (string, error)
// StepLink abstracts the types of links that steps
// require and create.
// +k8s:deepcopy-gen=false
type StepLink interface {
// SatisfiedBy determines if the other link satisfies
// the requirements of this one, either partially or
// fully. If so, the other step will be executed first.
SatisfiedBy(other StepLink) bool
// UnsatisfiableError returns a human-understandable explanation
// of where exactly in the config the requirement came from and
// what needs to be done to satisfy it. It must be checked for
// emptyness and only be used when non-empty.
UnsatisfiableError() string
}
// internalImageStreamLink describes all tags in
// an ImageStream in the test's namespace
type internalImageStreamLink struct {
name string
}
func (l *internalImageStreamLink) SatisfiedBy(other StepLink) bool {
// an ImageStream in an internal namespace may only
// be provided by a literal link for that stream
switch link := other.(type) {
case *internalImageStreamLink:
return l.name == link.name
default:
return false
}
}
func (l *internalImageStreamLink) UnsatisfiableError() string {
return ""
}
// internalImageStreamTagLink describes a specific tag in
// an ImageStream in the test's namespace
type internalImageStreamTagLink struct {
name, tag, unsatisfiableError string
}
func (l *internalImageStreamTagLink) SatisfiedBy(other StepLink) bool {
// an ImageStreamTag in an internal namespace may
// either be provided by a literal link for that tag
// or by a link that provides the full stream
switch link := other.(type) {
case *internalImageStreamTagLink:
return l.name == link.name && l.tag == link.tag
case *internalImageStreamLink:
return l.name == link.name
default:
return false
}
}
func (l *internalImageStreamTagLink) | () string {
return l.unsatisfiableError
}
func AllStepsLink() StepLink {
return allStepsLink{}
}
type allStepsLink struct{}
func (_ allStepsLink) SatisfiedBy(_ StepLink) bool {
return true
}
func (_ allStepsLink) UnsatisfiableError() string {
return ""
}
func ExternalImageLink(ref ImageStreamTagReference) StepLink {
return &externalImageLink{
namespace: ref.Namespace,
name: ref.Name,
tag: ref.Tag,
}
}
type externalImageLink struct {
namespace, name, tag string
}
func (l *externalImageLink) SatisfiedBy(other StepLink) bool {
switch link := other.(type) {
case *externalImageLink:
return l.name == link.name &&
l.namespace == link.namespace &&
l.tag == link.tag
default:
return false
}
}
func (l *externalImageLink) UnsatisfiableError() string {
return ""
}
type StepLinkOptions struct {
// UnsatisfiableError holds a human-understandable explanation
// of where exactly in the config the requirement came from and
// what needs to be done to satisfy it.
UnsatisfiableError string
}
// +k8s:deepcopy-gen=false
type StepLinkOption func(*StepLinkOptions)
func StepLinkWithUnsatisfiableErrorMessage(msg string) StepLinkOption {
return func(slo *StepLinkOptions) {
slo.UnsatisfiableError = msg
}
}
// InternalImageLink describes a dependency on a tag in the pipeline stream
func InternalImageLink(tag PipelineImageStreamTagReference, o ...StepLinkOption) StepLink {
opts := StepLinkOptions{}
for _, o := range o {
o(&opts)
}
return &internalImageStreamTagLink{
name: PipelineImageStream,
tag: string(tag),
unsatisfiableError: opts.UnsatisfiableError,
}
}
func ReleasePayloadImageLink(tag string) StepLink {
return &internalImageStreamTagLink{
name: ReleaseImageStream,
tag: tag,
}
}
func ImagesReadyLink() StepLink {
return &imagesReadyLink{}
}
type imagesReadyLink struct{}
func (l *imagesReadyLink) SatisfiedBy(other StepLink) bool {
switch other.(type) {
case *imagesReadyLink:
return true
default:
return false
}
}
func (l *imagesReadyLink) UnsatisfiableError() string {
return ""
}
func RPMRepoLink() StepLink {
return &rpmRepoLink{}
}
type rpmRepoLink struct{}
func (l *rpmRepoLink) SatisfiedBy(other StepLink) bool {
switch other.(type) {
case *rpmRepoLink:
return true
default:
return false
}
}
func (l *rpmRepoLink) UnsatisfiableError() string {
return ""
}
// ReleaseImagesLink describes the content of a stable(-foo)?
// ImageStream in the test namespace.
func ReleaseImagesLink(name string) StepLink {
return &internalImageStreamLink{
name: ReleaseStreamFor(name),
}
}
// ReleaseImageTagLink describes a specific tag in a stable(-foo)?
// ImageStream in the test namespace.
func ReleaseImageTagLink(name, tag string) StepLink {
return &internalImageStreamTagLink{
name: ReleaseStreamFor(name),
tag: tag,
}
}
func Comparer() cmp.Option {
return cmp.AllowUnexported(
internalImageStreamLink{},
internalImageStreamTagLink{},
externalImageLink{},
)
}
// ReleaseStreamFor determines the ImageStream into which a named
// release will be imported or assembled.
func ReleaseStreamFor(name string) string {
if name == LatestReleaseName {
return StableImageStream
}
return fmt.Sprintf("%s-%s", StableImageStream, name)
}
// ReleaseNameFrom determines the named release that was imported
// or assembled into an ImageStream.
func ReleaseNameFrom(stream string) string {
if stream == StableImageStream {
return LatestReleaseName
}
return strings.TrimPrefix(stream, fmt.Sprintf("%s-", StableImageStream))
}
// IsReleaseStream determines if the ImageStream was created from
// an import or assembly of a release.
func IsReleaseStream(stream string) bool {
return strings.HasPrefix(stream, StableImageStream)
}
// IsReleasePayloadStream determines if the ImageStream holds
// release payload images.
func IsReleasePayloadStream(stream string) bool {
return stream == ReleaseImageStream
}
// +k8s:deepcopy-gen=false
type StepNode struct {
Step Step
Children []*StepNode
}
// GraphConfiguration contains step data used to build the execution graph.
type GraphConfiguration struct {
// Steps accumulates step configuration as the configuration is parsed.
Steps []StepConfiguration
}
func (c *GraphConfiguration) InputImages() (ret []*InputImageTagStepConfiguration) {
for _, s := range c.Steps {
if c := s.InputImageTagStepConfiguration; c != nil {
ret = append(ret, c)
}
}
return
}
// +k8s:deepcopy-gen=false
// StepGraph is a DAG of steps referenced by its roots
type StepGraph []*StepNode
// +k8s:deepcopy-gen=false
// OrderedStepList is a topologically-ordered sequence of steps
// Edges are determined based on the Creates/Requires methods.
type OrderedStepList []*StepNode
// BuildGraph returns a graph or graphs that include
// all steps given.
func BuildGraph(steps []Step) StepGraph {
var allNodes []*StepNode
for _, step := range steps {
node := StepNode{Step: step, Children: []*StepNode{}}
allNodes = append(allNodes, &node)
}
var ret StepGraph
for _, node := range allNodes {
isRoot := true
for _, other := range allNodes {
for _, nodeRequires := range node.Step.Requires() {
for _, otherCreates := range other.Step.Creates() {
if nodeRequires.SatisfiedBy(otherCreates) {
isRoot = false
addToNode(other, node)
}
}
}
}
if isRoot {
ret = append(ret, node)
}
}
return ret
}
// BuildPartialGraph returns a graph or graphs that include
// only the dependencies of the named steps.
func BuildPartialGraph(steps []Step, names []string) (StepGraph, error) {
if len(names) == 0 {
return BuildGraph(steps), nil
}
var required []StepLink
candidates := make([]bool, len(steps))
var allNames []string
for i, step := range steps {
allNames = append(allNames, step.Name())
for j, name := range names {
if name != step.Name() {
continue
}
candidates[i] = true
required = append(required, step.Requires()...)
names = append(names[:j], names[j+1:]...)
break
}
}
if len(names) > 0 {
return nil, fmt.Errorf("the following names were not found in the config or were duplicates: %s (from %s)", strings.Join(names, ", "), strings.Join(allNames, ", "))
}
// identify all other steps that provide any links required by the current set
for {
added := 0
for i, step := range steps {
if candidates[i] {
continue
}
if HasAnyLinks(required, step.Creates()) {
added++
candidates[i] = true
required = append(required, step.Requires()...)
}
}
if added == 0 {
break
}
}
var targeted []Step
for i, candidate := range candidates {
if candidate {
targeted = append(targeted, steps[i])
}
}
return BuildGraph(targeted), nil
}
// TopologicalSort validates nodes form a DAG and orders them topologically.
func (g StepGraph) TopologicalSort() (OrderedStepList, []error) {
var ret OrderedStepList
var satisfied []StepLink
if err := iterateDAG(g, nil, sets.New[string](), func(*StepNode) {}); err != nil {
return nil, err
}
seen := make(map[Step]struct{})
for len(g) > 0 {
var changed bool
var waiting []*StepNode
for _, node := range g {
for _, child := range node.Children {
if _, ok := seen[child.Step]; !ok {
waiting = append(waiting, child)
}
}
if _, ok := seen[node.Step]; ok {
continue
}
if !HasAllLinks(node.Step.Requires(), satisfied) {
waiting = append(waiting, node)
continue
}
satisfied = append(satisfied, node.Step.Creates()...)
ret = append(ret, node)
seen[node.Step] = struct{}{}
changed = true
}
if !changed && len(waiting) > 0 {
errMessages := sets.Set[string]{}
for _, node := range waiting {
missing := sets.Set[string]{}
for _, link := range node.Step.Requires() {
if !HasAllLinks([]StepLink{link}, satisfied) {
if msg := link.UnsatisfiableError(); msg != "" {
missing.Insert(msg)
} else {
missing.Insert(fmt.Sprintf("<%#v>", link))
}
}
}
// De-Duplicate errors
errMessages.Insert(fmt.Sprintf("step %s is missing dependencies: %s", node.Step.Name(), strings.Join(sets.List(missing), ", ")))
}
ret := make([]error, 0, errMessages.Len()+1)
ret = append(ret, errors.New("steps are missing dependencies"))
for _, message := range sets.List(errMessages) {
ret = append(ret, errors.New(message))
}
return nil, ret
}
g = waiting
}
return ret, nil
}
// iterateDAG applies a function to every node of a DAG, detecting cycles.
func iterateDAG(graph StepGraph, path []string, inPath sets.Set[string], f func(*StepNode)) (ret []error) {
for _, node := range graph {
name := node.Step.Name()
if inPath.Has(name) {
ret = append(ret, fmt.Errorf("cycle in graph: %s -> %s", strings.Join(path, " -> "), name))
continue
}
inPath.Insert(name)
ret = append(ret, iterateDAG(node.Children, append(path, name), inPath, f)...)
inPath.Delete(name)
f(node)
}
return ret
}
// IterateAllEdges applies an operation to every node in the graph once.
func (g StepGraph) IterateAllEdges(f func(*StepNode)) {
iterateAllEdges(g, sets.New[string](), f)
}
func iterateAllEdges(nodes []*StepNode, alreadyIterated sets.Set[string], f func(*StepNode)) {
for _, node := range nodes {
if alreadyIterated.Has(node.Step.Name()) {
continue
}
iterateAllEdges(node.Children, alreadyIterated, f)
if alreadyIterated.Has(node.Step.Name()) {
continue
}
f(node)
alreadyIterated.Insert(node.Step.Name())
}
}
func addToNode(parent, child *StepNode) bool {
for _, s := range parent.Children {
if s == child {
return false
}
}
parent.Children = append(parent.Children, child)
return true
}
func HasAnyLinks(steps, candidates []StepLink) bool {
for _, candidate := range candidates {
for _, step := range steps {
if step.SatisfiedBy(candidate) {
return true
}
}
}
return false
}
func HasAllLinks(needles, haystack []StepLink) bool {
for _, needle := range needles {
contains := false
for _, hay := range haystack {
if hay.SatisfiedBy(needle) {
contains = true
}
}
if !contains {
return false
}
}
return true
}
// +k8s:deepcopy-gen=false
type CIOperatorStepGraph []CIOperatorStepDetails
// MergeFrom merges two CIOperatorStepGraphs together using StepNames as merge keys.
// The merging logic will never ovewrwrite data and only set unset fields.
// Steps that do not exist in the first graph get appended.
func (graph *CIOperatorStepGraph) MergeFrom(from ...CIOperatorStepDetails) {
for _, step := range from {
var found bool
for idx, existing := range *graph {
if step.StepName != existing.StepName {
continue
}
found = true
(*graph)[idx] = mergeSteps(existing, step)
}
if !found {
*graph = append(*graph, step)
}
}
}
func mergeSteps(into, from CIOperatorStepDetails) CIOperatorStepDetails {
if into.Description == "" {
into.Description = from.Description
}
if into.Dependencies == nil {
into.Dependencies = from.Dependencies
}
if into.StartedAt == nil {
into.StartedAt = from.StartedAt
}
if into.StartedAt == nil {
into.StartedAt = from.StartedAt
}
if into.FinishedAt == nil {
into.FinishedAt = from.FinishedAt
}
if into.Duration == nil {
into.Duration = from.Duration
}
if into.Manifests == nil {
into.Manifests = from.Manifests
}
if into.LogURL == "" {
into.LogURL = from.LogURL
}
if into.Failed == nil {
into.Failed = from.Failed
}
if into.Substeps == nil {
into.Substeps = from.Substeps
}
return into
}
// +k8s:deepcopy-gen=false
type CIOperatorStepDetails struct {
CIOperatorStepDetailInfo `json:",inline"`
Substeps []CIOperatorStepDetailInfo `json:"substeps,omitempty"`
}
// +k8s:deepcopy-gen=false
type CIOperatorStepDetailInfo struct {
StepName string `json:"name"`
Description string `json:"description"`
Dependencies []string `json:"dependencies"`
StartedAt *time.Time `json:"started_at"`
FinishedAt *time.Time `json:"finished_at"`
Duration *time.Duration `json:"duration,omitempty"`
Manifests []ctrlruntimeclient.Object `json:"manifests,omitempty"`
LogURL string `json:"log_url,omitempty"`
Failed *bool `json:"failed,omitempty"`
}
func (c *CIOperatorStepDetailInfo) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
manifests := []*unstructured.Unstructured{}
if rawManifests, ok := raw["manifests"]; ok {
serializedManifests, err := json.Marshal(rawManifests)
if err != nil {
return err
}
if err := json.Unmarshal(serializedManifests, &manifests); err != nil {
return err
}
delete(raw, "manifests")
}
reserializedWithoutManifests, err := json.Marshal(raw)
if err != nil {
return err
}
type silbling CIOperatorStepDetailInfo
var unmarshalTo silbling
if err := json.Unmarshal(reserializedWithoutManifests, &unmarshalTo); err != nil {
return err
}
*c = CIOperatorStepDetailInfo(unmarshalTo)
c.Manifests = nil
for _, manifest := range manifests {
c.Manifests = append(c.Manifests, manifest)
}
return nil
}
const CIOperatorStepGraphJSONFilename = "ci-operator-step-graph.json"
// StepGraphJSONURL takes a base url like https://storage.googleapis.com/origin-ci-test/pr-logs/pull/openshift_ci-tools/999/pull-ci-openshift-ci-tools-master-validate-vendor/1283812971092381696
// and returns the full url for the step graph json document.
func StepGraphJSONURL(baseJobURL string) string {
return strings.Join([]string{baseJobURL, "artifacts", CIOperatorStepGraphJSONFilename}, "/")
}
// LinkForImage determines what dependent link is required
// for the user's image dependency
func LinkForImage(imageStream, tag string) StepLink {
switch {
case imageStream == PipelineImageStream:
// the user needs an image we're building
return InternalImageLink(PipelineImageStreamTagReference(tag))
case IsReleaseStream(imageStream):
// the user needs a tag that's a component of some release;
// we cant' rely on a specific tag, as they are implicit in
// the import process and won't be present in the build graph,
// so we wait for the whole import to succeed
return ReleaseImagesLink(ReleaseNameFrom(imageStream))
case IsReleasePayloadStream(imageStream):
// the user needs a release payload
return ReleasePayloadImageLink(tag)
default:
// we have no idea what the user's configured
return nil
}
}
| UnsatisfiableError | identifier_name |
graph.go | package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/sets"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
// Step is a self-contained bit of work that the
// build pipeline needs to do.
// +k8s:deepcopy-gen=false
type Step interface {
Inputs() (InputDefinition, error)
// Validate checks inputs of steps that are part of the execution graph.
Validate() error
Run(ctx context.Context) error
// Name is the name of the stage, used to target it.
// If this is the empty string the stage cannot be targeted.
Name() string
// Description is a short, human readable description of this step.
Description() string
Requires() []StepLink
Creates() []StepLink
Provides() ParameterMap
// Objects returns all objects the client for this step has seen
Objects() []ctrlruntimeclient.Object
}
type InputDefinition []string
// +k8s:deepcopy-gen=false
type ParameterMap map[string]func() (string, error)
// StepLink abstracts the types of links that steps
// require and create.
// +k8s:deepcopy-gen=false
type StepLink interface {
// SatisfiedBy determines if the other link satisfies
// the requirements of this one, either partially or
// fully. If so, the other step will be executed first.
SatisfiedBy(other StepLink) bool
// UnsatisfiableError returns a human-understandable explanation
// of where exactly in the config the requirement came from and
// what needs to be done to satisfy it. It must be checked for
// emptyness and only be used when non-empty.
UnsatisfiableError() string
}
// internalImageStreamLink describes all tags in
// an ImageStream in the test's namespace
type internalImageStreamLink struct {
name string
}
func (l *internalImageStreamLink) SatisfiedBy(other StepLink) bool {
// an ImageStream in an internal namespace may only
// be provided by a literal link for that stream
switch link := other.(type) {
case *internalImageStreamLink:
return l.name == link.name
default:
return false
}
}
func (l *internalImageStreamLink) UnsatisfiableError() string {
return ""
}
// internalImageStreamTagLink describes a specific tag in
// an ImageStream in the test's namespace
type internalImageStreamTagLink struct {
name, tag, unsatisfiableError string
}
func (l *internalImageStreamTagLink) SatisfiedBy(other StepLink) bool {
// an ImageStreamTag in an internal namespace may
// either be provided by a literal link for that tag
// or by a link that provides the full stream
switch link := other.(type) {
case *internalImageStreamTagLink:
return l.name == link.name && l.tag == link.tag
case *internalImageStreamLink:
return l.name == link.name
default:
return false
}
}
func (l *internalImageStreamTagLink) UnsatisfiableError() string {
return l.unsatisfiableError
}
func AllStepsLink() StepLink {
return allStepsLink{}
}
type allStepsLink struct{}
func (_ allStepsLink) SatisfiedBy(_ StepLink) bool {
return true
}
func (_ allStepsLink) UnsatisfiableError() string {
return ""
}
func ExternalImageLink(ref ImageStreamTagReference) StepLink {
return &externalImageLink{
namespace: ref.Namespace,
name: ref.Name,
tag: ref.Tag,
}
}
type externalImageLink struct {
namespace, name, tag string
}
func (l *externalImageLink) SatisfiedBy(other StepLink) bool {
switch link := other.(type) {
case *externalImageLink:
return l.name == link.name &&
l.namespace == link.namespace &&
l.tag == link.tag
default:
return false
}
}
func (l *externalImageLink) UnsatisfiableError() string {
return ""
}
type StepLinkOptions struct {
// UnsatisfiableError holds a human-understandable explanation
// of where exactly in the config the requirement came from and
// what needs to be done to satisfy it.
UnsatisfiableError string
}
// +k8s:deepcopy-gen=false
type StepLinkOption func(*StepLinkOptions)
func StepLinkWithUnsatisfiableErrorMessage(msg string) StepLinkOption {
return func(slo *StepLinkOptions) {
slo.UnsatisfiableError = msg
}
}
// InternalImageLink describes a dependency on a tag in the pipeline stream
func InternalImageLink(tag PipelineImageStreamTagReference, o ...StepLinkOption) StepLink {
opts := StepLinkOptions{}
for _, o := range o {
o(&opts)
}
return &internalImageStreamTagLink{
name: PipelineImageStream,
tag: string(tag),
unsatisfiableError: opts.UnsatisfiableError,
}
}
func ReleasePayloadImageLink(tag string) StepLink {
return &internalImageStreamTagLink{
name: ReleaseImageStream,
tag: tag,
}
}
func ImagesReadyLink() StepLink {
return &imagesReadyLink{}
}
type imagesReadyLink struct{}
func (l *imagesReadyLink) SatisfiedBy(other StepLink) bool {
switch other.(type) {
case *imagesReadyLink:
return true
default:
return false
}
}
func (l *imagesReadyLink) UnsatisfiableError() string {
return ""
}
func RPMRepoLink() StepLink {
return &rpmRepoLink{}
}
type rpmRepoLink struct{}
func (l *rpmRepoLink) SatisfiedBy(other StepLink) bool {
switch other.(type) {
case *rpmRepoLink:
return true
default:
return false
}
}
func (l *rpmRepoLink) UnsatisfiableError() string {
return ""
}
// ReleaseImagesLink describes the content of a stable(-foo)?
// ImageStream in the test namespace.
func ReleaseImagesLink(name string) StepLink {
return &internalImageStreamLink{
name: ReleaseStreamFor(name),
}
}
// ReleaseImageTagLink describes a specific tag in a stable(-foo)?
// ImageStream in the test namespace.
func ReleaseImageTagLink(name, tag string) StepLink {
return &internalImageStreamTagLink{
name: ReleaseStreamFor(name),
tag: tag,
}
}
func Comparer() cmp.Option {
return cmp.AllowUnexported(
internalImageStreamLink{},
internalImageStreamTagLink{},
externalImageLink{},
)
}
// ReleaseStreamFor determines the ImageStream into which a named
// release will be imported or assembled.
func ReleaseStreamFor(name string) string {
if name == LatestReleaseName {
return StableImageStream
}
return fmt.Sprintf("%s-%s", StableImageStream, name)
}
// ReleaseNameFrom determines the named release that was imported
// or assembled into an ImageStream.
func ReleaseNameFrom(stream string) string {
if stream == StableImageStream {
return LatestReleaseName
}
return strings.TrimPrefix(stream, fmt.Sprintf("%s-", StableImageStream))
}
// IsReleaseStream determines if the ImageStream was created from
// an import or assembly of a release.
func IsReleaseStream(stream string) bool {
return strings.HasPrefix(stream, StableImageStream)
}
// IsReleasePayloadStream determines if the ImageStream holds
// release payload images.
func IsReleasePayloadStream(stream string) bool {
return stream == ReleaseImageStream
}
// +k8s:deepcopy-gen=false
type StepNode struct {
Step Step
Children []*StepNode
}
// GraphConfiguration contains step data used to build the execution graph.
type GraphConfiguration struct {
// Steps accumulates step configuration as the configuration is parsed.
Steps []StepConfiguration
}
func (c *GraphConfiguration) InputImages() (ret []*InputImageTagStepConfiguration) {
for _, s := range c.Steps {
if c := s.InputImageTagStepConfiguration; c != nil {
ret = append(ret, c)
}
}
return
}
// +k8s:deepcopy-gen=false
// StepGraph is a DAG of steps referenced by its roots
type StepGraph []*StepNode
// +k8s:deepcopy-gen=false
// OrderedStepList is a topologically-ordered sequence of steps
// Edges are determined based on the Creates/Requires methods.
type OrderedStepList []*StepNode
// BuildGraph returns a graph or graphs that include
// all steps given.
func BuildGraph(steps []Step) StepGraph {
var allNodes []*StepNode
for _, step := range steps {
node := StepNode{Step: step, Children: []*StepNode{}}
allNodes = append(allNodes, &node)
}
var ret StepGraph
for _, node := range allNodes {
isRoot := true
for _, other := range allNodes {
for _, nodeRequires := range node.Step.Requires() {
for _, otherCreates := range other.Step.Creates() {
if nodeRequires.SatisfiedBy(otherCreates) {
isRoot = false
addToNode(other, node)
}
}
}
}
if isRoot {
ret = append(ret, node)
}
}
return ret
}
// BuildPartialGraph returns a graph or graphs that include
// only the dependencies of the named steps.
func BuildPartialGraph(steps []Step, names []string) (StepGraph, error) {
if len(names) == 0 {
return BuildGraph(steps), nil
}
var required []StepLink
candidates := make([]bool, len(steps))
var allNames []string
for i, step := range steps {
allNames = append(allNames, step.Name())
for j, name := range names {
if name != step.Name() {
continue
}
candidates[i] = true
required = append(required, step.Requires()...)
names = append(names[:j], names[j+1:]...)
break
}
}
if len(names) > 0 {
return nil, fmt.Errorf("the following names were not found in the config or were duplicates: %s (from %s)", strings.Join(names, ", "), strings.Join(allNames, ", "))
}
// identify all other steps that provide any links required by the current set
for {
added := 0
for i, step := range steps {
if candidates[i] {
continue
}
if HasAnyLinks(required, step.Creates()) {
added++
candidates[i] = true
required = append(required, step.Requires()...)
}
}
if added == 0 {
break
}
}
var targeted []Step
for i, candidate := range candidates {
if candidate {
targeted = append(targeted, steps[i])
}
}
return BuildGraph(targeted), nil
}
// TopologicalSort validates nodes form a DAG and orders them topologically.
func (g StepGraph) TopologicalSort() (OrderedStepList, []error) {
var ret OrderedStepList
var satisfied []StepLink
if err := iterateDAG(g, nil, sets.New[string](), func(*StepNode) {}); err != nil {
return nil, err
}
seen := make(map[Step]struct{})
for len(g) > 0 {
var changed bool
var waiting []*StepNode
for _, node := range g {
for _, child := range node.Children {
if _, ok := seen[child.Step]; !ok {
waiting = append(waiting, child)
}
}
if _, ok := seen[node.Step]; ok {
continue
}
if !HasAllLinks(node.Step.Requires(), satisfied) {
waiting = append(waiting, node)
continue
}
satisfied = append(satisfied, node.Step.Creates()...)
ret = append(ret, node)
seen[node.Step] = struct{}{}
changed = true
}
if !changed && len(waiting) > 0 {
errMessages := sets.Set[string]{}
for _, node := range waiting {
missing := sets.Set[string]{}
for _, link := range node.Step.Requires() {
if !HasAllLinks([]StepLink{link}, satisfied) {
if msg := link.UnsatisfiableError(); msg != "" {
missing.Insert(msg)
} else {
missing.Insert(fmt.Sprintf("<%#v>", link))
}
}
}
// De-Duplicate errors
errMessages.Insert(fmt.Sprintf("step %s is missing dependencies: %s", node.Step.Name(), strings.Join(sets.List(missing), ", ")))
}
ret := make([]error, 0, errMessages.Len()+1)
ret = append(ret, errors.New("steps are missing dependencies"))
for _, message := range sets.List(errMessages) {
ret = append(ret, errors.New(message))
}
return nil, ret
}
g = waiting
}
return ret, nil
}
// iterateDAG applies a function to every node of a DAG, detecting cycles.
func iterateDAG(graph StepGraph, path []string, inPath sets.Set[string], f func(*StepNode)) (ret []error) {
for _, node := range graph {
name := node.Step.Name()
if inPath.Has(name) {
ret = append(ret, fmt.Errorf("cycle in graph: %s -> %s", strings.Join(path, " -> "), name))
continue
}
inPath.Insert(name)
ret = append(ret, iterateDAG(node.Children, append(path, name), inPath, f)...)
inPath.Delete(name)
f(node)
}
return ret
}
// IterateAllEdges applies an operation to every node in the graph once.
func (g StepGraph) IterateAllEdges(f func(*StepNode)) {
iterateAllEdges(g, sets.New[string](), f)
}
func iterateAllEdges(nodes []*StepNode, alreadyIterated sets.Set[string], f func(*StepNode)) {
for _, node := range nodes {
if alreadyIterated.Has(node.Step.Name()) {
continue
}
iterateAllEdges(node.Children, alreadyIterated, f)
if alreadyIterated.Has(node.Step.Name()) {
continue
}
f(node)
alreadyIterated.Insert(node.Step.Name())
}
}
func addToNode(parent, child *StepNode) bool {
for _, s := range parent.Children {
if s == child {
return false
}
}
parent.Children = append(parent.Children, child)
return true
}
func HasAnyLinks(steps, candidates []StepLink) bool {
for _, candidate := range candidates {
for _, step := range steps {
if step.SatisfiedBy(candidate) {
return true
}
}
}
return false
}
func HasAllLinks(needles, haystack []StepLink) bool {
for _, needle := range needles {
contains := false
for _, hay := range haystack {
if hay.SatisfiedBy(needle) {
contains = true
}
}
if !contains {
return false
}
}
return true
}
// +k8s:deepcopy-gen=false
type CIOperatorStepGraph []CIOperatorStepDetails
// MergeFrom merges two CIOperatorStepGraphs together using StepNames as merge keys.
// The merging logic will never ovewrwrite data and only set unset fields.
// Steps that do not exist in the first graph get appended.
func (graph *CIOperatorStepGraph) MergeFrom(from ...CIOperatorStepDetails) {
for _, step := range from {
var found bool
for idx, existing := range *graph {
if step.StepName != existing.StepName {
continue
}
found = true
(*graph)[idx] = mergeSteps(existing, step)
}
if !found {
*graph = append(*graph, step)
}
}
}
func mergeSteps(into, from CIOperatorStepDetails) CIOperatorStepDetails |
// +k8s:deepcopy-gen=false
type CIOperatorStepDetails struct {
CIOperatorStepDetailInfo `json:",inline"`
Substeps []CIOperatorStepDetailInfo `json:"substeps,omitempty"`
}
// +k8s:deepcopy-gen=false
type CIOperatorStepDetailInfo struct {
StepName string `json:"name"`
Description string `json:"description"`
Dependencies []string `json:"dependencies"`
StartedAt *time.Time `json:"started_at"`
FinishedAt *time.Time `json:"finished_at"`
Duration *time.Duration `json:"duration,omitempty"`
Manifests []ctrlruntimeclient.Object `json:"manifests,omitempty"`
LogURL string `json:"log_url,omitempty"`
Failed *bool `json:"failed,omitempty"`
}
func (c *CIOperatorStepDetailInfo) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
manifests := []*unstructured.Unstructured{}
if rawManifests, ok := raw["manifests"]; ok {
serializedManifests, err := json.Marshal(rawManifests)
if err != nil {
return err
}
if err := json.Unmarshal(serializedManifests, &manifests); err != nil {
return err
}
delete(raw, "manifests")
}
reserializedWithoutManifests, err := json.Marshal(raw)
if err != nil {
return err
}
type silbling CIOperatorStepDetailInfo
var unmarshalTo silbling
if err := json.Unmarshal(reserializedWithoutManifests, &unmarshalTo); err != nil {
return err
}
*c = CIOperatorStepDetailInfo(unmarshalTo)
c.Manifests = nil
for _, manifest := range manifests {
c.Manifests = append(c.Manifests, manifest)
}
return nil
}
const CIOperatorStepGraphJSONFilename = "ci-operator-step-graph.json"
// StepGraphJSONURL takes a base url like https://storage.googleapis.com/origin-ci-test/pr-logs/pull/openshift_ci-tools/999/pull-ci-openshift-ci-tools-master-validate-vendor/1283812971092381696
// and returns the full url for the step graph json document.
func StepGraphJSONURL(baseJobURL string) string {
return strings.Join([]string{baseJobURL, "artifacts", CIOperatorStepGraphJSONFilename}, "/")
}
// LinkForImage determines what dependent link is required
// for the user's image dependency
func LinkForImage(imageStream, tag string) StepLink {
switch {
case imageStream == PipelineImageStream:
// the user needs an image we're building
return InternalImageLink(PipelineImageStreamTagReference(tag))
case IsReleaseStream(imageStream):
// the user needs a tag that's a component of some release;
// we cant' rely on a specific tag, as they are implicit in
// the import process and won't be present in the build graph,
// so we wait for the whole import to succeed
return ReleaseImagesLink(ReleaseNameFrom(imageStream))
case IsReleasePayloadStream(imageStream):
// the user needs a release payload
return ReleasePayloadImageLink(tag)
default:
// we have no idea what the user's configured
return nil
}
}
| {
if into.Description == "" {
into.Description = from.Description
}
if into.Dependencies == nil {
into.Dependencies = from.Dependencies
}
if into.StartedAt == nil {
into.StartedAt = from.StartedAt
}
if into.StartedAt == nil {
into.StartedAt = from.StartedAt
}
if into.FinishedAt == nil {
into.FinishedAt = from.FinishedAt
}
if into.Duration == nil {
into.Duration = from.Duration
}
if into.Manifests == nil {
into.Manifests = from.Manifests
}
if into.LogURL == "" {
into.LogURL = from.LogURL
}
if into.Failed == nil {
into.Failed = from.Failed
}
if into.Substeps == nil {
into.Substeps = from.Substeps
}
return into
} | identifier_body |
ppapi_generator.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import datetime
import os.path
import sys
import code
import cpp_util
import model
try:
import jinja2
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party'))
import jinja2
class _PpapiGeneratorBase(object):
"""A base class for ppapi generators.
Implementations should set TEMPLATE_NAME to a string containing the name of
the template file without its extension. The template will be rendered with
the following symbols available:
name: A string containing the name of the namespace.
enums: A list of enums within the namespace.
types: A list of types within the namespace, sorted such that no element
depends on an earlier element.
events: A dict of events within the namespace.
functions: A dict of functions within the namespace.
year: An int containing the current year.
source_file: The name of the input file.
"""
def __init__(self, namespace):
self._namespace = namespace
self._required_types = {}
self._array_types = set()
self._optional_types = set()
self._optional_array_types = set()
self._dependencies = collections.OrderedDict()
self._types = []
self._enums = []
self.jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),
'templates', 'ppapi')))
self._SetupFilters()
self._ResolveTypeDependencies()
def _SetupFilters(self):
self.jinja_environment.filters.update({
'ppapi_type': self.ToPpapiType,
'classname': cpp_util.Classname,
'enum_value': self.EnumValueName,
'return_type': self.GetFunctionReturnType,
'format_param_type': self.FormatParamType,
'needs_optional': self.NeedsOptional,
'needs_array': self.NeedsArray,
'needs_optional_array': self.NeedsOptionalArray,
'has_array_outs': self.HasArrayOuts,
})
def Render(self, template_name, values):
generated_code = code.Code()
template = self.jinja_environment.get_template(
'%s.template' % template_name) | def Generate(self):
"""Generates a Code object for a single namespace."""
return self.Render(self.TEMPLATE_NAME, {
'name': self._namespace.name,
'enums': self._enums,
'types': self._types,
'events': self._namespace.events,
'functions': self._namespace.functions,
# TODO(sammc): Don't change years when regenerating existing output files.
'year': datetime.date.today().year,
'source_file': self._namespace.source_file,
})
def _ResolveTypeDependencies(self):
"""Calculates the transitive closure of the types in _required_types.
Returns a tuple containing the list of struct types and the list of enum
types. The list of struct types is ordered such that no type depends on a
type later in the list.
"""
if self._namespace.functions:
for function in self._namespace.functions.itervalues():
self._FindFunctionDependencies(function)
if self._namespace.events:
for event in self._namespace.events.itervalues():
self._FindFunctionDependencies(event)
resolved_types = set()
while resolved_types < set(self._required_types):
for typename in sorted(set(self._required_types) - resolved_types):
type_ = self._required_types[typename]
self._dependencies.setdefault(typename, set())
for member in type_.properties.itervalues():
self._RegisterDependency(member, self._NameComponents(type_))
resolved_types.add(typename)
while self._dependencies:
for name, deps in self._dependencies.items():
if not deps:
if (self._required_types[name].property_type ==
model.PropertyType.ENUM):
self._enums.append(self._required_types[name])
else:
self._types.append(self._required_types[name])
for deps in self._dependencies.itervalues():
deps.discard(name)
del self._dependencies[name]
break
else:
raise ValueError('Circular dependency %s' % self._dependencies)
def _FindFunctionDependencies(self, function):
for param in function.params:
self._RegisterDependency(param, None)
if function.callback:
for param in function.callback.params:
self._RegisterDependency(param, None)
if function.returns:
self._RegisterTypeDependency(function.returns, None, False, False)
def _RegisterDependency(self, member, depender):
self._RegisterTypeDependency(member.type_, depender, member.optional, False)
def _RegisterTypeDependency(self, type_, depender, optional, array):
if type_.property_type == model.PropertyType.ARRAY:
self._RegisterTypeDependency(type_.item_type, depender, optional, True)
elif type_.property_type == model.PropertyType.REF:
self._RegisterTypeDependency(self._namespace.types[type_.ref_type],
depender, optional, array)
elif type_.property_type in (model.PropertyType.OBJECT,
model.PropertyType.ENUM):
name_components = self._NameComponents(type_)
self._required_types[name_components] = type_
if depender:
self._dependencies.setdefault(depender, set()).add(
name_components)
if array:
self._array_types.add(name_components)
if optional:
self._optional_array_types.add(name_components)
elif optional:
self._optional_types.add(name_components)
@staticmethod
def _NameComponents(entity):
"""Returns a tuple of the fully-qualified name of an entity."""
names = []
while entity:
if (not isinstance(entity, model.Type) or
entity.property_type != model.PropertyType.ARRAY):
names.append(entity.name)
entity = entity.parent
return tuple(reversed(names[:-1]))
def ToPpapiType(self, type_, array=False, optional=False):
"""Returns a string containing the name of the Pepper C type for |type_|.
If array is True, returns the name of an array of |type_|. If optional is
True, returns the name of an optional |type_|. If both array and optional
are True, returns the name of an optional array of |type_|.
"""
if isinstance(type_, model.Function) or type_.property_type in (
model.PropertyType.OBJECT, model.PropertyType.ENUM):
return self._FormatPpapiTypeName(
array, optional, '_'.join(
cpp_util.Classname(s) for s in self._NameComponents(type_)),
namespace=cpp_util.Classname(self._namespace.name))
elif type_.property_type == model.PropertyType.REF:
return self.ToPpapiType(self._namespace.types[type_.ref_type],
optional=optional, array=array)
elif type_.property_type == model.PropertyType.ARRAY:
return self.ToPpapiType(type_.item_type, array=True,
optional=optional)
elif type_.property_type == model.PropertyType.STRING and not array:
return 'PP_Var'
elif array or optional:
if type_.property_type in self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP:
return self._FormatPpapiTypeName(
array, optional,
self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP[type_.property_type], '')
return self._PPAPI_PRIMITIVE_TYPE_MAP.get(type_.property_type, 'PP_Var')
_PPAPI_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'PP_Bool',
model.PropertyType.DOUBLE: 'double_t',
model.PropertyType.INT64: 'int64_t',
model.PropertyType.INTEGER: 'int32_t',
}
_PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'Bool',
model.PropertyType.DOUBLE: 'Double',
model.PropertyType.INT64: 'Int64',
model.PropertyType.INTEGER: 'Int32',
model.PropertyType.STRING: 'String',
}
@staticmethod
def _FormatPpapiTypeName(array, optional, name, namespace=''):
if namespace:
namespace = '%s_' % namespace
if array:
if optional:
return 'PP_%sOptional_%s_Array' % (namespace, name)
return 'PP_%s%s_Array' % (namespace, name)
if optional:
return 'PP_%sOptional_%s' % (namespace, name)
return 'PP_%s%s' % (namespace, name)
def NeedsOptional(self, type_):
"""Returns True if an optional |type_| is required."""
return self._NameComponents(type_) in self._optional_types
def NeedsArray(self, type_):
"""Returns True if an array of |type_| is required."""
return self._NameComponents(type_) in self._array_types
def NeedsOptionalArray(self, type_):
"""Returns True if an optional array of |type_| is required."""
return self._NameComponents(type_) in self._optional_array_types
def FormatParamType(self, param):
"""Formats the type of a parameter or property."""
return self.ToPpapiType(param.type_, optional=param.optional)
@staticmethod
def GetFunctionReturnType(function):
return 'int32_t' if function.callback or function.returns else 'void'
def EnumValueName(self, enum_value, enum_type):
"""Returns a string containing the name for an enum value."""
return '%s_%s' % (self.ToPpapiType(enum_type).upper(),
enum_value.name.upper())
def _ResolveType(self, type_):
if type_.property_type == model.PropertyType.REF:
return self._ResolveType(self._namespace.types[type_.ref_type])
if type_.property_type == model.PropertyType.ARRAY:
return self._ResolveType(type_.item_type)
return type_
def _IsOrContainsArray(self, type_):
if type_.property_type == model.PropertyType.ARRAY:
return True
type_ = self._ResolveType(type_)
if type_.property_type == model.PropertyType.OBJECT:
return any(self._IsOrContainsArray(param.type_)
for param in type_.properties.itervalues())
return False
def HasArrayOuts(self, function):
"""Returns True if the function produces any arrays as outputs.
This includes arrays that are properties of other objects.
"""
if function.callback:
for param in function.callback.params:
if self._IsOrContainsArray(param.type_):
return True
return function.returns and self._IsOrContainsArray(function.returns)
class _IdlGenerator(_PpapiGeneratorBase):
TEMPLATE_NAME = 'idl'
class _GeneratorWrapper(object):
def __init__(self, generator_factory):
self._generator_factory = generator_factory
def Generate(self, namespace):
return self._generator_factory(namespace).Generate()
class PpapiGenerator(object):
def __init__(self):
self.idl_generator = _GeneratorWrapper(_IdlGenerator) | generated_code.Append(template.render(values))
return generated_code
| random_line_split |
ppapi_generator.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import datetime
import os.path
import sys
import code
import cpp_util
import model
try:
import jinja2
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party'))
import jinja2
class _PpapiGeneratorBase(object):
"""A base class for ppapi generators.
Implementations should set TEMPLATE_NAME to a string containing the name of
the template file without its extension. The template will be rendered with
the following symbols available:
name: A string containing the name of the namespace.
enums: A list of enums within the namespace.
types: A list of types within the namespace, sorted such that no element
depends on an earlier element.
events: A dict of events within the namespace.
functions: A dict of functions within the namespace.
year: An int containing the current year.
source_file: The name of the input file.
"""
def __init__(self, namespace):
self._namespace = namespace
self._required_types = {}
self._array_types = set()
self._optional_types = set()
self._optional_array_types = set()
self._dependencies = collections.OrderedDict()
self._types = []
self._enums = []
self.jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),
'templates', 'ppapi')))
self._SetupFilters()
self._ResolveTypeDependencies()
def _SetupFilters(self):
self.jinja_environment.filters.update({
'ppapi_type': self.ToPpapiType,
'classname': cpp_util.Classname,
'enum_value': self.EnumValueName,
'return_type': self.GetFunctionReturnType,
'format_param_type': self.FormatParamType,
'needs_optional': self.NeedsOptional,
'needs_array': self.NeedsArray,
'needs_optional_array': self.NeedsOptionalArray,
'has_array_outs': self.HasArrayOuts,
})
def Render(self, template_name, values):
generated_code = code.Code()
template = self.jinja_environment.get_template(
'%s.template' % template_name)
generated_code.Append(template.render(values))
return generated_code
def Generate(self):
"""Generates a Code object for a single namespace."""
return self.Render(self.TEMPLATE_NAME, {
'name': self._namespace.name,
'enums': self._enums,
'types': self._types,
'events': self._namespace.events,
'functions': self._namespace.functions,
# TODO(sammc): Don't change years when regenerating existing output files.
'year': datetime.date.today().year,
'source_file': self._namespace.source_file,
})
def _ResolveTypeDependencies(self):
"""Calculates the transitive closure of the types in _required_types.
Returns a tuple containing the list of struct types and the list of enum
types. The list of struct types is ordered such that no type depends on a
type later in the list.
"""
if self._namespace.functions:
for function in self._namespace.functions.itervalues():
self._FindFunctionDependencies(function)
if self._namespace.events:
for event in self._namespace.events.itervalues():
self._FindFunctionDependencies(event)
resolved_types = set()
while resolved_types < set(self._required_types):
|
while self._dependencies:
for name, deps in self._dependencies.items():
if not deps:
if (self._required_types[name].property_type ==
model.PropertyType.ENUM):
self._enums.append(self._required_types[name])
else:
self._types.append(self._required_types[name])
for deps in self._dependencies.itervalues():
deps.discard(name)
del self._dependencies[name]
break
else:
raise ValueError('Circular dependency %s' % self._dependencies)
def _FindFunctionDependencies(self, function):
for param in function.params:
self._RegisterDependency(param, None)
if function.callback:
for param in function.callback.params:
self._RegisterDependency(param, None)
if function.returns:
self._RegisterTypeDependency(function.returns, None, False, False)
def _RegisterDependency(self, member, depender):
self._RegisterTypeDependency(member.type_, depender, member.optional, False)
def _RegisterTypeDependency(self, type_, depender, optional, array):
if type_.property_type == model.PropertyType.ARRAY:
self._RegisterTypeDependency(type_.item_type, depender, optional, True)
elif type_.property_type == model.PropertyType.REF:
self._RegisterTypeDependency(self._namespace.types[type_.ref_type],
depender, optional, array)
elif type_.property_type in (model.PropertyType.OBJECT,
model.PropertyType.ENUM):
name_components = self._NameComponents(type_)
self._required_types[name_components] = type_
if depender:
self._dependencies.setdefault(depender, set()).add(
name_components)
if array:
self._array_types.add(name_components)
if optional:
self._optional_array_types.add(name_components)
elif optional:
self._optional_types.add(name_components)
@staticmethod
def _NameComponents(entity):
"""Returns a tuple of the fully-qualified name of an entity."""
names = []
while entity:
if (not isinstance(entity, model.Type) or
entity.property_type != model.PropertyType.ARRAY):
names.append(entity.name)
entity = entity.parent
return tuple(reversed(names[:-1]))
def ToPpapiType(self, type_, array=False, optional=False):
"""Returns a string containing the name of the Pepper C type for |type_|.
If array is True, returns the name of an array of |type_|. If optional is
True, returns the name of an optional |type_|. If both array and optional
are True, returns the name of an optional array of |type_|.
"""
if isinstance(type_, model.Function) or type_.property_type in (
model.PropertyType.OBJECT, model.PropertyType.ENUM):
return self._FormatPpapiTypeName(
array, optional, '_'.join(
cpp_util.Classname(s) for s in self._NameComponents(type_)),
namespace=cpp_util.Classname(self._namespace.name))
elif type_.property_type == model.PropertyType.REF:
return self.ToPpapiType(self._namespace.types[type_.ref_type],
optional=optional, array=array)
elif type_.property_type == model.PropertyType.ARRAY:
return self.ToPpapiType(type_.item_type, array=True,
optional=optional)
elif type_.property_type == model.PropertyType.STRING and not array:
return 'PP_Var'
elif array or optional:
if type_.property_type in self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP:
return self._FormatPpapiTypeName(
array, optional,
self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP[type_.property_type], '')
return self._PPAPI_PRIMITIVE_TYPE_MAP.get(type_.property_type, 'PP_Var')
_PPAPI_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'PP_Bool',
model.PropertyType.DOUBLE: 'double_t',
model.PropertyType.INT64: 'int64_t',
model.PropertyType.INTEGER: 'int32_t',
}
_PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'Bool',
model.PropertyType.DOUBLE: 'Double',
model.PropertyType.INT64: 'Int64',
model.PropertyType.INTEGER: 'Int32',
model.PropertyType.STRING: 'String',
}
@staticmethod
def _FormatPpapiTypeName(array, optional, name, namespace=''):
if namespace:
namespace = '%s_' % namespace
if array:
if optional:
return 'PP_%sOptional_%s_Array' % (namespace, name)
return 'PP_%s%s_Array' % (namespace, name)
if optional:
return 'PP_%sOptional_%s' % (namespace, name)
return 'PP_%s%s' % (namespace, name)
def NeedsOptional(self, type_):
"""Returns True if an optional |type_| is required."""
return self._NameComponents(type_) in self._optional_types
def NeedsArray(self, type_):
"""Returns True if an array of |type_| is required."""
return self._NameComponents(type_) in self._array_types
def NeedsOptionalArray(self, type_):
"""Returns True if an optional array of |type_| is required."""
return self._NameComponents(type_) in self._optional_array_types
def FormatParamType(self, param):
"""Formats the type of a parameter or property."""
return self.ToPpapiType(param.type_, optional=param.optional)
@staticmethod
def GetFunctionReturnType(function):
return 'int32_t' if function.callback or function.returns else 'void'
def EnumValueName(self, enum_value, enum_type):
"""Returns a string containing the name for an enum value."""
return '%s_%s' % (self.ToPpapiType(enum_type).upper(),
enum_value.name.upper())
def _ResolveType(self, type_):
if type_.property_type == model.PropertyType.REF:
return self._ResolveType(self._namespace.types[type_.ref_type])
if type_.property_type == model.PropertyType.ARRAY:
return self._ResolveType(type_.item_type)
return type_
def _IsOrContainsArray(self, type_):
if type_.property_type == model.PropertyType.ARRAY:
return True
type_ = self._ResolveType(type_)
if type_.property_type == model.PropertyType.OBJECT:
return any(self._IsOrContainsArray(param.type_)
for param in type_.properties.itervalues())
return False
def HasArrayOuts(self, function):
"""Returns True if the function produces any arrays as outputs.
This includes arrays that are properties of other objects.
"""
if function.callback:
for param in function.callback.params:
if self._IsOrContainsArray(param.type_):
return True
return function.returns and self._IsOrContainsArray(function.returns)
class _IdlGenerator(_PpapiGeneratorBase):
TEMPLATE_NAME = 'idl'
class _GeneratorWrapper(object):
def __init__(self, generator_factory):
self._generator_factory = generator_factory
def Generate(self, namespace):
return self._generator_factory(namespace).Generate()
class PpapiGenerator(object):
def __init__(self):
self.idl_generator = _GeneratorWrapper(_IdlGenerator)
| for typename in sorted(set(self._required_types) - resolved_types):
type_ = self._required_types[typename]
self._dependencies.setdefault(typename, set())
for member in type_.properties.itervalues():
self._RegisterDependency(member, self._NameComponents(type_))
resolved_types.add(typename) | conditional_block |
ppapi_generator.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import datetime
import os.path
import sys
import code
import cpp_util
import model
try:
import jinja2
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party'))
import jinja2
class _PpapiGeneratorBase(object):
"""A base class for ppapi generators.
Implementations should set TEMPLATE_NAME to a string containing the name of
the template file without its extension. The template will be rendered with
the following symbols available:
name: A string containing the name of the namespace.
enums: A list of enums within the namespace.
types: A list of types within the namespace, sorted such that no element
depends on an earlier element.
events: A dict of events within the namespace.
functions: A dict of functions within the namespace.
year: An int containing the current year.
source_file: The name of the input file.
"""
def __init__(self, namespace):
self._namespace = namespace
self._required_types = {}
self._array_types = set()
self._optional_types = set()
self._optional_array_types = set()
self._dependencies = collections.OrderedDict()
self._types = []
self._enums = []
self.jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),
'templates', 'ppapi')))
self._SetupFilters()
self._ResolveTypeDependencies()
def _SetupFilters(self):
self.jinja_environment.filters.update({
'ppapi_type': self.ToPpapiType,
'classname': cpp_util.Classname,
'enum_value': self.EnumValueName,
'return_type': self.GetFunctionReturnType,
'format_param_type': self.FormatParamType,
'needs_optional': self.NeedsOptional,
'needs_array': self.NeedsArray,
'needs_optional_array': self.NeedsOptionalArray,
'has_array_outs': self.HasArrayOuts,
})
def Render(self, template_name, values):
generated_code = code.Code()
template = self.jinja_environment.get_template(
'%s.template' % template_name)
generated_code.Append(template.render(values))
return generated_code
def Generate(self):
"""Generates a Code object for a single namespace."""
return self.Render(self.TEMPLATE_NAME, {
'name': self._namespace.name,
'enums': self._enums,
'types': self._types,
'events': self._namespace.events,
'functions': self._namespace.functions,
# TODO(sammc): Don't change years when regenerating existing output files.
'year': datetime.date.today().year,
'source_file': self._namespace.source_file,
})
def _ResolveTypeDependencies(self):
"""Calculates the transitive closure of the types in _required_types.
Returns a tuple containing the list of struct types and the list of enum
types. The list of struct types is ordered such that no type depends on a
type later in the list.
"""
if self._namespace.functions:
for function in self._namespace.functions.itervalues():
self._FindFunctionDependencies(function)
if self._namespace.events:
for event in self._namespace.events.itervalues():
self._FindFunctionDependencies(event)
resolved_types = set()
while resolved_types < set(self._required_types):
for typename in sorted(set(self._required_types) - resolved_types):
type_ = self._required_types[typename]
self._dependencies.setdefault(typename, set())
for member in type_.properties.itervalues():
self._RegisterDependency(member, self._NameComponents(type_))
resolved_types.add(typename)
while self._dependencies:
for name, deps in self._dependencies.items():
if not deps:
if (self._required_types[name].property_type ==
model.PropertyType.ENUM):
self._enums.append(self._required_types[name])
else:
self._types.append(self._required_types[name])
for deps in self._dependencies.itervalues():
deps.discard(name)
del self._dependencies[name]
break
else:
raise ValueError('Circular dependency %s' % self._dependencies)
def _FindFunctionDependencies(self, function):
for param in function.params:
self._RegisterDependency(param, None)
if function.callback:
for param in function.callback.params:
self._RegisterDependency(param, None)
if function.returns:
self._RegisterTypeDependency(function.returns, None, False, False)
def | (self, member, depender):
self._RegisterTypeDependency(member.type_, depender, member.optional, False)
def _RegisterTypeDependency(self, type_, depender, optional, array):
if type_.property_type == model.PropertyType.ARRAY:
self._RegisterTypeDependency(type_.item_type, depender, optional, True)
elif type_.property_type == model.PropertyType.REF:
self._RegisterTypeDependency(self._namespace.types[type_.ref_type],
depender, optional, array)
elif type_.property_type in (model.PropertyType.OBJECT,
model.PropertyType.ENUM):
name_components = self._NameComponents(type_)
self._required_types[name_components] = type_
if depender:
self._dependencies.setdefault(depender, set()).add(
name_components)
if array:
self._array_types.add(name_components)
if optional:
self._optional_array_types.add(name_components)
elif optional:
self._optional_types.add(name_components)
@staticmethod
def _NameComponents(entity):
"""Returns a tuple of the fully-qualified name of an entity."""
names = []
while entity:
if (not isinstance(entity, model.Type) or
entity.property_type != model.PropertyType.ARRAY):
names.append(entity.name)
entity = entity.parent
return tuple(reversed(names[:-1]))
def ToPpapiType(self, type_, array=False, optional=False):
"""Returns a string containing the name of the Pepper C type for |type_|.
If array is True, returns the name of an array of |type_|. If optional is
True, returns the name of an optional |type_|. If both array and optional
are True, returns the name of an optional array of |type_|.
"""
if isinstance(type_, model.Function) or type_.property_type in (
model.PropertyType.OBJECT, model.PropertyType.ENUM):
return self._FormatPpapiTypeName(
array, optional, '_'.join(
cpp_util.Classname(s) for s in self._NameComponents(type_)),
namespace=cpp_util.Classname(self._namespace.name))
elif type_.property_type == model.PropertyType.REF:
return self.ToPpapiType(self._namespace.types[type_.ref_type],
optional=optional, array=array)
elif type_.property_type == model.PropertyType.ARRAY:
return self.ToPpapiType(type_.item_type, array=True,
optional=optional)
elif type_.property_type == model.PropertyType.STRING and not array:
return 'PP_Var'
elif array or optional:
if type_.property_type in self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP:
return self._FormatPpapiTypeName(
array, optional,
self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP[type_.property_type], '')
return self._PPAPI_PRIMITIVE_TYPE_MAP.get(type_.property_type, 'PP_Var')
_PPAPI_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'PP_Bool',
model.PropertyType.DOUBLE: 'double_t',
model.PropertyType.INT64: 'int64_t',
model.PropertyType.INTEGER: 'int32_t',
}
_PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'Bool',
model.PropertyType.DOUBLE: 'Double',
model.PropertyType.INT64: 'Int64',
model.PropertyType.INTEGER: 'Int32',
model.PropertyType.STRING: 'String',
}
@staticmethod
def _FormatPpapiTypeName(array, optional, name, namespace=''):
if namespace:
namespace = '%s_' % namespace
if array:
if optional:
return 'PP_%sOptional_%s_Array' % (namespace, name)
return 'PP_%s%s_Array' % (namespace, name)
if optional:
return 'PP_%sOptional_%s' % (namespace, name)
return 'PP_%s%s' % (namespace, name)
def NeedsOptional(self, type_):
"""Returns True if an optional |type_| is required."""
return self._NameComponents(type_) in self._optional_types
def NeedsArray(self, type_):
"""Returns True if an array of |type_| is required."""
return self._NameComponents(type_) in self._array_types
def NeedsOptionalArray(self, type_):
"""Returns True if an optional array of |type_| is required."""
return self._NameComponents(type_) in self._optional_array_types
def FormatParamType(self, param):
"""Formats the type of a parameter or property."""
return self.ToPpapiType(param.type_, optional=param.optional)
@staticmethod
def GetFunctionReturnType(function):
return 'int32_t' if function.callback or function.returns else 'void'
def EnumValueName(self, enum_value, enum_type):
"""Returns a string containing the name for an enum value."""
return '%s_%s' % (self.ToPpapiType(enum_type).upper(),
enum_value.name.upper())
def _ResolveType(self, type_):
if type_.property_type == model.PropertyType.REF:
return self._ResolveType(self._namespace.types[type_.ref_type])
if type_.property_type == model.PropertyType.ARRAY:
return self._ResolveType(type_.item_type)
return type_
def _IsOrContainsArray(self, type_):
if type_.property_type == model.PropertyType.ARRAY:
return True
type_ = self._ResolveType(type_)
if type_.property_type == model.PropertyType.OBJECT:
return any(self._IsOrContainsArray(param.type_)
for param in type_.properties.itervalues())
return False
def HasArrayOuts(self, function):
"""Returns True if the function produces any arrays as outputs.
This includes arrays that are properties of other objects.
"""
if function.callback:
for param in function.callback.params:
if self._IsOrContainsArray(param.type_):
return True
return function.returns and self._IsOrContainsArray(function.returns)
class _IdlGenerator(_PpapiGeneratorBase):
TEMPLATE_NAME = 'idl'
class _GeneratorWrapper(object):
def __init__(self, generator_factory):
self._generator_factory = generator_factory
def Generate(self, namespace):
return self._generator_factory(namespace).Generate()
class PpapiGenerator(object):
def __init__(self):
self.idl_generator = _GeneratorWrapper(_IdlGenerator)
| _RegisterDependency | identifier_name |
ppapi_generator.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import datetime
import os.path
import sys
import code
import cpp_util
import model
try:
import jinja2
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party'))
import jinja2
class _PpapiGeneratorBase(object):
"""A base class for ppapi generators.
Implementations should set TEMPLATE_NAME to a string containing the name of
the template file without its extension. The template will be rendered with
the following symbols available:
name: A string containing the name of the namespace.
enums: A list of enums within the namespace.
types: A list of types within the namespace, sorted such that no element
depends on an earlier element.
events: A dict of events within the namespace.
functions: A dict of functions within the namespace.
year: An int containing the current year.
source_file: The name of the input file.
"""
def __init__(self, namespace):
self._namespace = namespace
self._required_types = {}
self._array_types = set()
self._optional_types = set()
self._optional_array_types = set()
self._dependencies = collections.OrderedDict()
self._types = []
self._enums = []
self.jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),
'templates', 'ppapi')))
self._SetupFilters()
self._ResolveTypeDependencies()
def _SetupFilters(self):
self.jinja_environment.filters.update({
'ppapi_type': self.ToPpapiType,
'classname': cpp_util.Classname,
'enum_value': self.EnumValueName,
'return_type': self.GetFunctionReturnType,
'format_param_type': self.FormatParamType,
'needs_optional': self.NeedsOptional,
'needs_array': self.NeedsArray,
'needs_optional_array': self.NeedsOptionalArray,
'has_array_outs': self.HasArrayOuts,
})
def Render(self, template_name, values):
generated_code = code.Code()
template = self.jinja_environment.get_template(
'%s.template' % template_name)
generated_code.Append(template.render(values))
return generated_code
def Generate(self):
"""Generates a Code object for a single namespace."""
return self.Render(self.TEMPLATE_NAME, {
'name': self._namespace.name,
'enums': self._enums,
'types': self._types,
'events': self._namespace.events,
'functions': self._namespace.functions,
# TODO(sammc): Don't change years when regenerating existing output files.
'year': datetime.date.today().year,
'source_file': self._namespace.source_file,
})
def _ResolveTypeDependencies(self):
"""Calculates the transitive closure of the types in _required_types.
Returns a tuple containing the list of struct types and the list of enum
types. The list of struct types is ordered such that no type depends on a
type later in the list.
"""
if self._namespace.functions:
for function in self._namespace.functions.itervalues():
self._FindFunctionDependencies(function)
if self._namespace.events:
for event in self._namespace.events.itervalues():
self._FindFunctionDependencies(event)
resolved_types = set()
while resolved_types < set(self._required_types):
for typename in sorted(set(self._required_types) - resolved_types):
type_ = self._required_types[typename]
self._dependencies.setdefault(typename, set())
for member in type_.properties.itervalues():
self._RegisterDependency(member, self._NameComponents(type_))
resolved_types.add(typename)
while self._dependencies:
for name, deps in self._dependencies.items():
if not deps:
if (self._required_types[name].property_type ==
model.PropertyType.ENUM):
self._enums.append(self._required_types[name])
else:
self._types.append(self._required_types[name])
for deps in self._dependencies.itervalues():
deps.discard(name)
del self._dependencies[name]
break
else:
raise ValueError('Circular dependency %s' % self._dependencies)
def _FindFunctionDependencies(self, function):
for param in function.params:
self._RegisterDependency(param, None)
if function.callback:
for param in function.callback.params:
self._RegisterDependency(param, None)
if function.returns:
self._RegisterTypeDependency(function.returns, None, False, False)
def _RegisterDependency(self, member, depender):
self._RegisterTypeDependency(member.type_, depender, member.optional, False)
def _RegisterTypeDependency(self, type_, depender, optional, array):
if type_.property_type == model.PropertyType.ARRAY:
self._RegisterTypeDependency(type_.item_type, depender, optional, True)
elif type_.property_type == model.PropertyType.REF:
self._RegisterTypeDependency(self._namespace.types[type_.ref_type],
depender, optional, array)
elif type_.property_type in (model.PropertyType.OBJECT,
model.PropertyType.ENUM):
name_components = self._NameComponents(type_)
self._required_types[name_components] = type_
if depender:
self._dependencies.setdefault(depender, set()).add(
name_components)
if array:
self._array_types.add(name_components)
if optional:
self._optional_array_types.add(name_components)
elif optional:
self._optional_types.add(name_components)
@staticmethod
def _NameComponents(entity):
"""Returns a tuple of the fully-qualified name of an entity."""
names = []
while entity:
if (not isinstance(entity, model.Type) or
entity.property_type != model.PropertyType.ARRAY):
names.append(entity.name)
entity = entity.parent
return tuple(reversed(names[:-1]))
def ToPpapiType(self, type_, array=False, optional=False):
"""Returns a string containing the name of the Pepper C type for |type_|.
If array is True, returns the name of an array of |type_|. If optional is
True, returns the name of an optional |type_|. If both array and optional
are True, returns the name of an optional array of |type_|.
"""
if isinstance(type_, model.Function) or type_.property_type in (
model.PropertyType.OBJECT, model.PropertyType.ENUM):
return self._FormatPpapiTypeName(
array, optional, '_'.join(
cpp_util.Classname(s) for s in self._NameComponents(type_)),
namespace=cpp_util.Classname(self._namespace.name))
elif type_.property_type == model.PropertyType.REF:
return self.ToPpapiType(self._namespace.types[type_.ref_type],
optional=optional, array=array)
elif type_.property_type == model.PropertyType.ARRAY:
return self.ToPpapiType(type_.item_type, array=True,
optional=optional)
elif type_.property_type == model.PropertyType.STRING and not array:
return 'PP_Var'
elif array or optional:
if type_.property_type in self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP:
return self._FormatPpapiTypeName(
array, optional,
self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP[type_.property_type], '')
return self._PPAPI_PRIMITIVE_TYPE_MAP.get(type_.property_type, 'PP_Var')
_PPAPI_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'PP_Bool',
model.PropertyType.DOUBLE: 'double_t',
model.PropertyType.INT64: 'int64_t',
model.PropertyType.INTEGER: 'int32_t',
}
_PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'Bool',
model.PropertyType.DOUBLE: 'Double',
model.PropertyType.INT64: 'Int64',
model.PropertyType.INTEGER: 'Int32',
model.PropertyType.STRING: 'String',
}
@staticmethod
def _FormatPpapiTypeName(array, optional, name, namespace=''):
if namespace:
namespace = '%s_' % namespace
if array:
if optional:
return 'PP_%sOptional_%s_Array' % (namespace, name)
return 'PP_%s%s_Array' % (namespace, name)
if optional:
return 'PP_%sOptional_%s' % (namespace, name)
return 'PP_%s%s' % (namespace, name)
def NeedsOptional(self, type_):
"""Returns True if an optional |type_| is required."""
return self._NameComponents(type_) in self._optional_types
def NeedsArray(self, type_):
"""Returns True if an array of |type_| is required."""
return self._NameComponents(type_) in self._array_types
def NeedsOptionalArray(self, type_):
"""Returns True if an optional array of |type_| is required."""
return self._NameComponents(type_) in self._optional_array_types
def FormatParamType(self, param):
"""Formats the type of a parameter or property."""
return self.ToPpapiType(param.type_, optional=param.optional)
@staticmethod
def GetFunctionReturnType(function):
return 'int32_t' if function.callback or function.returns else 'void'
def EnumValueName(self, enum_value, enum_type):
"""Returns a string containing the name for an enum value."""
return '%s_%s' % (self.ToPpapiType(enum_type).upper(),
enum_value.name.upper())
def _ResolveType(self, type_):
if type_.property_type == model.PropertyType.REF:
return self._ResolveType(self._namespace.types[type_.ref_type])
if type_.property_type == model.PropertyType.ARRAY:
return self._ResolveType(type_.item_type)
return type_
def _IsOrContainsArray(self, type_):
if type_.property_type == model.PropertyType.ARRAY:
return True
type_ = self._ResolveType(type_)
if type_.property_type == model.PropertyType.OBJECT:
return any(self._IsOrContainsArray(param.type_)
for param in type_.properties.itervalues())
return False
def HasArrayOuts(self, function):
"""Returns True if the function produces any arrays as outputs.
This includes arrays that are properties of other objects.
"""
if function.callback:
for param in function.callback.params:
if self._IsOrContainsArray(param.type_):
return True
return function.returns and self._IsOrContainsArray(function.returns)
class _IdlGenerator(_PpapiGeneratorBase):
TEMPLATE_NAME = 'idl'
class _GeneratorWrapper(object):
def __init__(self, generator_factory):
self._generator_factory = generator_factory
def Generate(self, namespace):
|
class PpapiGenerator(object):
def __init__(self):
self.idl_generator = _GeneratorWrapper(_IdlGenerator)
| return self._generator_factory(namespace).Generate() | identifier_body |
statistic_compare.py | import scipy.stats as stats
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from re import findall
SMALL_SIZE = 9
matplotlib.rc('font', size=SMALL_SIZE)
matplotlib.rc('axes', titlesize=SMALL_SIZE)
def get_metric(path, metric='loss'):
loss = -1
loss_list = []
if metric == 'loss':
split = 'Lowest loss:'
else:
split = 'Accuracy:'
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'Lowest loss:' in line and 'Task 0-rank 0' in line:
# task_id = int(line.split('Task ')[-1].split(',')[0])
# loss = float(line.split(split)[-1].split(',')[0])
loss = float(findall(r"\d+\.?\d*e?[-+]?\d+", line.split(split)[1])[0])
# f1_score = float(line.split('F1 score: ')[-1])
if 'overall accuracy:' in line:
loss_list.append(loss)
try:
return loss_list
except UnboundLocalError:
print('error file {}'.format(path))
# def get_training_loss
def get_checkpoint(path):
oa_list = []
nt_list = []
count = 0
c_trans = 0
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'Task 0' in line and '# Iter:' in line:
rank_num = int(line.split('rank')[-1].split(',')[0])
if rank_num % 2 == 0:
# task_id = int(line.split('Task ')[-1].split(',')[0])
loss = float(findall(r"\d+\.?\d*e?[-+]?\d+", line.split('Loss:')[1])[0])
n_trans = float(findall(r"\d+\.?\d*", line.split('Count:')[1])[0])
# accuracy = float(re.findall(r"\d+\.?\d*", line.split('Loss:')[1])[0])
oa_list.append(loss)
if n_trans > c_trans:
c_trans = n_trans
nt_list.append((count, loss))
count += 1
return oa_list, nt_list
def get_overall_accuracy(path, find_best=False):
oa = 0
n_trans = 0
percentage = 0
bestlist = []
# try:
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if '# Task' in line:
pbest = float(findall(r"\d+\.?\d*", line.split('Accuracy:')[1])[0])
bestlist.append(pbest)
if '# Task 0 #' in line:
# if 'Loss:' in line and 'Task 0' in line:
oa = float(findall(r"\d+\.?\d*", line.split('Accuracy:')[1])[0])
# oa = float(re.findall(r"\d+\.?\d*e?[-+]?\d+", line.split('Loss:')[1])[0])
# oa = float(line.split('accuracy:')[1][:7])
# break
if 'Task 0-rank 0' in line:
n_trans = float(findall(r"\d+\.?\d*", line.split('Transfer Count:')[1])[0])
# iters = float(re.findall(r"\d+\.?\d*", line.split('Iter:')[1])[0])
# percentage = n_trans*100/iters
if find_best:
try:
oa = max(bestlist)
except ValueError:
print('error file is {}'.format(path))
oa = 0
# except:
# print('exception catched', line)
# oa = 0
return oa, n_trans
def multi_oa(path):
bestlist = []
# try:
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'overall accuracy' in line:
pbest = float(findall(r"\d+\.?\d*", line.split('overall accuracy:')[1])[0])
bestlist.append(pbest)
return bestlist
def compare_sto():
# models = ['alexnet', 'vgg16_bn', 'resnet18', 'resnet50', 'densenet121']
models = ['squeezenet1_1', 'mobilenet_v2', 'densenet121']
# models = ['vgg16_bn', 'densenet121']
# datasets = ['UCMerced', 'WHU19', 'RSSCN7','AID']
datasets = ['RSSCN7', 'OxfordPets', 'UCMerced']
draw_figure = False
for d in datasets:
if draw_figure:
fig = plt.figure(figsize=(30, 20))
print('datasets {}'.format(d))
for j, m in enumerate(models):
print('models {}'.format(m))
avg_acc_single = []
avg_loss_single = []
avg_trans = []
avg_acc_mto = []
avg_loss_mto = []
for i in range(1):
# f_single = '../../cv_mto/rval/{}_single_{}_rval_ozstar_n1_seed{}.txt'.format(d, m, i)
# f_single = '../../cv_mto/rval5/{}_single_{}_ozstar_n1.txt'.format(d, m)
f_single = '../../cv_mto/rval5/{}_w_rancl_{}_rval_ozstar_n4.txt'.format(d, m)
# f_mto = '../../results/1007/{}_w_VS_2_rancl_100000_{}_n4_seed{}.txt'.format(d, m, i)
# f_mto = '../../cv_mto/rval/{}_w_{}_rval_ozstar_n4_seed{}.txt'.format(d, m, i)
f_mto = '../../cv_mto/rval5/{}_w_{}_rval_ozstar_n4.txt'.format(d, m)
if not draw_figure:
# oa, ntrans = get_overall_accuracy(f_single, find_best=True)
oa = multi_oa(f_single)
loss = get_metric(f_single)
avg_acc_single.extend(oa)
avg_loss_single.extend(loss)
# oa, ntrans = get_overall_accuracy(f_mto)
oa = multi_oa(f_mto)
loss = get_metric(f_mto)
avg_acc_mto.extend(oa)
avg_loss_mto.extend(loss)
# avg_trans.append(ntrans)
else:
ax1 = fig.add_subplot(len(models), 5, j*5+i+1)
oa_list_sto, _ = get_checkpoint(f_single)
min_loss_sto = min(oa_list_sto)
min_idx_sto = np.argmin(oa_list_sto)
avg_acc_single.append(oa_list_sto[-1])
oa_list_mto, nt_list = get_checkpoint(f_mto)
avg_trans.append(nt_list[-1])
min_loss_mto = min(oa_list_mto)
min_idx_mto = np.argmin(oa_list_mto)
avg_acc_mto.append(oa_list_mto[-1])
ax1.plot(oa_list_sto)
ax1.scatter(min_idx_sto, min_loss_sto,
color='m', marker='o', s=30)
# ax1.hlines(min_loss_sto, 0, max(len(oa_list_sto), len(oa_list_mto)), linestyles='dashed')
ax1.plot(oa_list_mto)
ax1.scatter(min_idx_mto, min_loss_mto,
color='m', marker='o', s=30)
# ax1.hlines(min_loss_mto, 0, max(len(oa_list_sto), len(oa_list_mto)), linestyles='dashed')
ax1.scatter(list(zip(*nt_list))[0], list(zip(*nt_list))[1],
color='', marker='o', edgecolors='g', s=30)
ax1.legend(['sto', 'mto'])
ax1.set_ylabel('Val loss')
ax1.set_xlabel('steps (*100)')
ax1.yaxis.get_major_formatter().set_powerlimits((0, 1))
# ax2 = ax1.twinx() # this is the important function
# ax2.plot(nt_list)
# ax2.set_ylabel('mto n_trans')
print(avg_acc_single)
print(avg_loss_single)
print(avg_acc_mto)
print(avg_loss_mto)
print('avg single {}'.format(sum(avg_acc_single)/len(avg_acc_single)))
print('avg single {}'.format(sum(avg_loss_single)/len(avg_loss_single)))
print('avg mto {}'.format(sum(avg_acc_mto)/len(avg_acc_mto)))
print('avg mto {}'.format(sum(avg_loss_mto)/len(avg_loss_mto)))
print('trans percentage {}'.format(avg_trans))
# print('average trans percentage {}'.format(sum(avg_trans)/len(avg_trans)))
print('-------------------------')
if draw_figure:
plt.tight_layout()
fig.savefig('{}.pdf'.format(d))
print('============================')
def | ():
# plt.rcParams['font.sans-serif'] = ['Times']
model = ['densenet121', 'mobilenet_v2', 'squeezenet1_1']
# model = 'mobilenet_v2'
# model = 'squeezenet1_1'
# datasets = ['UCMerced', 'RSSCN7', 'WHU19', 'AID']
datasets = ['UCMerced', 'OxfordPets', 'RSSCN7']
# datasets = ['AID']
# ntasks = [0, 50, 100, 200, 400]
ntasks = [1, 2, 4, 6]
fig = plt.figure(figsize=(12,9))
# fig, axes = plt.subplots(len(model), len(datasets), sharex='col', sharey=True, figsize=(10, 9))
for n, d in enumerate(datasets):
plt.figure()
# avg_loss = np.zeros((5,len(ntasks)))
# avg_acc = np.zeros((5,len(ntasks)))
avg_loss = np.zeros(len(ntasks))
avg_acc = np.zeros(len(ntasks))
for k, m in enumerate(model):
files = []
# 1007: 不同交互频率
# 1003_n: STO和MTO的结果
# files.append('../../results/1003_n/{}_single_{}_n1_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n2_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n3_seed{}.txt'.format(d, model, i))
# files.append('../../results/1003_n/{}_w_VS_2_rancl_{}_n4_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n5_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n6_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n7_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n8_seed{}.txt'.format(d, model, i))
files.append('../../cv_mto/rval5/{}_single_{}_rval_ozstar_n1.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n2.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n4.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n6.txt'.format(d, m))
for j, f in enumerate(files):
loss = get_metric(f)
# acc, _ = get_overall_accuracy(f)
acc = multi_oa(f)
avg_loss[j] = sum(loss)/len(loss)
avg_acc[j] = sum(acc)/len(acc)
# print(avg_loss)
# print(avg_acc)
# loss_trend = np.mean(avg_loss, axis=0)
# acc_trend = np.mean(avg_acc, axis=0)
loss_trend = avg_loss #[format(num, '.2f') for num in avg_loss]
acc_trend = avg_acc #[format(num, '.2f') for num in avg_acc]
ax = fig.add_subplot(3, 3, n*len(model)+k+1)
# TODO: 使用科学记数法,放大字体
lns1 = ax.plot(ntasks, loss_trend, marker="o", color='C1', label='Val loss', linestyle='dashed')
# ax.yaxis.get_major_formatter().set_powerlimits((0, 1))
ax.set_ylabel('Val loss')
ax.set_xlabel('Number of tasks')
# ax.set_title(d if d !="WHU19" else "WHU19-RS")
if m == 'mobilenet_v2':
m = 'MobileNetV2'
if m == 'squeezenet1_1':
m = 'SqueezeNet'
if m == 'densenet121':
m = 'DenseNet-121'
ax.set_title('{} {}'.format(d,m))
# plt.legend([], loc='right')
ax2 = ax.twinx() # this is the important function
lns2 = ax2.plot(ntasks, acc_trend, marker="v", color='C2', label='Accuracy')
ax2.set_ylabel('Accuracy (%)')
lns = lns1 + lns2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=5)
fig.tight_layout()
fig.savefig('ntasks.pdf')
# compare_sto()
compare_n()
# plt.plot(get_checkpoint('../../results/RSSCN7_single_resnet18_seed0.txt'))
# plt.plot(get_checkpoint('../../results/RSSCN7_w_VS_rancl_resnet18_seed0.txt'))
#
# plt.show()
# kd = [95.50, 95.52, 95.94, 95.58, 95.51, 95.56, 95.76, 95.79, 95.53, 95.65]
# at = [95.44, 95.33, 95.94, 95.58, 94.94, 95.50, 95.35, 94.24, 95.39, 94.98]
# wst = [95.30, 95.19, 94.60, 95.47, 95.16, 95.44, 95.25, 94.50, 95.37, 95.16]
# single = get_metric('../imagenet32_single.txt')
# kd = get_metric('../imagenet32_k.txt')
# at = get_metric('../imagenet32_a.txt')
# wst = get_metric('../imagenet32_ws.txt')
# # atl = get_metric('../cifar10_at_last_vl.txt')
# print(single)
# print(kd)
# print(at)
# print(wst)
# print(stats.shapiro(single), stats.levene(single,at,wst))
# print(stats.shapiro(at), stats.levene(at))
# print(stats.shapiro(wst), stats.levene(wst))
# length = len(single)
# print(sum(single)/length, sum(kd)/length, sum(at)/length, sum(wst)/length)
#
# print(stats.mannwhitneyu(single, kd))
# print(stats.mannwhitneyu(single, at))
# print(stats.mannwhitneyu(single, wst))
| compare_n | identifier_name |
statistic_compare.py | import scipy.stats as stats
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from re import findall
SMALL_SIZE = 9
matplotlib.rc('font', size=SMALL_SIZE)
matplotlib.rc('axes', titlesize=SMALL_SIZE)
def get_metric(path, metric='loss'):
loss = -1
loss_list = []
if metric == 'loss':
split = 'Lowest loss:'
else:
split = 'Accuracy:'
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'Lowest loss:' in line and 'Task 0-rank 0' in line:
# task_id = int(line.split('Task ')[-1].split(',')[0])
# loss = float(line.split(split)[-1].split(',')[0])
loss = float(findall(r"\d+\.?\d*e?[-+]?\d+", line.split(split)[1])[0])
# f1_score = float(line.split('F1 score: ')[-1])
if 'overall accuracy:' in line:
loss_list.append(loss)
try:
return loss_list
except UnboundLocalError:
print('error file {}'.format(path))
# def get_training_loss
def get_checkpoint(path):
oa_list = []
nt_list = []
count = 0
c_trans = 0
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'Task 0' in line and '# Iter:' in line:
rank_num = int(line.split('rank')[-1].split(',')[0])
if rank_num % 2 == 0:
# task_id = int(line.split('Task ')[-1].split(',')[0])
loss = float(findall(r"\d+\.?\d*e?[-+]?\d+", line.split('Loss:')[1])[0])
n_trans = float(findall(r"\d+\.?\d*", line.split('Count:')[1])[0])
# accuracy = float(re.findall(r"\d+\.?\d*", line.split('Loss:')[1])[0])
oa_list.append(loss)
if n_trans > c_trans:
c_trans = n_trans
nt_list.append((count, loss))
count += 1
return oa_list, nt_list
def get_overall_accuracy(path, find_best=False):
oa = 0
n_trans = 0
percentage = 0
bestlist = []
# try:
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if '# Task' in line:
pbest = float(findall(r"\d+\.?\d*", line.split('Accuracy:')[1])[0])
bestlist.append(pbest)
if '# Task 0 #' in line:
# if 'Loss:' in line and 'Task 0' in line:
oa = float(findall(r"\d+\.?\d*", line.split('Accuracy:')[1])[0])
# oa = float(re.findall(r"\d+\.?\d*e?[-+]?\d+", line.split('Loss:')[1])[0])
# oa = float(line.split('accuracy:')[1][:7])
# break
if 'Task 0-rank 0' in line:
n_trans = float(findall(r"\d+\.?\d*", line.split('Transfer Count:')[1])[0])
# iters = float(re.findall(r"\d+\.?\d*", line.split('Iter:')[1])[0])
# percentage = n_trans*100/iters
if find_best:
try:
oa = max(bestlist)
except ValueError:
print('error file is {}'.format(path))
oa = 0
# except:
# print('exception catched', line)
# oa = 0
return oa, n_trans
def multi_oa(path):
bestlist = []
# try:
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'overall accuracy' in line:
pbest = float(findall(r"\d+\.?\d*", line.split('overall accuracy:')[1])[0])
bestlist.append(pbest)
return bestlist
def compare_sto():
# models = ['alexnet', 'vgg16_bn', 'resnet18', 'resnet50', 'densenet121']
models = ['squeezenet1_1', 'mobilenet_v2', 'densenet121']
# models = ['vgg16_bn', 'densenet121']
# datasets = ['UCMerced', 'WHU19', 'RSSCN7','AID']
datasets = ['RSSCN7', 'OxfordPets', 'UCMerced']
draw_figure = False
for d in datasets:
if draw_figure:
fig = plt.figure(figsize=(30, 20))
print('datasets {}'.format(d))
for j, m in enumerate(models):
print('models {}'.format(m))
avg_acc_single = []
avg_loss_single = []
avg_trans = []
avg_acc_mto = []
avg_loss_mto = []
for i in range(1):
# f_single = '../../cv_mto/rval/{}_single_{}_rval_ozstar_n1_seed{}.txt'.format(d, m, i)
# f_single = '../../cv_mto/rval5/{}_single_{}_ozstar_n1.txt'.format(d, m)
f_single = '../../cv_mto/rval5/{}_w_rancl_{}_rval_ozstar_n4.txt'.format(d, m)
# f_mto = '../../results/1007/{}_w_VS_2_rancl_100000_{}_n4_seed{}.txt'.format(d, m, i)
# f_mto = '../../cv_mto/rval/{}_w_{}_rval_ozstar_n4_seed{}.txt'.format(d, m, i)
f_mto = '../../cv_mto/rval5/{}_w_{}_rval_ozstar_n4.txt'.format(d, m)
if not draw_figure:
# oa, ntrans = get_overall_accuracy(f_single, find_best=True)
oa = multi_oa(f_single)
loss = get_metric(f_single)
avg_acc_single.extend(oa)
avg_loss_single.extend(loss)
# oa, ntrans = get_overall_accuracy(f_mto)
oa = multi_oa(f_mto)
loss = get_metric(f_mto)
avg_acc_mto.extend(oa)
avg_loss_mto.extend(loss)
# avg_trans.append(ntrans)
else:
ax1 = fig.add_subplot(len(models), 5, j*5+i+1)
oa_list_sto, _ = get_checkpoint(f_single)
min_loss_sto = min(oa_list_sto)
min_idx_sto = np.argmin(oa_list_sto)
avg_acc_single.append(oa_list_sto[-1])
oa_list_mto, nt_list = get_checkpoint(f_mto)
avg_trans.append(nt_list[-1])
min_loss_mto = min(oa_list_mto)
min_idx_mto = np.argmin(oa_list_mto)
avg_acc_mto.append(oa_list_mto[-1])
ax1.plot(oa_list_sto)
ax1.scatter(min_idx_sto, min_loss_sto,
color='m', marker='o', s=30)
# ax1.hlines(min_loss_sto, 0, max(len(oa_list_sto), len(oa_list_mto)), linestyles='dashed')
ax1.plot(oa_list_mto)
ax1.scatter(min_idx_mto, min_loss_mto,
color='m', marker='o', s=30)
# ax1.hlines(min_loss_mto, 0, max(len(oa_list_sto), len(oa_list_mto)), linestyles='dashed')
ax1.scatter(list(zip(*nt_list))[0], list(zip(*nt_list))[1],
color='', marker='o', edgecolors='g', s=30)
ax1.legend(['sto', 'mto'])
ax1.set_ylabel('Val loss')
ax1.set_xlabel('steps (*100)')
ax1.yaxis.get_major_formatter().set_powerlimits((0, 1))
# ax2 = ax1.twinx() # this is the important function
# ax2.plot(nt_list)
# ax2.set_ylabel('mto n_trans')
print(avg_acc_single)
print(avg_loss_single)
print(avg_acc_mto)
print(avg_loss_mto)
print('avg single {}'.format(sum(avg_acc_single)/len(avg_acc_single)))
print('avg single {}'.format(sum(avg_loss_single)/len(avg_loss_single)))
print('avg mto {}'.format(sum(avg_acc_mto)/len(avg_acc_mto)))
print('avg mto {}'.format(sum(avg_loss_mto)/len(avg_loss_mto)))
print('trans percentage {}'.format(avg_trans))
# print('average trans percentage {}'.format(sum(avg_trans)/len(avg_trans)))
print('-------------------------')
if draw_figure:
plt.tight_layout()
fig.savefig('{}.pdf'.format(d))
print('============================')
def compare_n():
# plt.rcParams['font.sans-serif'] = ['Times']
model = ['densenet121', 'mobilenet_v2', 'squeezenet1_1']
# model = 'mobilenet_v2'
# model = 'squeezenet1_1'
# datasets = ['UCMerced', 'RSSCN7', 'WHU19', 'AID']
datasets = ['UCMerced', 'OxfordPets', 'RSSCN7']
# datasets = ['AID']
# ntasks = [0, 50, 100, 200, 400]
ntasks = [1, 2, 4, 6]
fig = plt.figure(figsize=(12,9))
# fig, axes = plt.subplots(len(model), len(datasets), sharex='col', sharey=True, figsize=(10, 9))
for n, d in enumerate(datasets):
| s.pdf')
# compare_sto()
compare_n()
# plt.plot(get_checkpoint('../../results/RSSCN7_single_resnet18_seed0.txt'))
# plt.plot(get_checkpoint('../../results/RSSCN7_w_VS_rancl_resnet18_seed0.txt'))
#
# plt.show()
# kd = [95.50, 95.52, 95.94, 95.58, 95.51, 95.56, 95.76, 95.79, 95.53, 95.65]
# at = [95.44, 95.33, 95.94, 95.58, 94.94, 95.50, 95.35, 94.24, 95.39, 94.98]
# wst = [95.30, 95.19, 94.60, 95.47, 95.16, 95.44, 95.25, 94.50, 95.37, 95.16]
# single = get_metric('../imagenet32_single.txt')
# kd = get_metric('../imagenet32_k.txt')
# at = get_metric('../imagenet32_a.txt')
# wst = get_metric('../imagenet32_ws.txt')
# # atl = get_metric('../cifar10_at_last_vl.txt')
# print(single)
# print(kd)
# print(at)
# print(wst)
# print(stats.shapiro(single), stats.levene(single,at,wst))
# print(stats.shapiro(at), stats.levene(at))
# print(stats.shapiro(wst), stats.levene(wst))
# length = len(single)
# print(sum(single)/length, sum(kd)/length, sum(at)/length, sum(wst)/length)
#
# print(stats.mannwhitneyu(single, kd))
# print(stats.mannwhitneyu(single, at))
# print(stats.mannwhitneyu(single, wst))
| plt.figure()
# avg_loss = np.zeros((5,len(ntasks)))
# avg_acc = np.zeros((5,len(ntasks)))
avg_loss = np.zeros(len(ntasks))
avg_acc = np.zeros(len(ntasks))
for k, m in enumerate(model):
files = []
# 1007: 不同交互频率
# 1003_n: STO和MTO的结果
# files.append('../../results/1003_n/{}_single_{}_n1_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n2_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n3_seed{}.txt'.format(d, model, i))
# files.append('../../results/1003_n/{}_w_VS_2_rancl_{}_n4_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n5_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n6_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n7_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n8_seed{}.txt'.format(d, model, i))
files.append('../../cv_mto/rval5/{}_single_{}_rval_ozstar_n1.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n2.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n4.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n6.txt'.format(d, m))
for j, f in enumerate(files):
loss = get_metric(f)
# acc, _ = get_overall_accuracy(f)
acc = multi_oa(f)
avg_loss[j] = sum(loss)/len(loss)
avg_acc[j] = sum(acc)/len(acc)
# print(avg_loss)
# print(avg_acc)
# loss_trend = np.mean(avg_loss, axis=0)
# acc_trend = np.mean(avg_acc, axis=0)
loss_trend = avg_loss #[format(num, '.2f') for num in avg_loss]
acc_trend = avg_acc #[format(num, '.2f') for num in avg_acc]
ax = fig.add_subplot(3, 3, n*len(model)+k+1)
# TODO: 使用科学记数法,放大字体
lns1 = ax.plot(ntasks, loss_trend, marker="o", color='C1', label='Val loss', linestyle='dashed')
# ax.yaxis.get_major_formatter().set_powerlimits((0, 1))
ax.set_ylabel('Val loss')
ax.set_xlabel('Number of tasks')
# ax.set_title(d if d !="WHU19" else "WHU19-RS")
if m == 'mobilenet_v2':
m = 'MobileNetV2'
if m == 'squeezenet1_1':
m = 'SqueezeNet'
if m == 'densenet121':
m = 'DenseNet-121'
ax.set_title('{} {}'.format(d,m))
# plt.legend([], loc='right')
ax2 = ax.twinx() # this is the important function
lns2 = ax2.plot(ntasks, acc_trend, marker="v", color='C2', label='Accuracy')
ax2.set_ylabel('Accuracy (%)')
lns = lns1 + lns2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=5)
fig.tight_layout()
fig.savefig('ntask | conditional_block |
statistic_compare.py | import scipy.stats as stats
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from re import findall
SMALL_SIZE = 9
matplotlib.rc('font', size=SMALL_SIZE)
matplotlib.rc('axes', titlesize=SMALL_SIZE)
def get_metric(path, metric='loss'):
loss = -1
loss_list = []
if metric == 'loss':
split = 'Lowest loss:'
else:
split = 'Accuracy:'
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'Lowest loss:' in line and 'Task 0-rank 0' in line:
# task_id = int(line.split('Task ')[-1].split(',')[0])
# loss = float(line.split(split)[-1].split(',')[0])
loss = float(findall(r"\d+\.?\d*e?[-+]?\d+", line.split(split)[1])[0])
# f1_score = float(line.split('F1 score: ')[-1])
if 'overall accuracy:' in line:
loss_list.append(loss)
try:
return loss_list
except UnboundLocalError:
print('error file {}'.format(path))
# def get_training_loss
def get_checkpoint(path):
|
def get_overall_accuracy(path, find_best=False):
oa = 0
n_trans = 0
percentage = 0
bestlist = []
# try:
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if '# Task' in line:
pbest = float(findall(r"\d+\.?\d*", line.split('Accuracy:')[1])[0])
bestlist.append(pbest)
if '# Task 0 #' in line:
# if 'Loss:' in line and 'Task 0' in line:
oa = float(findall(r"\d+\.?\d*", line.split('Accuracy:')[1])[0])
# oa = float(re.findall(r"\d+\.?\d*e?[-+]?\d+", line.split('Loss:')[1])[0])
# oa = float(line.split('accuracy:')[1][:7])
# break
if 'Task 0-rank 0' in line:
n_trans = float(findall(r"\d+\.?\d*", line.split('Transfer Count:')[1])[0])
# iters = float(re.findall(r"\d+\.?\d*", line.split('Iter:')[1])[0])
# percentage = n_trans*100/iters
if find_best:
try:
oa = max(bestlist)
except ValueError:
print('error file is {}'.format(path))
oa = 0
# except:
# print('exception catched', line)
# oa = 0
return oa, n_trans
def multi_oa(path):
bestlist = []
# try:
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'overall accuracy' in line:
pbest = float(findall(r"\d+\.?\d*", line.split('overall accuracy:')[1])[0])
bestlist.append(pbest)
return bestlist
def compare_sto():
# models = ['alexnet', 'vgg16_bn', 'resnet18', 'resnet50', 'densenet121']
models = ['squeezenet1_1', 'mobilenet_v2', 'densenet121']
# models = ['vgg16_bn', 'densenet121']
# datasets = ['UCMerced', 'WHU19', 'RSSCN7','AID']
datasets = ['RSSCN7', 'OxfordPets', 'UCMerced']
draw_figure = False
for d in datasets:
if draw_figure:
fig = plt.figure(figsize=(30, 20))
print('datasets {}'.format(d))
for j, m in enumerate(models):
print('models {}'.format(m))
avg_acc_single = []
avg_loss_single = []
avg_trans = []
avg_acc_mto = []
avg_loss_mto = []
for i in range(1):
# f_single = '../../cv_mto/rval/{}_single_{}_rval_ozstar_n1_seed{}.txt'.format(d, m, i)
# f_single = '../../cv_mto/rval5/{}_single_{}_ozstar_n1.txt'.format(d, m)
f_single = '../../cv_mto/rval5/{}_w_rancl_{}_rval_ozstar_n4.txt'.format(d, m)
# f_mto = '../../results/1007/{}_w_VS_2_rancl_100000_{}_n4_seed{}.txt'.format(d, m, i)
# f_mto = '../../cv_mto/rval/{}_w_{}_rval_ozstar_n4_seed{}.txt'.format(d, m, i)
f_mto = '../../cv_mto/rval5/{}_w_{}_rval_ozstar_n4.txt'.format(d, m)
if not draw_figure:
# oa, ntrans = get_overall_accuracy(f_single, find_best=True)
oa = multi_oa(f_single)
loss = get_metric(f_single)
avg_acc_single.extend(oa)
avg_loss_single.extend(loss)
# oa, ntrans = get_overall_accuracy(f_mto)
oa = multi_oa(f_mto)
loss = get_metric(f_mto)
avg_acc_mto.extend(oa)
avg_loss_mto.extend(loss)
# avg_trans.append(ntrans)
else:
ax1 = fig.add_subplot(len(models), 5, j*5+i+1)
oa_list_sto, _ = get_checkpoint(f_single)
min_loss_sto = min(oa_list_sto)
min_idx_sto = np.argmin(oa_list_sto)
avg_acc_single.append(oa_list_sto[-1])
oa_list_mto, nt_list = get_checkpoint(f_mto)
avg_trans.append(nt_list[-1])
min_loss_mto = min(oa_list_mto)
min_idx_mto = np.argmin(oa_list_mto)
avg_acc_mto.append(oa_list_mto[-1])
ax1.plot(oa_list_sto)
ax1.scatter(min_idx_sto, min_loss_sto,
color='m', marker='o', s=30)
# ax1.hlines(min_loss_sto, 0, max(len(oa_list_sto), len(oa_list_mto)), linestyles='dashed')
ax1.plot(oa_list_mto)
ax1.scatter(min_idx_mto, min_loss_mto,
color='m', marker='o', s=30)
# ax1.hlines(min_loss_mto, 0, max(len(oa_list_sto), len(oa_list_mto)), linestyles='dashed')
ax1.scatter(list(zip(*nt_list))[0], list(zip(*nt_list))[1],
color='', marker='o', edgecolors='g', s=30)
ax1.legend(['sto', 'mto'])
ax1.set_ylabel('Val loss')
ax1.set_xlabel('steps (*100)')
ax1.yaxis.get_major_formatter().set_powerlimits((0, 1))
# ax2 = ax1.twinx() # this is the important function
# ax2.plot(nt_list)
# ax2.set_ylabel('mto n_trans')
print(avg_acc_single)
print(avg_loss_single)
print(avg_acc_mto)
print(avg_loss_mto)
print('avg single {}'.format(sum(avg_acc_single)/len(avg_acc_single)))
print('avg single {}'.format(sum(avg_loss_single)/len(avg_loss_single)))
print('avg mto {}'.format(sum(avg_acc_mto)/len(avg_acc_mto)))
print('avg mto {}'.format(sum(avg_loss_mto)/len(avg_loss_mto)))
print('trans percentage {}'.format(avg_trans))
# print('average trans percentage {}'.format(sum(avg_trans)/len(avg_trans)))
print('-------------------------')
if draw_figure:
plt.tight_layout()
fig.savefig('{}.pdf'.format(d))
print('============================')
def compare_n():
# plt.rcParams['font.sans-serif'] = ['Times']
model = ['densenet121', 'mobilenet_v2', 'squeezenet1_1']
# model = 'mobilenet_v2'
# model = 'squeezenet1_1'
# datasets = ['UCMerced', 'RSSCN7', 'WHU19', 'AID']
datasets = ['UCMerced', 'OxfordPets', 'RSSCN7']
# datasets = ['AID']
# ntasks = [0, 50, 100, 200, 400]
ntasks = [1, 2, 4, 6]
fig = plt.figure(figsize=(12,9))
# fig, axes = plt.subplots(len(model), len(datasets), sharex='col', sharey=True, figsize=(10, 9))
for n, d in enumerate(datasets):
plt.figure()
# avg_loss = np.zeros((5,len(ntasks)))
# avg_acc = np.zeros((5,len(ntasks)))
avg_loss = np.zeros(len(ntasks))
avg_acc = np.zeros(len(ntasks))
for k, m in enumerate(model):
files = []
# 1007: 不同交互频率
# 1003_n: STO和MTO的结果
# files.append('../../results/1003_n/{}_single_{}_n1_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n2_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n3_seed{}.txt'.format(d, model, i))
# files.append('../../results/1003_n/{}_w_VS_2_rancl_{}_n4_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n5_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n6_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n7_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n8_seed{}.txt'.format(d, model, i))
files.append('../../cv_mto/rval5/{}_single_{}_rval_ozstar_n1.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n2.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n4.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n6.txt'.format(d, m))
for j, f in enumerate(files):
loss = get_metric(f)
# acc, _ = get_overall_accuracy(f)
acc = multi_oa(f)
avg_loss[j] = sum(loss)/len(loss)
avg_acc[j] = sum(acc)/len(acc)
# print(avg_loss)
# print(avg_acc)
# loss_trend = np.mean(avg_loss, axis=0)
# acc_trend = np.mean(avg_acc, axis=0)
loss_trend = avg_loss #[format(num, '.2f') for num in avg_loss]
acc_trend = avg_acc #[format(num, '.2f') for num in avg_acc]
ax = fig.add_subplot(3, 3, n*len(model)+k+1)
# TODO: 使用科学记数法,放大字体
lns1 = ax.plot(ntasks, loss_trend, marker="o", color='C1', label='Val loss', linestyle='dashed')
# ax.yaxis.get_major_formatter().set_powerlimits((0, 1))
ax.set_ylabel('Val loss')
ax.set_xlabel('Number of tasks')
# ax.set_title(d if d !="WHU19" else "WHU19-RS")
if m == 'mobilenet_v2':
m = 'MobileNetV2'
if m == 'squeezenet1_1':
m = 'SqueezeNet'
if m == 'densenet121':
m = 'DenseNet-121'
ax.set_title('{} {}'.format(d,m))
# plt.legend([], loc='right')
ax2 = ax.twinx() # this is the important function
lns2 = ax2.plot(ntasks, acc_trend, marker="v", color='C2', label='Accuracy')
ax2.set_ylabel('Accuracy (%)')
lns = lns1 + lns2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=5)
fig.tight_layout()
fig.savefig('ntasks.pdf')
# compare_sto()
compare_n()
# plt.plot(get_checkpoint('../../results/RSSCN7_single_resnet18_seed0.txt'))
# plt.plot(get_checkpoint('../../results/RSSCN7_w_VS_rancl_resnet18_seed0.txt'))
#
# plt.show()
# kd = [95.50, 95.52, 95.94, 95.58, 95.51, 95.56, 95.76, 95.79, 95.53, 95.65]
# at = [95.44, 95.33, 95.94, 95.58, 94.94, 95.50, 95.35, 94.24, 95.39, 94.98]
# wst = [95.30, 95.19, 94.60, 95.47, 95.16, 95.44, 95.25, 94.50, 95.37, 95.16]
# single = get_metric('../imagenet32_single.txt')
# kd = get_metric('../imagenet32_k.txt')
# at = get_metric('../imagenet32_a.txt')
# wst = get_metric('../imagenet32_ws.txt')
# # atl = get_metric('../cifar10_at_last_vl.txt')
# print(single)
# print(kd)
# print(at)
# print(wst)
# print(stats.shapiro(single), stats.levene(single,at,wst))
# print(stats.shapiro(at), stats.levene(at))
# print(stats.shapiro(wst), stats.levene(wst))
# length = len(single)
# print(sum(single)/length, sum(kd)/length, sum(at)/length, sum(wst)/length)
#
# print(stats.mannwhitneyu(single, kd))
# print(stats.mannwhitneyu(single, at))
# print(stats.mannwhitneyu(single, wst))
| oa_list = []
nt_list = []
count = 0
c_trans = 0
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'Task 0' in line and '# Iter:' in line:
rank_num = int(line.split('rank')[-1].split(',')[0])
if rank_num % 2 == 0:
# task_id = int(line.split('Task ')[-1].split(',')[0])
loss = float(findall(r"\d+\.?\d*e?[-+]?\d+", line.split('Loss:')[1])[0])
n_trans = float(findall(r"\d+\.?\d*", line.split('Count:')[1])[0])
# accuracy = float(re.findall(r"\d+\.?\d*", line.split('Loss:')[1])[0])
oa_list.append(loss)
if n_trans > c_trans:
c_trans = n_trans
nt_list.append((count, loss))
count += 1
return oa_list, nt_list | identifier_body |
statistic_compare.py | import scipy.stats as stats
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from re import findall
SMALL_SIZE = 9
matplotlib.rc('font', size=SMALL_SIZE)
matplotlib.rc('axes', titlesize=SMALL_SIZE)
def get_metric(path, metric='loss'):
loss = -1
loss_list = []
if metric == 'loss':
split = 'Lowest loss:'
else:
split = 'Accuracy:'
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'Lowest loss:' in line and 'Task 0-rank 0' in line:
# task_id = int(line.split('Task ')[-1].split(',')[0])
# loss = float(line.split(split)[-1].split(',')[0])
loss = float(findall(r"\d+\.?\d*e?[-+]?\d+", line.split(split)[1])[0])
# f1_score = float(line.split('F1 score: ')[-1])
if 'overall accuracy:' in line:
loss_list.append(loss)
try:
return loss_list
except UnboundLocalError:
print('error file {}'.format(path))
# def get_training_loss
def get_checkpoint(path):
oa_list = []
nt_list = []
count = 0
c_trans = 0
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'Task 0' in line and '# Iter:' in line:
rank_num = int(line.split('rank')[-1].split(',')[0])
if rank_num % 2 == 0:
# task_id = int(line.split('Task ')[-1].split(',')[0])
loss = float(findall(r"\d+\.?\d*e?[-+]?\d+", line.split('Loss:')[1])[0])
n_trans = float(findall(r"\d+\.?\d*", line.split('Count:')[1])[0])
# accuracy = float(re.findall(r"\d+\.?\d*", line.split('Loss:')[1])[0])
oa_list.append(loss)
if n_trans > c_trans:
c_trans = n_trans
nt_list.append((count, loss))
count += 1
return oa_list, nt_list
def get_overall_accuracy(path, find_best=False):
oa = 0
n_trans = 0
percentage = 0
bestlist = []
# try:
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if '# Task' in line:
pbest = float(findall(r"\d+\.?\d*", line.split('Accuracy:')[1])[0])
bestlist.append(pbest)
if '# Task 0 #' in line:
# if 'Loss:' in line and 'Task 0' in line:
oa = float(findall(r"\d+\.?\d*", line.split('Accuracy:')[1])[0])
# oa = float(re.findall(r"\d+\.?\d*e?[-+]?\d+", line.split('Loss:')[1])[0])
# oa = float(line.split('accuracy:')[1][:7])
# break
if 'Task 0-rank 0' in line:
n_trans = float(findall(r"\d+\.?\d*", line.split('Transfer Count:')[1])[0])
# iters = float(re.findall(r"\d+\.?\d*", line.split('Iter:')[1])[0])
# percentage = n_trans*100/iters
if find_best:
try:
oa = max(bestlist)
except ValueError:
print('error file is {}'.format(path))
oa = 0
# except:
# print('exception catched', line)
# oa = 0
return oa, n_trans
def multi_oa(path):
bestlist = []
# try:
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'overall accuracy' in line:
pbest = float(findall(r"\d+\.?\d*", line.split('overall accuracy:')[1])[0])
bestlist.append(pbest)
return bestlist
def compare_sto():
# models = ['alexnet', 'vgg16_bn', 'resnet18', 'resnet50', 'densenet121']
models = ['squeezenet1_1', 'mobilenet_v2', 'densenet121']
# models = ['vgg16_bn', 'densenet121']
# datasets = ['UCMerced', 'WHU19', 'RSSCN7','AID']
datasets = ['RSSCN7', 'OxfordPets', 'UCMerced']
draw_figure = False
for d in datasets:
if draw_figure:
fig = plt.figure(figsize=(30, 20))
print('datasets {}'.format(d))
for j, m in enumerate(models):
print('models {}'.format(m))
avg_acc_single = []
avg_loss_single = []
avg_trans = []
avg_acc_mto = []
avg_loss_mto = []
for i in range(1):
# f_single = '../../cv_mto/rval/{}_single_{}_rval_ozstar_n1_seed{}.txt'.format(d, m, i)
# f_single = '../../cv_mto/rval5/{}_single_{}_ozstar_n1.txt'.format(d, m)
f_single = '../../cv_mto/rval5/{}_w_rancl_{}_rval_ozstar_n4.txt'.format(d, m)
# f_mto = '../../results/1007/{}_w_VS_2_rancl_100000_{}_n4_seed{}.txt'.format(d, m, i)
# f_mto = '../../cv_mto/rval/{}_w_{}_rval_ozstar_n4_seed{}.txt'.format(d, m, i)
f_mto = '../../cv_mto/rval5/{}_w_{}_rval_ozstar_n4.txt'.format(d, m)
if not draw_figure:
# oa, ntrans = get_overall_accuracy(f_single, find_best=True)
oa = multi_oa(f_single)
loss = get_metric(f_single)
avg_acc_single.extend(oa)
avg_loss_single.extend(loss)
# oa, ntrans = get_overall_accuracy(f_mto)
oa = multi_oa(f_mto)
loss = get_metric(f_mto)
avg_acc_mto.extend(oa)
avg_loss_mto.extend(loss)
# avg_trans.append(ntrans)
else:
ax1 = fig.add_subplot(len(models), 5, j*5+i+1)
oa_list_sto, _ = get_checkpoint(f_single)
min_loss_sto = min(oa_list_sto)
min_idx_sto = np.argmin(oa_list_sto)
avg_acc_single.append(oa_list_sto[-1])
oa_list_mto, nt_list = get_checkpoint(f_mto)
avg_trans.append(nt_list[-1])
min_loss_mto = min(oa_list_mto)
min_idx_mto = np.argmin(oa_list_mto)
avg_acc_mto.append(oa_list_mto[-1])
ax1.plot(oa_list_sto)
ax1.scatter(min_idx_sto, min_loss_sto,
color='m', marker='o', s=30)
# ax1.hlines(min_loss_sto, 0, max(len(oa_list_sto), len(oa_list_mto)), linestyles='dashed')
ax1.plot(oa_list_mto)
ax1.scatter(min_idx_mto, min_loss_mto,
color='m', marker='o', s=30)
# ax1.hlines(min_loss_mto, 0, max(len(oa_list_sto), len(oa_list_mto)), linestyles='dashed')
ax1.scatter(list(zip(*nt_list))[0], list(zip(*nt_list))[1],
color='', marker='o', edgecolors='g', s=30)
ax1.legend(['sto', 'mto'])
ax1.set_ylabel('Val loss')
ax1.set_xlabel('steps (*100)')
ax1.yaxis.get_major_formatter().set_powerlimits((0, 1))
# ax2 = ax1.twinx() # this is the important function
# ax2.plot(nt_list)
# ax2.set_ylabel('mto n_trans')
print(avg_acc_single)
print(avg_loss_single)
print(avg_acc_mto)
print(avg_loss_mto)
print('avg single {}'.format(sum(avg_acc_single)/len(avg_acc_single)))
print('avg single {}'.format(sum(avg_loss_single)/len(avg_loss_single)))
print('avg mto {}'.format(sum(avg_acc_mto)/len(avg_acc_mto)))
print('avg mto {}'.format(sum(avg_loss_mto)/len(avg_loss_mto)))
| print('-------------------------')
if draw_figure:
plt.tight_layout()
fig.savefig('{}.pdf'.format(d))
print('============================')
def compare_n():
# plt.rcParams['font.sans-serif'] = ['Times']
model = ['densenet121', 'mobilenet_v2', 'squeezenet1_1']
# model = 'mobilenet_v2'
# model = 'squeezenet1_1'
# datasets = ['UCMerced', 'RSSCN7', 'WHU19', 'AID']
datasets = ['UCMerced', 'OxfordPets', 'RSSCN7']
# datasets = ['AID']
# ntasks = [0, 50, 100, 200, 400]
ntasks = [1, 2, 4, 6]
fig = plt.figure(figsize=(12,9))
# fig, axes = plt.subplots(len(model), len(datasets), sharex='col', sharey=True, figsize=(10, 9))
for n, d in enumerate(datasets):
plt.figure()
# avg_loss = np.zeros((5,len(ntasks)))
# avg_acc = np.zeros((5,len(ntasks)))
avg_loss = np.zeros(len(ntasks))
avg_acc = np.zeros(len(ntasks))
for k, m in enumerate(model):
files = []
# 1007: 不同交互频率
# 1003_n: STO和MTO的结果
# files.append('../../results/1003_n/{}_single_{}_n1_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n2_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n3_seed{}.txt'.format(d, model, i))
# files.append('../../results/1003_n/{}_w_VS_2_rancl_{}_n4_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n5_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n6_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n7_seed{}.txt'.format(d, model, i))
# files.append('../../results/1007/{}_w_VS_2_rancl_{}_n8_seed{}.txt'.format(d, model, i))
files.append('../../cv_mto/rval5/{}_single_{}_rval_ozstar_n1.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n2.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n4.txt'.format(d, m))
files.append('../../cv_mto/rval5/{}_w_{}_rval_ozstar_n6.txt'.format(d, m))
for j, f in enumerate(files):
loss = get_metric(f)
# acc, _ = get_overall_accuracy(f)
acc = multi_oa(f)
avg_loss[j] = sum(loss)/len(loss)
avg_acc[j] = sum(acc)/len(acc)
# print(avg_loss)
# print(avg_acc)
# loss_trend = np.mean(avg_loss, axis=0)
# acc_trend = np.mean(avg_acc, axis=0)
loss_trend = avg_loss #[format(num, '.2f') for num in avg_loss]
acc_trend = avg_acc #[format(num, '.2f') for num in avg_acc]
ax = fig.add_subplot(3, 3, n*len(model)+k+1)
# TODO: 使用科学记数法,放大字体
lns1 = ax.plot(ntasks, loss_trend, marker="o", color='C1', label='Val loss', linestyle='dashed')
# ax.yaxis.get_major_formatter().set_powerlimits((0, 1))
ax.set_ylabel('Val loss')
ax.set_xlabel('Number of tasks')
# ax.set_title(d if d !="WHU19" else "WHU19-RS")
if m == 'mobilenet_v2':
m = 'MobileNetV2'
if m == 'squeezenet1_1':
m = 'SqueezeNet'
if m == 'densenet121':
m = 'DenseNet-121'
ax.set_title('{} {}'.format(d,m))
# plt.legend([], loc='right')
ax2 = ax.twinx() # this is the important function
lns2 = ax2.plot(ntasks, acc_trend, marker="v", color='C2', label='Accuracy')
ax2.set_ylabel('Accuracy (%)')
lns = lns1 + lns2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=5)
fig.tight_layout()
fig.savefig('ntasks.pdf')
# compare_sto()
compare_n()
# plt.plot(get_checkpoint('../../results/RSSCN7_single_resnet18_seed0.txt'))
# plt.plot(get_checkpoint('../../results/RSSCN7_w_VS_rancl_resnet18_seed0.txt'))
#
# plt.show()
# kd = [95.50, 95.52, 95.94, 95.58, 95.51, 95.56, 95.76, 95.79, 95.53, 95.65]
# at = [95.44, 95.33, 95.94, 95.58, 94.94, 95.50, 95.35, 94.24, 95.39, 94.98]
# wst = [95.30, 95.19, 94.60, 95.47, 95.16, 95.44, 95.25, 94.50, 95.37, 95.16]
# single = get_metric('../imagenet32_single.txt')
# kd = get_metric('../imagenet32_k.txt')
# at = get_metric('../imagenet32_a.txt')
# wst = get_metric('../imagenet32_ws.txt')
# # atl = get_metric('../cifar10_at_last_vl.txt')
# print(single)
# print(kd)
# print(at)
# print(wst)
# print(stats.shapiro(single), stats.levene(single,at,wst))
# print(stats.shapiro(at), stats.levene(at))
# print(stats.shapiro(wst), stats.levene(wst))
# length = len(single)
# print(sum(single)/length, sum(kd)/length, sum(at)/length, sum(wst)/length)
#
# print(stats.mannwhitneyu(single, kd))
# print(stats.mannwhitneyu(single, at))
# print(stats.mannwhitneyu(single, wst)) | print('trans percentage {}'.format(avg_trans))
# print('average trans percentage {}'.format(sum(avg_trans)/len(avg_trans)))
| random_line_split |
kiteworks.go | package core
import (
"errors"
"fmt"
"reflect"
"strings"
"time"
)
var ErrNotFound = errors.New("Requested item not found.")
type FileInfo interface {
Name() string
Size() int64
ModTime() time.Time
}
type KiteMember struct {
ID int `json:"objectId"`
RoleID int `json:"roleId`
User KiteUser `json:"user"`
Role KitePermission `json:"role"`
}
// KiteFile/Folder/Attachment
type KiteObject struct {
Type string `json:"type"`
Status string `json:"status"`
ID int `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Created string `json:"created"`
Modified string `json:"modified"`
ClientCreated string `json:"clientCreated"`
ClientModified string `json:"clientModified"`
Deleted bool `json:"deleted"`
PermDeleted bool `json:"permDeleted"`
Expire interface{} `json:"expire"`
Path string `json:"path"`
ParentID int `json:"parentId"`
UserID int `json:"userId"`
Permalink string `json:"permalink"`
Secure bool `json:"secure"`
LockUser int `json:"lockUser"`
Fingerprint string `json:"fingerprint"`
ProfileID int `json:"typeID`
Size int64 `json:"size"`
Mime string `json:"mime"`
AVStatus string `json:"avStatus"`
DLPStatus string `json:"dlpStatus"`
AdminQuarantineStatus string `json:"adminQuarantineStatus`
Quarantined bool `json:"quarantined"`
DLPLocked bool `json:"dlpLocked"`
FileLifetime int `json:"fileLifetime"`
MailID int `json:"mail_id"`
Links []KiteLinks `json:"links"`
CurrentUserRole KitePermission `json:"currentUserRole"`
}
// Returns the Expiration in time.Time.
func (K *KiteObject) Expiry() time.Time {
var exp_time time.Time
if exp_string, ok := K.Expire.(string); ok {
exp_time, _ = ReadKWTime(exp_string)
}
return exp_time
}
// Kiteworks Links Data
type KiteLinks struct {
Relationship string `json:"rel"`
Entity string `json:"entity"`
ID int `json:"id"`
URL string `json:"href"`
}
// Permission information
type KitePermission struct {
ID int `json:"id"`
Name string `json:"name"`
Rank int `json:"rank"`
Modifiable bool `json:"modifiable"`
Disabled bool `json:"disabled"`
}
type kw_rest_folder struct {
folder_id int
*KWSession
}
func (s KWSession) Folder(folder_id int) kw_rest_folder {
return kw_rest_folder{
folder_id,
&s,
}
}
func (s kw_rest_folder) Members(params ...interface{}) (result []KiteMember, err error) {
return result, s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/members", s.folder_id),
Output: &result,
Params: SetParams(params, Query{"with": "(user,role)"}),
}, -1, 1000)
}
func (s kw_rest_folder) AddUsersToFolder(emails []string, role_id int, notify bool, notify_files_added bool, params ...interface{}) (err error) {
params = SetParams(PostJSON{"notify": notify, "notifyFileAdded": notify_files_added, "emails": emails, "roleId": role_id}, Query{"updateIfExists": true, "partialSuccess": true}, params)
err = s.Call(APIRequest{
Method: "POST",
Path: SetPath("/rest/folders/%d/members", s.folder_id),
Params: params,
})
return
}
func (s kw_rest_folder) ResolvePath(path string) (result KiteObject, err error) {
folder_path := SplitPath(path)
current_id := s.folder_id
var current KiteObject
for _, f := range folder_path {
current, err = s.Folder(current_id).Find(f)
if err != nil {
if err == ErrNotFound {
current, err = s.Folder(current_id).NewFolder(f)
if err != nil {
return
}
current_id = current.ID
}
}
current_id = current.ID
}
result = current
return
}
// Find item in folder, using folder path, if folder_id > 0, start search there.
func (s kw_rest_folder) Find(path string, params ...interface{}) (result KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
folder_path := SplitPath(path)
var current []KiteObject
if s.folder_id <= 0 {
current, err = s.TopFolders(params)
} else {
current, err = s.Folder(s.folder_id).Contents(params)
}
if err != nil {
return
}
var found bool
folder_len := len(folder_path) - 1
for i, f := range folder_path {
found = false
for _, c := range current {
if strings.ToLower(f) == strings.ToLower(c.Name) {
result = c
if i < folder_len && c.Type == "d" {
current, err = s.Folder(c.ID).Contents(params)
if err != nil {
return
}
found = true
break
} else if i == folder_len {
return
}
}
}
if found == false {
return result, ErrNotFound
}
}
return result, ErrNotFound
}
type kw_rest_admin struct {
*KWSession
}
func (s KWSession) Admin() kw_rest_admin {
return kw_rest_admin{&s}
}
// Creates a new user on the system.
func (s kw_rest_admin) NewUser(user_email string, type_id int, verified, notify bool) (user *KiteUser, err error) {
err = s.Call(APIRequest{
Method: "POST",
Path: "/rest/users",
Params: SetParams(PostJSON{"email": user_email, "userTypeId": type_id, "verified": verified, "sendNotification": notify}, Query{"returnEntity": true}),
Output: &user,
})
return user, err
}
func (s kw_rest_admin) FindProfileUsers(profile_id int, params ...interface{}) (emails []string, err error) {
var users []struct {
Email string `json:"email"`
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/admin/profiles/%d/users", profile_id),
Params: SetParams(params),
Output: &users,
}, -1, 1000)
if err != nil {
return nil, err
}
for _, u := range users {
emails = append(emails, u.Email)
}
return
}
type kw_rest_file struct {
file_id int
*KWSession
}
func (s KWSession) File(file_id int) kw_rest_file {
return kw_rest_file{file_id, &s}
}
func (s kw_rest_file) Info(params ...interface{}) (result KiteObject, err error) {
err = s.Call(APIRequest{
Method: "GET",
Path: SetPath("/rest/files/%d", s.file_id),
Params: SetParams(params),
Output: &result,
})
return
}
func (s kw_rest_file) Delete(params ...interface{}) (err error) {
err = s.Call(APIRequest{
Method: "DELETE",
Path: SetPath("/rest/files/%d", s.file_id),
Params: SetParams(params),
})
return
}
func (s kw_rest_file) PermDelete() (err error) {
err = s.Call(APIRequest{
Method: "DELETE",
Path: SetPath("/rest/files/%d/actions/permanent", s.file_id),
})
return
}
/*
// Drills down specific folder and returns all results.
func (s KWSession) CrawlFolder(folder_id int, params...interface{}) (results []KiteObject, err error) {
if len(params) == 0 {
}
}*/
// Get list of all top folders
func (s KWSession) TopFolders(params ...interface{}) (folders []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/folders/top",
Output: &folders,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
/*
// File Uploader
func (S kw_rest_folder) Upload(src SourceFile, overwrite_newer, version bool, count_cb func(num int)) (error) {
if S.folder_id == 0 {
Notice("%s: Uploading files to base path is not permitted, ignoring file.", src.Name())
return nil
}
var UploadRecord struct {
Name string
ID int
ClientModified time.Time
Size int64
Created time.Time
}
if count_cb == nil {
count_cb = func(num int) {
return
}
}
transfer_file := func(src SourceFile, uid int) (err error) {
defer src.Close()
x := TransferCounter(src, count_cb)
_, err = S.KWSession.Upload(src.Name(), uid, x)
return
}
target := fmt.Sprintf("%d:%s", S.folder_id, src.String())
uploads := S.db.Table("uploads")
if uploads.Get(target, &UploadRecord) {
if err := transfer_file(src, UploadRecord.ID); err != nil {
Debug("Error attempting to resume file: %s", err.Error())
} else {
uploads.Unset(target)
return nil
}
}
kw_file_info, err := S.Folder(S.folder_id).Find(src.Name())
if err != nil && err != ErrNotFound {
return err
}
var uid int
if kw_file_info.ID > 0 {
modified, _ := ReadKWTime(kw_file_info.ClientModified)
// File on kiteworks is newer than local file.
if modified.UTC().Unix() > src.ModTime().UTC().Unix() {
if overwrite_newer {
uid, err = S.File(kw_file_info.ID).NewVersion(src)
if err != nil {
return err
}
} else {
uploads.Unset(target)
return nil
}
// Local file is newer than kiteworks file.
} else if modified.UTC().Unix() < src.ModTime().UTC().Unix() {
uid, err = S.File(kw_file_info.ID).NewVersion(src, PostJSON{"disableAutoVersion": !version})
if err != nil {
return err
}
} else {
return nil
}
} else {
uid, err = S.Folder(S.folder_id).NewUpload(src)
if err != nil {
return err
}
}
UploadRecord.Name = src.Name()
UploadRecord.ID = uid
UploadRecord.ClientModified = src.ModTime()
UploadRecord.Size = src.Size()
uploads.Set(target, &UploadRecord)
for i := uint(0); i <= S.Retries; i++ {
err = transfer_file(src, uid)
if err == nil || IsAPIError(err) {
if err != nil && IsAPIError(err, "ERR_INTERNAL_SERVER_ERROR") {
Debug("[%d]%s: %s (%d/%d)", uid, UploadRecord.Name, err.Error(), i+1, S.Retries+1)
S.BackoffTimer(i)
continue
}
uploads.Unset(target)
return err
}
break
}
return nil
}
*/
// Returns all items with listed folder_id.
func (s kw_rest_folder) Contents(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/children", s.folder_id),
Output: &children,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
// Returns all items with listed folder_id.
func (s kw_rest_folder) Folders(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/folders", s.folder_id),
Output: &children,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
func (s kw_rest_folder) Recover(params ...interface{}) (err error) {
return s.Call(APIRequest{
Method: "PATCH",
Path: SetPath("/rest/folders/%d/actions/recover", s.folder_id),
})
}
func (s kw_rest_file) Recover(params ...interface{}) (err error) {
return s.Call(APIRequest{
Method: "PATCH",
Path: SetPath("/rest/files/%d/actions/recover", s.file_id),
})
}
func (s kw_rest_folder) Files(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/files", s.folder_id),
Output: &children,
Params: SetParams(params),
}, -1, 1000)
return
}
func (s kw_rest_folder) Info(params ...interface{}) (output KiteObject, err error) {
if params == nil {
params = SetParams(Query{"deleted": false})
}
if s.folder_id == 0 {
return
}
err = s.Call(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d", s.folder_id),
Params: SetParams(params, Query{"mode": "full", "with": "(currentUserRole, fileLifetime, path)"}),
Output: &output,
})
return
}
func (s kw_rest_folder) NewFolder(name string, params ...interface{}) (output KiteObject, err error) {
err = s.Call(APIRequest{
Method: "POST",
Path: SetPath("/rest/folders/%d/folders", s.folder_id),
Params: SetParams(PostJSON{"name": name}, Query{"returnEntity": true}, params),
Output: &output,
})
return
}
// Kiteworks User Data
type KiteUser struct {
ID int `json:"id"`
Active bool `json:"active"`
Deactivated bool `json:"deactivated"`
Suspended bool `json:"suspended"`
BaseDirID int `json:"basedirId"`
Deleted bool `json:"deleted"`
Email string `json:"email"`
MyDirID int `json:"mydirId"`
Name string `json:"name"`
SyncDirID int `json:"syncdirId"`
UserTypeID int `json:"userTypeId"`
Verified bool `json:"verified"`
Internal bool `json:"internal"`
}
// Retrieve my user info.
func (s KWSession) MyUser() (user KiteUser, err error) {
err = s.Call(APIRequest{
Method: "GET",
Path: "/rest/users/me",
Output: &user,
})
return
}
// Get total count of users.
func (s kw_rest_admin) UserCount(emails []string, params ...interface{}) (users int, err error) {
var user []struct{}
if emails != nil && emails[0] != NONE {
for _, u := range emails {
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(Query{"email": u}, params),
Output: &user}, -1, 1000)
if err != nil {
return
}
users = len(user) + users
}
return
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(params),
Output: &user}, -1, 1000)
return len(user), err
}
// Get Users
type GetUsers struct {
offset int
filter Query
emails []string
params []interface{}
session *kw_rest_admin
completed bool
}
// Admin EAPI endpoint to pull all users matching parameters.
func (s kw_rest_admin) Users(emails []string, params ...interface{}) *GetUsers {
var T GetUsers
T.filter = make(Query)
T.offset = 0
T.emails = emails
// First extract the query from request.
params = SetParams(params)
var query Query
tmp := params[0:0]
for _, v := range params {
switch e := v.(type) {
case Query:
query = e
default:
tmp = append(tmp, v)
}
}
params = tmp
// Next take remainder of query and reattach it to outgoing request.
var forward_query Query
forward_query = make(Query)
for key, val := range query {
switch strings.ToLower(key) {
case "suspended":
fallthrough
case "active":
fallthrough
case "deleted":
fallthrough
case "verified":
T.filter[key] = val
default:
forward_query[key] = val
}
}
T.params = SetParams(params, forward_query)
T.session = &s
return &T
}
// Return a set of users to process.
func (T *GetUsers) Next() (users []KiteUser, err error) {
if T.emails != nil && T.emails[0] != NONE {
if !T.completed {
T.completed = true
return T.findEmails()
} else {
return []KiteUser{}, nil
}
}
for {
var raw_users []KiteUser
err = T.session.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: T.params,
Output: &raw_users}, T.offset, 1000)
if err != nil {
return nil, err | if len(raw_users) == 0 {
return
}
T.offset = T.offset + len(raw_users)
users, err = T.filterUsers(raw_users)
if err != nil {
return nil, err
}
if len(users) == 0 {
continue
} else {
break
}
}
return
}
func (T *GetUsers) findEmails() (users []KiteUser, err error) {
for _, u := range T.emails {
var raw_users []KiteUser
err = T.session.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(Query{"email": u}, T.params),
Output: &raw_users}, -1, 1000)
if err != nil {
Err("%s: %s", u, err.Error())
continue
}
filtered_users, err := T.filterUsers(raw_users)
if err != nil {
return nil, err
}
if len(filtered_users) > 0 {
users = append(users, filtered_users[0:]...)
continue
}
Err("%s: User not found, or did not meet specified criteria", u)
}
return
}
// Filter out users matching filter specified in GetUsers call.
func (T *GetUsers) filterUsers(input []KiteUser) (users []KiteUser, err error) {
// Match bool variables
matchBool := func(input KiteUser, key string, value bool) bool {
var bool_var bool
switch key {
case "suspended":
bool_var = input.Suspended
case "active":
bool_var = input.Active
case "deleted":
bool_var = input.Deleted
case "verified":
bool_var = input.Verified
}
if bool_var == value {
return true
}
return false
}
for key, val := range T.filter {
key = strings.ToLower(key)
switch key {
case "suspended":
fallthrough
case "active":
fallthrough
case "deleted":
fallthrough
case "verified":
if v, ok := val.(bool); ok {
tmp := input[0:0]
for _, user := range input {
if matchBool(user, key, v) {
tmp = append(tmp, user)
}
}
input = tmp
} else {
return nil, fmt.Errorf("Invalid filter for \"%s\", expected bool got %v(%v) instead.", key, reflect.TypeOf(val), val)
}
}
}
return input, nil
}
// Downloads a file to a specific path
func (s KWSession) FileDownload(file *KiteObject) (ReadSeekCloser, error) {
if file == nil {
return nil, fmt.Errorf("nil file object provided.")
}
req, err := s.NewRequest("GET", SetPath("/rest/files/%d/content", file.ID))
if err != nil {
return nil, err
}
req.Header.Set("X-Accellion-Version", fmt.Sprintf("%d", 7))
err = s.SetToken(s.Username, req)
return transferMonitor(file.Name, file.Size, rightToLeft, s.Download(req)), err
}
type kw_profile struct {
profile_id int
*KWSession
}
func (K KWSession) Profile(profile_id int) kw_profile {
return kw_profile{
profile_id,
&K,
}
}
func (K kw_profile) Get() (profile KWProfile, err error) {
err = K.Call(APIRequest{
Version: 13,
Path: SetPath("/rest/profiles/%d", K.profile_id),
Output: &profile,
})
return
}
type KWProfile struct {
Features struct {
AllowSFTP bool `json:"allowSftp"`
MaxStorage int64 `json:"maxStorage"`
SendExternal bool `json:"sendExternal`
FolderCreate int `json:"folderCreate"`
} `json:"features"`
} | } | random_line_split |
kiteworks.go | package core
import (
"errors"
"fmt"
"reflect"
"strings"
"time"
)
var ErrNotFound = errors.New("Requested item not found.")
type FileInfo interface {
Name() string
Size() int64
ModTime() time.Time
}
type KiteMember struct {
ID int `json:"objectId"`
RoleID int `json:"roleId`
User KiteUser `json:"user"`
Role KitePermission `json:"role"`
}
// KiteFile/Folder/Attachment
type KiteObject struct {
Type string `json:"type"`
Status string `json:"status"`
ID int `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Created string `json:"created"`
Modified string `json:"modified"`
ClientCreated string `json:"clientCreated"`
ClientModified string `json:"clientModified"`
Deleted bool `json:"deleted"`
PermDeleted bool `json:"permDeleted"`
Expire interface{} `json:"expire"`
Path string `json:"path"`
ParentID int `json:"parentId"`
UserID int `json:"userId"`
Permalink string `json:"permalink"`
Secure bool `json:"secure"`
LockUser int `json:"lockUser"`
Fingerprint string `json:"fingerprint"`
ProfileID int `json:"typeID`
Size int64 `json:"size"`
Mime string `json:"mime"`
AVStatus string `json:"avStatus"`
DLPStatus string `json:"dlpStatus"`
AdminQuarantineStatus string `json:"adminQuarantineStatus`
Quarantined bool `json:"quarantined"`
DLPLocked bool `json:"dlpLocked"`
FileLifetime int `json:"fileLifetime"`
MailID int `json:"mail_id"`
Links []KiteLinks `json:"links"`
CurrentUserRole KitePermission `json:"currentUserRole"`
}
// Returns the Expiration in time.Time.
func (K *KiteObject) Expiry() time.Time {
var exp_time time.Time
if exp_string, ok := K.Expire.(string); ok {
exp_time, _ = ReadKWTime(exp_string)
}
return exp_time
}
// Kiteworks Links Data
type KiteLinks struct {
Relationship string `json:"rel"`
Entity string `json:"entity"`
ID int `json:"id"`
URL string `json:"href"`
}
// Permission information
type KitePermission struct {
ID int `json:"id"`
Name string `json:"name"`
Rank int `json:"rank"`
Modifiable bool `json:"modifiable"`
Disabled bool `json:"disabled"`
}
type kw_rest_folder struct {
folder_id int
*KWSession
}
func (s KWSession) Folder(folder_id int) kw_rest_folder {
return kw_rest_folder{
folder_id,
&s,
}
}
func (s kw_rest_folder) Members(params ...interface{}) (result []KiteMember, err error) {
return result, s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/members", s.folder_id),
Output: &result,
Params: SetParams(params, Query{"with": "(user,role)"}),
}, -1, 1000)
}
func (s kw_rest_folder) AddUsersToFolder(emails []string, role_id int, notify bool, notify_files_added bool, params ...interface{}) (err error) {
params = SetParams(PostJSON{"notify": notify, "notifyFileAdded": notify_files_added, "emails": emails, "roleId": role_id}, Query{"updateIfExists": true, "partialSuccess": true}, params)
err = s.Call(APIRequest{
Method: "POST",
Path: SetPath("/rest/folders/%d/members", s.folder_id),
Params: params,
})
return
}
func (s kw_rest_folder) ResolvePath(path string) (result KiteObject, err error) {
folder_path := SplitPath(path)
current_id := s.folder_id
var current KiteObject
for _, f := range folder_path {
current, err = s.Folder(current_id).Find(f)
if err != nil {
if err == ErrNotFound {
current, err = s.Folder(current_id).NewFolder(f)
if err != nil {
return
}
current_id = current.ID
}
}
current_id = current.ID
}
result = current
return
}
// Find item in folder, using folder path, if folder_id > 0, start search there.
func (s kw_rest_folder) Find(path string, params ...interface{}) (result KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
folder_path := SplitPath(path)
var current []KiteObject
if s.folder_id <= 0 {
current, err = s.TopFolders(params)
} else {
current, err = s.Folder(s.folder_id).Contents(params)
}
if err != nil {
return
}
var found bool
folder_len := len(folder_path) - 1
for i, f := range folder_path {
found = false
for _, c := range current {
if strings.ToLower(f) == strings.ToLower(c.Name) {
result = c
if i < folder_len && c.Type == "d" {
current, err = s.Folder(c.ID).Contents(params)
if err != nil {
return
}
found = true
break
} else if i == folder_len {
return
}
}
}
if found == false {
return result, ErrNotFound
}
}
return result, ErrNotFound
}
type kw_rest_admin struct {
*KWSession
}
func (s KWSession) Admin() kw_rest_admin {
return kw_rest_admin{&s}
}
// Creates a new user on the system.
func (s kw_rest_admin) NewUser(user_email string, type_id int, verified, notify bool) (user *KiteUser, err error) {
err = s.Call(APIRequest{
Method: "POST",
Path: "/rest/users",
Params: SetParams(PostJSON{"email": user_email, "userTypeId": type_id, "verified": verified, "sendNotification": notify}, Query{"returnEntity": true}),
Output: &user,
})
return user, err
}
func (s kw_rest_admin) FindProfileUsers(profile_id int, params ...interface{}) (emails []string, err error) {
var users []struct {
Email string `json:"email"`
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/admin/profiles/%d/users", profile_id),
Params: SetParams(params),
Output: &users,
}, -1, 1000)
if err != nil {
return nil, err
}
for _, u := range users {
emails = append(emails, u.Email)
}
return
}
type kw_rest_file struct {
file_id int
*KWSession
}
func (s KWSession) File(file_id int) kw_rest_file {
return kw_rest_file{file_id, &s}
}
func (s kw_rest_file) Info(params ...interface{}) (result KiteObject, err error) {
err = s.Call(APIRequest{
Method: "GET",
Path: SetPath("/rest/files/%d", s.file_id),
Params: SetParams(params),
Output: &result,
})
return
}
func (s kw_rest_file) Delete(params ...interface{}) (err error) {
err = s.Call(APIRequest{
Method: "DELETE",
Path: SetPath("/rest/files/%d", s.file_id),
Params: SetParams(params),
})
return
}
func (s kw_rest_file) PermDelete() (err error) {
err = s.Call(APIRequest{
Method: "DELETE",
Path: SetPath("/rest/files/%d/actions/permanent", s.file_id),
})
return
}
/*
// Drills down specific folder and returns all results.
func (s KWSession) CrawlFolder(folder_id int, params...interface{}) (results []KiteObject, err error) {
if len(params) == 0 {
}
}*/
// Get list of all top folders
func (s KWSession) TopFolders(params ...interface{}) (folders []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/folders/top",
Output: &folders,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
/*
// File Uploader
func (S kw_rest_folder) Upload(src SourceFile, overwrite_newer, version bool, count_cb func(num int)) (error) {
if S.folder_id == 0 {
Notice("%s: Uploading files to base path is not permitted, ignoring file.", src.Name())
return nil
}
var UploadRecord struct {
Name string
ID int
ClientModified time.Time
Size int64
Created time.Time
}
if count_cb == nil {
count_cb = func(num int) {
return
}
}
transfer_file := func(src SourceFile, uid int) (err error) {
defer src.Close()
x := TransferCounter(src, count_cb)
_, err = S.KWSession.Upload(src.Name(), uid, x)
return
}
target := fmt.Sprintf("%d:%s", S.folder_id, src.String())
uploads := S.db.Table("uploads")
if uploads.Get(target, &UploadRecord) {
if err := transfer_file(src, UploadRecord.ID); err != nil {
Debug("Error attempting to resume file: %s", err.Error())
} else {
uploads.Unset(target)
return nil
}
}
kw_file_info, err := S.Folder(S.folder_id).Find(src.Name())
if err != nil && err != ErrNotFound {
return err
}
var uid int
if kw_file_info.ID > 0 {
modified, _ := ReadKWTime(kw_file_info.ClientModified)
// File on kiteworks is newer than local file.
if modified.UTC().Unix() > src.ModTime().UTC().Unix() {
if overwrite_newer {
uid, err = S.File(kw_file_info.ID).NewVersion(src)
if err != nil {
return err
}
} else {
uploads.Unset(target)
return nil
}
// Local file is newer than kiteworks file.
} else if modified.UTC().Unix() < src.ModTime().UTC().Unix() {
uid, err = S.File(kw_file_info.ID).NewVersion(src, PostJSON{"disableAutoVersion": !version})
if err != nil {
return err
}
} else {
return nil
}
} else {
uid, err = S.Folder(S.folder_id).NewUpload(src)
if err != nil {
return err
}
}
UploadRecord.Name = src.Name()
UploadRecord.ID = uid
UploadRecord.ClientModified = src.ModTime()
UploadRecord.Size = src.Size()
uploads.Set(target, &UploadRecord)
for i := uint(0); i <= S.Retries; i++ {
err = transfer_file(src, uid)
if err == nil || IsAPIError(err) {
if err != nil && IsAPIError(err, "ERR_INTERNAL_SERVER_ERROR") {
Debug("[%d]%s: %s (%d/%d)", uid, UploadRecord.Name, err.Error(), i+1, S.Retries+1)
S.BackoffTimer(i)
continue
}
uploads.Unset(target)
return err
}
break
}
return nil
}
*/
// Returns all items with listed folder_id.
func (s kw_rest_folder) | (params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/children", s.folder_id),
Output: &children,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
// Returns all items with listed folder_id.
func (s kw_rest_folder) Folders(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/folders", s.folder_id),
Output: &children,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
func (s kw_rest_folder) Recover(params ...interface{}) (err error) {
return s.Call(APIRequest{
Method: "PATCH",
Path: SetPath("/rest/folders/%d/actions/recover", s.folder_id),
})
}
func (s kw_rest_file) Recover(params ...interface{}) (err error) {
return s.Call(APIRequest{
Method: "PATCH",
Path: SetPath("/rest/files/%d/actions/recover", s.file_id),
})
}
func (s kw_rest_folder) Files(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/files", s.folder_id),
Output: &children,
Params: SetParams(params),
}, -1, 1000)
return
}
func (s kw_rest_folder) Info(params ...interface{}) (output KiteObject, err error) {
if params == nil {
params = SetParams(Query{"deleted": false})
}
if s.folder_id == 0 {
return
}
err = s.Call(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d", s.folder_id),
Params: SetParams(params, Query{"mode": "full", "with": "(currentUserRole, fileLifetime, path)"}),
Output: &output,
})
return
}
func (s kw_rest_folder) NewFolder(name string, params ...interface{}) (output KiteObject, err error) {
err = s.Call(APIRequest{
Method: "POST",
Path: SetPath("/rest/folders/%d/folders", s.folder_id),
Params: SetParams(PostJSON{"name": name}, Query{"returnEntity": true}, params),
Output: &output,
})
return
}
// Kiteworks User Data
type KiteUser struct {
ID int `json:"id"`
Active bool `json:"active"`
Deactivated bool `json:"deactivated"`
Suspended bool `json:"suspended"`
BaseDirID int `json:"basedirId"`
Deleted bool `json:"deleted"`
Email string `json:"email"`
MyDirID int `json:"mydirId"`
Name string `json:"name"`
SyncDirID int `json:"syncdirId"`
UserTypeID int `json:"userTypeId"`
Verified bool `json:"verified"`
Internal bool `json:"internal"`
}
// Retrieve my user info.
func (s KWSession) MyUser() (user KiteUser, err error) {
err = s.Call(APIRequest{
Method: "GET",
Path: "/rest/users/me",
Output: &user,
})
return
}
// Get total count of users.
func (s kw_rest_admin) UserCount(emails []string, params ...interface{}) (users int, err error) {
var user []struct{}
if emails != nil && emails[0] != NONE {
for _, u := range emails {
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(Query{"email": u}, params),
Output: &user}, -1, 1000)
if err != nil {
return
}
users = len(user) + users
}
return
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(params),
Output: &user}, -1, 1000)
return len(user), err
}
// Get Users
type GetUsers struct {
offset int
filter Query
emails []string
params []interface{}
session *kw_rest_admin
completed bool
}
// Admin EAPI endpoint to pull all users matching parameters.
func (s kw_rest_admin) Users(emails []string, params ...interface{}) *GetUsers {
var T GetUsers
T.filter = make(Query)
T.offset = 0
T.emails = emails
// First extract the query from request.
params = SetParams(params)
var query Query
tmp := params[0:0]
for _, v := range params {
switch e := v.(type) {
case Query:
query = e
default:
tmp = append(tmp, v)
}
}
params = tmp
// Next take remainder of query and reattach it to outgoing request.
var forward_query Query
forward_query = make(Query)
for key, val := range query {
switch strings.ToLower(key) {
case "suspended":
fallthrough
case "active":
fallthrough
case "deleted":
fallthrough
case "verified":
T.filter[key] = val
default:
forward_query[key] = val
}
}
T.params = SetParams(params, forward_query)
T.session = &s
return &T
}
// Return a set of users to process.
func (T *GetUsers) Next() (users []KiteUser, err error) {
if T.emails != nil && T.emails[0] != NONE {
if !T.completed {
T.completed = true
return T.findEmails()
} else {
return []KiteUser{}, nil
}
}
for {
var raw_users []KiteUser
err = T.session.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: T.params,
Output: &raw_users}, T.offset, 1000)
if err != nil {
return nil, err
}
if len(raw_users) == 0 {
return
}
T.offset = T.offset + len(raw_users)
users, err = T.filterUsers(raw_users)
if err != nil {
return nil, err
}
if len(users) == 0 {
continue
} else {
break
}
}
return
}
func (T *GetUsers) findEmails() (users []KiteUser, err error) {
for _, u := range T.emails {
var raw_users []KiteUser
err = T.session.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(Query{"email": u}, T.params),
Output: &raw_users}, -1, 1000)
if err != nil {
Err("%s: %s", u, err.Error())
continue
}
filtered_users, err := T.filterUsers(raw_users)
if err != nil {
return nil, err
}
if len(filtered_users) > 0 {
users = append(users, filtered_users[0:]...)
continue
}
Err("%s: User not found, or did not meet specified criteria", u)
}
return
}
// Filter out users matching filter specified in GetUsers call.
func (T *GetUsers) filterUsers(input []KiteUser) (users []KiteUser, err error) {
// Match bool variables
matchBool := func(input KiteUser, key string, value bool) bool {
var bool_var bool
switch key {
case "suspended":
bool_var = input.Suspended
case "active":
bool_var = input.Active
case "deleted":
bool_var = input.Deleted
case "verified":
bool_var = input.Verified
}
if bool_var == value {
return true
}
return false
}
for key, val := range T.filter {
key = strings.ToLower(key)
switch key {
case "suspended":
fallthrough
case "active":
fallthrough
case "deleted":
fallthrough
case "verified":
if v, ok := val.(bool); ok {
tmp := input[0:0]
for _, user := range input {
if matchBool(user, key, v) {
tmp = append(tmp, user)
}
}
input = tmp
} else {
return nil, fmt.Errorf("Invalid filter for \"%s\", expected bool got %v(%v) instead.", key, reflect.TypeOf(val), val)
}
}
}
return input, nil
}
// Downloads a file to a specific path
func (s KWSession) FileDownload(file *KiteObject) (ReadSeekCloser, error) {
if file == nil {
return nil, fmt.Errorf("nil file object provided.")
}
req, err := s.NewRequest("GET", SetPath("/rest/files/%d/content", file.ID))
if err != nil {
return nil, err
}
req.Header.Set("X-Accellion-Version", fmt.Sprintf("%d", 7))
err = s.SetToken(s.Username, req)
return transferMonitor(file.Name, file.Size, rightToLeft, s.Download(req)), err
}
type kw_profile struct {
profile_id int
*KWSession
}
func (K KWSession) Profile(profile_id int) kw_profile {
return kw_profile{
profile_id,
&K,
}
}
func (K kw_profile) Get() (profile KWProfile, err error) {
err = K.Call(APIRequest{
Version: 13,
Path: SetPath("/rest/profiles/%d", K.profile_id),
Output: &profile,
})
return
}
type KWProfile struct {
Features struct {
AllowSFTP bool `json:"allowSftp"`
MaxStorage int64 `json:"maxStorage"`
SendExternal bool `json:"sendExternal`
FolderCreate int `json:"folderCreate"`
} `json:"features"`
}
| Contents | identifier_name |
kiteworks.go | package core
import (
"errors"
"fmt"
"reflect"
"strings"
"time"
)
var ErrNotFound = errors.New("Requested item not found.")
type FileInfo interface {
Name() string
Size() int64
ModTime() time.Time
}
type KiteMember struct {
ID int `json:"objectId"`
RoleID int `json:"roleId`
User KiteUser `json:"user"`
Role KitePermission `json:"role"`
}
// KiteFile/Folder/Attachment
type KiteObject struct {
Type string `json:"type"`
Status string `json:"status"`
ID int `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Created string `json:"created"`
Modified string `json:"modified"`
ClientCreated string `json:"clientCreated"`
ClientModified string `json:"clientModified"`
Deleted bool `json:"deleted"`
PermDeleted bool `json:"permDeleted"`
Expire interface{} `json:"expire"`
Path string `json:"path"`
ParentID int `json:"parentId"`
UserID int `json:"userId"`
Permalink string `json:"permalink"`
Secure bool `json:"secure"`
LockUser int `json:"lockUser"`
Fingerprint string `json:"fingerprint"`
ProfileID int `json:"typeID`
Size int64 `json:"size"`
Mime string `json:"mime"`
AVStatus string `json:"avStatus"`
DLPStatus string `json:"dlpStatus"`
AdminQuarantineStatus string `json:"adminQuarantineStatus`
Quarantined bool `json:"quarantined"`
DLPLocked bool `json:"dlpLocked"`
FileLifetime int `json:"fileLifetime"`
MailID int `json:"mail_id"`
Links []KiteLinks `json:"links"`
CurrentUserRole KitePermission `json:"currentUserRole"`
}
// Returns the Expiration in time.Time.
func (K *KiteObject) Expiry() time.Time {
var exp_time time.Time
if exp_string, ok := K.Expire.(string); ok {
exp_time, _ = ReadKWTime(exp_string)
}
return exp_time
}
// Kiteworks Links Data
type KiteLinks struct {
Relationship string `json:"rel"`
Entity string `json:"entity"`
ID int `json:"id"`
URL string `json:"href"`
}
// Permission information
type KitePermission struct {
ID int `json:"id"`
Name string `json:"name"`
Rank int `json:"rank"`
Modifiable bool `json:"modifiable"`
Disabled bool `json:"disabled"`
}
type kw_rest_folder struct {
folder_id int
*KWSession
}
func (s KWSession) Folder(folder_id int) kw_rest_folder {
return kw_rest_folder{
folder_id,
&s,
}
}
func (s kw_rest_folder) Members(params ...interface{}) (result []KiteMember, err error) {
return result, s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/members", s.folder_id),
Output: &result,
Params: SetParams(params, Query{"with": "(user,role)"}),
}, -1, 1000)
}
func (s kw_rest_folder) AddUsersToFolder(emails []string, role_id int, notify bool, notify_files_added bool, params ...interface{}) (err error) {
params = SetParams(PostJSON{"notify": notify, "notifyFileAdded": notify_files_added, "emails": emails, "roleId": role_id}, Query{"updateIfExists": true, "partialSuccess": true}, params)
err = s.Call(APIRequest{
Method: "POST",
Path: SetPath("/rest/folders/%d/members", s.folder_id),
Params: params,
})
return
}
// ResolvePath walks each element of path beneath the receiver's folder,
// creating any folder that does not yet exist, and returns the final object.
func (s kw_rest_folder) ResolvePath(path string) (result KiteObject, err error) {
	folder_path := SplitPath(path)
	current_id := s.folder_id
	var current KiteObject
	for _, f := range folder_path {
		current, err = s.Folder(current_id).Find(f)
		if err != nil {
			// Bug fix: previously any error other than ErrNotFound was silently
			// swallowed and the walk continued with a zero-value folder id.
			if err != ErrNotFound {
				return
			}
			current, err = s.Folder(current_id).NewFolder(f)
			if err != nil {
				return
			}
		}
		// Descend into the folder we just found or created.
		current_id = current.ID
	}
	result = current
	return
}
// Find item in folder, using folder path, if folder_id > 0, start search there.
// Matching is case-insensitive; returns ErrNotFound when any path element
// is missing (or when the final element never matches).
func (s kw_rest_folder) Find(path string, params ...interface{}) (result KiteObject, err error) {
	if len(params) == 0 {
		params = SetParams(Query{"deleted": false})
	}
	folder_path := SplitPath(path)
	var current []KiteObject
	if s.folder_id <= 0 {
		current, err = s.TopFolders(params)
	} else {
		current, err = s.Folder(s.folder_id).Contents(params)
	}
	if err != nil {
		return
	}
	var found bool
	folder_len := len(folder_path) - 1
	for i, f := range folder_path {
		found = false
		for _, c := range current {
			// EqualFold avoids allocating two lowered copies per comparison.
			if strings.EqualFold(f, c.Name) {
				result = c
				// Intermediate element: must be a directory ("d") to descend.
				if i < folder_len && c.Type == "d" {
					current, err = s.Folder(c.ID).Contents(params)
					if err != nil {
						return
					}
					found = true
					break
				} else if i == folder_len {
					return
				}
			}
		}
		if !found {
			return result, ErrNotFound
		}
	}
	return result, ErrNotFound
}
type kw_rest_admin struct {
*KWSession
}
func (s KWSession) Admin() kw_rest_admin {
return kw_rest_admin{&s}
}
// Creates a new user on the system.
func (s kw_rest_admin) NewUser(user_email string, type_id int, verified, notify bool) (user *KiteUser, err error) {
err = s.Call(APIRequest{
Method: "POST",
Path: "/rest/users",
Params: SetParams(PostJSON{"email": user_email, "userTypeId": type_id, "verified": verified, "sendNotification": notify}, Query{"returnEntity": true}),
Output: &user,
})
return user, err
}
func (s kw_rest_admin) FindProfileUsers(profile_id int, params ...interface{}) (emails []string, err error) {
var users []struct {
Email string `json:"email"`
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/admin/profiles/%d/users", profile_id),
Params: SetParams(params),
Output: &users,
}, -1, 1000)
if err != nil {
return nil, err
}
for _, u := range users {
emails = append(emails, u.Email)
}
return
}
type kw_rest_file struct {
file_id int
*KWSession
}
func (s KWSession) File(file_id int) kw_rest_file {
return kw_rest_file{file_id, &s}
}
func (s kw_rest_file) Info(params ...interface{}) (result KiteObject, err error) {
err = s.Call(APIRequest{
Method: "GET",
Path: SetPath("/rest/files/%d", s.file_id),
Params: SetParams(params),
Output: &result,
})
return
}
func (s kw_rest_file) Delete(params ...interface{}) (err error) {
err = s.Call(APIRequest{
Method: "DELETE",
Path: SetPath("/rest/files/%d", s.file_id),
Params: SetParams(params),
})
return
}
func (s kw_rest_file) PermDelete() (err error) {
err = s.Call(APIRequest{
Method: "DELETE",
Path: SetPath("/rest/files/%d/actions/permanent", s.file_id),
})
return
}
/*
// Drills down specific folder and returns all results.
func (s KWSession) CrawlFolder(folder_id int, params...interface{}) (results []KiteObject, err error) {
if len(params) == 0 {
}
}*/
// Get list of all top folders
func (s KWSession) TopFolders(params ...interface{}) (folders []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/folders/top",
Output: &folders,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
/*
// File Uploader
func (S kw_rest_folder) Upload(src SourceFile, overwrite_newer, version bool, count_cb func(num int)) (error) {
if S.folder_id == 0 {
Notice("%s: Uploading files to base path is not permitted, ignoring file.", src.Name())
return nil
}
var UploadRecord struct {
Name string
ID int
ClientModified time.Time
Size int64
Created time.Time
}
if count_cb == nil {
count_cb = func(num int) {
return
}
}
transfer_file := func(src SourceFile, uid int) (err error) {
defer src.Close()
x := TransferCounter(src, count_cb)
_, err = S.KWSession.Upload(src.Name(), uid, x)
return
}
target := fmt.Sprintf("%d:%s", S.folder_id, src.String())
uploads := S.db.Table("uploads")
if uploads.Get(target, &UploadRecord) {
if err := transfer_file(src, UploadRecord.ID); err != nil {
Debug("Error attempting to resume file: %s", err.Error())
} else {
uploads.Unset(target)
return nil
}
}
kw_file_info, err := S.Folder(S.folder_id).Find(src.Name())
if err != nil && err != ErrNotFound {
return err
}
var uid int
if kw_file_info.ID > 0 {
modified, _ := ReadKWTime(kw_file_info.ClientModified)
// File on kiteworks is newer than local file.
if modified.UTC().Unix() > src.ModTime().UTC().Unix() {
if overwrite_newer {
uid, err = S.File(kw_file_info.ID).NewVersion(src)
if err != nil {
return err
}
} else {
uploads.Unset(target)
return nil
}
// Local file is newer than kiteworks file.
} else if modified.UTC().Unix() < src.ModTime().UTC().Unix() {
uid, err = S.File(kw_file_info.ID).NewVersion(src, PostJSON{"disableAutoVersion": !version})
if err != nil {
return err
}
} else {
return nil
}
} else {
uid, err = S.Folder(S.folder_id).NewUpload(src)
if err != nil {
return err
}
}
UploadRecord.Name = src.Name()
UploadRecord.ID = uid
UploadRecord.ClientModified = src.ModTime()
UploadRecord.Size = src.Size()
uploads.Set(target, &UploadRecord)
for i := uint(0); i <= S.Retries; i++ {
err = transfer_file(src, uid)
if err == nil || IsAPIError(err) {
if err != nil && IsAPIError(err, "ERR_INTERNAL_SERVER_ERROR") {
Debug("[%d]%s: %s (%d/%d)", uid, UploadRecord.Name, err.Error(), i+1, S.Retries+1)
S.BackoffTimer(i)
continue
}
uploads.Unset(target)
return err
}
break
}
return nil
}
*/
// Returns all items with listed folder_id.
func (s kw_rest_folder) Contents(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/children", s.folder_id),
Output: &children,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
// Returns all items with listed folder_id.
func (s kw_rest_folder) Folders(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/folders", s.folder_id),
Output: &children,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
// Recover un-deletes the folder. Optional params are forwarded to the request.
func (s kw_rest_folder) Recover(params ...interface{}) (err error) {
	return s.Call(APIRequest{
		Method: "PATCH",
		Path:   SetPath("/rest/folders/%d/actions/recover", s.folder_id),
		// Bug fix: params were accepted but silently discarded.
		Params: SetParams(params),
	})
}
// Recover un-deletes the file. Optional params are forwarded to the request.
func (s kw_rest_file) Recover(params ...interface{}) (err error) {
	return s.Call(APIRequest{
		Method: "PATCH",
		Path:   SetPath("/rest/files/%d/actions/recover", s.file_id),
		// Bug fix: params were accepted but silently discarded.
		Params: SetParams(params),
	})
}
func (s kw_rest_folder) Files(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/files", s.folder_id),
Output: &children,
Params: SetParams(params),
}, -1, 1000)
return
}
// Info returns the full record for the receiver's folder, including the
// current user's role, file lifetime and path. A zero folder_id is a no-op.
func (s kw_rest_folder) Info(params ...interface{}) (output KiteObject, err error) {
	// len(params) == 0 matches the convention used by Contents/Folders/Files;
	// a variadic slice is nil when empty, so behavior is unchanged.
	if len(params) == 0 {
		params = SetParams(Query{"deleted": false})
	}
	if s.folder_id == 0 {
		return
	}
	err = s.Call(APIRequest{
		Method: "GET",
		Path:   SetPath("/rest/folders/%d", s.folder_id),
		Params: SetParams(params, Query{"mode": "full", "with": "(currentUserRole, fileLifetime, path)"}),
		Output: &output,
	})
	return
}
func (s kw_rest_folder) NewFolder(name string, params ...interface{}) (output KiteObject, err error) {
err = s.Call(APIRequest{
Method: "POST",
Path: SetPath("/rest/folders/%d/folders", s.folder_id),
Params: SetParams(PostJSON{"name": name}, Query{"returnEntity": true}, params),
Output: &output,
})
return
}
// Kiteworks User Data
// Mirrors the /rest/users JSON payload. The *DirID fields reference the
// user's special directories (base, My Folder, sync).
type KiteUser struct {
	ID          int    `json:"id"`
	Active      bool   `json:"active"`
	Deactivated bool   `json:"deactivated"`
	Suspended   bool   `json:"suspended"`
	BaseDirID   int    `json:"basedirId"`
	Deleted     bool   `json:"deleted"`
	Email       string `json:"email"`
	MyDirID     int    `json:"mydirId"`
	Name        string `json:"name"`
	SyncDirID   int    `json:"syncdirId"`
	UserTypeID  int    `json:"userTypeId"` // profile/user type id, as used by NewUser
	Verified    bool   `json:"verified"`
	Internal    bool   `json:"internal"`
}
// Retrieve my user info.
func (s KWSession) MyUser() (user KiteUser, err error) {
err = s.Call(APIRequest{
Method: "GET",
Path: "/rest/users/me",
Output: &user,
})
return
}
// Get total count of users.
func (s kw_rest_admin) UserCount(emails []string, params ...interface{}) (users int, err error) |
// Get Users
type GetUsers struct {
offset int
filter Query
emails []string
params []interface{}
session *kw_rest_admin
completed bool
}
// Admin EAPI endpoint to pull all users matching parameters.
func (s kw_rest_admin) Users(emails []string, params ...interface{}) *GetUsers {
var T GetUsers
T.filter = make(Query)
T.offset = 0
T.emails = emails
// First extract the query from request.
params = SetParams(params)
var query Query
tmp := params[0:0]
for _, v := range params {
switch e := v.(type) {
case Query:
query = e
default:
tmp = append(tmp, v)
}
}
params = tmp
// Next take remainder of query and reattach it to outgoing request.
var forward_query Query
forward_query = make(Query)
for key, val := range query {
switch strings.ToLower(key) {
case "suspended":
fallthrough
case "active":
fallthrough
case "deleted":
fallthrough
case "verified":
T.filter[key] = val
default:
forward_query[key] = val
}
}
T.params = SetParams(params, forward_query)
T.session = &s
return &T
}
// Return a set of users to process.
func (T *GetUsers) Next() (users []KiteUser, err error) {
if T.emails != nil && T.emails[0] != NONE {
if !T.completed {
T.completed = true
return T.findEmails()
} else {
return []KiteUser{}, nil
}
}
for {
var raw_users []KiteUser
err = T.session.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: T.params,
Output: &raw_users}, T.offset, 1000)
if err != nil {
return nil, err
}
if len(raw_users) == 0 {
return
}
T.offset = T.offset + len(raw_users)
users, err = T.filterUsers(raw_users)
if err != nil {
return nil, err
}
if len(users) == 0 {
continue
} else {
break
}
}
return
}
func (T *GetUsers) findEmails() (users []KiteUser, err error) {
for _, u := range T.emails {
var raw_users []KiteUser
err = T.session.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(Query{"email": u}, T.params),
Output: &raw_users}, -1, 1000)
if err != nil {
Err("%s: %s", u, err.Error())
continue
}
filtered_users, err := T.filterUsers(raw_users)
if err != nil {
return nil, err
}
if len(filtered_users) > 0 {
users = append(users, filtered_users[0:]...)
continue
}
Err("%s: User not found, or did not meet specified criteria", u)
}
return
}
// Filter out users matching filter specified in GetUsers call.
func (T *GetUsers) filterUsers(input []KiteUser) (users []KiteUser, err error) {
// Match bool variables
matchBool := func(input KiteUser, key string, value bool) bool {
var bool_var bool
switch key {
case "suspended":
bool_var = input.Suspended
case "active":
bool_var = input.Active
case "deleted":
bool_var = input.Deleted
case "verified":
bool_var = input.Verified
}
if bool_var == value {
return true
}
return false
}
for key, val := range T.filter {
key = strings.ToLower(key)
switch key {
case "suspended":
fallthrough
case "active":
fallthrough
case "deleted":
fallthrough
case "verified":
if v, ok := val.(bool); ok {
tmp := input[0:0]
for _, user := range input {
if matchBool(user, key, v) {
tmp = append(tmp, user)
}
}
input = tmp
} else {
return nil, fmt.Errorf("Invalid filter for \"%s\", expected bool got %v(%v) instead.", key, reflect.TypeOf(val), val)
}
}
}
return input, nil
}
// Downloads a file to a specific path
func (s KWSession) FileDownload(file *KiteObject) (ReadSeekCloser, error) {
if file == nil {
return nil, fmt.Errorf("nil file object provided.")
}
req, err := s.NewRequest("GET", SetPath("/rest/files/%d/content", file.ID))
if err != nil {
return nil, err
}
req.Header.Set("X-Accellion-Version", fmt.Sprintf("%d", 7))
err = s.SetToken(s.Username, req)
return transferMonitor(file.Name, file.Size, rightToLeft, s.Download(req)), err
}
type kw_profile struct {
profile_id int
*KWSession
}
func (K KWSession) Profile(profile_id int) kw_profile {
return kw_profile{
profile_id,
&K,
}
}
func (K kw_profile) Get() (profile KWProfile, err error) {
err = K.Call(APIRequest{
Version: 13,
Path: SetPath("/rest/profiles/%d", K.profile_id),
Output: &profile,
})
return
}
// KWProfile describes a user profile's feature limits.
type KWProfile struct {
	Features struct {
		AllowSFTP    bool  `json:"allowSftp"`
		MaxStorage   int64 `json:"maxStorage"`
		SendExternal bool  `json:"sendExternal"` // fixed: tag was missing its closing quote, so json ignored it
		FolderCreate int   `json:"folderCreate"`
	} `json:"features"`
}
| {
var user []struct{}
if emails != nil && emails[0] != NONE {
for _, u := range emails {
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(Query{"email": u}, params),
Output: &user}, -1, 1000)
if err != nil {
return
}
users = len(user) + users
}
return
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(params),
Output: &user}, -1, 1000)
return len(user), err
} | identifier_body |
kiteworks.go | package core
import (
"errors"
"fmt"
"reflect"
"strings"
"time"
)
var ErrNotFound = errors.New("Requested item not found.")
type FileInfo interface {
Name() string
Size() int64
ModTime() time.Time
}
// KiteMember ties a folder member's user record to their permission role.
type KiteMember struct {
	ID     int            `json:"objectId"`
	RoleID int            `json:"roleId"` // fixed: tag was missing its closing quote, so json ignored it
	User   KiteUser       `json:"user"`
	Role   KitePermission `json:"role"`
}
// KiteFile/Folder/Attachment
// Generic kiteworks object record; Type distinguishes files from folders
// ("d" marks a directory, per Find). Expire is interface{} because the API
// returns mixed types; use Expiry() to read it as a time.Time.
type KiteObject struct {
	Type                  string         `json:"type"`
	Status                string         `json:"status"`
	ID                    int            `json:"id"`
	Name                  string         `json:"name"`
	Description           string         `json:"description"`
	Created               string         `json:"created"`
	Modified              string         `json:"modified"`
	ClientCreated         string         `json:"clientCreated"`
	ClientModified        string         `json:"clientModified"`
	Deleted               bool           `json:"deleted"`
	PermDeleted           bool           `json:"permDeleted"`
	Expire                interface{}    `json:"expire"`
	Path                  string         `json:"path"`
	ParentID              int            `json:"parentId"`
	UserID                int            `json:"userId"`
	Permalink             string         `json:"permalink"`
	Secure                bool           `json:"secure"`
	LockUser              int            `json:"lockUser"`
	Fingerprint           string         `json:"fingerprint"`
	ProfileID             int            `json:"typeID"` // fixed: closing quote was missing; key "typeID" kept as-is — confirm against API schema
	Size                  int64          `json:"size"`
	Mime                  string         `json:"mime"`
	AVStatus              string         `json:"avStatus"`
	DLPStatus             string         `json:"dlpStatus"`
	AdminQuarantineStatus string         `json:"adminQuarantineStatus"` // fixed: tag was missing its closing quote
	Quarantined           bool           `json:"quarantined"`
	DLPLocked             bool           `json:"dlpLocked"`
	FileLifetime          int            `json:"fileLifetime"`
	MailID                int            `json:"mail_id"`
	Links                 []KiteLinks    `json:"links"`
	CurrentUserRole       KitePermission `json:"currentUserRole"`
}
// Returns the Expiration in time.Time.
func (K *KiteObject) Expiry() time.Time {
var exp_time time.Time
if exp_string, ok := K.Expire.(string); ok {
exp_time, _ = ReadKWTime(exp_string)
}
return exp_time
}
// Kiteworks Links Data
type KiteLinks struct {
Relationship string `json:"rel"`
Entity string `json:"entity"`
ID int `json:"id"`
URL string `json:"href"`
}
// Permission information
type KitePermission struct {
ID int `json:"id"`
Name string `json:"name"`
Rank int `json:"rank"`
Modifiable bool `json:"modifiable"`
Disabled bool `json:"disabled"`
}
type kw_rest_folder struct {
folder_id int
*KWSession
}
func (s KWSession) Folder(folder_id int) kw_rest_folder {
return kw_rest_folder{
folder_id,
&s,
}
}
// Members lists the folder's members, expanding each entry's user and role.
// NOTE(review): `return result, s.DataCall(...)` relies on DataCall filling
// result before the return operands are copied; Go's evaluation order for a
// variable operand mixed with a call is subtle — confirm callers get data.
func (s kw_rest_folder) Members(params ...interface{}) (result []KiteMember, err error) {
	return result, s.DataCall(APIRequest{
		Method: "GET",
		Path:   SetPath("/rest/folders/%d/members", s.folder_id),
		Output: &result,
		Params: SetParams(params, Query{"with": "(user,role)"}),
	}, -1, 1000)
}
func (s kw_rest_folder) AddUsersToFolder(emails []string, role_id int, notify bool, notify_files_added bool, params ...interface{}) (err error) {
params = SetParams(PostJSON{"notify": notify, "notifyFileAdded": notify_files_added, "emails": emails, "roleId": role_id}, Query{"updateIfExists": true, "partialSuccess": true}, params)
err = s.Call(APIRequest{
Method: "POST",
Path: SetPath("/rest/folders/%d/members", s.folder_id),
Params: params,
})
return
}
func (s kw_rest_folder) ResolvePath(path string) (result KiteObject, err error) {
folder_path := SplitPath(path)
current_id := s.folder_id
var current KiteObject
for _, f := range folder_path {
current, err = s.Folder(current_id).Find(f)
if err != nil {
if err == ErrNotFound {
current, err = s.Folder(current_id).NewFolder(f)
if err != nil {
return
}
current_id = current.ID
}
}
current_id = current.ID
}
result = current
return
}
// Find item in folder, using folder path, if folder_id > 0, start search there.
func (s kw_rest_folder) Find(path string, params ...interface{}) (result KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
folder_path := SplitPath(path)
var current []KiteObject
if s.folder_id <= 0 {
current, err = s.TopFolders(params)
} else {
current, err = s.Folder(s.folder_id).Contents(params)
}
if err != nil {
return
}
var found bool
folder_len := len(folder_path) - 1
for i, f := range folder_path {
found = false
for _, c := range current {
if strings.ToLower(f) == strings.ToLower(c.Name) {
result = c
if i < folder_len && c.Type == "d" | else if i == folder_len {
return
}
}
}
if found == false {
return result, ErrNotFound
}
}
return result, ErrNotFound
}
type kw_rest_admin struct {
*KWSession
}
func (s KWSession) Admin() kw_rest_admin {
return kw_rest_admin{&s}
}
// Creates a new user on the system.
func (s kw_rest_admin) NewUser(user_email string, type_id int, verified, notify bool) (user *KiteUser, err error) {
err = s.Call(APIRequest{
Method: "POST",
Path: "/rest/users",
Params: SetParams(PostJSON{"email": user_email, "userTypeId": type_id, "verified": verified, "sendNotification": notify}, Query{"returnEntity": true}),
Output: &user,
})
return user, err
}
func (s kw_rest_admin) FindProfileUsers(profile_id int, params ...interface{}) (emails []string, err error) {
var users []struct {
Email string `json:"email"`
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/admin/profiles/%d/users", profile_id),
Params: SetParams(params),
Output: &users,
}, -1, 1000)
if err != nil {
return nil, err
}
for _, u := range users {
emails = append(emails, u.Email)
}
return
}
type kw_rest_file struct {
file_id int
*KWSession
}
func (s KWSession) File(file_id int) kw_rest_file {
return kw_rest_file{file_id, &s}
}
func (s kw_rest_file) Info(params ...interface{}) (result KiteObject, err error) {
err = s.Call(APIRequest{
Method: "GET",
Path: SetPath("/rest/files/%d", s.file_id),
Params: SetParams(params),
Output: &result,
})
return
}
func (s kw_rest_file) Delete(params ...interface{}) (err error) {
err = s.Call(APIRequest{
Method: "DELETE",
Path: SetPath("/rest/files/%d", s.file_id),
Params: SetParams(params),
})
return
}
func (s kw_rest_file) PermDelete() (err error) {
err = s.Call(APIRequest{
Method: "DELETE",
Path: SetPath("/rest/files/%d/actions/permanent", s.file_id),
})
return
}
/*
// Drills down specific folder and returns all results.
func (s KWSession) CrawlFolder(folder_id int, params...interface{}) (results []KiteObject, err error) {
if len(params) == 0 {
}
}*/
// Get list of all top folders
func (s KWSession) TopFolders(params ...interface{}) (folders []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: "/rest/folders/top",
Output: &folders,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
/*
// File Uploader
func (S kw_rest_folder) Upload(src SourceFile, overwrite_newer, version bool, count_cb func(num int)) (error) {
if S.folder_id == 0 {
Notice("%s: Uploading files to base path is not permitted, ignoring file.", src.Name())
return nil
}
var UploadRecord struct {
Name string
ID int
ClientModified time.Time
Size int64
Created time.Time
}
if count_cb == nil {
count_cb = func(num int) {
return
}
}
transfer_file := func(src SourceFile, uid int) (err error) {
defer src.Close()
x := TransferCounter(src, count_cb)
_, err = S.KWSession.Upload(src.Name(), uid, x)
return
}
target := fmt.Sprintf("%d:%s", S.folder_id, src.String())
uploads := S.db.Table("uploads")
if uploads.Get(target, &UploadRecord) {
if err := transfer_file(src, UploadRecord.ID); err != nil {
Debug("Error attempting to resume file: %s", err.Error())
} else {
uploads.Unset(target)
return nil
}
}
kw_file_info, err := S.Folder(S.folder_id).Find(src.Name())
if err != nil && err != ErrNotFound {
return err
}
var uid int
if kw_file_info.ID > 0 {
modified, _ := ReadKWTime(kw_file_info.ClientModified)
// File on kiteworks is newer than local file.
if modified.UTC().Unix() > src.ModTime().UTC().Unix() {
if overwrite_newer {
uid, err = S.File(kw_file_info.ID).NewVersion(src)
if err != nil {
return err
}
} else {
uploads.Unset(target)
return nil
}
// Local file is newer than kiteworks file.
} else if modified.UTC().Unix() < src.ModTime().UTC().Unix() {
uid, err = S.File(kw_file_info.ID).NewVersion(src, PostJSON{"disableAutoVersion": !version})
if err != nil {
return err
}
} else {
return nil
}
} else {
uid, err = S.Folder(S.folder_id).NewUpload(src)
if err != nil {
return err
}
}
UploadRecord.Name = src.Name()
UploadRecord.ID = uid
UploadRecord.ClientModified = src.ModTime()
UploadRecord.Size = src.Size()
uploads.Set(target, &UploadRecord)
for i := uint(0); i <= S.Retries; i++ {
err = transfer_file(src, uid)
if err == nil || IsAPIError(err) {
if err != nil && IsAPIError(err, "ERR_INTERNAL_SERVER_ERROR") {
Debug("[%d]%s: %s (%d/%d)", uid, UploadRecord.Name, err.Error(), i+1, S.Retries+1)
S.BackoffTimer(i)
continue
}
uploads.Unset(target)
return err
}
break
}
return nil
}
*/
// Returns all items with listed folder_id.
func (s kw_rest_folder) Contents(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/children", s.folder_id),
Output: &children,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
// Returns all items with listed folder_id.
func (s kw_rest_folder) Folders(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/folders", s.folder_id),
Output: &children,
Params: SetParams(params, Query{"with": "(path,currentUserRole)"}),
}, -1, 1000)
return
}
func (s kw_rest_folder) Recover(params ...interface{}) (err error) {
return s.Call(APIRequest{
Method: "PATCH",
Path: SetPath("/rest/folders/%d/actions/recover", s.folder_id),
})
}
func (s kw_rest_file) Recover(params ...interface{}) (err error) {
return s.Call(APIRequest{
Method: "PATCH",
Path: SetPath("/rest/files/%d/actions/recover", s.file_id),
})
}
func (s kw_rest_folder) Files(params ...interface{}) (children []KiteObject, err error) {
if len(params) == 0 {
params = SetParams(Query{"deleted": false})
}
err = s.DataCall(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d/files", s.folder_id),
Output: &children,
Params: SetParams(params),
}, -1, 1000)
return
}
func (s kw_rest_folder) Info(params ...interface{}) (output KiteObject, err error) {
if params == nil {
params = SetParams(Query{"deleted": false})
}
if s.folder_id == 0 {
return
}
err = s.Call(APIRequest{
Method: "GET",
Path: SetPath("/rest/folders/%d", s.folder_id),
Params: SetParams(params, Query{"mode": "full", "with": "(currentUserRole, fileLifetime, path)"}),
Output: &output,
})
return
}
func (s kw_rest_folder) NewFolder(name string, params ...interface{}) (output KiteObject, err error) {
err = s.Call(APIRequest{
Method: "POST",
Path: SetPath("/rest/folders/%d/folders", s.folder_id),
Params: SetParams(PostJSON{"name": name}, Query{"returnEntity": true}, params),
Output: &output,
})
return
}
// Kiteworks User Data
type KiteUser struct {
ID int `json:"id"`
Active bool `json:"active"`
Deactivated bool `json:"deactivated"`
Suspended bool `json:"suspended"`
BaseDirID int `json:"basedirId"`
Deleted bool `json:"deleted"`
Email string `json:"email"`
MyDirID int `json:"mydirId"`
Name string `json:"name"`
SyncDirID int `json:"syncdirId"`
UserTypeID int `json:"userTypeId"`
Verified bool `json:"verified"`
Internal bool `json:"internal"`
}
// Retrieve my user info.
func (s KWSession) MyUser() (user KiteUser, err error) {
err = s.Call(APIRequest{
Method: "GET",
Path: "/rest/users/me",
Output: &user,
})
return
}
// Get total count of users.
// When emails are supplied (and the first is not NONE), each address is
// counted through its own lookup; otherwise a single listing is counted.
func (s kw_rest_admin) UserCount(emails []string, params ...interface{}) (users int, err error) {
	// Decoded records are discarded; only the element count matters.
	var user []struct{}
	if emails != nil && emails[0] != NONE {
		for _, u := range emails {
			err = s.DataCall(APIRequest{
				Method: "GET",
				Path:   "/rest/admin/users",
				Params: SetParams(Query{"email": u}, params),
				Output: &user}, -1, 1000)
			if err != nil {
				return
			}
			// NOTE(review): user is not reset between iterations — confirm
			// DataCall replaces (rather than appends to) the output slice.
			users = len(user) + users
		}
		return
	}
	err = s.DataCall(APIRequest{
		Method: "GET",
		Path:   "/rest/admin/users",
		Params: SetParams(params),
		Output: &user}, -1, 1000)
	return len(user), err
}
// Get Users
type GetUsers struct {
offset int
filter Query
emails []string
params []interface{}
session *kw_rest_admin
completed bool
}
// Admin EAPI endpoint to pull all users matching parameters.
func (s kw_rest_admin) Users(emails []string, params ...interface{}) *GetUsers {
var T GetUsers
T.filter = make(Query)
T.offset = 0
T.emails = emails
// First extract the query from request.
params = SetParams(params)
var query Query
tmp := params[0:0]
for _, v := range params {
switch e := v.(type) {
case Query:
query = e
default:
tmp = append(tmp, v)
}
}
params = tmp
// Next take remainder of query and reattach it to outgoing request.
var forward_query Query
forward_query = make(Query)
for key, val := range query {
switch strings.ToLower(key) {
case "suspended":
fallthrough
case "active":
fallthrough
case "deleted":
fallthrough
case "verified":
T.filter[key] = val
default:
forward_query[key] = val
}
}
T.params = SetParams(params, forward_query)
T.session = &s
return &T
}
// Return a set of users to process.
// Each call returns the next batch (requests page up to 1000 raw records)
// that survives the bool filters; an empty result signals end of listing.
func (T *GetUsers) Next() (users []KiteUser, err error) {
	// Email mode: resolve the listed addresses exactly once, then report done.
	if T.emails != nil && T.emails[0] != NONE {
		if !T.completed {
			T.completed = true
			return T.findEmails()
		} else {
			return []KiteUser{}, nil
		}
	}
	for {
		var raw_users []KiteUser
		err = T.session.DataCall(APIRequest{
			Method: "GET",
			Path:   "/rest/admin/users",
			Params: T.params,
			Output: &raw_users}, T.offset, 1000)
		if err != nil {
			return nil, err
		}
		// Server exhausted: return the (nil) users slice as-is.
		if len(raw_users) == 0 {
			return
		}
		// Advance the paging cursor by what was actually received.
		T.offset = T.offset + len(raw_users)
		users, err = T.filterUsers(raw_users)
		if err != nil {
			return nil, err
		}
		// Keep paging until at least one user passes the local filters.
		if len(users) == 0 {
			continue
		} else {
			break
		}
	}
	return
}
func (T *GetUsers) findEmails() (users []KiteUser, err error) {
for _, u := range T.emails {
var raw_users []KiteUser
err = T.session.DataCall(APIRequest{
Method: "GET",
Path: "/rest/admin/users",
Params: SetParams(Query{"email": u}, T.params),
Output: &raw_users}, -1, 1000)
if err != nil {
Err("%s: %s", u, err.Error())
continue
}
filtered_users, err := T.filterUsers(raw_users)
if err != nil {
return nil, err
}
if len(filtered_users) > 0 {
users = append(users, filtered_users[0:]...)
continue
}
Err("%s: User not found, or did not meet specified criteria", u)
}
return
}
// Filter out users matching filter specified in GetUsers call.
// Only the bool keys suspended/active/deleted/verified are honored here;
// other keys were forwarded to the server by Users() instead. Returns an
// error when a filter value is not a bool.
func (T *GetUsers) filterUsers(input []KiteUser) (users []KiteUser, err error) {
	// Match bool variables
	matchBool := func(input KiteUser, key string, value bool) bool {
		var bool_var bool
		switch key {
		case "suspended":
			bool_var = input.Suspended
		case "active":
			bool_var = input.Active
		case "deleted":
			bool_var = input.Deleted
		case "verified":
			bool_var = input.Verified
		}
		if bool_var == value {
			return true
		}
		return false
	}
	for key, val := range T.filter {
		key = strings.ToLower(key)
		switch key {
		case "suspended":
			fallthrough
		case "active":
			fallthrough
		case "deleted":
			fallthrough
		case "verified":
			if v, ok := val.(bool); ok {
				// In-place filter: tmp shares input's backing array and only
				// overwrites slots that the range has already read.
				tmp := input[0:0]
				for _, user := range input {
					if matchBool(user, key, v) {
						tmp = append(tmp, user)
					}
				}
				input = tmp
			} else {
				return nil, fmt.Errorf("Invalid filter for \"%s\", expected bool got %v(%v) instead.", key, reflect.TypeOf(val), val)
			}
		}
	}
	return input, nil
}
// Downloads a file to a specific path
// Returns a streaming reader for the file's content wrapped in a transfer
// monitor. The returned err comes from request/token setup, not the stream.
func (s KWSession) FileDownload(file *KiteObject) (ReadSeekCloser, error) {
	if file == nil {
		return nil, fmt.Errorf("nil file object provided.")
	}
	req, err := s.NewRequest("GET", SetPath("/rest/files/%d/content", file.ID))
	if err != nil {
		return nil, err
	}
	// Pin the API version header required by the content endpoint.
	req.Header.Set("X-Accellion-Version", fmt.Sprintf("%d", 7))
	err = s.SetToken(s.Username, req)
	// NOTE(review): a live monitor is returned even when SetToken failed —
	// confirm callers check err before consuming the reader.
	return transferMonitor(file.Name, file.Size, rightToLeft, s.Download(req)), err
}
type kw_profile struct {
profile_id int
*KWSession
}
func (K KWSession) Profile(profile_id int) kw_profile {
return kw_profile{
profile_id,
&K,
}
}
func (K kw_profile) Get() (profile KWProfile, err error) {
err = K.Call(APIRequest{
Version: 13,
Path: SetPath("/rest/profiles/%d", K.profile_id),
Output: &profile,
})
return
}
// KWProfile describes a user profile's feature limits.
type KWProfile struct {
	Features struct {
		AllowSFTP    bool  `json:"allowSftp"`
		MaxStorage   int64 `json:"maxStorage"`
		SendExternal bool  `json:"sendExternal"` // fixed: tag was missing its closing quote, so json ignored it
		FolderCreate int   `json:"folderCreate"`
	} `json:"features"`
}
| {
current, err = s.Folder(c.ID).Contents(params)
if err != nil {
return
}
found = true
break
} | conditional_block |
run_template.go | // Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package run_template
import (
"fmt"
"github.com/gardener/test-infra/pkg/shootflavors"
"os"
"time"
"github.com/gardener/test-infra/pkg/logger"
"github.com/gardener/test-infra/pkg/util"
"github.com/gardener/gardener/pkg/client/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/gardener/test-infra/pkg/testrunner"
"github.com/gardener/test-infra/pkg/testrunner/result"
testrunnerTemplate "github.com/gardener/test-infra/pkg/testrunner/template"
"github.com/spf13/cobra"
)
// Configuration structs populated from command-line flags before Run executes.
var testrunnerConfig = testrunner.Config{}
var collectConfig = result.Config{}
var shootParameters = testrunnerTemplate.Parameters{}

// Individual flag values bound by this command.
var (
	testrunNamePrefix    string // prefix for generated testrun names
	shootPrefix          string // prefix passed to GetShootFlavors
	tmKubeconfigPath     string // kubeconfig of the testmachinery cluster
	filterPatchVersions  bool
	failOnError          bool
	testrunFlakeAttempts int   // retries for flaky testruns (testrunnerConfig.FlakeAttempts)
	timeout              int64 // overall timeout in seconds (converted to time.Duration)
	interval             int64
)
// AddCommand adds run-template to a command.
// The registered subcommand also answers to its aliases ("run", "run-tmpl").
func AddCommand(cmd *cobra.Command) {
	cmd.AddCommand(runCmd)
}
var runCmd = &cobra.Command{
Use: "run-template",
Short: "Run the testrunner with a helm template containing testruns",
Aliases: []string{
"run", // for backward compatibility
"run-tmpl",
},
Run: func(cmd *cobra.Command, args []string) {
var (
err error
stopCh = make(chan struct{})
shootFlavors []*shootflavors.ExtendedFlavorInstance
)
defer close(stopCh)
dryRun, _ := cmd.Flags().GetBool("dry-run")
logger.Log.Info("Start testmachinery testrunner")
testrunnerConfig.Watch, err = testrunner.StartWatchController(logger.Log, tmKubeconfigPath, stopCh)
if err != nil {
logger.Log.Error(err, "unable to start testrun watch controller")
os.Exit(1)
}
gardenK8sClient, err := kubernetes.NewClientFromFile("", shootParameters.GardenKubeconfigPath, kubernetes.WithClientOptions(client.Options{
Scheme: kubernetes.GardenScheme,
}))
if err != nil {
logger.Log.Error(err, "unable to build garden kubernetes client", "file", tmKubeconfigPath)
os.Exit(1)
}
testrunnerConfig.Timeout = time.Duration(timeout) * time.Second
testrunnerConfig.FlakeAttempts = testrunFlakeAttempts
collectConfig.ComponentDescriptorPath = shootParameters.ComponentDescriptorPath
if shootParameters.FlavorConfigPath != "" {
flavors, err := GetShootFlavors(shootParameters.FlavorConfigPath, gardenK8sClient, shootPrefix, filterPatchVersions)
if err != nil {
logger.Log.Error(err, "unable to parse shoot flavors from test configuration")
os.Exit(1)
}
shootFlavors = flavors.GetShoots()
}
runs, err := testrunnerTemplate.RenderTestruns(logger.Log.WithName("Render"), &shootParameters, shootFlavors)
if err != nil {
logger.Log.Error(err, "unable to render testrun")
os.Exit(1)
}
if dryRun {
fmt.Print(util.PrettyPrintStruct(runs))
os.Exit(0)
}
collector, err := result.New(logger.Log.WithName("collector"), collectConfig, tmKubeconfigPath)
if err != nil {
logger.Log.Error(err, "unable to initialize collector")
os.Exit(1)
}
if err := collector.PreRunShoots(shootParameters.GardenKubeconfigPath, runs); err != nil {
logger.Log.Error(err, "unable to setup collector")
os.Exit(1)
}
if err := testrunner.ExecuteTestruns(logger.Log.WithName("Execute"), &testrunnerConfig, runs, testrunNamePrefix, collector.RunExecCh); err != nil {
logger.Log.Error(err, "unable to run testruns")
os.Exit(1)
}
failed, err := collector.Collect(logger.Log.WithName("Collect"), testrunnerConfig.Watch.Client(), testrunnerConfig.Namespace, runs)
if err != nil {
logger.Log.Error(err, "unable to collect test output")
os.Exit(1)
}
result.GenerateNotificationConfigForAlerting(runs.GetTestruns(), collectConfig.ConcourseOnErrorDir)
logger.Log.Info("Testrunner finished")
// Fail when one testrun is failed and we should fail on failed testruns.
// Otherwise only fail when the testrun execution is erroneous.
if runs.HasErrors() {
os.Exit(1)
}
if failOnError && failed {
os.Exit(1)
}
},
}
func init() {
// configuration flags
runCmd.Flags().StringVar(&tmKubeconfigPath, "tm-kubeconfig-path", "", "Path to the testmachinery cluster kubeconfig")
if err := runCmd.MarkFlagRequired("tm-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "tm-kubeconfig-path")
}
if err := runCmd.MarkFlagFilename("tm-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "tm-kubeconfig-path")
}
runCmd.Flags().StringVar(&testrunNamePrefix, "testrun-prefix", "default-", "Testrun name prefix which is used to generate a unique testrun name.")
if err := runCmd.MarkFlagRequired("testrun-prefix"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "testrun-prefix")
}
runCmd.Flags().StringVarP(&testrunnerConfig.Namespace, "namespace", "n", "default", "Namesapce where the testrun should be deployed.")
runCmd.Flags().Int64Var(&timeout, "timeout", 3600, "Timout in seconds of the testrunner to wait for the complete testrun to finish.")
runCmd.Flags().Int64Var(&interval, "interval", 20, "Poll interval in seconds of the testrunner to poll for the testrun status.")
runCmd.Flags().IntVar(&testrunFlakeAttempts, "testrun-flake-attempts", 0, "Max number of testruns until testrun is successful")
runCmd.Flags().BoolVar(&failOnError, "fail-on-error", true, "Testrunners exits with 1 if one testruns failed.")
runCmd.Flags().BoolVar(&collectConfig.EnableTelemetry, "enable-telemetry", false, "Enables the measurements of metrics during execution")
runCmd.Flags().BoolVar(&testrunnerConfig.Serial, "serial", false, "executes all testruns of a bucket only after the previous bucket has finished")
runCmd.Flags().IntVar(&testrunnerConfig.BackoffBucket, "backoff-bucket", 0, "Number of parallel created testruns per backoff period")
runCmd.Flags().DurationVar(&testrunnerConfig.BackoffPeriod, "backoff-period", 0, "Time to wait between the creation of testrun buckets")
runCmd.Flags().StringVar(&collectConfig.ConcourseOnErrorDir, "concourse-onError-dir", os.Getenv("ON_ERROR_DIR"), "On error dir which is used by Concourse.")
// status asset upload
runCmd.Flags().BoolVar(&collectConfig.UploadStatusAsset, "upload-status-asset", false, "Upload testrun status as a github release asset.")
runCmd.Flags().StringVar(&collectConfig.GithubUser, "github-user", os.Getenv("GITHUB_USER"), "GitHUb username.")
runCmd.Flags().StringVar(&collectConfig.GithubPassword, "github-password", os.Getenv("GITHUB_PASSWORD"), "Github password.")
runCmd.Flags().StringArrayVar(&collectConfig.AssetComponents, "asset-component", []string{}, "The github components to which the testrun status shall be attached as an asset.")
runCmd.Flags().StringVar(&collectConfig.AssetPrefix, "asset-prefix", "", "Prefix of the asset name.")
// parameter flags
runCmd.Flags().StringVar(&shootParameters.DefaultTestrunChartPath, "testruns-chart-path", "", "Path to the default testruns chart.")
if err := runCmd.MarkFlagFilename("testruns-chart-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "testruns-chart-path")
}
runCmd.Flags().StringVar(&shootParameters.FlavoredTestrunChartPath, "flavored-testruns-chart-path", "", "Path to the testruns chart to test shoots.")
if err := runCmd.MarkFlagFilename("flavored-testruns-chart-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "flavored-testruns-chart-path")
}
runCmd.Flags().StringVar(&shootParameters.GardenKubeconfigPath, "gardener-kubeconfig-path", "", "Path to the gardener kubeconfig.")
if err := runCmd.MarkFlagRequired("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "gardener-kubeconfig-path")
}
if err := runCmd.MarkFlagFilename("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "gardener-kubeconfig-path")
}
if err := runCmd.MarkFlagRequired("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "gardener-kubeconfig-path")
}
runCmd.Flags().StringVar(&shootParameters.FlavorConfigPath, "flavor-config", "", "Path to shoot test configuration.")
if err := runCmd.MarkFlagFilename("flavor-config"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "flavor-config")
}
runCmd.Flags().StringVar(&shootPrefix, "shoot-name", "", "Shoot name which is used to run tests.")
if err := runCmd.MarkFlagRequired("shoot-name"); err != nil |
runCmd.Flags().BoolVar(&filterPatchVersions, "filter-patch-versions", false, "Filters patch versions so that only the latest patch versions per minor versions is used.")
runCmd.Flags().StringVar(&shootParameters.ComponentDescriptorPath, "component-descriptor-path", "", "Path to the component descriptor (BOM) of the current landscape.")
runCmd.Flags().StringVar(&shootParameters.Landscape, "landscape", "", "Current gardener landscape.")
runCmd.Flags().StringArrayVar(&shootParameters.SetValues, "set", make([]string, 0), "setValues additional helm values")
runCmd.Flags().StringArrayVarP(&shootParameters.FileValues, "values", "f", make([]string, 0), "yaml value files to override template values")
// DEPRECATED FLAGS
// is now handled by the testmachinery
runCmd.Flags().StringVar(&collectConfig.OutputDir, "output-dir-path", "./testout", "The filepath where the summary should be written to.")
runCmd.Flags().String("es-config-name", "sap_internal", "DEPRECATED: The elasticsearch secret-server config name.")
runCmd.Flags().String("es-endpoint", "", "endpoint of the elasticsearch instance")
runCmd.Flags().String("es-username", "", "username to authenticate against a elasticsearch instance")
runCmd.Flags().String("es-password", "", "password to authenticate against a elasticsearch instance")
runCmd.Flags().String("s3-endpoint", os.Getenv("S3_ENDPOINT"), "S3 endpoint of the testmachinery cluster.")
runCmd.Flags().Bool("s3-ssl", false, "S3 has SSL enabled.")
runCmd.Flags().MarkDeprecated("output-dir-path", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-config-name", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-endpoint", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-username", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-password", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("s3-endpoint", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("s3-ssl", "DEPRECATED: will not we used anymore")
}
| {
logger.Log.Error(err, "mark flag required", "flag", "shoot-name")
} | conditional_block |
run_template.go | // Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package run_template
import (
"fmt"
"github.com/gardener/test-infra/pkg/shootflavors"
"os"
"time"
"github.com/gardener/test-infra/pkg/logger"
"github.com/gardener/test-infra/pkg/util"
"github.com/gardener/gardener/pkg/client/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/gardener/test-infra/pkg/testrunner"
"github.com/gardener/test-infra/pkg/testrunner/result"
testrunnerTemplate "github.com/gardener/test-infra/pkg/testrunner/template"
"github.com/spf13/cobra"
)
var testrunnerConfig = testrunner.Config{}
var collectConfig = result.Config{}
var shootParameters = testrunnerTemplate.Parameters{}
var (
testrunNamePrefix string
shootPrefix string
tmKubeconfigPath string
filterPatchVersions bool
failOnError bool
testrunFlakeAttempts int
timeout int64
interval int64
)
// AddCommand adds run-template to a command.
func AddCommand(cmd *cobra.Command) {
cmd.AddCommand(runCmd)
}
var runCmd = &cobra.Command{
Use: "run-template",
Short: "Run the testrunner with a helm template containing testruns",
Aliases: []string{
"run", // for backward compatibility
"run-tmpl",
},
Run: func(cmd *cobra.Command, args []string) {
var (
err error
stopCh = make(chan struct{})
shootFlavors []*shootflavors.ExtendedFlavorInstance
)
defer close(stopCh)
dryRun, _ := cmd.Flags().GetBool("dry-run")
logger.Log.Info("Start testmachinery testrunner")
testrunnerConfig.Watch, err = testrunner.StartWatchController(logger.Log, tmKubeconfigPath, stopCh)
if err != nil {
logger.Log.Error(err, "unable to start testrun watch controller")
os.Exit(1)
}
gardenK8sClient, err := kubernetes.NewClientFromFile("", shootParameters.GardenKubeconfigPath, kubernetes.WithClientOptions(client.Options{
Scheme: kubernetes.GardenScheme,
}))
if err != nil {
logger.Log.Error(err, "unable to build garden kubernetes client", "file", tmKubeconfigPath)
os.Exit(1)
}
testrunnerConfig.Timeout = time.Duration(timeout) * time.Second
testrunnerConfig.FlakeAttempts = testrunFlakeAttempts
collectConfig.ComponentDescriptorPath = shootParameters.ComponentDescriptorPath
if shootParameters.FlavorConfigPath != "" {
flavors, err := GetShootFlavors(shootParameters.FlavorConfigPath, gardenK8sClient, shootPrefix, filterPatchVersions)
if err != nil {
logger.Log.Error(err, "unable to parse shoot flavors from test configuration")
os.Exit(1)
}
shootFlavors = flavors.GetShoots()
}
runs, err := testrunnerTemplate.RenderTestruns(logger.Log.WithName("Render"), &shootParameters, shootFlavors)
if err != nil {
logger.Log.Error(err, "unable to render testrun")
os.Exit(1)
}
if dryRun {
fmt.Print(util.PrettyPrintStruct(runs))
os.Exit(0)
}
collector, err := result.New(logger.Log.WithName("collector"), collectConfig, tmKubeconfigPath)
if err != nil {
logger.Log.Error(err, "unable to initialize collector")
os.Exit(1)
}
if err := collector.PreRunShoots(shootParameters.GardenKubeconfigPath, runs); err != nil {
logger.Log.Error(err, "unable to setup collector")
os.Exit(1)
}
if err := testrunner.ExecuteTestruns(logger.Log.WithName("Execute"), &testrunnerConfig, runs, testrunNamePrefix, collector.RunExecCh); err != nil {
logger.Log.Error(err, "unable to run testruns")
os.Exit(1)
}
failed, err := collector.Collect(logger.Log.WithName("Collect"), testrunnerConfig.Watch.Client(), testrunnerConfig.Namespace, runs)
if err != nil {
logger.Log.Error(err, "unable to collect test output")
os.Exit(1)
}
result.GenerateNotificationConfigForAlerting(runs.GetTestruns(), collectConfig.ConcourseOnErrorDir)
logger.Log.Info("Testrunner finished")
// Fail when one testrun is failed and we should fail on failed testruns.
// Otherwise only fail when the testrun execution is erroneous.
if runs.HasErrors() {
os.Exit(1)
}
if failOnError && failed {
os.Exit(1)
}
},
}
func init() {
// configuration flags
runCmd.Flags().StringVar(&tmKubeconfigPath, "tm-kubeconfig-path", "", "Path to the testmachinery cluster kubeconfig")
if err := runCmd.MarkFlagRequired("tm-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "tm-kubeconfig-path")
}
if err := runCmd.MarkFlagFilename("tm-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "tm-kubeconfig-path")
}
runCmd.Flags().StringVar(&testrunNamePrefix, "testrun-prefix", "default-", "Testrun name prefix which is used to generate a unique testrun name.")
if err := runCmd.MarkFlagRequired("testrun-prefix"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "testrun-prefix")
}
runCmd.Flags().StringVarP(&testrunnerConfig.Namespace, "namespace", "n", "default", "Namesapce where the testrun should be deployed.")
runCmd.Flags().Int64Var(&timeout, "timeout", 3600, "Timout in seconds of the testrunner to wait for the complete testrun to finish.")
runCmd.Flags().Int64Var(&interval, "interval", 20, "Poll interval in seconds of the testrunner to poll for the testrun status.")
runCmd.Flags().IntVar(&testrunFlakeAttempts, "testrun-flake-attempts", 0, "Max number of testruns until testrun is successful")
runCmd.Flags().BoolVar(&failOnError, "fail-on-error", true, "Testrunners exits with 1 if one testruns failed.")
runCmd.Flags().BoolVar(&collectConfig.EnableTelemetry, "enable-telemetry", false, "Enables the measurements of metrics during execution")
runCmd.Flags().BoolVar(&testrunnerConfig.Serial, "serial", false, "executes all testruns of a bucket only after the previous bucket has finished")
runCmd.Flags().IntVar(&testrunnerConfig.BackoffBucket, "backoff-bucket", 0, "Number of parallel created testruns per backoff period")
runCmd.Flags().DurationVar(&testrunnerConfig.BackoffPeriod, "backoff-period", 0, "Time to wait between the creation of testrun buckets")
runCmd.Flags().StringVar(&collectConfig.ConcourseOnErrorDir, "concourse-onError-dir", os.Getenv("ON_ERROR_DIR"), "On error dir which is used by Concourse.")
// status asset upload
runCmd.Flags().BoolVar(&collectConfig.UploadStatusAsset, "upload-status-asset", false, "Upload testrun status as a github release asset.")
runCmd.Flags().StringVar(&collectConfig.GithubUser, "github-user", os.Getenv("GITHUB_USER"), "GitHUb username.")
runCmd.Flags().StringVar(&collectConfig.GithubPassword, "github-password", os.Getenv("GITHUB_PASSWORD"), "Github password.")
runCmd.Flags().StringArrayVar(&collectConfig.AssetComponents, "asset-component", []string{}, "The github components to which the testrun status shall be attached as an asset.")
runCmd.Flags().StringVar(&collectConfig.AssetPrefix, "asset-prefix", "", "Prefix of the asset name.")
// parameter flags
runCmd.Flags().StringVar(&shootParameters.DefaultTestrunChartPath, "testruns-chart-path", "", "Path to the default testruns chart.")
if err := runCmd.MarkFlagFilename("testruns-chart-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "testruns-chart-path")
}
runCmd.Flags().StringVar(&shootParameters.FlavoredTestrunChartPath, "flavored-testruns-chart-path", "", "Path to the testruns chart to test shoots.")
if err := runCmd.MarkFlagFilename("flavored-testruns-chart-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "flavored-testruns-chart-path")
}
runCmd.Flags().StringVar(&shootParameters.GardenKubeconfigPath, "gardener-kubeconfig-path", "", "Path to the gardener kubeconfig.")
if err := runCmd.MarkFlagRequired("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "gardener-kubeconfig-path")
}
if err := runCmd.MarkFlagFilename("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "gardener-kubeconfig-path")
}
if err := runCmd.MarkFlagRequired("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "gardener-kubeconfig-path")
}
runCmd.Flags().StringVar(&shootParameters.FlavorConfigPath, "flavor-config", "", "Path to shoot test configuration.")
if err := runCmd.MarkFlagFilename("flavor-config"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "flavor-config")
}
runCmd.Flags().StringVar(&shootPrefix, "shoot-name", "", "Shoot name which is used to run tests.")
if err := runCmd.MarkFlagRequired("shoot-name"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "shoot-name")
}
runCmd.Flags().BoolVar(&filterPatchVersions, "filter-patch-versions", false, "Filters patch versions so that only the latest patch versions per minor versions is used.")
runCmd.Flags().StringVar(&shootParameters.ComponentDescriptorPath, "component-descriptor-path", "", "Path to the component descriptor (BOM) of the current landscape.") |
// DEPRECATED FLAGS
// is now handled by the testmachinery
runCmd.Flags().StringVar(&collectConfig.OutputDir, "output-dir-path", "./testout", "The filepath where the summary should be written to.")
runCmd.Flags().String("es-config-name", "sap_internal", "DEPRECATED: The elasticsearch secret-server config name.")
runCmd.Flags().String("es-endpoint", "", "endpoint of the elasticsearch instance")
runCmd.Flags().String("es-username", "", "username to authenticate against a elasticsearch instance")
runCmd.Flags().String("es-password", "", "password to authenticate against a elasticsearch instance")
runCmd.Flags().String("s3-endpoint", os.Getenv("S3_ENDPOINT"), "S3 endpoint of the testmachinery cluster.")
runCmd.Flags().Bool("s3-ssl", false, "S3 has SSL enabled.")
runCmd.Flags().MarkDeprecated("output-dir-path", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-config-name", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-endpoint", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-username", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-password", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("s3-endpoint", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("s3-ssl", "DEPRECATED: will not we used anymore")
} | runCmd.Flags().StringVar(&shootParameters.Landscape, "landscape", "", "Current gardener landscape.")
runCmd.Flags().StringArrayVar(&shootParameters.SetValues, "set", make([]string, 0), "setValues additional helm values")
runCmd.Flags().StringArrayVarP(&shootParameters.FileValues, "values", "f", make([]string, 0), "yaml value files to override template values") | random_line_split |
run_template.go | // Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package run_template
import (
"fmt"
"github.com/gardener/test-infra/pkg/shootflavors"
"os"
"time"
"github.com/gardener/test-infra/pkg/logger"
"github.com/gardener/test-infra/pkg/util"
"github.com/gardener/gardener/pkg/client/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/gardener/test-infra/pkg/testrunner"
"github.com/gardener/test-infra/pkg/testrunner/result"
testrunnerTemplate "github.com/gardener/test-infra/pkg/testrunner/template"
"github.com/spf13/cobra"
)
var testrunnerConfig = testrunner.Config{}
var collectConfig = result.Config{}
var shootParameters = testrunnerTemplate.Parameters{}
var (
testrunNamePrefix string
shootPrefix string
tmKubeconfigPath string
filterPatchVersions bool
failOnError bool
testrunFlakeAttempts int
timeout int64
interval int64
)
// AddCommand adds run-template to a command.
func AddCommand(cmd *cobra.Command) {
cmd.AddCommand(runCmd)
}
var runCmd = &cobra.Command{
Use: "run-template",
Short: "Run the testrunner with a helm template containing testruns",
Aliases: []string{
"run", // for backward compatibility
"run-tmpl",
},
Run: func(cmd *cobra.Command, args []string) {
var (
err error
stopCh = make(chan struct{})
shootFlavors []*shootflavors.ExtendedFlavorInstance
)
defer close(stopCh)
dryRun, _ := cmd.Flags().GetBool("dry-run")
logger.Log.Info("Start testmachinery testrunner")
testrunnerConfig.Watch, err = testrunner.StartWatchController(logger.Log, tmKubeconfigPath, stopCh)
if err != nil {
logger.Log.Error(err, "unable to start testrun watch controller")
os.Exit(1)
}
gardenK8sClient, err := kubernetes.NewClientFromFile("", shootParameters.GardenKubeconfigPath, kubernetes.WithClientOptions(client.Options{
Scheme: kubernetes.GardenScheme,
}))
if err != nil {
logger.Log.Error(err, "unable to build garden kubernetes client", "file", tmKubeconfigPath)
os.Exit(1)
}
testrunnerConfig.Timeout = time.Duration(timeout) * time.Second
testrunnerConfig.FlakeAttempts = testrunFlakeAttempts
collectConfig.ComponentDescriptorPath = shootParameters.ComponentDescriptorPath
if shootParameters.FlavorConfigPath != "" {
flavors, err := GetShootFlavors(shootParameters.FlavorConfigPath, gardenK8sClient, shootPrefix, filterPatchVersions)
if err != nil {
logger.Log.Error(err, "unable to parse shoot flavors from test configuration")
os.Exit(1)
}
shootFlavors = flavors.GetShoots()
}
runs, err := testrunnerTemplate.RenderTestruns(logger.Log.WithName("Render"), &shootParameters, shootFlavors)
if err != nil {
logger.Log.Error(err, "unable to render testrun")
os.Exit(1)
}
if dryRun {
fmt.Print(util.PrettyPrintStruct(runs))
os.Exit(0)
}
collector, err := result.New(logger.Log.WithName("collector"), collectConfig, tmKubeconfigPath)
if err != nil {
logger.Log.Error(err, "unable to initialize collector")
os.Exit(1)
}
if err := collector.PreRunShoots(shootParameters.GardenKubeconfigPath, runs); err != nil {
logger.Log.Error(err, "unable to setup collector")
os.Exit(1)
}
if err := testrunner.ExecuteTestruns(logger.Log.WithName("Execute"), &testrunnerConfig, runs, testrunNamePrefix, collector.RunExecCh); err != nil {
logger.Log.Error(err, "unable to run testruns")
os.Exit(1)
}
failed, err := collector.Collect(logger.Log.WithName("Collect"), testrunnerConfig.Watch.Client(), testrunnerConfig.Namespace, runs)
if err != nil {
logger.Log.Error(err, "unable to collect test output")
os.Exit(1)
}
result.GenerateNotificationConfigForAlerting(runs.GetTestruns(), collectConfig.ConcourseOnErrorDir)
logger.Log.Info("Testrunner finished")
// Fail when one testrun is failed and we should fail on failed testruns.
// Otherwise only fail when the testrun execution is erroneous.
if runs.HasErrors() {
os.Exit(1)
}
if failOnError && failed {
os.Exit(1)
}
},
}
func | () {
// configuration flags
runCmd.Flags().StringVar(&tmKubeconfigPath, "tm-kubeconfig-path", "", "Path to the testmachinery cluster kubeconfig")
if err := runCmd.MarkFlagRequired("tm-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "tm-kubeconfig-path")
}
if err := runCmd.MarkFlagFilename("tm-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "tm-kubeconfig-path")
}
runCmd.Flags().StringVar(&testrunNamePrefix, "testrun-prefix", "default-", "Testrun name prefix which is used to generate a unique testrun name.")
if err := runCmd.MarkFlagRequired("testrun-prefix"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "testrun-prefix")
}
runCmd.Flags().StringVarP(&testrunnerConfig.Namespace, "namespace", "n", "default", "Namesapce where the testrun should be deployed.")
runCmd.Flags().Int64Var(&timeout, "timeout", 3600, "Timout in seconds of the testrunner to wait for the complete testrun to finish.")
runCmd.Flags().Int64Var(&interval, "interval", 20, "Poll interval in seconds of the testrunner to poll for the testrun status.")
runCmd.Flags().IntVar(&testrunFlakeAttempts, "testrun-flake-attempts", 0, "Max number of testruns until testrun is successful")
runCmd.Flags().BoolVar(&failOnError, "fail-on-error", true, "Testrunners exits with 1 if one testruns failed.")
runCmd.Flags().BoolVar(&collectConfig.EnableTelemetry, "enable-telemetry", false, "Enables the measurements of metrics during execution")
runCmd.Flags().BoolVar(&testrunnerConfig.Serial, "serial", false, "executes all testruns of a bucket only after the previous bucket has finished")
runCmd.Flags().IntVar(&testrunnerConfig.BackoffBucket, "backoff-bucket", 0, "Number of parallel created testruns per backoff period")
runCmd.Flags().DurationVar(&testrunnerConfig.BackoffPeriod, "backoff-period", 0, "Time to wait between the creation of testrun buckets")
runCmd.Flags().StringVar(&collectConfig.ConcourseOnErrorDir, "concourse-onError-dir", os.Getenv("ON_ERROR_DIR"), "On error dir which is used by Concourse.")
// status asset upload
runCmd.Flags().BoolVar(&collectConfig.UploadStatusAsset, "upload-status-asset", false, "Upload testrun status as a github release asset.")
runCmd.Flags().StringVar(&collectConfig.GithubUser, "github-user", os.Getenv("GITHUB_USER"), "GitHUb username.")
runCmd.Flags().StringVar(&collectConfig.GithubPassword, "github-password", os.Getenv("GITHUB_PASSWORD"), "Github password.")
runCmd.Flags().StringArrayVar(&collectConfig.AssetComponents, "asset-component", []string{}, "The github components to which the testrun status shall be attached as an asset.")
runCmd.Flags().StringVar(&collectConfig.AssetPrefix, "asset-prefix", "", "Prefix of the asset name.")
// parameter flags
runCmd.Flags().StringVar(&shootParameters.DefaultTestrunChartPath, "testruns-chart-path", "", "Path to the default testruns chart.")
if err := runCmd.MarkFlagFilename("testruns-chart-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "testruns-chart-path")
}
runCmd.Flags().StringVar(&shootParameters.FlavoredTestrunChartPath, "flavored-testruns-chart-path", "", "Path to the testruns chart to test shoots.")
if err := runCmd.MarkFlagFilename("flavored-testruns-chart-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "flavored-testruns-chart-path")
}
runCmd.Flags().StringVar(&shootParameters.GardenKubeconfigPath, "gardener-kubeconfig-path", "", "Path to the gardener kubeconfig.")
if err := runCmd.MarkFlagRequired("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "gardener-kubeconfig-path")
}
if err := runCmd.MarkFlagFilename("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "gardener-kubeconfig-path")
}
if err := runCmd.MarkFlagRequired("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "gardener-kubeconfig-path")
}
runCmd.Flags().StringVar(&shootParameters.FlavorConfigPath, "flavor-config", "", "Path to shoot test configuration.")
if err := runCmd.MarkFlagFilename("flavor-config"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "flavor-config")
}
runCmd.Flags().StringVar(&shootPrefix, "shoot-name", "", "Shoot name which is used to run tests.")
if err := runCmd.MarkFlagRequired("shoot-name"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "shoot-name")
}
runCmd.Flags().BoolVar(&filterPatchVersions, "filter-patch-versions", false, "Filters patch versions so that only the latest patch versions per minor versions is used.")
runCmd.Flags().StringVar(&shootParameters.ComponentDescriptorPath, "component-descriptor-path", "", "Path to the component descriptor (BOM) of the current landscape.")
runCmd.Flags().StringVar(&shootParameters.Landscape, "landscape", "", "Current gardener landscape.")
runCmd.Flags().StringArrayVar(&shootParameters.SetValues, "set", make([]string, 0), "setValues additional helm values")
runCmd.Flags().StringArrayVarP(&shootParameters.FileValues, "values", "f", make([]string, 0), "yaml value files to override template values")
// DEPRECATED FLAGS
// is now handled by the testmachinery
runCmd.Flags().StringVar(&collectConfig.OutputDir, "output-dir-path", "./testout", "The filepath where the summary should be written to.")
runCmd.Flags().String("es-config-name", "sap_internal", "DEPRECATED: The elasticsearch secret-server config name.")
runCmd.Flags().String("es-endpoint", "", "endpoint of the elasticsearch instance")
runCmd.Flags().String("es-username", "", "username to authenticate against a elasticsearch instance")
runCmd.Flags().String("es-password", "", "password to authenticate against a elasticsearch instance")
runCmd.Flags().String("s3-endpoint", os.Getenv("S3_ENDPOINT"), "S3 endpoint of the testmachinery cluster.")
runCmd.Flags().Bool("s3-ssl", false, "S3 has SSL enabled.")
runCmd.Flags().MarkDeprecated("output-dir-path", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-config-name", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-endpoint", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-username", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-password", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("s3-endpoint", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("s3-ssl", "DEPRECATED: will not we used anymore")
}
| init | identifier_name |
run_template.go | // Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package run_template
import (
"fmt"
"github.com/gardener/test-infra/pkg/shootflavors"
"os"
"time"
"github.com/gardener/test-infra/pkg/logger"
"github.com/gardener/test-infra/pkg/util"
"github.com/gardener/gardener/pkg/client/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/gardener/test-infra/pkg/testrunner"
"github.com/gardener/test-infra/pkg/testrunner/result"
testrunnerTemplate "github.com/gardener/test-infra/pkg/testrunner/template"
"github.com/spf13/cobra"
)
// Shared configuration objects populated by the command line flags
// registered in init() and consumed by runCmd.
var testrunnerConfig = testrunner.Config{}
var collectConfig = result.Config{}
var shootParameters = testrunnerTemplate.Parameters{}

var (
	testrunNamePrefix    string // prefix used to generate a unique testrun name
	shootPrefix          string // shoot name used to run tests
	tmKubeconfigPath     string // path to the testmachinery cluster kubeconfig
	filterPatchVersions  bool   // keep only the latest patch version per minor version
	failOnError          bool   // exit non-zero when a testrun failed
	testrunFlakeAttempts int    // max number of testruns until a testrun is successful
	timeout              int64  // seconds to wait for the complete testrun to finish
	interval             int64  // poll interval in seconds for the testrun status
)
// AddCommand adds run-template to a command.
//
// cmd is the parent cobra command (typically the testrunner root command)
// under which the "run-template" subcommand is registered.
func AddCommand(cmd *cobra.Command) {
	cmd.AddCommand(runCmd)
}
// runCmd implements the "run-template" subcommand: it renders testrun
// templates (optionally expanded per shoot flavor), executes them against
// the testmachinery cluster and collects their results. On any setup or
// execution error the process exits with status 1.
var runCmd = &cobra.Command{
	Use:   "run-template",
	Short: "Run the testrunner with a helm template containing testruns",
	Aliases: []string{
		"run", // for backward compatibility
		"run-tmpl",
	},
	Run: func(cmd *cobra.Command, args []string) {
		var (
			err          error
			stopCh       = make(chan struct{})
			shootFlavors []*shootflavors.ExtendedFlavorInstance
		)
		defer close(stopCh)
		dryRun, _ := cmd.Flags().GetBool("dry-run")

		logger.Log.Info("Start testmachinery testrunner")

		// Watch controller observes testrun resources on the testmachinery cluster.
		testrunnerConfig.Watch, err = testrunner.StartWatchController(logger.Log, tmKubeconfigPath, stopCh)
		if err != nil {
			logger.Log.Error(err, "unable to start testrun watch controller")
			os.Exit(1)
		}

		gardenK8sClient, err := kubernetes.NewClientFromFile("", shootParameters.GardenKubeconfigPath, kubernetes.WithClientOptions(client.Options{
			Scheme: kubernetes.GardenScheme,
		}))
		if err != nil {
			logger.Log.Error(err, "unable to build garden kubernetes client", "file", tmKubeconfigPath)
			os.Exit(1)
		}

		testrunnerConfig.Timeout = time.Duration(timeout) * time.Second
		testrunnerConfig.FlakeAttempts = testrunFlakeAttempts
		collectConfig.ComponentDescriptorPath = shootParameters.ComponentDescriptorPath

		// When a flavor config is given, compute the concrete shoots to test.
		if shootParameters.FlavorConfigPath != "" {
			flavors, err := GetShootFlavors(shootParameters.FlavorConfigPath, gardenK8sClient, shootPrefix, filterPatchVersions)
			if err != nil {
				logger.Log.Error(err, "unable to parse shoot flavors from test configuration")
				os.Exit(1)
			}
			shootFlavors = flavors.GetShoots()
		}

		runs, err := testrunnerTemplate.RenderTestruns(logger.Log.WithName("Render"), &shootParameters, shootFlavors)
		if err != nil {
			logger.Log.Error(err, "unable to render testrun")
			os.Exit(1)
		}

		// In dry-run mode only print the rendered testruns and exit successfully.
		if dryRun {
			fmt.Print(util.PrettyPrintStruct(runs))
			os.Exit(0)
		}

		collector, err := result.New(logger.Log.WithName("collector"), collectConfig, tmKubeconfigPath)
		if err != nil {
			logger.Log.Error(err, "unable to initialize collector")
			os.Exit(1)
		}
		if err := collector.PreRunShoots(shootParameters.GardenKubeconfigPath, runs); err != nil {
			logger.Log.Error(err, "unable to setup collector")
			os.Exit(1)
		}

		if err := testrunner.ExecuteTestruns(logger.Log.WithName("Execute"), &testrunnerConfig, runs, testrunNamePrefix, collector.RunExecCh); err != nil {
			logger.Log.Error(err, "unable to run testruns")
			os.Exit(1)
		}

		failed, err := collector.Collect(logger.Log.WithName("Collect"), testrunnerConfig.Watch.Client(), testrunnerConfig.Namespace, runs)
		if err != nil {
			logger.Log.Error(err, "unable to collect test output")
			os.Exit(1)
		}

		result.GenerateNotificationConfigForAlerting(runs.GetTestruns(), collectConfig.ConcourseOnErrorDir)

		logger.Log.Info("Testrunner finished")
		// Fail when one testrun is failed and we should fail on failed testruns.
		// Otherwise only fail when the testrun execution is erroneous.
		if runs.HasErrors() {
			os.Exit(1)
		}
		if failOnError && failed {
			os.Exit(1)
		}
	},
}
func init() | {
// configuration flags
runCmd.Flags().StringVar(&tmKubeconfigPath, "tm-kubeconfig-path", "", "Path to the testmachinery cluster kubeconfig")
if err := runCmd.MarkFlagRequired("tm-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "tm-kubeconfig-path")
}
if err := runCmd.MarkFlagFilename("tm-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "tm-kubeconfig-path")
}
runCmd.Flags().StringVar(&testrunNamePrefix, "testrun-prefix", "default-", "Testrun name prefix which is used to generate a unique testrun name.")
if err := runCmd.MarkFlagRequired("testrun-prefix"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "testrun-prefix")
}
runCmd.Flags().StringVarP(&testrunnerConfig.Namespace, "namespace", "n", "default", "Namesapce where the testrun should be deployed.")
runCmd.Flags().Int64Var(&timeout, "timeout", 3600, "Timout in seconds of the testrunner to wait for the complete testrun to finish.")
runCmd.Flags().Int64Var(&interval, "interval", 20, "Poll interval in seconds of the testrunner to poll for the testrun status.")
runCmd.Flags().IntVar(&testrunFlakeAttempts, "testrun-flake-attempts", 0, "Max number of testruns until testrun is successful")
runCmd.Flags().BoolVar(&failOnError, "fail-on-error", true, "Testrunners exits with 1 if one testruns failed.")
runCmd.Flags().BoolVar(&collectConfig.EnableTelemetry, "enable-telemetry", false, "Enables the measurements of metrics during execution")
runCmd.Flags().BoolVar(&testrunnerConfig.Serial, "serial", false, "executes all testruns of a bucket only after the previous bucket has finished")
runCmd.Flags().IntVar(&testrunnerConfig.BackoffBucket, "backoff-bucket", 0, "Number of parallel created testruns per backoff period")
runCmd.Flags().DurationVar(&testrunnerConfig.BackoffPeriod, "backoff-period", 0, "Time to wait between the creation of testrun buckets")
runCmd.Flags().StringVar(&collectConfig.ConcourseOnErrorDir, "concourse-onError-dir", os.Getenv("ON_ERROR_DIR"), "On error dir which is used by Concourse.")
// status asset upload
runCmd.Flags().BoolVar(&collectConfig.UploadStatusAsset, "upload-status-asset", false, "Upload testrun status as a github release asset.")
runCmd.Flags().StringVar(&collectConfig.GithubUser, "github-user", os.Getenv("GITHUB_USER"), "GitHUb username.")
runCmd.Flags().StringVar(&collectConfig.GithubPassword, "github-password", os.Getenv("GITHUB_PASSWORD"), "Github password.")
runCmd.Flags().StringArrayVar(&collectConfig.AssetComponents, "asset-component", []string{}, "The github components to which the testrun status shall be attached as an asset.")
runCmd.Flags().StringVar(&collectConfig.AssetPrefix, "asset-prefix", "", "Prefix of the asset name.")
// parameter flags
runCmd.Flags().StringVar(&shootParameters.DefaultTestrunChartPath, "testruns-chart-path", "", "Path to the default testruns chart.")
if err := runCmd.MarkFlagFilename("testruns-chart-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "testruns-chart-path")
}
runCmd.Flags().StringVar(&shootParameters.FlavoredTestrunChartPath, "flavored-testruns-chart-path", "", "Path to the testruns chart to test shoots.")
if err := runCmd.MarkFlagFilename("flavored-testruns-chart-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "flavored-testruns-chart-path")
}
runCmd.Flags().StringVar(&shootParameters.GardenKubeconfigPath, "gardener-kubeconfig-path", "", "Path to the gardener kubeconfig.")
if err := runCmd.MarkFlagRequired("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "gardener-kubeconfig-path")
}
if err := runCmd.MarkFlagFilename("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "gardener-kubeconfig-path")
}
if err := runCmd.MarkFlagRequired("gardener-kubeconfig-path"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "gardener-kubeconfig-path")
}
runCmd.Flags().StringVar(&shootParameters.FlavorConfigPath, "flavor-config", "", "Path to shoot test configuration.")
if err := runCmd.MarkFlagFilename("flavor-config"); err != nil {
logger.Log.Error(err, "mark flag filename", "flag", "flavor-config")
}
runCmd.Flags().StringVar(&shootPrefix, "shoot-name", "", "Shoot name which is used to run tests.")
if err := runCmd.MarkFlagRequired("shoot-name"); err != nil {
logger.Log.Error(err, "mark flag required", "flag", "shoot-name")
}
runCmd.Flags().BoolVar(&filterPatchVersions, "filter-patch-versions", false, "Filters patch versions so that only the latest patch versions per minor versions is used.")
runCmd.Flags().StringVar(&shootParameters.ComponentDescriptorPath, "component-descriptor-path", "", "Path to the component descriptor (BOM) of the current landscape.")
runCmd.Flags().StringVar(&shootParameters.Landscape, "landscape", "", "Current gardener landscape.")
runCmd.Flags().StringArrayVar(&shootParameters.SetValues, "set", make([]string, 0), "setValues additional helm values")
runCmd.Flags().StringArrayVarP(&shootParameters.FileValues, "values", "f", make([]string, 0), "yaml value files to override template values")
// DEPRECATED FLAGS
// is now handled by the testmachinery
runCmd.Flags().StringVar(&collectConfig.OutputDir, "output-dir-path", "./testout", "The filepath where the summary should be written to.")
runCmd.Flags().String("es-config-name", "sap_internal", "DEPRECATED: The elasticsearch secret-server config name.")
runCmd.Flags().String("es-endpoint", "", "endpoint of the elasticsearch instance")
runCmd.Flags().String("es-username", "", "username to authenticate against a elasticsearch instance")
runCmd.Flags().String("es-password", "", "password to authenticate against a elasticsearch instance")
runCmd.Flags().String("s3-endpoint", os.Getenv("S3_ENDPOINT"), "S3 endpoint of the testmachinery cluster.")
runCmd.Flags().Bool("s3-ssl", false, "S3 has SSL enabled.")
runCmd.Flags().MarkDeprecated("output-dir-path", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-config-name", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-endpoint", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-username", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("es-password", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("s3-endpoint", "DEPRECATED: will not we used anymore")
runCmd.Flags().MarkDeprecated("s3-ssl", "DEPRECATED: will not we used anymore")
} | identifier_body | |
stat.go | package stat
import (
"fmt"
"strconv"
"sync"
"time"
"github.com/smartwalle/container/smap"
"math"
kdb "github.com/sv/kdbgo"
)
// Response is one order/execution report row decoded from the kdb+
// "response" table, with 32-bit numeric columns. Field names map to the
// table's column names via kdb.UnmarshalDict (see SelectTransaction).
type Response struct {
	Sym         string // user identifier; first level of mapResult (see handleData)
	Qid         string
	Accountname string // account identifier; second level of mapResult
	Time        time.Time
	Entrustno   int32 // entrust (order) number; used to match updates of the same order
	Stockcode   string // stock code; third level of mapResult
	Askprice    float64
	Askvol      int32
	Bidprice    float64 // traded price of this order state
	Bidvol      int32   // signed traded volume; presumably >0 buy, <0 sell -- see DoCalculateSTK
	Withdraw    int32
	Status      int32 // order status; values 2/5/4 trigger statistics (see DoCalculateSTK)
}
// ResponseInt64 mirrors Response with 64-bit numeric columns. Each row is
// decoded into both types, and these values are used as a fallback when the
// int32 decode yields zeros (see SelectTransaction / GetTransaction).
type ResponseInt64 struct {
	sync.Mutex // NOTE(review): this embedded mutex appears unused -- confirm it can be removed
	Sym         string
	Qid         string
	Accountname string
	Time        time.Time
	Entrustno   int64
	Stockcode   string
	Askprice    float64
	Askvol      int64
	Bidprice    float64
	Bidvol      int64
	Withdraw    int64
	Status      int64
}
// STK aggregates all statistics of one (user, account, stock) position:
// the current position state (SpaceStk), realized trading totals
// (ProfitStk) and the last known state of each order, used to compute
// incremental updates. The embedded mutex guards the statistics fields.
type STK struct {
	sync.Mutex
	SpaceStk   SpaceSTK
	ProfitStk  ProfitSTK
	orderArray []*Response // latest seen state per entrust number (see DoCalculateSTK/updateArray)
}
// SpaceSTK holds the position statistics of one stock.
type SpaceSTK struct {
	Sym          string
	Accountname  string
	Stockcode    string
	SpaceVol     int32   // position volume
	OnlineProfit float64 // floating (unrealized) profit, refreshed from market data
	AvgPrice     float64 // average entry price
}
// ProfitSTK holds the realized trading statistics of one stock.
type ProfitSTK struct {
	Sym         string
	Accountname string
	Stockcode   string
	PastProfit  float64 // realized profit of completed trades
	BidCount    int32   // number of trades
	BidNum      int32   // number of shares traded
	BidMoneySum float64 // traded amount (turnover)
	TotalTax    float64 // total fees
}
// Market is one level-10 market-data snapshot row decoded from the kdb+
// "Market" table. Integer price fields appear to be scaled by 10000 (see
// the NMatch/10000 conversion in GetMarket) -- confirm against the feed.
// NOTE(review): NAskPrice1-4 are int32 while NAskPrice5-10 and all
// NBidPrice levels are float32 -- verify this matches the table schema.
type Market struct {
	Sym        string // stock code; matched against SpaceSTK.Stockcode
	Time       time.Time
	SzWindCode string
	NActionDay int32
	NTime      int32
	NStatus    int32
	NPreClose  int32
	NOpen      int32
	NHigh      int32
	NLow       int32
	NMatch     int32 // last matched (market) price, used to refresh floating profit
	// Ask side, price levels 1-10.
	NAskPrice1  int32
	NAskPrice2  int32
	NAskPrice3  int32
	NAskPrice4  int32
	NAskPrice5  float32
	NAskPrice6  float32
	NAskPrice7  float32
	NAskPrice8  float32
	NAskPrice9  float32
	NAskPrice10 float32
	NAskVol1    int32
	NAskVol2    int32
	NAskVol3    int32
	NAskVol4    int32
	NAskVol5    int32
	NAskVol6    int32
	NAskVol7    int32
	NAskVol8    int32
	NAskVol9    int32
	NAskVol10   int32
	// Bid side, price levels 1-10.
	NBidPrice1  float32
	NBidPrice2  float32
	NBidPrice3  float32
	NBidPrice4  float32
	NBidPrice5  float32
	NBidPrice6  float32
	NBidPrice7  float32
	NBidPrice8  float32
	NBidPrice9  float32
	NBidPrice10 float32
	NBidVol1    int32
	NBidVol2    int32
	NBidVol3    int32
	NBidVol4    int32
	NBidVol5    int32
	NBidVol6    int32
	NBidVol7    int32
	NBidVol8    int32
	NBidVol9    int32
	NBidVol10   int32
	// Aggregate/session statistics.
	NNumTrades           int32
	IVolume              int32
	Turnover             int32
	NTotalBidVol         int32
	NTotalAskVol         int32
	NWeightedAvgBidPrice int32
	NWeightedAvgAskPrice int32
	NIOPV                int32
	NYieldToMaturity     int32
	NHighLimited         int32
	NLowLimited          int32
	NSyl1                int32
	NSyl2                int32
	NSD2                 int32
}
// Map<String,Map<String,Map<String,STK>>>: user -> account -> stock code
// container holding the per-position statistics.
var mapResult smap.Map = smap.New(true)

// Signalling channels. NOTE(review): only marketChan is referenced (by an
// unreachable send in GetMarket); orderChan and tChan are never used here.
var marketChan chan int = make(chan int)
var orderChan chan int = make(chan int)
var tChan chan int = make(chan int)
// DoMain wires up the statistics pipeline: it loads the historical orders
// once via SelectTransaction, then subscribes to the live market and order
// feeds in background goroutines while printMap blocks in the foreground,
// periodically dumping the aggregates.
func DoMain() {
	// NMatch carries the market (last-match) price.
	fmt.Println("==stat=main===")
	SelectTransaction()
	go GetMarket()
	go GetTransaction()
	printMap() // blocks forever, printing the statistics every 20 seconds
	// <-marketChan
	fmt.Println("==stat=over===")
}
// SelectTransaction loads all existing rows of the kdb+ "response" table
// once at startup and feeds each decoded order into handleData.
//
// Each row is decoded twice -- into Response (int32 numeric fields) and
// ResponseInt64 (int64 fields) -- because the remote schema may store the
// numeric columns in either width; the int64 values are used as a fallback
// when the int32 decode yields zeros.
func SelectTransaction() {
	fmt.Println("==SelectTransaction==")
	var con *kdb.KDBConn
	var err error
	con, err = kdb.DialKDB("139.224.9.75", 52800, "")
	// con, err = kdb.DialKDB("139.196.77.165", 5033, "")
	if err != nil {
		fmt.Printf("Failed to connect kdb: %s", err.Error())
		return
	}
	// BUG FIX: the connection was never closed, leaking the socket.
	defer con.Close()
	res, err := con.Call("0!select from response")
	if err != nil {
		// BUG FIX: fmt.Println does not interpret format verbs; use Printf.
		fmt.Printf("Subscribe: %s\n", err.Error())
		return
	}
	// ignore type print output
	// fmt.Println("res:", res)
	table := res.Data.(kdb.Table)
	// fmt.Println("table:", table)
	for i := 0; i < int(table.Data[0].Len()); i++ {
		kline_data := &Response{}
		kline_data2 := &ResponseInt64{}
		err := kdb.UnmarshalDict(table.Index(i), kline_data)
		if err != nil {
			fmt.Println("Failed to unmrshall dict ", err)
			continue
		}
		err2 := kdb.UnmarshalDict(table.Index(i), kline_data2)
		if err2 != nil {
			fmt.Println("Failed to unmrshall dict ", err2)
			continue
		}
		// Fall back to the int64 decode when the int32 columns were empty.
		if kline_data.Askvol == 0 && kline_data2.Askvol != 0 {
			kline_data.Askvol = int32(kline_data2.Askvol)
			kline_data.Withdraw = int32(kline_data2.Withdraw)
			kline_data.Status = int32(kline_data2.Status)
			kline_data.Bidvol = int32(kline_data2.Bidvol)
			kline_data.Entrustno = int32(kline_data2.Entrustno)
		}
		handleData(kline_data)
	}
	// fmt.Println("==SelectTransaction is over ==")
}
// handleData looks up -- or lazily creates -- the STK statistics object for
// the order's user -> account -> stock code path in mapResult, then applies
// the order to it via DoCalculateSTK. Each STK corresponds to one stock of
// one account of one user; when a new order arrives, the matching STK is
// used for the statistics.
func handleData(kline_data *Response) {
	fmt.Println("select:", kline_data)
	user := kline_data.Sym
	account := kline_data.Accountname
	// Prepare a fresh, fully-initialized STK in case this path is new.
	stat := &STK{}
	p := ProfitSTK{}
	s := SpaceSTK{}
	stat.ProfitStk = p
	stat.SpaceStk = s
	arr := []*Response{}
	stat.orderArray = arr
	stat.ProfitStk.Sym = kline_data.Sym
	stat.ProfitStk.Accountname = kline_data.Accountname
	stat.ProfitStk.Stockcode = kline_data.Stockcode
	stat.SpaceStk.Sym = kline_data.Sym
	stat.SpaceStk.Accountname = kline_data.Accountname
	stat.SpaceStk.Stockcode = kline_data.Stockcode
	var acc_map smap.Map
	if mapResult.Exists(user) {
		acc_map = (mapResult.Value(user)).(smap.Map)
		if acc_map.Exists(account) {
			stock_map := acc_map.Value(account).(smap.Map)
			if stock_map.Exists(kline_data.Stockcode) {
				// Existing position: discard the fresh STK and reuse the stored one.
				stat = (stock_map.Value(kline_data.Stockcode)).(*STK)
			} else {
				stock_map.Set(kline_data.Stockcode, stat)
			}
		} else {
			// Known user, new account: create the stock-level map.
			stock_map := smap.New(true)
			stock_map.Set(kline_data.Stockcode, stat)
			acc_map.Set(account, stock_map)
		}
	} else {
		// New user: create both nested map levels.
		stock_map := smap.New(true)
		stock_map.Set(kline_data.Stockcode, stat)
		acc_map = smap.New(true)
		acc_map.Set(account, stock_map)
		mapResult.Set(user, acc_map)
	}
	DoCalculateSTK(kline_data, stat)
}
// GetTransaction subscribes to the kdb+ "response" feed and processes the
// published order rows. Each loop iteration dials a fresh connection,
// reads a single message, feeds the decoded rows into handleData and
// closes the connection before reconnecting.
func GetTransaction() {
	for {
		var con *kdb.KDBConn
		var err error
		con, err = kdb.DialKDB("127.0.0.1", 3900, "")
		// con, err = kdb.DialKDB("139.196.77.165", 5033, "")
		if err != nil {
			fmt.Printf("Failed to connect kdb: %s", err.Error())
			return
		}
		err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "response"}, &kdb.K{-kdb.KS, kdb.NONE, ""})
		if err != nil {
			// BUG FIX: fmt.Println does not interpret format verbs; use Printf.
			fmt.Printf("Subscribe: %s\n", err.Error())
			con.Close()
			return
		}
		// ignore type print output
		res, _, err := con.ReadMessage()
		if err != nil {
			fmt.Println("Error processing message: ", err.Error())
			con.Close()
			return
		}
		data_list := res.Data.([]*kdb.K)
		fmt.Println("data_list:", data_list)
		table := data_list[2].Data.(kdb.Table)
		fmt.Println("table:", table)
		for i := 0; i < int(table.Data[0].Len()); i++ {
			kline_data := &Response{}
			kline_data2 := &ResponseInt64{}
			err := kdb.UnmarshalDict(table.Index(i), kline_data)
			if err != nil {
				fmt.Println("Failed to unmrshall dict ", err)
				continue
			}
			err2 := kdb.UnmarshalDict(table.Index(i), kline_data2)
			if err2 != nil {
				fmt.Println("Failed to unmrshall dict ", err2)
				continue
			}
			// fmt.Println("get:", kline_data)
			// fmt.Println("get2:", kline_data2)
			// Fall back to the int64 decode when the int32 columns were empty.
			if kline_data.Askvol == 0 && kline_data2.Askvol != 0 {
				kline_data.Askvol = int32(kline_data2.Askvol)
				kline_data.Withdraw = int32(kline_data2.Withdraw)
				kline_data.Status = int32(kline_data2.Status)
				kline_data.Bidvol = int32(kline_data2.Bidvol)
				// BUG FIX: Entrustno was not copied in this fallback although
				// SelectTransaction copies it; without it, order updates could
				// not be matched by entrust number in DoCalculateSTK.
				kline_data.Entrustno = int32(kline_data2.Entrustno)
			}
			handleData(kline_data)
		}
		// BUG FIX: the per-iteration connection was never closed, leaking a
		// socket on every pass of the infinite loop.
		con.Close()
	}
}
// GetMarket subscribes to the kdb+ "Market" feed and, for every tick,
// refreshes the floating profit of every position in mapResult whose stock
// code matches the tick's symbol. Each loop iteration dials a fresh
// connection, reads a single message, processes it and closes the
// connection before reconnecting.
func GetMarket() {
	for {
		fmt.Println("==GetMarket==", time.Now())
		var con *kdb.KDBConn
		var err error
		// con, err = kdb.DialKDB("10.0.0.71", 5010, "")
		con, err = kdb.DialKDB("139.196.77.165", 5031, "")
		if err != nil {
			fmt.Printf("Failed to connect kdb: %s", err.Error())
			return
		}
		err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "Market"}, &kdb.K{-kdb.KS, kdb.NONE, ""})
		if err != nil {
			// BUG FIX: fmt.Println does not interpret format verbs; use Printf.
			fmt.Printf("Subscribe: %s\n", err.Error())
			con.Close()
			return
		}
		res, _, err := con.ReadMessage()
		if err != nil {
			fmt.Println("Error processing message: ", err.Error())
			con.Close()
			return
		}
		data_list := res.Data.([]*kdb.K)
		table := data_list[2].Data.(kdb.Table)
		for i := 0; i < int(table.Data[0].Len()); i++ {
			kline_data := &Market{}
			err := kdb.UnmarshalDict(table.Index(i), kline_data)
			if err != nil {
				fmt.Println("Failed to unmrshall dict ", err)
				continue
			}
			fmt.Println("getMarket:", kline_data)
			for _, user_map := range mapResult.Values() {
				for _, account_map := range (user_map.(smap.Map)).Values() {
					for _, stock_map := range (account_map.(smap.Map)).Values() {
						stat := stock_map.(*STK)
						if stat.SpaceStk.Stockcode == kline_data.Sym {
							// BUG FIX: float64(NMatch/10000) performed integer
							// division first, truncating the fractional part of
							// the price; convert before dividing instead.
							DoRefresh(float64(kline_data.NMatch)/10000.0, stat)
						}
					}
				}
			}
		}
		// BUG FIX: the per-iteration connection was never closed, leaking a
		// socket on every pass of the infinite loop.
		con.Close()
	}
	// NOTE(review): unreachable -- the loop above never breaks (the returns
	// exit the function directly); kept for parity with the original code.
	marketChan <- 0
}
// DoCalculateSTK applies a new order state to the position statistics.
// Before computing each order, it checks whether the order is an increment
// of an already-seen entrust number (a later fill of the same order); if so,
// only the delta volume and its implied price are fed into the statistics.
// Afterwards the order is stored in the array so it always holds the latest
// state per entrust number.
// NOTE(review): Status 4 and Status 2/5 take different matching paths below;
// their exact exchange semantics (e.g. filled vs. partially filled) are not
// visible here -- confirm against the order-status documentation.
func DoCalculateSTK(newOrder *Response, stk *STK) {
	fmt.Println("---DoCalculateSTK newOrder:", newOrder)
	fmt.Println("---DoCalculateSTK stk:", stk)
	// // reset all statistics
	// stk.SpaceStk.AvgPrice = 0
	// stk.SpaceStk.OnlineProfit = 0
	// stk.SpaceStk.SpaceVol = 0
	// stk.ProfitStk.BidCount = 0
	// stk.ProfitStk.BidMoneySum = 0
	// stk.ProfitStk.BidNum = 0
	// stk.ProfitStk.PastProfit = 0
	// stk.ProfitStk.TotalTax = 0
	// // re-run the statistics over all previous orders
	// for _, order := range stk.orderArray {
	// if order.Bidvol != 0 && (order.Status == 2 || order.Status == 5 || order.Status == 4) {
	// CalculateSingle(order, stk)
	// }
	// }
	// Apply the new order to the statistics first, then update the order array.
	if newOrder.Status == 4 {
		CalculateSingle(newOrder, stk)
		var index int
		flag := false
		// Find a previous (non-status-4) state of the same entrust number.
		for i, order := range stk.orderArray {
			// fmt.Println("iiiii ", i)
			if newOrder.Entrustno == order.Entrustno && order.Status != 4 {
				index = i
				flag = true
				break
			}
		}
		if flag {
			updateArray(stk, index, newOrder)
		} else {
			stk.orderArray = append(stk.orderArray, newOrder)
		}
	} else if newOrder.Status == 2 || newOrder.Status == 5 {
		var index int
		flag := false
		for i, order := range stk.orderArray {
			if newOrder.Entrustno == order.Entrustno && order.Status != 4 {
				// Compute the increment: delta volume and the average price
				// implied by the difference of the two cumulative notionals.
				fmt.Println("---算增量----")
				x := &Response{}
				x.Bidvol = newOrder.Bidvol - order.Bidvol
				x.Bidprice = (newOrder.Bidprice*float64(newOrder.Bidvol) - order.Bidprice*float64(order.Bidvol)) / float64(newOrder.Bidvol-order.Bidvol)
				CalculateSingle(x, stk)
				index = i
				flag = true
				break
			}
		}
		if flag {
			updateArray(stk, index, newOrder)
		} else {
			// First time this entrust is seen: apply it in full.
			CalculateSingle(newOrder, stk)
			stk.orderArray = append(stk.orderArray, newOrder)
		}
	} else {
		// Other statuses are only recorded, not counted.
		stk.orderArray = append(stk.orderArray, newOrder)
	}
}
func CalculateSingle(newOrder *Response, stat *STK) {
fmt.Println("CalculateSingle--- vol:", newOrder.Bidvol, " price:", newOrder.Bidprice, " status:", newOrder.Status)
stat.Lock()
//StaticsResult为实时统计对象,每一个交易完成,刷下统计
if newOrder.Bidvol != 0 {
//每次买入刷新均价。然后每次实时价格减去均价不断出现浮动盈利
//算仓位 不管买还是卖,仓位都是相加减
var spaceTemp int32 = stat.SpaceStk.SpaceVol //临时对象记录下之前的仓位量
var avgTemp float64 = stat.SpaceStk.AvgPrice //临时对象记录下之前的均价
//卖的大于原有仓位
var flag bool = false
if AbsInt(newOrder.Bidvol) >= AbsInt(stat.SpaceStk.SpaceVol) {
flag = true
}
stat.SpaceStk.SpaceVol = stat.SpaceStk.SpaceVol + newOrder.Bidvol
fmt.Println("算仓位", stat.SpaceStk.SpaceVol)
if newOrder.Bidvol > 0 {
//算均价
if spaceTemp < 0 {
if flag {
stat.SpaceStk.AvgPrice = math.Abs(newOrder.Bidprice)
}
} else {
stat.SpaceStk.AvgPrice = math.Abs((stat.SpaceStk.AvgPrice*(float64(spaceTemp)) + newOrder.Bidprice*float64(newOrder.Bidvol)) / float64(stat.SpaceStk.SpaceVol))
}
} else {
if spaceTemp > 0 {
if flag {
stat.SpaceStk.AvgPrice = math.Abs(newOrder.Bidprice)
}
} else {
stat.SpaceStk.AvgPrice = math.Abs((stat.SpaceStk.AvgPrice*(float64(spaceTemp)) + newOrder.Bidprice*float64(newOrder.Bidvol)) / float64(stat.SpaceStk.SpaceVol))
}
}
fmt.Println(" | rder.Bidvol > 0 {
stattax = math.Abs(float64(newOrder.Bidprice*float64(newOrder.Bidvol))) * 3 / 10000
} else {
stattax = math.Abs(float64(newOrder.Bidprice*float64(newOrder.Bidvol))) * 13 / 10000
}
fmt.Println("之前费用", stat.ProfitStk.TotalTax, " 本次费用 ", stattax)
stat.ProfitStk.TotalTax = stat.ProfitStk.TotalTax + stattax
stat.ProfitStk.TotalTax = Float64Fmt(stat.ProfitStk.TotalTax, 2)
fmt.Println("算费用", stat.ProfitStk.TotalTax)
//算利润
var soldNum int32 = AbsInt(newOrder.Bidvol) //本笔卖出的量
if flag {
//卖的大于原有仓位
soldNum = AbsInt(spaceTemp)
} else {
soldNum = AbsInt(newOrder.Bidvol)
}
if newOrder.Bidvol > 0 {
if spaceTemp < 0 {
g := (avgTemp - newOrder.Bidprice) * float64(soldNum)
fmt.Println("ggggggggggggggain:", g, "soldNum", soldNum)
stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit + g - stattax
} else {
stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit - stattax
}
} else if newOrder.Bidvol < 0 {
if spaceTemp > 0 {
g := (newOrder.Bidprice - avgTemp) * float64(soldNum)
fmt.Println("ggggggggggggggain:", g, "soldNum", soldNum)
stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit + g - stattax
} else {
stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit - stattax
}
}
stat.ProfitStk.PastProfit = Float64Fmt(stat.ProfitStk.PastProfit, 2)
fmt.Println("算利润", stat.ProfitStk.PastProfit)
//算交易笔数
stat.ProfitStk.BidCount = stat.ProfitStk.BidCount + 1
//算交易股数
// fmt.Println("AbsInt(stat.ProfitStk.BidNum) ::", AbsInt(stat.ProfitStk.BidNum), " soldNum", soldNum)
stat.ProfitStk.BidNum = stat.ProfitStk.BidNum + AbsInt(newOrder.Bidvol)
fmt.Println("after stat.ProfitStk.BidNum ", stat.ProfitStk.BidNum)
//算交易额
stat.ProfitStk.BidMoneySum = stat.ProfitStk.BidMoneySum + math.Abs(float64(AbsInt(newOrder.Bidvol))*newOrder.Bidprice)
}
stat.Unlock()
}
// DoRefresh recomputes the floating (unrealized) profit of a position from
// the latest market price nMatch: position volume times the distance from
// the average entry price, minus an estimated exit fee of 13/10000 of the
// notional value.
func DoRefresh(nMatch float64, stat *STK) {
	stat.Lock()
	// Non-trade statistic: latest price minus average price, less fees,
	// yields the running floating profit.
	stat.SpaceStk.OnlineProfit = (float64(stat.SpaceStk.SpaceVol) * (nMatch - stat.SpaceStk.AvgPrice)) - (math.Abs(float64(nMatch*float64(stat.SpaceStk.SpaceVol))) * 13 / 10000)
	// NOTE(review): prec 64 differs from the prec 2 used for the other
	// statistics; with 'f' formatting this effectively keeps full
	// precision -- confirm whether prec 2 was intended here.
	stat.SpaceStk.OnlineProfit = Float64Fmt(stat.SpaceStk.OnlineProfit, 64)
	stat.Unlock()
}
// printMap dumps, every 20 seconds, one line per position plus each user's
// totals of floating and realized profit to stdout. It never returns.
// NOTE(review): the STK fields are read here without taking stat's lock,
// while CalculateSingle/DoRefresh write under the lock -- potential data
// race; confirm whether torn reads are acceptable for this report.
func printMap() {
	for {
		// fmt.Println("map:::", mapResult)
		fmt.Println("用户 账户 票 仓位 均价 浮盈 利润 笔数 股数 交易额 费用 ")
		for _, user_map := range mapResult.Values() {
			// Accumulate each user's total floating profit and total realized profit.
			var totalOnlineProfit float64
			var totalProfit float64
			for _, account_map := range (user_map.(smap.Map)).Values() {
				for _, stock_map := range (account_map.(smap.Map)).Values() {
					stat := stock_map.(*STK)
					totalOnlineProfit = totalOnlineProfit + stat.SpaceStk.OnlineProfit
					totalProfit = totalProfit + stat.ProfitStk.PastProfit
					fmt.Println(stat.SpaceStk.Sym, " ", stat.SpaceStk.Accountname, " ", stat.SpaceStk.Stockcode, " ", stat.SpaceStk.SpaceVol, " ", stat.SpaceStk.AvgPrice, " ", stat.SpaceStk.OnlineProfit, " ", stat.ProfitStk.PastProfit, " ", stat.ProfitStk.BidCount, " ", stat.ProfitStk.BidNum, " ", stat.ProfitStk.BidMoneySum, " ", stat.ProfitStk.TotalTax)
				}
			}
			fmt.Println("总浮动盈亏:", totalOnlineProfit, "总利润:", totalProfit)
		}
		time.Sleep(time.Second * 20)
	}
}
//
//func Abs(f float64) float64 {
// if f < 0 {
// return float64(-f)
// }
// return float64(f)
//}
func AbsInt(f int32) int32 {
if f < 0 {
return int32(-f)
}
return int32(f)
}
func Float64Fmt(f float64, prec int) float64 {
a := strconv.FormatFloat(f, 'f', prec, 64)
ff, err := strconv.ParseFloat(a, 64)
if err != nil {
fmt.Println(err)
}
return ff
}
// updateArray replaces the order at index (an older state of the same
// entrust number) with newOrder, moving the entry to the end of the slice
// so the array stays ordered by recency.
func updateArray(stk *STK, index int, newOrder *Response) {
	// Remove the previous state of the same entrust and append the new state.
	// BUG FIX: fmt.Println does not interpret format verbs; use Printf.
	fmt.Printf("stk: %v index %v neworder %v\n", stk, index, newOrder)
	if len(stk.orderArray) == 0 {
		stk.orderArray = append(stk.orderArray, newOrder)
	} else {
		if index == len(stk.orderArray)-1 {
			// The old entry is already last: replace it in place.
			stk.orderArray[index] = newOrder
		} else {
			stk.orderArray = append(stk.orderArray[:index], stk.orderArray[index+1:]...)
			stk.orderArray = append(stk.orderArray, newOrder)
		}
	}
}
| 算均价", stat.SpaceStk.AvgPrice)
//算费用 买是万三 卖是千一加上万三
var stattax float64
if newO | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.