python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
|---|---|---|
"""
QAPI types generator
Copyright IBM, Corp. 2011
Copyright (c) 2013-2018 Red Hat Inc.
Authors:
Anthony Liguori <aliguori@us.ibm.com>
Michael Roth <mdroth@linux.vnet.ibm.com>
Markus Armbruster <armbru@redhat.com>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from typing import List, Optional
from .common import c_enum_const, c_name, mcgen
from .gen import QAPISchemaModularCVisitor, ifcontext
from .schema import (
QAPISchema,
QAPISchemaEnumMember,
QAPISchemaFeature,
QAPISchemaIfCond,
QAPISchemaObjectType,
QAPISchemaObjectTypeMember,
QAPISchemaType,
QAPISchemaVariants,
)
from .source import QAPISourceInfo
# variants must be emitted before their container; track what has already
# been output
objects_seen = set()
def gen_enum_lookup(name: str,
                    members: List[QAPISchemaEnumMember],
                    prefix: Optional[str] = None) -> str:
    """
    Generate the C definition of the QEnumLookup table for an enum.

    :param name: QAPI name of the enum type.
    :param members: The enum's members, in declaration order.
    :param prefix: Optional override for the generated C constant prefix.
    """
    ret = mcgen('''
const QEnumLookup %(c_name)s_lookup = {
.array = (const char *const[]) {
''',
                c_name=c_name(name))
    for memb in members:
        # Wrap each entry in the member's #if conditional, if any.
        ret += memb.ifcond.gen_if()
        index = c_enum_const(name, memb.name, prefix)
        ret += mcgen('''
[%(index)s] = "%(name)s",
''',
                     index=index, name=memb.name)
        ret += memb.ifcond.gen_endif()
    # The _MAX constant doubles as the table size.
    ret += mcgen('''
},
.size = %(max_index)s
};
''',
                 max_index=c_enum_const(name, '_MAX', prefix))
    return ret
def gen_enum(name: str,
             members: List[QAPISchemaEnumMember],
             prefix: Optional[str] = None) -> str:
    """
    Generate the C typedef for an enum, plus its _str() helper macro
    and the extern declaration of its lookup table.

    :param name: QAPI name of the enum type.
    :param members: The enum's members, in declaration order.
    :param prefix: Optional override for the generated C constant prefix.
    """
    # append automatically generated _MAX value
    enum_members = members + [QAPISchemaEnumMember('_MAX', None)]
    ret = mcgen('''
typedef enum %(c_name)s {
''',
                c_name=c_name(name))
    for memb in enum_members:
        ret += memb.ifcond.gen_if()
        ret += mcgen('''
%(c_enum)s,
''',
                     c_enum=c_enum_const(name, memb.name, prefix))
        ret += memb.ifcond.gen_endif()
    ret += mcgen('''
} %(c_name)s;
''',
                c_name=c_name(name))
    # Helper macro mapping an enum value to its string, via the lookup
    # table generated by gen_enum_lookup().
    ret += mcgen('''
#define %(c_name)s_str(val) \\
qapi_enum_lookup(&%(c_name)s_lookup, (val))
extern const QEnumLookup %(c_name)s_lookup;
''',
                 c_name=c_name(name))
    return ret
def gen_fwd_object_or_array(name: str) -> str:
    """Generate the C forward declaration (typedef) for a struct or list."""
    return mcgen('''
typedef struct %(c_name)s %(c_name)s;
''',
                 c_name=c_name(name))
def gen_array(name: str, element_type: QAPISchemaType) -> str:
    """Generate the C singly-linked-list struct for a QAPI array type."""
    return mcgen('''
struct %(c_name)s {
%(c_name)s *next;
%(c_type)s value;
};
''',
                 c_name=c_name(name), c_type=element_type.c_type())
def gen_struct_members(members: List[QAPISchemaObjectTypeMember]) -> str:
    """
    Generate C struct member declarations for the given QAPI members.

    Optional members additionally get a preceding has_<name> flag.
    """
    ret = ''
    for memb in members:
        ret += memb.ifcond.gen_if()
        if memb.optional:
            ret += mcgen('''
bool has_%(c_name)s;
''',
                         c_name=c_name(memb.name))
        ret += mcgen('''
%(c_type)s %(c_name)s;
''',
                     c_type=memb.type.c_type(), c_name=c_name(memb.name))
        ret += memb.ifcond.gen_endif()
    return ret
def gen_object(name: str, ifcond: QAPISchemaIfCond,
               base: Optional[QAPISchemaObjectType],
               members: List[QAPISchemaObjectTypeMember],
               variants: Optional[QAPISchemaVariants]) -> str:
    """
    Generate the C struct definition for an object type.

    Emits each variant's object type first (recursively), because the
    union generated by gen_variants() embeds them unboxed.  Uses the
    module-level objects_seen set to emit each type at most once.
    """
    if name in objects_seen:
        return ''
    objects_seen.add(name)

    ret = ''
    for var in variants.variants if variants else ():
        obj = var.type
        if not isinstance(obj, QAPISchemaObjectType):
            continue
        # Recurse so the variant's struct is defined before it is
        # embedded in this type's union.
        ret += gen_object(obj.name, obj.ifcond, obj.base,
                          obj.local_members, obj.variants)

    ret += mcgen('''
''')
    ret += ifcond.gen_if()
    ret += mcgen('''
struct %(c_name)s {
''',
                 c_name=c_name(name))

    if base:
        if not base.is_implicit():
            ret += mcgen('''
/* Members inherited from %(c_name)s: */
''',
                         c_name=base.c_name())
        # Base members are flattened into this struct.
        ret += gen_struct_members(base.members)
        if not base.is_implicit():
            ret += mcgen('''
/* Own members: */
''')
    ret += gen_struct_members(members)

    if variants:
        ret += gen_variants(variants)

    # Make sure that all structs have at least one member; this avoids
    # potential issues with attempting to malloc space for zero-length
    # structs in C, and also incompatibility with C++ (where an empty
    # struct is size 1).
    if (not base or base.is_empty()) and not members and not variants:
        ret += mcgen('''
char qapi_dummy_for_empty_struct;
''')

    ret += mcgen('''
};
''')
    ret += ifcond.gen_endif()
    return ret
def gen_upcast(name: str, base: QAPISchemaObjectType) -> str:
    """Generate the inline C helper that upcasts *name* to its base type."""
    # C makes const-correctness ugly. We have to cast away const to let
    # this function work for both const and non-const obj.
    return mcgen('''
static inline %(base)s *qapi_%(c_name)s_base(const %(c_name)s *obj)
{
return (%(base)s *)obj;
}
''',
                 c_name=c_name(name), base=base.c_name())
def gen_variants(variants: QAPISchemaVariants) -> str:
    """
    Generate the anonymous C union member 'u' holding a union's variants.

    Variants of type q_empty contribute no member; each other variant is
    embedded unboxed (see gen_object(), which emits their structs first).
    """
    ret = mcgen('''
union { /* union tag is @%(c_name)s */
''',
                c_name=c_name(variants.tag_member.name))
    for var in variants.variants:
        if var.type.name == 'q_empty':
            continue
        ret += var.ifcond.gen_if()
        ret += mcgen('''
%(c_type)s %(c_name)s;
''',
                     c_type=var.type.c_unboxed_type(),
                     c_name=c_name(var.name))
        ret += var.ifcond.gen_endif()
    ret += mcgen('''
} u;
''')
    return ret
def gen_type_cleanup_decl(name: str) -> str:
    """Generate the header declaration of qapi_free_<name>() plus the
    g_autoptr() cleanup hook for the type."""
    ret = mcgen('''
void qapi_free_%(c_name)s(%(c_name)s *obj);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(%(c_name)s, qapi_free_%(c_name)s)
''',
                c_name=c_name(name))
    return ret
def gen_type_cleanup(name: str) -> str:
    """Generate the C definition of qapi_free_<name>(), which frees an
    instance via the dealloc visitor (NULL-safe)."""
    ret = mcgen('''
void qapi_free_%(c_name)s(%(c_name)s *obj)
{
Visitor *v;
if (!obj) {
return;
}
v = qapi_dealloc_visitor_new();
visit_type_%(c_name)s(v, NULL, &obj, NULL);
visit_free(v);
}
''',
                c_name=c_name(name))
    return ret
class QAPISchemaGenTypeVisitor(QAPISchemaModularCVisitor):
    """Schema visitor that emits the qapi-types-*.{c,h} modules."""

    def __init__(self, prefix: str):
        super().__init__(
            prefix, 'qapi-types', ' * Schema-defined QAPI types',
            ' * Built-in QAPI types', __doc__)

    def _begin_builtin_module(self) -> None:
        # Preamble for the module holding built-in types.
        self._genc.preamble_add(mcgen('''
#include "qemu/osdep.h"
#include "qapi/dealloc-visitor.h"
#include "qapi/qapi-builtin-types.h"
#include "qapi/qapi-builtin-visit.h"
'''))
        self._genh.preamble_add(mcgen('''
#include "qapi/util.h"
'''))

    def _begin_user_module(self, name: str) -> None:
        # Preamble for a schema-defined module.
        types = self._module_basename('qapi-types', name)
        visit = self._module_basename('qapi-visit', name)
        self._genc.preamble_add(mcgen('''
#include "qemu/osdep.h"
#include "qapi/dealloc-visitor.h"
#include "%(types)s.h"
#include "%(visit)s.h"
''',
                                      types=types, visit=visit))
        self._genh.preamble_add(mcgen('''
#include "qapi/qapi-builtin-types.h"
'''))

    def visit_begin(self, schema: QAPISchema) -> None:
        # gen_object() is recursive, ensure it doesn't visit the empty type
        objects_seen.add(schema.the_empty_object_type.name)

    def _gen_type_cleanup(self, name: str) -> None:
        # Declaration goes in the header, definition in the C file.
        self._genh.add(gen_type_cleanup_decl(name))
        self._genc.add(gen_type_cleanup(name))

    def visit_enum_type(self,
                        name: str,
                        info: Optional[QAPISourceInfo],
                        ifcond: QAPISchemaIfCond,
                        features: List[QAPISchemaFeature],
                        members: List[QAPISchemaEnumMember],
                        prefix: Optional[str]) -> None:
        with ifcontext(ifcond, self._genh, self._genc):
            self._genh.preamble_add(gen_enum(name, members, prefix))
            self._genc.add(gen_enum_lookup(name, members, prefix))

    def visit_array_type(self,
                         name: str,
                         info: Optional[QAPISourceInfo],
                         ifcond: QAPISchemaIfCond,
                         element_type: QAPISchemaType) -> None:
        with ifcontext(ifcond, self._genh, self._genc):
            self._genh.preamble_add(gen_fwd_object_or_array(name))
            self._genh.add(gen_array(name, element_type))
            self._gen_type_cleanup(name)

    def visit_object_type(self,
                          name: str,
                          info: Optional[QAPISourceInfo],
                          ifcond: QAPISchemaIfCond,
                          features: List[QAPISchemaFeature],
                          base: Optional[QAPISchemaObjectType],
                          members: List[QAPISchemaObjectTypeMember],
                          variants: Optional[QAPISchemaVariants]) -> None:
        # Nothing to do for the special empty builtin
        if name == 'q_empty':
            return
        with ifcontext(ifcond, self._genh):
            self._genh.preamble_add(gen_fwd_object_or_array(name))
        # gen_object() emits its own ifcond guards.
        self._genh.add(gen_object(name, ifcond, base, members, variants))
        with ifcontext(ifcond, self._genh, self._genc):
            if base and not base.is_implicit():
                self._genh.add(gen_upcast(name, base))
            # TODO Worth changing the visitor signature, so we could
            # directly use rather than repeat type.is_implicit()?
            if not name.startswith('q_'):
                # implicit types won't be directly allocated/freed
                self._gen_type_cleanup(name)

    def visit_alternate_type(self,
                             name: str,
                             info: Optional[QAPISourceInfo],
                             ifcond: QAPISchemaIfCond,
                             features: List[QAPISchemaFeature],
                             variants: QAPISchemaVariants) -> None:
        with ifcontext(ifcond, self._genh):
            self._genh.preamble_add(gen_fwd_object_or_array(name))
        # An alternate is generated like an object whose only member is
        # the tag, plus the variants union.
        self._genh.add(gen_object(name, ifcond, None,
                                  [variants.tag_member], variants))
        with ifcontext(ifcond, self._genh, self._genc):
            self._gen_type_cleanup(name)
def gen_types(schema: QAPISchema,
              output_dir: str,
              prefix: str,
              opt_builtins: bool) -> None:
    """Run the type-generating visitor over *schema* and write the output."""
    visitor = QAPISchemaGenTypeVisitor(prefix)
    schema.visit(visitor)
    visitor.write(output_dir, opt_builtins)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qapi/types.py
|
"""
QAPI introspection generator
Copyright (C) 2015-2021 Red Hat, Inc.
Authors:
Markus Armbruster <armbru@redhat.com>
John Snow <jsnow@redhat.com>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from typing import (
Any,
Dict,
Generic,
List,
Optional,
Sequence,
TypeVar,
Union,
)
from .common import c_name, mcgen
from .gen import QAPISchemaMonolithicCVisitor
from .schema import (
QAPISchema,
QAPISchemaArrayType,
QAPISchemaBuiltinType,
QAPISchemaEntity,
QAPISchemaEnumMember,
QAPISchemaFeature,
QAPISchemaIfCond,
QAPISchemaObjectType,
QAPISchemaObjectTypeMember,
QAPISchemaType,
QAPISchemaVariant,
QAPISchemaVariants,
)
from .source import QAPISourceInfo
# This module constructs a tree data structure that is used to
# generate the introspection information for QEMU. It is shaped
# like a JSON value.
#
# A complexity over JSON is that our values may or may not be annotated.
#
# Un-annotated values may be:
# Scalar: str, bool, None.
# Non-scalar: List, Dict
# _value = Union[str, bool, None, Dict[str, JSONValue], List[JSONValue]]
#
# With optional annotations, the type of all values is:
# JSONValue = Union[_Value, Annotated[_Value]]
#
# Sadly, mypy does not support recursive types; so the _Stub alias is used to
# mark the imprecision in the type model where we'd otherwise use JSONValue.
# _Stub marks the imprecision where a recursive JSONValue would be needed.
_Stub = Any
_Scalar = Union[str, bool, None]
_NonScalar = Union[Dict[str, _Stub], List[_Stub]]
_Value = Union[_Scalar, _NonScalar]
# A value, possibly wrapped in Annotated (forward reference).
JSONValue = Union[_Value, 'Annotated[_Value]']

# These types are based on structures defined in QEMU's schema, so we
# lack precise types for them here. Python 3.6 does not offer
# TypedDict constructs, so they are broadly typed here as simple
# Python Dicts.
SchemaInfo = Dict[str, object]
SchemaInfoObject = Dict[str, object]
SchemaInfoObjectVariant = Dict[str, object]
SchemaInfoObjectMember = Dict[str, object]
SchemaInfoCommand = Dict[str, object]

# Type variable for the value wrapped by Annotated.
_ValueT = TypeVar('_ValueT', bound=_Value)
class Annotated(Generic[_ValueT]):
    """
    A value paired with an optional comment and an if-conditional.

    The wrapped value is usually a SchemaInfo-like dict, but scalar leaf
    values (feature and enum member names) are wrapped too, so that they
    can carry comments and conditionals in the generated output.
    """
    # TODO: Remove after Python 3.7 adds @dataclass:
    # pylint: disable=too-few-public-methods

    def __init__(self, value: _ValueT, ifcond: QAPISchemaIfCond,
                 comment: Optional[str] = None):
        self.ifcond = ifcond
        self.comment: Optional[str] = comment
        self.value = value
def _tree_to_qlit(obj: JSONValue,
                  level: int = 0,
                  dict_value: bool = False) -> str:
    """
    Convert the type tree into a QLIT C string, recursively.

    :param obj: The value to convert.
        This value may not be Annotated when dict_value is True.
    :param level: The indentation level for this particular value.
    :param dict_value: True when the value being processed belongs to a
        dict key; which suppresses the output indent.
    """
    def indent(level: int) -> str:
        # Four spaces per nesting level.
        return level * 4 * ' '

    if isinstance(obj, Annotated):
        # NB: _tree_to_qlit is called recursively on the values of a
        # key:value pair; those values can't be decorated with
        # comments or conditionals.
        msg = "dict values cannot have attached comments or if-conditionals."
        assert not dict_value, msg
        ret = ''
        if obj.comment:
            ret += indent(level) + f"/* {obj.comment} */\n"
        if obj.ifcond.is_present():
            ret += obj.ifcond.gen_if()
        ret += _tree_to_qlit(obj.value, level)
        if obj.ifcond.is_present():
            ret += '\n' + obj.ifcond.gen_endif()
        return ret

    ret = ''
    if not dict_value:
        ret += indent(level)

    # Scalars:
    if obj is None:
        ret += 'QLIT_QNULL'
    elif isinstance(obj, str):
        ret += f"QLIT_QSTR({to_c_string(obj)})"
    elif isinstance(obj, bool):
        ret += f"QLIT_QBOOL({str(obj).lower()})"

    # Non-scalars:
    elif isinstance(obj, list):
        ret += 'QLIT_QLIST(((QLitObject[]) {\n'
        for value in obj:
            ret += _tree_to_qlit(value, level + 1).strip('\n') + '\n'
        # Empty entry terminates the C array.
        ret += indent(level + 1) + '{}\n'
        ret += indent(level) + '}))'
    elif isinstance(obj, dict):
        ret += 'QLIT_QDICT(((QLitDictEntry[]) {\n'
        # Sorted for deterministic output.
        for key, value in sorted(obj.items()):
            ret += indent(level + 1) + "{{ {:s}, {:s} }},\n".format(
                to_c_string(key),
                _tree_to_qlit(value, level + 1, dict_value=True)
            )
        ret += indent(level + 1) + '{}\n'
        ret += indent(level) + '}))'
    else:
        raise NotImplementedError(
            f"type '{type(obj).__name__}' not implemented"
        )

    if level > 0:
        # Nested values are array/dict entries; terminate with a comma.
        ret += ','
    return ret
def to_c_string(string: str) -> str:
    """Return *string* as a double-quoted C string literal, escaping
    backslashes and double quotes."""
    escaped = string.replace('\\', '\\\\').replace('"', '\\"')
    return f'"{escaped}"'
class QAPISchemaGenIntrospectVisitor(QAPISchemaMonolithicCVisitor):
    """Schema visitor that builds the introspection QLit blob."""

    def __init__(self, prefix: str, unmask: bool):
        super().__init__(
            prefix, 'qapi-introspect',
            ' * QAPI/QMP schema introspection', __doc__)
        # When True, emit real type names instead of masking integers.
        self._unmask = unmask
        self._schema: Optional[QAPISchema] = None
        # SchemaInfo trees accumulated during the visit.
        self._trees: List[Annotated[SchemaInfo]] = []
        # Work queue of types referenced by visited entities.
        self._used_types: List[QAPISchemaType] = []
        # Maps real type names to masked integer names.
        self._name_map: Dict[str, str] = {}
        self._genc.add(mcgen('''
#include "qemu/osdep.h"
#include "%(prefix)sqapi-introspect.h"
''',
                             prefix=prefix))

    def visit_begin(self, schema: QAPISchema) -> None:
        self._schema = schema

    def visit_end(self) -> None:
        # visit the types that are actually used
        for typ in self._used_types:
            typ.visit(self)
        # generate C
        name = c_name(self._prefix, protect=False) + 'qmp_schema_qlit'
        self._genh.add(mcgen('''
#include "qapi/qmp/qlit.h"
extern const QLitObject %(c_name)s;
''',
                             c_name=c_name(name)))
        self._genc.add(mcgen('''
const QLitObject %(c_name)s = %(c_string)s;
''',
                             c_name=c_name(name),
                             c_string=_tree_to_qlit(self._trees)))
        # Reset state so the visitor could be reused.
        self._schema = None
        self._trees = []
        self._used_types = []
        self._name_map = {}

    def visit_needed(self, entity: QAPISchemaEntity) -> bool:
        # Ignore types on first pass; visit_end() will pick up used types
        return not isinstance(entity, QAPISchemaType)

    def _name(self, name: str) -> str:
        # Mask a type name as a small integer, unless unmasking is on.
        if self._unmask:
            return name
        if name not in self._name_map:
            self._name_map[name] = '%d' % len(self._name_map)
        return self._name_map[name]

    def _use_type(self, typ: QAPISchemaType) -> str:
        """Map a schema type to its masked name and queue it for output."""
        assert self._schema is not None

        # Map the various integer types to plain int
        if typ.json_type() == 'int':
            typ = self._schema.lookup_type('int')
        elif (isinstance(typ, QAPISchemaArrayType) and
              typ.element_type.json_type() == 'int'):
            typ = self._schema.lookup_type('intList')
        # Add type to work queue if new
        if typ not in self._used_types:
            self._used_types.append(typ)
        # Clients should examine commands and events, not types.  Hide
        # type names as integers to reduce the temptation.  Also, it
        # saves a few characters on the wire.
        if isinstance(typ, QAPISchemaBuiltinType):
            return typ.name
        if isinstance(typ, QAPISchemaArrayType):
            return '[' + self._use_type(typ.element_type) + ']'
        return self._name(typ.name)

    @staticmethod
    def _gen_features(features: Sequence[QAPISchemaFeature]
                      ) -> List[Annotated[str]]:
        # Each feature name carries its own if-conditional.
        return [Annotated(f.name, f.ifcond) for f in features]

    def _gen_tree(self, name: str, mtype: str, obj: Dict[str, object],
                  ifcond: QAPISchemaIfCond = QAPISchemaIfCond(),
                  features: Sequence[QAPISchemaFeature] = ()) -> None:
        """
        Build and append a SchemaInfo object to self._trees.

        :param name: The SchemaInfo's name.
        :param mtype: The SchemaInfo's meta-type.
        :param obj: Additional SchemaInfo members, as appropriate for
            the meta-type.
        :param ifcond: Conditionals to apply to the SchemaInfo.
        :param features: The SchemaInfo's features.
            Will be omitted from the output if empty.
        """
        comment: Optional[str] = None
        if mtype not in ('command', 'event', 'builtin', 'array'):
            if not self._unmask:
                # Output a comment to make it easy to map masked names
                # back to the source when reading the generated output.
                comment = f'"{self._name(name)}" = {name}'
            name = self._name(name)
        obj['name'] = name
        obj['meta-type'] = mtype
        if features:
            obj['features'] = self._gen_features(features)
        self._trees.append(Annotated(obj, ifcond, comment))

    def _gen_member(self, member: QAPISchemaObjectTypeMember
                    ) -> Annotated[SchemaInfoObjectMember]:
        """Build the SchemaInfoObjectMember for an object member."""
        obj: SchemaInfoObjectMember = {
            'name': member.name,
            'type': self._use_type(member.type)
        }
        if member.optional:
            obj['default'] = None
        if member.features:
            obj['features'] = self._gen_features(member.features)
        return Annotated(obj, member.ifcond)

    def _gen_variant(self, variant: QAPISchemaVariant
                     ) -> Annotated[SchemaInfoObjectVariant]:
        """Build the SchemaInfoObjectVariant for a union variant."""
        obj: SchemaInfoObjectVariant = {
            'case': variant.name,
            'type': self._use_type(variant.type)
        }
        return Annotated(obj, variant.ifcond)

    def visit_builtin_type(self, name: str, info: Optional[QAPISourceInfo],
                           json_type: str) -> None:
        self._gen_tree(name, 'builtin', {'json-type': json_type})

    def visit_enum_type(self, name: str, info: Optional[QAPISourceInfo],
                        ifcond: QAPISchemaIfCond,
                        features: List[QAPISchemaFeature],
                        members: List[QAPISchemaEnumMember],
                        prefix: Optional[str]) -> None:
        self._gen_tree(
            name, 'enum',
            {'values': [Annotated(m.name, m.ifcond) for m in members]},
            ifcond, features
        )

    def visit_array_type(self, name: str, info: Optional[QAPISourceInfo],
                         ifcond: QAPISchemaIfCond,
                         element_type: QAPISchemaType) -> None:
        element = self._use_type(element_type)
        self._gen_tree('[' + element + ']', 'array', {'element-type': element},
                       ifcond)

    def visit_object_type_flat(self, name: str, info: Optional[QAPISourceInfo],
                               ifcond: QAPISchemaIfCond,
                               features: List[QAPISchemaFeature],
                               members: List[QAPISchemaObjectTypeMember],
                               variants: Optional[QAPISchemaVariants]) -> None:
        obj: SchemaInfoObject = {
            'members': [self._gen_member(m) for m in members]
        }
        if variants:
            obj['tag'] = variants.tag_member.name
            obj['variants'] = [self._gen_variant(v) for v in variants.variants]
        self._gen_tree(name, 'object', obj, ifcond, features)

    def visit_alternate_type(self, name: str, info: Optional[QAPISourceInfo],
                             ifcond: QAPISchemaIfCond,
                             features: List[QAPISchemaFeature],
                             variants: QAPISchemaVariants) -> None:
        self._gen_tree(
            name, 'alternate',
            {'members': [Annotated({'type': self._use_type(m.type)},
                                   m.ifcond)
                         for m in variants.variants]},
            ifcond, features
        )

    def visit_command(self, name: str, info: Optional[QAPISourceInfo],
                      ifcond: QAPISchemaIfCond,
                      features: List[QAPISchemaFeature],
                      arg_type: Optional[QAPISchemaObjectType],
                      ret_type: Optional[QAPISchemaType], gen: bool,
                      success_response: bool, boxed: bool, allow_oob: bool,
                      allow_preconfig: bool, coroutine: bool) -> None:
        assert self._schema is not None
        # Absent argument/return types are reported as the empty object.
        arg_type = arg_type or self._schema.the_empty_object_type
        ret_type = ret_type or self._schema.the_empty_object_type
        obj: SchemaInfoCommand = {
            'arg-type': self._use_type(arg_type),
            'ret-type': self._use_type(ret_type)
        }
        if allow_oob:
            obj['allow-oob'] = allow_oob
        self._gen_tree(name, 'command', obj, ifcond, features)

    def visit_event(self, name: str, info: Optional[QAPISourceInfo],
                    ifcond: QAPISchemaIfCond,
                    features: List[QAPISchemaFeature],
                    arg_type: Optional[QAPISchemaObjectType],
                    boxed: bool) -> None:
        assert self._schema is not None
        arg_type = arg_type or self._schema.the_empty_object_type
        self._gen_tree(name, 'event', {'arg-type': self._use_type(arg_type)},
                       ifcond, features)
def gen_introspect(schema: QAPISchema, output_dir: str, prefix: str,
                   opt_unmask: bool) -> None:
    """Run the introspection visitor over *schema* and write the output."""
    visitor = QAPISchemaGenIntrospectVisitor(prefix, opt_unmask)
    schema.visit(visitor)
    visitor.write(output_dir)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qapi/introspect.py
|
# -*- coding: utf-8 -*-
#
# QAPI schema parser
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013-2019 Red Hat Inc.
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Markus Armbruster <armbru@redhat.com>
# Marc-André Lureau <marcandre.lureau@redhat.com>
# Kevin Wolf <kwolf@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from collections import OrderedDict
import os
import re
from typing import (
TYPE_CHECKING,
Dict,
List,
Optional,
Set,
Union,
)
from .common import must_match
from .error import QAPISemError, QAPISourceError
from .source import QAPISourceInfo
if TYPE_CHECKING:
# pylint: disable=cyclic-import
# TODO: Remove cycle. [schema -> expr -> parser -> schema]
from .schema import QAPISchemaFeature, QAPISchemaMember
#: Represents a single Top Level QAPI schema expression.
TopLevelExpr = Dict[str, object]

# Return value alias for get_expr().
_ExprValue = Union[List[object], Dict[str, object], str, bool]

# FIXME: Consolidate and centralize definitions for TopLevelExpr,
# _ExprValue, _JSONValue, and _JSONObject; currently scattered across
# several modules.
class QAPIParseError(QAPISourceError):
    """Error class for all QAPI schema parsing errors."""

    def __init__(self, parser: 'QAPISchemaParser', msg: str):
        # Compute the 1-based column of the offending token by walking
        # the current line from its start up to the token position.
        col = 1
        for ch in parser.src[parser.line_pos:parser.pos]:
            if ch == '\t':
                # Advance to the next 8-column tab stop.  The previous
                # formula `(col + 7) % 8 + 1` wrapped the column back
                # into 1..8 (a tab at column 1 gave column 1) instead of
                # advancing past the tab stop.
                col = (col + 7) // 8 * 8 + 1
            else:
                col += 1
        super().__init__(parser.info, msg, col)
class QAPISchemaParser:
    """
    Parse QAPI schema source.

    Parse a JSON-esque schema file and process directives.  See
    qapi-code-gen.txt section "Schema Syntax" for the exact syntax.
    Grammatical validation is handled later by `expr.check_exprs()`.

    :param fname: Source file name.
    :param previously_included:
        The absolute names of previously included source files,
        if being invoked from another parser.
    :param incl_info:
        `QAPISourceInfo` belonging to the parent module.
        ``None`` implies this is the root module.

    :ivar exprs: Resulting parsed expressions.
    :ivar docs: Resulting parsed documentation blocks.

    :raise OSError: For problems reading the root schema document.
    :raise QAPIError: For errors in the schema source.
    """

    def __init__(self,
                 fname: str,
                 previously_included: Optional[Set[str]] = None,
                 incl_info: Optional[QAPISourceInfo] = None):
        self._fname = fname
        self._included = previously_included or set()
        self._included.add(os.path.abspath(self._fname))
        self.src = ''

        # Lexer state (see `accept` for details):
        self.info = QAPISourceInfo(self._fname, incl_info)
        self.tok: Union[None, str] = None
        self.pos = 0
        self.cursor = 0
        self.val: Optional[Union[bool, str]] = None
        self.line_pos = 0

        # Parser output:
        self.exprs: List[Dict[str, object]] = []
        self.docs: List[QAPIDoc] = []

        # Showtime!
        self._parse()

    def _parse(self) -> None:
        """
        Parse the QAPI schema document.

        :return: None.  Results are stored in ``.exprs`` and ``.docs``.
        """
        cur_doc = None

        # May raise OSError; allow the caller to handle it.
        with open(self._fname, 'r', encoding='utf-8') as fp:
            self.src = fp.read()
        if self.src == '' or self.src[-1] != '\n':
            self.src += '\n'

        # Prime the lexer:
        self.accept()

        # Parse until done:
        while self.tok is not None:
            info = self.info
            if self.tok == '#':
                self.reject_expr_doc(cur_doc)
                for cur_doc in self.get_doc(info):
                    self.docs.append(cur_doc)
                continue

            expr = self.get_expr()
            if not isinstance(expr, dict):
                raise QAPISemError(
                    info, "top-level expression must be an object")

            if 'include' in expr:
                self.reject_expr_doc(cur_doc)
                if len(expr) != 1:
                    raise QAPISemError(info, "invalid 'include' directive")
                include = expr['include']
                if not isinstance(include, str):
                    raise QAPISemError(info,
                                       "value of 'include' must be a string")
                incl_fname = os.path.join(os.path.dirname(self._fname),
                                          include)
                self.exprs.append({'expr': {'include': incl_fname},
                                   'info': info})
                exprs_include = self._include(include, info, incl_fname,
                                              self._included)
                if exprs_include:
                    # Splice the included file's results into ours.
                    self.exprs.extend(exprs_include.exprs)
                    self.docs.extend(exprs_include.docs)
            elif "pragma" in expr:
                self.reject_expr_doc(cur_doc)
                if len(expr) != 1:
                    raise QAPISemError(info, "invalid 'pragma' directive")
                pragma = expr['pragma']
                if not isinstance(pragma, dict):
                    raise QAPISemError(
                        info, "value of 'pragma' must be an object")
                for name, value in pragma.items():
                    self._pragma(name, value, info)
            else:
                expr_elem = {'expr': expr,
                             'info': info}
                if cur_doc:
                    if not cur_doc.symbol:
                        raise QAPISemError(
                            cur_doc.info, "definition documentation required")
                    expr_elem['doc'] = cur_doc
                self.exprs.append(expr_elem)
            cur_doc = None
        self.reject_expr_doc(cur_doc)

    @staticmethod
    def reject_expr_doc(doc: Optional['QAPIDoc']) -> None:
        # A definition doc block must be immediately followed by the
        # definition it documents.
        if doc and doc.symbol:
            raise QAPISemError(
                doc.info,
                "documentation for '%s' is not followed by the definition"
                % doc.symbol)

    @staticmethod
    def _include(include: str,
                 info: QAPISourceInfo,
                 incl_fname: str,
                 previously_included: Set[str]
                 ) -> Optional['QAPISchemaParser']:
        """Parse an included file; return None if it was already included."""
        incl_abs_fname = os.path.abspath(incl_fname)

        # catch inclusion cycle
        inf: Optional[QAPISourceInfo] = info
        while inf:
            if incl_abs_fname == os.path.abspath(inf.fname):
                raise QAPISemError(info, "inclusion loop for %s" % include)
            inf = inf.parent

        # skip multiple include of the same file
        if incl_abs_fname in previously_included:
            return None

        try:
            return QAPISchemaParser(incl_fname, previously_included, info)
        except OSError as err:
            raise QAPISemError(
                info,
                f"can't read include file '{incl_fname}': {err.strerror}"
            ) from err

    @staticmethod
    def _pragma(name: str, value: object, info: QAPISourceInfo) -> None:
        """Validate a pragma directive and record it in info.pragma."""

        def check_list_str(name: str, value: object) -> List[str]:
            # A pragma value that must be a homogeneous list of strings.
            if (not isinstance(value, list) or
                    any(not isinstance(elt, str) for elt in value)):
                raise QAPISemError(
                    info,
                    "pragma %s must be a list of strings" % name)
            return value

        pragma = info.pragma

        if name == 'doc-required':
            if not isinstance(value, bool):
                raise QAPISemError(info,
                                   "pragma 'doc-required' must be boolean")
            pragma.doc_required = value
        elif name == 'command-name-exceptions':
            pragma.command_name_exceptions = check_list_str(name, value)
        elif name == 'command-returns-exceptions':
            pragma.command_returns_exceptions = check_list_str(name, value)
        elif name == 'member-name-exceptions':
            pragma.member_name_exceptions = check_list_str(name, value)
        else:
            raise QAPISemError(info, "unknown pragma '%s'" % name)

    def accept(self, skip_comment: bool = True) -> None:
        """
        Read and store the next token.

        :param skip_comment:
            When false, return COMMENT tokens ("#").
            This is used when reading documentation blocks.

        :return:
            None.  Several instance attributes are updated instead:

            - ``.tok`` represents the token type.  See below for values.
            - ``.info`` describes the token's source location.
            - ``.val`` is the token's value, if any.  See below.
            - ``.pos`` is the buffer index of the first character of
              the token.

        * Single-character tokens:
            These are "{", "}", ":", ",", "[", and "]".
            ``.tok`` holds the single character and ``.val`` is None.

        * Multi-character tokens:
          * COMMENT:
            This token is not normally returned by the lexer, but it can
            be when ``skip_comment`` is False.  ``.tok`` is "#", and
            ``.val`` is a string including all chars until end-of-line,
            including the "#" itself.
          * STRING:
            ``.tok`` is "'", the single quote.  ``.val`` contains the
            string, excluding the surrounding quotes.
          * TRUE and FALSE:
            ``.tok`` is either "t" or "f", ``.val`` will be the
            corresponding bool value.
          * EOF:
            ``.tok`` and ``.val`` will both be None at EOF.
        """
        while True:
            self.tok = self.src[self.cursor]
            self.pos = self.cursor
            self.cursor += 1
            self.val = None

            if self.tok == '#':
                if self.src[self.cursor] == '#':
                    # Start of doc comment
                    skip_comment = False
                self.cursor = self.src.find('\n', self.cursor)
                if not skip_comment:
                    self.val = self.src[self.pos:self.cursor]
                    return
            elif self.tok in '{}:,[]':
                return
            elif self.tok == "'":
                # Note: we accept only printable ASCII
                string = ''
                esc = False
                while True:
                    ch = self.src[self.cursor]
                    self.cursor += 1
                    if ch == '\n':
                        raise QAPIParseError(self, "missing terminating \"'\"")
                    if esc:
                        # Note: we recognize only \\ because we have
                        # no use for funny characters in strings
                        if ch != '\\':
                            raise QAPIParseError(self,
                                                 "unknown escape \\%s" % ch)
                        esc = False
                    elif ch == '\\':
                        esc = True
                        continue
                    elif ch == "'":
                        self.val = string
                        return
                    if ord(ch) < 32 or ord(ch) >= 127:
                        raise QAPIParseError(
                            self, "funny character in string")
                    string += ch
            elif self.src.startswith('true', self.pos):
                self.val = True
                self.cursor += 3
                return
            elif self.src.startswith('false', self.pos):
                self.val = False
                self.cursor += 4
                return
            elif self.tok == '\n':
                if self.cursor == len(self.src):
                    # End of input (the source always ends in '\n').
                    self.tok = None
                    return
                self.info = self.info.next_line()
                self.line_pos = self.cursor
            elif not self.tok.isspace():
                # Show up to next structural, whitespace or quote
                # character
                match = must_match('[^[\\]{}:,\\s\'"]+',
                                   self.src[self.cursor-1:])
                raise QAPIParseError(self, "stray '%s'" % match.group(0))

    def get_members(self) -> Dict[str, object]:
        """Parse the body of a '{...}' object; current token is past '{'."""
        expr: Dict[str, object] = OrderedDict()
        if self.tok == '}':
            self.accept()
            return expr
        if self.tok != "'":
            raise QAPIParseError(self, "expected string or '}'")
        while True:
            key = self.val
            assert isinstance(key, str)  # Guaranteed by tok == "'"
            self.accept()
            if self.tok != ':':
                raise QAPIParseError(self, "expected ':'")
            self.accept()
            if key in expr:
                raise QAPIParseError(self, "duplicate key '%s'" % key)
            expr[key] = self.get_expr()
            if self.tok == '}':
                self.accept()
                return expr
            if self.tok != ',':
                raise QAPIParseError(self, "expected ',' or '}'")
            self.accept()
            if self.tok != "'":
                raise QAPIParseError(self, "expected string")

    def get_values(self) -> List[object]:
        """Parse the body of a '[...]' array; current token is past '['."""
        expr: List[object] = []
        if self.tok == ']':
            self.accept()
            return expr
        if self.tok not in tuple("{['tf"):
            raise QAPIParseError(
                self, "expected '{', '[', ']', string, or boolean")
        while True:
            expr.append(self.get_expr())
            if self.tok == ']':
                self.accept()
                return expr
            if self.tok != ',':
                raise QAPIParseError(self, "expected ',' or ']'")
            self.accept()

    def get_expr(self) -> _ExprValue:
        """Parse one expression: object, array, string, or boolean."""
        expr: _ExprValue
        if self.tok == '{':
            self.accept()
            expr = self.get_members()
        elif self.tok == '[':
            self.accept()
            expr = self.get_values()
        elif self.tok in tuple("'tf"):
            assert isinstance(self.val, (str, bool))
            expr = self.val
            self.accept()
        else:
            raise QAPIParseError(
                self, "expected '{', '[', string, or boolean")
        return expr

    def get_doc(self, info: QAPISourceInfo) -> List['QAPIDoc']:
        """
        Parse a documentation comment ('##' ... '##').

        May return more than one QAPIDoc when '# =' section markup
        splits the block.
        """
        if self.val != '##':
            raise QAPIParseError(
                self, "junk after '##' at start of documentation comment")
        docs = []
        cur_doc = QAPIDoc(self, info)
        self.accept(False)
        while self.tok == '#':
            assert isinstance(self.val, str)
            if self.val.startswith('##'):
                # End of doc comment
                if self.val != '##':
                    raise QAPIParseError(
                        self,
                        "junk after '##' at end of documentation comment")
                cur_doc.end_comment()
                docs.append(cur_doc)
                self.accept()
                return docs
            if self.val.startswith('# ='):
                if cur_doc.symbol:
                    raise QAPIParseError(
                        self,
                        "unexpected '=' markup in definition documentation")
                if cur_doc.body.text:
                    # Start a fresh doc block at the section heading.
                    cur_doc.end_comment()
                    docs.append(cur_doc)
                    cur_doc = QAPIDoc(self, info)
            cur_doc.append(self.val)
            self.accept(False)

        raise QAPIParseError(self, "documentation comment must end with '##'")
class QAPIDoc:
    """
    A documentation comment block, either definition or free-form

    Definition documentation blocks consist of

    * a body section: one line naming the definition, followed by an
      overview (any number of lines)

    * argument sections: a description of each argument (for commands
      and events) or member (for structs, unions and alternates)

    * features sections: a description of each feature flag

    * additional (non-argument) sections, possibly tagged

    Free-form documentation blocks consist only of a body section.
    """

    class Section:
        """One titled or untitled chunk of documentation text."""
        # pylint: disable=too-few-public-methods
        def __init__(self, parser: QAPISchemaParser,
                     name: Optional[str] = None, indent: int = 0):
            # parser, for error messages about indentation
            self._parser = parser
            # optional section name (argument/member or section name)
            self.name = name
            # accumulated section text, one '\n'-terminated line at a time
            self.text = ''
            # the expected indent level of the text of this section
            self._indent = indent

        def append(self, line: str) -> None:
            # Strip leading spaces corresponding to the expected indent level
            # Blank lines are always OK.
            if line:
                indent = must_match(r'\s*', line).end()
                if indent < self._indent:
                    raise QAPIParseError(
                        self._parser,
                        "unexpected de-indent (expected at least %d spaces)" %
                        self._indent)
                line = line[self._indent:]

            self.text += line.rstrip() + '\n'

    class ArgSection(Section):
        """Section describing one argument/member/feature; may be
        connected to the schema member it documents."""
        def __init__(self, parser: QAPISchemaParser,
                     name: str, indent: int = 0):
            super().__init__(parser, name, indent)
            # filled in later by connect()
            self.member: Optional['QAPISchemaMember'] = None

        def connect(self, member: 'QAPISchemaMember') -> None:
            self.member = member

    class NullSection(Section):
        """
        Immutable dummy section for use at the end of a doc block.
        """
        # pylint: disable=too-few-public-methods
        def append(self, line: str) -> None:
            assert False, "Text appended after end_comment() called."

    def __init__(self, parser: QAPISchemaParser, info: QAPISourceInfo):
        # self._parser is used to report errors with QAPIParseError.  The
        # resulting error position depends on the state of the parser.
        # It happens to be the beginning of the comment.  More or less
        # serviceable, but action at a distance.
        self._parser = parser
        self.info = info
        # symbol documented by a definition block, None for free-form
        self.symbol: Optional[str] = None
        self.body = QAPIDoc.Section(parser)
        # dicts mapping parameter/feature names to their ArgSection
        self.args: Dict[str, QAPIDoc.ArgSection] = OrderedDict()
        self.features: Dict[str, QAPIDoc.ArgSection] = OrderedDict()
        self.sections: List[QAPIDoc.Section] = []
        # the current section
        self._section = self.body
        # state of the line-dispatch state machine (see append())
        self._append_line = self._append_body_line

    def has_section(self, name: str) -> bool:
        """Return True if we have a section with this name."""
        for i in self.sections:
            if i.name == name:
                return True
        return False

    def append(self, line: str) -> None:
        """
        Parse a comment line and add it to the documentation.

        The way that the line is dealt with depends on which part of
        the documentation we're parsing right now:
        * The body section: ._append_line is ._append_body_line
        * An argument section: ._append_line is ._append_args_line
        * A features section: ._append_line is ._append_features_line
        * An additional section: ._append_line is ._append_various_line
        """
        # drop the leading '#'
        line = line[1:]
        if not line:
            self._append_freeform(line)
            return

        if line[0] != ' ':
            raise QAPIParseError(self._parser, "missing space after #")
        line = line[1:]
        self._append_line(line)

    def end_comment(self) -> None:
        """Finish the block; further append() calls are a bug."""
        self._switch_section(QAPIDoc.NullSection(self._parser))

    @staticmethod
    def _is_section_tag(name: str) -> bool:
        return name in ('Returns:', 'Since:',
                        # those are often singular or plural
                        'Note:', 'Notes:',
                        'Example:', 'Examples:',
                        'TODO:')

    def _append_body_line(self, line: str) -> None:
        """
        Process a line of documentation text in the body section.

        If this a symbol line and it is the section's first line, this
        is a definition documentation block for that symbol.

        If it's a definition documentation block, another symbol line
        begins the argument section for the argument named by it, and
        a section tag begins an additional section.  Start that
        section and append the line to it.

        Else, append the line to the current section.
        """
        name = line.split(' ', 1)[0]
        # FIXME not nice: things like '# @foo:' and '# @foo: ' aren't
        # recognized, and get silently treated as ordinary text
        if not self.symbol and not self.body.text and line.startswith('@'):
            if not line.endswith(':'):
                raise QAPIParseError(self._parser, "line should end with ':'")
            self.symbol = line[1:-1]
            # Invalid names are not checked here, but the name provided MUST
            # match the following definition, which *is* validated in expr.py.
            if not self.symbol:
                raise QAPIParseError(
                    self._parser, "name required after '@'")
        elif self.symbol:
            # This is a definition documentation block
            if name.startswith('@') and name.endswith(':'):
                self._append_line = self._append_args_line
                self._append_args_line(line)
            elif line == 'Features:':
                self._append_line = self._append_features_line
            elif self._is_section_tag(name):
                self._append_line = self._append_various_line
                self._append_various_line(line)
            else:
                self._append_freeform(line)
        else:
            # This is a free-form documentation block
            self._append_freeform(line)

    def _append_args_line(self, line: str) -> None:
        """
        Process a line of documentation text in an argument section.

        A symbol line begins the next argument section, a section tag
        section or a non-indented line after a blank line begins an
        additional section.  Start that section and append the line to
        it.

        Else, append the line to the current section.
        """
        name = line.split(' ', 1)[0]

        if name.startswith('@') and name.endswith(':'):
            # If line is "@arg:   first line of description", find
            # the index of 'f', which is the indent we expect for any
            # following lines.  We then remove the leading "@arg:"
            # from line and replace it with spaces so that 'f' has the
            # same index as it did in the original line and can be
            # handled the same way we will handle following lines.
            indent = must_match(r'@\S*:\s*', line).end()
            line = line[indent:]
            if not line:
                # Line was just the "@arg:" header; following lines
                # are not indented
                indent = 0
            else:
                line = ' ' * indent + line
            self._start_args_section(name[1:-1], indent)
        elif self._is_section_tag(name):
            self._append_line = self._append_various_line
            self._append_various_line(line)
            return
        elif (self._section.text.endswith('\n\n')
              and line and not line[0].isspace()):
            # Non-indented text after a blank line ends the argument
            # sections and starts either features or a tagless section.
            if line == 'Features:':
                self._append_line = self._append_features_line
            else:
                self._start_section()
                self._append_line = self._append_various_line
                self._append_various_line(line)
            return

        self._append_freeform(line)

    def _append_features_line(self, line: str) -> None:
        """Process a line in a features section (same shape as
        _append_args_line, but collecting into self.features)."""
        name = line.split(' ', 1)[0]

        if name.startswith('@') and name.endswith(':'):
            # If line is "@arg:   first line of description", find
            # the index of 'f', which is the indent we expect for any
            # following lines.  We then remove the leading "@arg:"
            # from line and replace it with spaces so that 'f' has the
            # same index as it did in the original line and can be
            # handled the same way we will handle following lines.
            indent = must_match(r'@\S*:\s*', line).end()
            line = line[indent:]
            if not line:
                # Line was just the "@arg:" header; following lines
                # are not indented
                indent = 0
            else:
                line = ' ' * indent + line
            self._start_features_section(name[1:-1], indent)
        elif self._is_section_tag(name):
            self._append_line = self._append_various_line
            self._append_various_line(line)
            return
        elif (self._section.text.endswith('\n\n')
              and line and not line[0].isspace()):
            self._start_section()
            self._append_line = self._append_various_line
            self._append_various_line(line)
            return

        self._append_freeform(line)

    def _append_various_line(self, line: str) -> None:
        """
        Process a line of documentation text in an additional section.

        A symbol line is an error.

        A section tag begins an additional section.  Start that
        section and append the line to it.

        Else, append the line to the current section.
        """
        name = line.split(' ', 1)[0]

        if name.startswith('@') and name.endswith(':'):
            raise QAPIParseError(self._parser,
                                 "'%s' can't follow '%s' section"
                                 % (name, self.sections[0].name))
        if self._is_section_tag(name):
            # If line is "Section:   first line of description", find
            # the index of 'f', which is the indent we expect for any
            # following lines.  We then remove the leading "Section:"
            # from line and replace it with spaces so that 'f' has the
            # same index as it did in the original line and can be
            # handled the same way we will handle following lines.
            indent = must_match(r'\S*:\s*', line).end()
            line = line[indent:]
            if not line:
                # Line was just the "Section:" header; following lines
                # are not indented
                indent = 0
            else:
                line = ' ' * indent + line
            self._start_section(name[:-1], indent)

        self._append_freeform(line)

    def _start_symbol_section(
            self,
            symbols_dict: Dict[str, 'QAPIDoc.ArgSection'],
            name: str,
            indent: int) -> None:
        # FIXME invalid names other than the empty string aren't flagged
        if not name:
            raise QAPIParseError(self._parser, "invalid parameter name")
        if name in symbols_dict:
            raise QAPIParseError(self._parser,
                                 "'%s' parameter name duplicated" % name)
        # arg/feature sections must precede tagged sections
        assert not self.sections
        new_section = QAPIDoc.ArgSection(self._parser, name, indent)
        self._switch_section(new_section)
        symbols_dict[name] = new_section

    def _start_args_section(self, name: str, indent: int) -> None:
        self._start_symbol_section(self.args, name, indent)

    def _start_features_section(self, name: str, indent: int) -> None:
        self._start_symbol_section(self.features, name, indent)

    def _start_section(self, name: Optional[str] = None,
                       indent: int = 0) -> None:
        # 'Returns' and 'Since' may appear at most once per block
        if name in ('Returns', 'Since') and self.has_section(name):
            raise QAPIParseError(self._parser,
                                 "duplicated '%s' section" % name)
        new_section = QAPIDoc.Section(self._parser, name, indent)
        self._switch_section(new_section)
        self.sections.append(new_section)

    def _switch_section(self, new_section: 'QAPIDoc.Section') -> None:
        text = self._section.text = self._section.text.strip()

        # Only the 'body' section is allowed to have an empty body.
        # All other sections, including anonymous ones, must have text.
        if self._section != self.body and not text:
            # We do not create anonymous sections unless there is
            # something to put in them; this is a parser bug.
            assert self._section.name
            raise QAPIParseError(
                self._parser,
                "empty doc section '%s'" % self._section.name)

        self._section = new_section

    def _append_freeform(self, line: str) -> None:
        match = re.match(r'(@\S+:)', line)
        if match:
            raise QAPIParseError(self._parser,
                                 "'%s' not allowed in free-form documentation"
                                 % match.group(1))
        self._section.append(line)

    def connect_member(self, member: 'QAPISchemaMember') -> None:
        """Attach a schema member to its ArgSection, creating an empty
        one for undocumented members."""
        if member.name not in self.args:
            # Undocumented TODO outlaw
            self.args[member.name] = QAPIDoc.ArgSection(self._parser,
                                                        member.name)
        self.args[member.name].connect(member)

    def connect_feature(self, feature: 'QAPISchemaFeature') -> None:
        """Attach a schema feature to its section; unlike members,
        undocumented features are an error."""
        if feature.name not in self.features:
            raise QAPISemError(feature.info,
                               "feature '%s' lacks documentation"
                               % feature.name)
        self.features[feature.name].connect(feature)

    def check_expr(self, expr: TopLevelExpr) -> None:
        """Validate this doc block against the expression it documents."""
        if self.has_section('Returns') and 'command' not in expr:
            raise QAPISemError(self.info,
                               "'Returns:' is only valid for commands")

    def check(self) -> None:
        """Check that every documented member/feature was connected."""

        def check_args_section(
                args: Dict[str, QAPIDoc.ArgSection], what: str
        ) -> None:
            bogus = [name for name, section in args.items()
                     if not section.member]
            if bogus:
                raise QAPISemError(
                    self.info,
                    "documented %s%s '%s' %s not exist" % (
                        what,
                        "s" if len(bogus) > 1 else "",
                        "', '".join(bogus),
                        "do" if len(bogus) > 1 else "does"
                    ))

        check_args_section(self.args, 'member')
        check_args_section(self.features, 'feature')
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qapi/parser.py
|
#
# QAPI helper library
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013-2018 Red Hat Inc.
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import re
from typing import (
Any,
Dict,
Match,
Optional,
Sequence,
Union,
)
#: Magic string that gets removed along with all space to its right.
EATSPACE = '\033EATSPACE.'
POINTER_SUFFIX = ' *' + EATSPACE
def camel_to_upper(value: str) -> str:
    """
    Converts CamelCase to CAMEL_CASE.

    Examples::

        ENUMName -> ENUM_NAME
        EnumName1 -> ENUM_NAME1
        ENUM_NAME -> ENUM_NAME
        ENUM_NAME1 -> ENUM_NAME1
        ENUM_Name2 -> ENUM_NAME2
        ENUM24_Name -> ENUM24_NAME
    """
    mapped = c_name(value, False)
    # All-caps input needs no word splitting.
    if value.isupper():
        return mapped

    pieces = []
    last = len(mapped) - 1
    for pos, ch in enumerate(mapped):
        # Insert a separator before an upper-case char that starts a new
        # word: it is not already preceded by '_', and either the next
        # char is lower case or the previous one is a digit.
        if ch.isupper() and pos > 0 and mapped[pos - 1] != '_':
            if (pos < last and mapped[pos + 1].islower()) \
                    or mapped[pos - 1].isdigit():
                pieces.append('_')
        pieces.append(ch)
    return ''.join(pieces).lstrip('_').upper()
def c_enum_const(type_name: str,
                 const_name: str,
                 prefix: Optional[str] = None) -> str:
    """
    Generate a C enumeration constant name.

    :param type_name: The name of the enumeration.
    :param const_name: The name of this constant.
    :param prefix: Optional, prefix that overrides the type_name.
    """
    stem = type_name if prefix is None else prefix
    return camel_to_upper(stem) + '_' + c_name(const_name, False).upper()
def c_name(name: str, protect: bool = True) -> str:
    """
    Map ``name`` to a valid C identifier.

    Used for converting 'name' from a 'name':'type' qapi definition
    into a generated struct member, as well as converting type names
    into substrings of a generated C function name.

    '__a.b_c' -> '__a_b_c', 'x-foo' -> 'x_foo'
    protect=True: 'int' -> 'q_int'; protect=False: 'int' -> 'int'

    :param name: The name to map.
    :param protect: If true, avoid returning certain ticklish identifiers
                    (like C keywords) by prepending ``q_``.
    """
    # Identifiers we must not emit verbatim, combined into one set:
    # ANSI X3J11/88-090, 3.1.1 (C89); ISO/IEC 9899:1999, 6.4.1 (C99);
    # ISO/IEC 9899:2011, 6.4.1 (C11); GCC extensions (excluding _.*);
    # C++ ISO/IEC 14882:2003 2.11 (incl. alternative representations);
    # and common namespace pollution.
    ticklish = {
        # C89
        'auto', 'break', 'case', 'char', 'const', 'continue',
        'default', 'do', 'double', 'else', 'enum', 'extern',
        'float', 'for', 'goto', 'if', 'int', 'long', 'register',
        'return', 'short', 'signed', 'sizeof', 'static',
        'struct', 'switch', 'typedef', 'union', 'unsigned',
        'void', 'volatile', 'while',
        # C99
        'inline', 'restrict', '_Bool', '_Complex', '_Imaginary',
        # C11
        '_Alignas', '_Alignof', '_Atomic', '_Generic',
        '_Noreturn', '_Static_assert', '_Thread_local',
        # GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html
        'asm', 'typeof',
        # C++
        'bool', 'catch', 'class', 'const_cast', 'delete',
        'dynamic_cast', 'explicit', 'false', 'friend', 'mutable',
        'namespace', 'new', 'operator', 'private', 'protected',
        'public', 'reinterpret_cast', 'static_cast', 'template',
        'this', 'throw', 'true', 'try', 'typeid', 'typename',
        'using', 'virtual', 'wchar_t',
        # C++ alternative representations
        'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',
        'not_eq', 'or', 'or_eq', 'xor', 'xor_eq',
        # namespace pollution
        'unix', 'errno', 'mips', 'sparc', 'i386',
    }
    # Replace every character C does not allow in identifiers.
    name = re.sub(r'[^A-Za-z0-9_]', '_', name)
    if protect and (name in ticklish or name[0].isdigit()):
        return 'q_' + name
    return name
class Indentation:
    """
    Indentation level management.

    :param initial: Initial number of spaces, default 0.
    """
    def __init__(self, initial: int = 0) -> None:
        self._level = initial

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self._level:d})"

    def __str__(self) -> str:
        """Return the current indentation as a string of spaces."""
        return self._level * ' '

    def increase(self, amount: int = 4) -> None:
        """Increase the indentation level by ``amount``, default 4."""
        self._level += amount

    def decrease(self, amount: int = 4) -> None:
        """Decrease the indentation level by ``amount``, default 4."""
        # Going below zero is a caller bug.
        assert amount <= self._level
        self._level -= amount
#: Global, current indent level for code generation.
indent = Indentation()
def cgen(code: str, **kwds: object) -> str:
    """
    Generate ``code`` with ``kwds`` interpolated.

    Obey `indent`, and strip `EATSPACE`.
    """
    expanded = code % kwds
    prefix = str(indent)
    if prefix:
        # Indent every line except preprocessor directives and blanks.
        expanded = re.sub(r'^(?!(#|$))', prefix, expanded,
                          flags=re.MULTILINE)
    eat_pattern = re.escape(EATSPACE) + r' *'
    return re.sub(eat_pattern, '', expanded)
def mcgen(code: str, **kwds: object) -> str:
    """cgen() variant that drops a leading newline, so templates can be
    written as tidy triple-quoted literals."""
    template = code[1:] if code.startswith('\n') else code
    return cgen(template, **kwds)
def c_fname(filename: str) -> str:
    """Map ``filename`` to a string usable inside a C identifier by
    replacing every non-ASCII-alphanumeric, non-underscore character
    with '_'."""
    def keep(ch: str) -> bool:
        return ('A' <= ch <= 'Z' or 'a' <= ch <= 'z'
                or '0' <= ch <= '9' or ch == '_')

    return ''.join(ch if keep(ch) else '_' for ch in filename)
def guardstart(name: str) -> str:
    """Return the opening include-guard lines for header ``name``."""
    guard = c_fname(name).upper()
    return mcgen('''
#ifndef %(name)s
#define %(name)s

''',
                 name=guard)
def guardend(name: str) -> str:
    """Return the closing include-guard line for header ``name``."""
    guard = c_fname(name).upper()
    return mcgen('''

#endif /* %(name)s */
''',
                 name=guard)
def gen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]],
               cond_fmt: str, not_fmt: str,
               all_operator: str, any_operator: str) -> str:
    """
    Render a condition tree as text.

    :param ifcond: a string (leaf condition) or a one-key dict with
        'not', 'all' or 'any'; None/empty renders as ''.
    :param cond_fmt: format applied to each leaf condition.
    :param not_fmt: format applied to a negated sub-expression.
    :param all_operator: infix joiner for 'all' (conjunction).
    :param any_operator: infix joiner for 'any' (disjunction).
    """
    def emit(cond: Union[str, Dict[str, Any]],
             parenthesize: bool) -> str:
        if isinstance(cond, str):
            return cond_fmt % cond
        assert isinstance(cond, dict) and len(cond) == 1
        if 'not' in cond:
            # Sub-expression of 'not' always gets parens via recursion.
            return not_fmt % emit(cond['not'], True)
        if 'all' in cond:
            operator, operands = all_operator, cond['all']
        else:
            operator, operands = any_operator, cond['any']
        joined = operator.join(emit(item, True) for item in operands)
        # Only infix results ever need parenthesizing.
        return '(' + joined + ')' if parenthesize else joined

    if not ifcond:
        return ''
    return emit(ifcond, False)
def cgen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]]) -> str:
    """Render an ifcond tree as a C preprocessor expression."""
    return gen_ifcond(ifcond, 'defined(%s)', '!%s', ' && ', ' || ')
def docgen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]]) -> str:
    """Render an ifcond tree as human-readable documentation text."""
    # TODO Doc generated for conditions needs polish
    return gen_ifcond(ifcond, '%s', 'not %s', ' and ', ' or ')
def gen_if(cond: str) -> str:
    """Return a '#if' line for ``cond``; empty string if unconditional."""
    text = ''
    if cond:
        text = mcgen('''
#if %(cond)s
''', cond=cond)
    return text
def gen_endif(cond: str) -> str:
    """Return the matching '#endif' line for ``cond``; '' if unconditional."""
    text = ''
    if cond:
        text = mcgen('''
#endif /* %(cond)s */
''', cond=cond)
    return text
def must_match(pattern: str, string: str) -> Match[str]:
    """re.match() for patterns the caller knows cannot fail; asserts
    instead of returning None so the result needs no narrowing."""
    found = re.match(pattern, string)
    assert found is not None
    return found
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qapi/common.py
|
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
"""
QAPI Generator
This is the main entry point for generating C code from the QAPI schema.
"""
import argparse
import sys
from typing import Optional
from .commands import gen_commands
from .common import must_match
from .error import QAPIError
from .events import gen_events
from .introspect import gen_introspect
from .schema import QAPISchema
from .types import gen_types
from .visit import gen_visit
def invalid_prefix_char(prefix: str) -> Optional[str]:
    """Return the first character of ``prefix`` that is not valid in a
    C symbol prefix, or None if the whole prefix is acceptable."""
    matched = must_match(r'([A-Za-z_.-][A-Za-z0-9_.-]*)?', prefix)
    stop = matched.end()
    return None if stop == len(prefix) else prefix[stop]
def generate(schema_file: str,
             output_dir: str,
             prefix: str,
             unmask: bool = False,
             builtins: bool = False) -> None:
    """
    Generate C code for the given schema into the target directory.

    :param schema_file: The primary QAPI schema file.
    :param output_dir: The output directory to store generated code.
    :param prefix: Optional C-code prefix for symbol names.
    :param unmask: Expose non-ABI names through introspection?
    :param builtins: Generate code for built-in types?

    :raise QAPIError: On failures.
    """
    # Callers must validate the prefix first (main() rejects bad ones).
    assert invalid_prefix_char(prefix) is None

    # Parse and check the schema once, then run each generator over it.
    schema = QAPISchema(schema_file)
    gen_types(schema, output_dir, prefix, builtins)
    gen_visit(schema, output_dir, prefix, builtins)
    gen_commands(schema, output_dir, prefix)
    gen_events(schema, output_dir, prefix)
    gen_introspect(schema, output_dir, prefix, unmask)
def main() -> int:
    """
    qapi-gen executable entry point.

    Expects arguments via sys.argv; see --help for details.

    :return: 0 on success, 1 on failure.
    """
    arg_parser = argparse.ArgumentParser(
        description='Generate code from a QAPI schema')
    arg_parser.add_argument('-b', '--builtins', action='store_true',
                            help="generate code for built-in types")
    arg_parser.add_argument('-o', '--output-dir', action='store',
                            default='',
                            help="write output to directory OUTPUT_DIR")
    arg_parser.add_argument('-p', '--prefix', action='store',
                            default='',
                            help="prefix for symbols")
    arg_parser.add_argument('-u', '--unmask-non-abi-names',
                            action='store_true', dest='unmask',
                            help="expose non-ABI names in introspection")
    arg_parser.add_argument('schema', action='store')
    cli = arg_parser.parse_args()

    # Reject prefixes that would produce invalid C identifiers.
    bad_char = invalid_prefix_char(cli.prefix)
    if bad_char:
        msg = f"funny character '{bad_char}' in argument of --prefix"
        print(f"{sys.argv[0]}: {msg}", file=sys.stderr)
        return 1

    try:
        generate(cli.schema,
                 output_dir=cli.output_dir,
                 prefix=cli.prefix,
                 unmask=cli.unmask,
                 builtins=cli.builtins)
    except QAPIError as err:
        print(f"{sys.argv[0]}: {str(err)}", file=sys.stderr)
        return 1
    return 0
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qapi/main.py
|
#
# QAPI frontend source file info
#
# Copyright (c) 2019 Red Hat Inc.
#
# Authors:
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import copy
from typing import List, Optional, TypeVar
class QAPISchemaPragma:
    """Mutable per-schema pragma state, shared along an include chain."""
    # Replace with @dataclass in Python 3.7+
    # pylint: disable=too-few-public-methods

    def __init__(self) -> None:
        # Are documentation comments required?
        self.doc_required = False
        # Commands whose names may use '_'
        self.command_name_exceptions: List[str] = []
        # Commands allowed to return a non-dictionary
        self.command_returns_exceptions: List[str] = []
        # Types whose member names may violate case conventions
        self.member_name_exceptions: List[str] = []
class QAPISourceInfo:
    """A source position (file, line) plus context for error messages."""

    T = TypeVar('T', bound='QAPISourceInfo')

    def __init__(self, fname: str, parent: Optional['QAPISourceInfo']):
        self.fname = fname
        self.line = 1
        self.parent = parent
        # Pragma state is shared with the including file, if any.
        self.pragma: QAPISchemaPragma = (
            parent.pragma if parent else QAPISchemaPragma()
        )
        self.defn_meta: Optional[str] = None
        self.defn_name: Optional[str] = None

    def set_defn(self, meta: str, name: str) -> None:
        """Record which definition this location belongs to."""
        self.defn_meta = meta
        self.defn_name = name

    def next_line(self: T) -> T:
        """Return a shallow copy of this location, advanced one line."""
        successor = copy.copy(self)
        successor.line += 1
        return successor

    def loc(self) -> str:
        """Return 'file:line' for this position."""
        return f"{self.fname}:{self.line}"

    def in_defn(self) -> str:
        """Return the "In <meta> '<name>':" context line, or ''."""
        if not self.defn_name:
            return ''
        return f"{self.fname}: In {self.defn_meta} '{self.defn_name}':\n"

    def include_path(self) -> str:
        """Return the include chain, outermost include first."""
        frames = []
        ancestor = self.parent
        while ancestor:
            frames.append(f'In file included from {ancestor.loc()}:\n')
            ancestor = ancestor.parent
        return ''.join(reversed(frames))

    def __str__(self) -> str:
        return self.include_path() + self.in_defn() + self.loc()
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qapi/source.py
|
"""
QAPI command marshaller generator
Copyright IBM, Corp. 2011
Copyright (C) 2014-2018 Red Hat, Inc.
Authors:
Anthony Liguori <aliguori@us.ibm.com>
Michael Roth <mdroth@linux.vnet.ibm.com>
Markus Armbruster <armbru@redhat.com>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from typing import (
Dict,
List,
Optional,
Set,
)
from .common import c_name, mcgen
from .gen import (
QAPIGenC,
QAPISchemaModularCVisitor,
build_params,
ifcontext,
)
from .schema import (
QAPISchema,
QAPISchemaFeature,
QAPISchemaIfCond,
QAPISchemaObjectType,
QAPISchemaType,
)
from .source import QAPISourceInfo
def gen_command_decl(name: str,
                     arg_type: Optional[QAPISchemaObjectType],
                     boxed: bool,
                     ret_type: Optional[QAPISchemaType]) -> str:
    """Return the C declaration of the qmp_FOO() handler for a command."""
    ret_c_type = (ret_type and ret_type.c_type()) or 'void'
    params = build_params(arg_type, boxed, 'Error **errp')
    return mcgen('''
%(c_type)s qmp_%(c_name)s(%(params)s);
''',
                 c_type=ret_c_type,
                 c_name=c_name(name),
                 params=params)
def gen_call(name: str,
             arg_type: Optional[QAPISchemaObjectType],
             boxed: bool,
             ret_type: Optional[QAPISchemaType]) -> str:
    """Generate the C statements calling the qmp_FOO() handler and
    marshalling its return value, if any."""
    # Build the argument list: either the boxed struct, or each member
    # (optionals contribute an extra has_FOO flag).
    pieces = []
    if boxed:
        assert arg_type
        pieces.append('&arg, ')
    elif arg_type:
        assert not arg_type.variants
        for member in arg_type.members:
            if member.optional:
                pieces.append('arg.has_%s, ' % c_name(member.name))
            pieces.append('arg.%s, ' % c_name(member.name))
    argstr = ''.join(pieces)

    lhs = 'retval = ' if ret_type else ''

    out = mcgen('''

    %(lhs)sqmp_%(c_name)s(%(args)s&err);
    error_propagate(errp, err);
''',
                c_name=c_name(name), args=argstr, lhs=lhs)
    if ret_type:
        out += mcgen('''
    if (err) {
        goto out;
    }

    qmp_marshal_output_%(c_name)s(retval, ret, errp);
''',
                     c_name=ret_type.c_name())
    return out
def gen_marshal_output(ret_type: QAPISchemaType) -> str:
    """
    Generate the static C helper that converts a qmp_FOO() return value
    into a QObject, then frees it with the dealloc visitor.

    Emitted at most once per return type per generated C file.
    """
    return mcgen('''

static void qmp_marshal_output_%(c_name)s(%(c_type)s ret_in,
                                QObject **ret_out, Error **errp)
{
    Visitor *v;

    v = qobject_output_visitor_new_qmp(ret_out);
    if (visit_type_%(c_name)s(v, "unused", &ret_in, errp)) {
        visit_complete(v, ret_out);
    }
    visit_free(v);
    v = qapi_dealloc_visitor_new();
    visit_type_%(c_name)s(v, "unused", &ret_in, NULL);
    visit_free(v);
}
''',
                 c_type=ret_type.c_type(), c_name=ret_type.c_name())
def build_marshal_proto(name: str) -> str:
    """Return the C prototype of the qmp_marshal_FOO() dispatch stub."""
    stub = c_name(name)
    return f'void qmp_marshal_{stub}(QDict *args, QObject **ret, Error **errp)'
def gen_marshal_decl(name: str) -> str:
    """Return the header declaration of the marshalling stub."""
    proto = build_marshal_proto(name)
    return mcgen('''
%(proto)s;
''',
                 proto=proto)
def gen_marshal(name: str,
                arg_type: Optional[QAPISchemaObjectType],
                boxed: bool,
                ret_type: Optional[QAPISchemaType]) -> str:
    """
    Generate the C body of qmp_marshal_FOO(): unmarshal the QDict
    arguments, call the handler (via gen_call()), marshal the result,
    and finally free the argument struct with the dealloc visitor.
    """
    # Boxed commands always take arguments; unboxed ones only when the
    # argument struct has members.
    have_args = boxed or (arg_type and not arg_type.is_empty())
    if have_args:
        assert arg_type is not None
        arg_type_c_name = arg_type.c_name()

    ret = mcgen('''

%(proto)s
{
    Error *err = NULL;
    bool ok = false;
    Visitor *v;
''',
                proto=build_marshal_proto(name))

    if ret_type:
        ret += mcgen('''
    %(c_type)s retval;
''',
                     c_type=ret_type.c_type())

    if have_args:
        ret += mcgen('''
    %(c_name)s arg = {0};
''',
                     c_name=arg_type_c_name)

    # Input phase: walk the QDict with the QObject input visitor.
    ret += mcgen('''

    v = qobject_input_visitor_new_qmp(QOBJECT(args));
    if (!visit_start_struct(v, NULL, NULL, 0, errp)) {
        goto out;
    }
''')

    if have_args:
        ret += mcgen('''
    if (visit_type_%(c_arg_type)s_members(v, &arg, errp)) {
        ok = visit_check_struct(v, errp);
    }
''',
                     c_arg_type=arg_type_c_name)
    else:
        ret += mcgen('''
    ok = visit_check_struct(v, errp);
''')

    ret += mcgen('''
    visit_end_struct(v, NULL);
    if (!ok) {
        goto out;
    }
''')

    # Call the handler and (if any) marshal its return value.
    ret += gen_call(name, arg_type, boxed, ret_type)

    ret += mcgen('''

out:
    visit_free(v);
''')

    # Cleanup phase: free whatever the input visitor allocated into arg.
    ret += mcgen('''
    v = qapi_dealloc_visitor_new();
    visit_start_struct(v, NULL, NULL, 0, NULL);
''')

    if have_args:
        ret += mcgen('''
    visit_type_%(c_arg_type)s_members(v, &arg, NULL);
''',
                     c_arg_type=arg_type_c_name)

    ret += mcgen('''
    visit_end_struct(v, NULL);
    visit_free(v);
''')

    ret += mcgen('''
}
''')
    return ret
def gen_register_command(name: str,
                         features: List[QAPISchemaFeature],
                         success_response: bool,
                         allow_oob: bool,
                         allow_preconfig: bool,
                         coroutine: bool) -> str:
    """
    Generate the qmp_register_command() call for one command.

    :param name: QAPI name of the command.
    :param features: command features; 'deprecated' maps to QCO_DEPRECATED.
    :param success_response: False adds QCO_NO_SUCCESS_RESP.
    :param allow_oob: True adds QCO_ALLOW_OOB.
    :param allow_preconfig: True adds QCO_ALLOW_PRECONFIG.
    :param coroutine: True adds QCO_COROUTINE.
    """
    options = []
    # any() instead of materializing a list of feature names just to
    # test membership.
    if any(feat.name == 'deprecated' for feat in features):
        options.append('QCO_DEPRECATED')
    if not success_response:
        options.append('QCO_NO_SUCCESS_RESP')
    if allow_oob:
        options.append('QCO_ALLOW_OOB')
    if allow_preconfig:
        options.append('QCO_ALLOW_PRECONFIG')
    if coroutine:
        options.append('QCO_COROUTINE')

    if not options:
        options = ['QCO_NO_OPTIONS']

    ret = mcgen('''
    qmp_register_command(cmds, "%(name)s",
                         qmp_marshal_%(c_name)s, %(opts)s);
''',
                name=name, c_name=c_name(name),
                opts=" | ".join(options))
    return ret
class QAPISchemaGenCommandVisitor(QAPISchemaModularCVisitor):
    """Schema visitor emitting the per-module qapi-commands C/H files
    plus a './init' module registering all commands."""

    def __init__(self, prefix: str):
        super().__init__(
            prefix, 'qapi-commands',
            ' * Schema-defined QAPI/QMP commands', None, __doc__)
        # Per generated C file: return types whose marshal-output helper
        # has already been emitted (emit each at most once per file).
        self._visited_ret_types: Dict[QAPIGenC, Set[QAPISchemaType]] = {}

    def _begin_user_module(self, name: str) -> None:
        self._visited_ret_types[self._genc] = set()
        commands = self._module_basename('qapi-commands', name)
        types = self._module_basename('qapi-types', name)
        visit = self._module_basename('qapi-visit', name)
        self._genc.add(mcgen('''
#include "qemu/osdep.h"
#include "qapi/compat-policy.h"
#include "qapi/visitor.h"
#include "qapi/qmp/qdict.h"
#include "qapi/dealloc-visitor.h"
#include "qapi/error.h"
#include "%(visit)s.h"
#include "%(commands)s.h"

''',
                             commands=commands, visit=visit))
        self._genh.add(mcgen('''
#include "%(types)s.h"

''',
                             types=types))

    def visit_begin(self, schema: QAPISchema) -> None:
        # Open the './init' module: declares and begins the function
        # that registers every command (closed in visit_end()).
        self._add_module('./init', ' * QAPI Commands initialization')
        self._genh.add(mcgen('''
#include "qapi/qmp/dispatch.h"

void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds);
''',
                             c_prefix=c_name(self._prefix, protect=False)))
        self._genc.add(mcgen('''
#include "qemu/osdep.h"
#include "%(prefix)sqapi-commands.h"
#include "%(prefix)sqapi-init-commands.h"

void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds)
{
    QTAILQ_INIT(cmds);

''',
                             prefix=self._prefix,
                             c_prefix=c_name(self._prefix, protect=False)))

    def visit_end(self) -> None:
        # Close the registration function opened in visit_begin().
        with self._temp_module('./init'):
            self._genc.add(mcgen('''
}
'''))

    def visit_command(self,
                      name: str,
                      info: Optional[QAPISourceInfo],
                      ifcond: QAPISchemaIfCond,
                      features: List[QAPISchemaFeature],
                      arg_type: Optional[QAPISchemaObjectType],
                      ret_type: Optional[QAPISchemaType],
                      gen: bool,
                      success_response: bool,
                      boxed: bool,
                      allow_oob: bool,
                      allow_preconfig: bool,
                      coroutine: bool) -> None:
        if not gen:
            return
        # FIXME: If T is a user-defined type, the user is responsible
        # for making this work, i.e. to make T's condition the
        # conjunction of the T-returning commands' conditions.  If T
        # is a built-in type, this isn't possible: the
        # qmp_marshal_output_T() will be generated unconditionally.
        if ret_type and ret_type not in self._visited_ret_types[self._genc]:
            self._visited_ret_types[self._genc].add(ret_type)
            with ifcontext(ret_type.ifcond,
                           self._genh, self._genc):
                self._genc.add(gen_marshal_output(ret_type))
        with ifcontext(ifcond, self._genh, self._genc):
            self._genh.add(gen_command_decl(name, arg_type, boxed, ret_type))
            self._genh.add(gen_marshal_decl(name))
            self._genc.add(gen_marshal(name, arg_type, boxed, ret_type))
        with self._temp_module('./init'):
            with ifcontext(ifcond, self._genh, self._genc):
                self._genc.add(gen_register_command(
                    name, features, success_response, allow_oob,
                    allow_preconfig, coroutine))
def gen_commands(schema: QAPISchema,
                 output_dir: str,
                 prefix: str) -> None:
    """Generate the qapi-commands C/H files for ``schema`` into
    ``output_dir``, using ``prefix`` for symbol names."""
    visitor = QAPISchemaGenCommandVisitor(prefix)
    schema.visit(visitor)
    visitor.write(output_dir)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qapi/commands.py
|
# -*- coding: utf-8 -*-
#
# QAPI schema internal representation
#
# Copyright (c) 2015-2019 Red Hat Inc.
#
# Authors:
# Markus Armbruster <armbru@redhat.com>
# Eric Blake <eblake@redhat.com>
# Marc-André Lureau <marcandre.lureau@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
# TODO catching name collisions in generated code would be nice
from collections import OrderedDict
import os
import re
from typing import Optional
from .common import (
POINTER_SUFFIX,
c_name,
cgen_ifcond,
docgen_ifcond,
gen_endif,
gen_if,
)
from .error import QAPIError, QAPISemError, QAPISourceError
from .expr import check_exprs
from .parser import QAPISchemaParser
class QAPISchemaIfCond:
    """Wrapper for an optional 'if' condition tree attached to an entity."""

    def __init__(self, ifcond=None):
        # None or a string / one-key dict tree (see gen_ifcond())
        self.ifcond = ifcond

    def _cgen(self):
        return cgen_ifcond(self.ifcond)

    def gen_if(self):
        """Return the '#if' line guarding this condition, or ''."""
        return gen_if(self._cgen())

    def gen_endif(self):
        """Return the matching '#endif' line, or ''."""
        return gen_endif(self._cgen())

    def docgen(self):
        """Return the condition rendered for documentation."""
        return docgen_ifcond(self.ifcond)

    def is_present(self):
        """True when there actually is a condition."""
        return bool(self.ifcond)
class QAPISchemaEntity:
    """Base class for everything defined by the schema (types,
    commands, events, includes)."""

    # Set by subclasses to the metatype name used in describe()
    meta: Optional[str] = None

    def __init__(self, name: str, info, doc, ifcond=None, features=None):
        # name is None for anonymous entities (e.g. include directives)
        assert name is None or isinstance(name, str)
        for f in features or []:
            assert isinstance(f, QAPISchemaFeature)
            f.set_defined_in(name)
        self.name = name
        self._module = None
        # For explicitly defined entities, info points to the (explicit)
        # definition.  For builtins (and their arrays), info is None.
        # For implicitly defined entities, info points to a place that
        # triggered the implicit definition (there may be more than one
        # such place).
        self.info = info
        self.doc = doc
        self._ifcond = ifcond or QAPISchemaIfCond()
        self.features = features or []
        # check() must run before most other methods may be used
        self._checked = False

    def c_name(self):
        """Return this entity's name as a C identifier."""
        return c_name(self.name)

    def check(self, schema):
        # Called exactly once per entity; flags feature-name clashes.
        assert not self._checked
        seen = {}
        for f in self.features:
            f.check_clash(self.info, seen)
        self._checked = True

    def connect_doc(self, doc=None):
        # Connect features to their documentation sections.
        doc = doc or self.doc
        if doc:
            for f in self.features:
                doc.connect_feature(f)

    def check_doc(self):
        if self.doc:
            self.doc.check()

    def _set_module(self, schema, info):
        assert self._checked
        # Entities without info (builtins) live in the builtin module.
        fname = info.fname if info else QAPISchemaModule.BUILTIN_MODULE_NAME
        self._module = schema.module_by_fname(fname)
        self._module.add_entity(self)

    def set_module(self, schema):
        self._set_module(schema, self.info)

    @property
    def ifcond(self):
        # Only valid after check()
        assert self._checked
        return self._ifcond

    def is_implicit(self):
        """True for builtins and implicitly defined entities."""
        return not self.info

    def visit(self, visitor):
        # Subclasses extend this; base merely enforces check() ran.
        assert self._checked

    def describe(self):
        """Return "<meta> '<name>'" for error messages."""
        assert self.meta
        return "%s '%s'" % (self.meta, self.name)
class QAPISchemaVisitor:
    """Base visitor over schema entities; every hook is a no-op so
    subclasses override only what they care about."""

    def visit_begin(self, schema):
        # Called once before any entity is visited.
        pass

    def visit_end(self):
        # Called once after all entities have been visited.
        pass

    def visit_module(self, name):
        # Called when visiting switches to module `name`.
        pass

    def visit_needed(self, entity):
        # Return False to skip `entity`.
        # Default to visiting everything
        return True

    def visit_include(self, name, info):
        pass

    def visit_builtin_type(self, name, info, json_type):
        pass

    def visit_enum_type(self, name, info, ifcond, features, members, prefix):
        pass

    def visit_array_type(self, name, info, ifcond, element_type):
        pass

    def visit_object_type(self, name, info, ifcond, features,
                          base, members, variants):
        pass

    def visit_object_type_flat(self, name, info, ifcond, features,
                               members, variants):
        pass

    def visit_alternate_type(self, name, info, ifcond, features, variants):
        pass

    def visit_command(self, name, info, ifcond, features,
                      arg_type, ret_type, gen, success_response, boxed,
                      allow_oob, allow_preconfig, coroutine):
        pass

    def visit_event(self, name, info, ifcond, features, arg_type, boxed):
        pass
class QAPISchemaModule:
    """A group of entities defined by one schema file (or built in)."""

    BUILTIN_MODULE_NAME = './builtin'

    def __init__(self, name):
        self.name = name
        self._entity_list = []

    @staticmethod
    def is_system_module(name: str) -> bool:
        """
        System modules are internally defined modules.

        Their names start with the "./" prefix.
        """
        return name.startswith('./')

    @classmethod
    def is_user_module(cls, name: str) -> bool:
        """
        User modules are those defined by the user in qapi JSON files.

        They do not start with the "./" prefix.
        """
        if cls.is_system_module(name):
            return False
        return True

    @classmethod
    def is_builtin_module(cls, name: str) -> bool:
        """
        The built-in module is a single System module for the built-in types.

        It is always "./builtin".
        """
        return cls.BUILTIN_MODULE_NAME == name

    def add_entity(self, ent):
        """Append *ent* to this module's entity list."""
        self._entity_list.append(ent)

    def visit(self, visitor):
        """Announce this module, then visit each wanted entity in order."""
        visitor.visit_module(self.name)
        for ent in self._entity_list:
            if not visitor.visit_needed(ent):
                continue
            ent.visit(visitor)
class QAPISchemaInclude(QAPISchemaEntity):
    """Nameless pseudo-entity representing an 'include' directive."""
    def __init__(self, sub_module, info):
        # Includes have neither name nor documentation
        super().__init__(None, info, None)
        self._sub_module = sub_module
    def visit(self, visitor):
        super().visit(visitor)
        visitor.visit_include(self._sub_module.name, self.info)
class QAPISchemaType(QAPISchemaEntity):
    """Common base class of all QAPI types."""
    # Return the C type for common use.
    # For the types we commonly box, this is a pointer type.
    def c_type(self):
        pass
    # Return the C type to be used in a parameter list.
    def c_param_type(self):
        return self.c_type()
    # Return the C type to be used where we suppress boxing.
    def c_unboxed_type(self):
        return self.c_type()
    def json_type(self):
        # Subclass responsibility: return the JSON type name as a str
        pass
    def alternate_qtype(self):
        # Return the QType constant that distinguishes this type in an
        # alternate, or None for JSON types that cannot be a branch
        # (note: 'array' is deliberately absent from the mapping)
        json2qtype = {
            'null': 'QTYPE_QNULL',
            'string': 'QTYPE_QSTRING',
            'number': 'QTYPE_QNUM',
            'int': 'QTYPE_QNUM',
            'boolean': 'QTYPE_QBOOL',
            'object': 'QTYPE_QDICT'
        }
        return json2qtype.get(self.json_type())
    def doc_type(self):
        # Implicit types are not documented
        if self.is_implicit():
            return None
        return self.name
    def check(self, schema):
        QAPISchemaEntity.check(self, schema)
        # The 'deprecated' feature applies to members/commands, not types
        if 'deprecated' in [f.name for f in self.features]:
            raise QAPISemError(
                self.info, "feature 'deprecated' is not supported for types")
    def describe(self):
        """Return a human-readable description for error messages."""
        assert self.meta
        return "%s type '%s'" % (self.meta, self.name)
class QAPISchemaBuiltinType(QAPISchemaType):
    """A predefined QAPI type, e.g. 'str', 'int', 'bool'."""
    meta = 'built-in'

    def __init__(self, name, json_type, c_type):
        # Builtins have neither schema info nor documentation
        super().__init__(name, None, None)
        assert not c_type or isinstance(c_type, str)
        assert json_type in ('string', 'number', 'int', 'boolean', 'null',
                             'value')
        self._json_type_name = json_type
        self._c_type_name = c_type

    def c_name(self):
        # Built-in names are used verbatim in C, without mangling
        return self.name

    def c_type(self):
        return self._c_type_name

    def c_param_type(self):
        # Strings are passed as 'const char *'
        if self.name != 'str':
            return self._c_type_name
        return 'const ' + self._c_type_name

    def json_type(self):
        return self._json_type_name

    def doc_type(self):
        # Documented under the JSON type name rather than the C name
        return self.json_type()

    def visit(self, visitor):
        super().visit(visitor)
        visitor.visit_builtin_type(self.name, self.info, self.json_type())
class QAPISchemaEnumType(QAPISchemaType):
    """A user-defined enumeration type (plus the predefined QType)."""
    meta = 'enum'
    def __init__(self, name, info, doc, ifcond, features, members, prefix):
        super().__init__(name, info, doc, ifcond, features)
        for m in members:
            assert isinstance(m, QAPISchemaEnumMember)
            m.set_defined_in(name)
        assert prefix is None or isinstance(prefix, str)
        self.members = members
        # Optional override of the generated C constants' prefix
        self.prefix = prefix
    def check(self, schema):
        super().check(schema)
        # Reject members whose names clash
        seen = {}
        for m in self.members:
            m.check_clash(self.info, seen)
    def connect_doc(self, doc=None):
        super().connect_doc(doc)
        doc = doc or self.doc
        for m in self.members:
            m.connect_doc(doc)
    def is_implicit(self):
        # See QAPISchema._def_predefineds()
        return self.name == 'QType'
    def c_type(self):
        return c_name(self.name)
    def member_names(self):
        return [m.name for m in self.members]
    def json_type(self):
        # Enum values are strings on the wire
        return 'string'
    def visit(self, visitor):
        super().visit(visitor)
        visitor.visit_enum_type(
            self.name, self.info, self.ifcond, self.features,
            self.members, self.prefix)
class QAPISchemaArrayType(QAPISchemaType):
    """An implicitly defined array of some element type."""
    meta = 'array'
    def __init__(self, name, info, element_type):
        super().__init__(name, info, None)
        assert isinstance(element_type, str)
        # Element type is resolved lazily, in .check()
        self._element_type_name = element_type
        self.element_type = None
    def check(self, schema):
        super().check(schema)
        self.element_type = schema.resolve_type(
            self._element_type_name, self.info,
            self.info and self.info.defn_meta)
        # Arrays of arrays are not expressible in the schema language
        assert not isinstance(self.element_type, QAPISchemaArrayType)
    def set_module(self, schema):
        # An array belongs to its element type's module
        self._set_module(schema, self.element_type.info)
    @property
    def ifcond(self):
        # An array inherits its element type's condition
        assert self._checked
        return self.element_type.ifcond
    def is_implicit(self):
        return True
    def c_type(self):
        return c_name(self.name) + POINTER_SUFFIX
    def json_type(self):
        return 'array'
    def doc_type(self):
        # Undocumented element type means undocumented array
        elt_doc_type = self.element_type.doc_type()
        if not elt_doc_type:
            return None
        return 'array of ' + elt_doc_type
    def visit(self, visitor):
        super().visit(visitor)
        visitor.visit_array_type(self.name, self.info, self.ifcond,
                                 self.element_type)
    def describe(self):
        """Return a human-readable description for error messages."""
        assert self.meta
        return "%s type ['%s']" % (self.meta, self._element_type_name)
class QAPISchemaObjectType(QAPISchemaType):
    """A struct or union type; unions are object types with variants."""
    def __init__(self, name, info, doc, ifcond, features,
                 base, local_members, variants):
        # struct has local_members, optional base, and no variants
        # union has base, variants, and no local_members
        super().__init__(name, info, doc, ifcond, features)
        self.meta = 'union' if variants else 'struct'
        assert base is None or isinstance(base, str)
        for m in local_members:
            assert isinstance(m, QAPISchemaObjectTypeMember)
            m.set_defined_in(name)
        if variants is not None:
            assert isinstance(variants, QAPISchemaVariants)
            variants.set_defined_in(name)
        self._base_name = base
        self.base = None
        self.local_members = local_members
        self.variants = variants
        # .members stays None until .check() completes (doubles as a
        # "check not yet done" flag for recursion detection below)
        self.members = None
    def check(self, schema):
        # This calls another type T's .check() exactly when the C
        # struct emitted by gen_object() contains that T's C struct
        # (pointers don't count).
        if self.members is not None:
            # A previous .check() completed: nothing to do
            return
        if self._checked:
            # Recursed: C struct contains itself
            raise QAPISemError(self.info,
                               "object %s contains itself" % self.name)
        super().check(schema)
        assert self._checked and self.members is None
        # 'seen' accumulates members by C name: base's first, then
        # local members, then variant tag members
        seen = OrderedDict()
        if self._base_name:
            self.base = schema.resolve_type(self._base_name, self.info,
                                            "'base'")
            if (not isinstance(self.base, QAPISchemaObjectType)
                    or self.base.variants):
                raise QAPISemError(
                    self.info,
                    "'base' requires a struct type, %s isn't"
                    % self.base.describe())
            self.base.check(schema)
            self.base.check_clash(self.info, seen)
        for m in self.local_members:
            m.check(schema)
            m.check_clash(self.info, seen)
        members = seen.values()
        if self.variants:
            self.variants.check(schema, seen)
            self.variants.check_clash(self.info, seen)
        self.members = members  # mark completed
    # Check that the members of this type do not cause duplicate JSON members,
    # and update seen to track the members seen so far. Report any errors
    # on behalf of info, which is not necessarily self.info
    def check_clash(self, info, seen):
        assert self._checked
        assert not self.variants       # not implemented
        for m in self.members:
            m.check_clash(info, seen)
    def connect_doc(self, doc=None):
        super().connect_doc(doc)
        doc = doc or self.doc
        # Members of an implicit base are documented with this type
        if self.base and self.base.is_implicit():
            self.base.connect_doc(doc)
        for m in self.local_members:
            m.connect_doc(doc)
    def is_implicit(self):
        # See QAPISchema._make_implicit_object_type(), as well as
        # _def_predefineds()
        return self.name.startswith('q_')
    def is_empty(self):
        assert self.members is not None
        return not self.members and not self.variants
    def c_name(self):
        # q_empty must never be named in generated C
        assert self.name != 'q_empty'
        return super().c_name()
    def c_type(self):
        assert not self.is_implicit()
        return c_name(self.name) + POINTER_SUFFIX
    def c_unboxed_type(self):
        return c_name(self.name)
    def json_type(self):
        return 'object'
    def visit(self, visitor):
        super().visit(visitor)
        visitor.visit_object_type(
            self.name, self.info, self.ifcond, self.features,
            self.base, self.local_members, self.variants)
        visitor.visit_object_type_flat(
            self.name, self.info, self.ifcond, self.features,
            self.members, self.variants)
class QAPISchemaAlternateType(QAPISchemaType):
    """An alternate type: branches distinguished by their JSON type."""
    meta = 'alternate'
    def __init__(self, name, info, doc, ifcond, features, variants):
        super().__init__(name, info, doc, ifcond, features)
        assert isinstance(variants, QAPISchemaVariants)
        # Alternates carry an implicit tag member, created by the caller
        assert variants.tag_member
        variants.set_defined_in(name)
        variants.tag_member.set_defined_in(self.name)
        self.variants = variants
    def check(self, schema):
        super().check(schema)
        self.variants.tag_member.check(schema)
        # Not calling self.variants.check_clash(), because there's nothing
        # to clash with
        self.variants.check(schema, {})
        # Alternate branch names have no relation to the tag enum values;
        # so we have to check for potential name collisions ourselves.
        seen = {}
        # Maps each QType constant already claimed to the claiming branch
        types_seen = {}
        for v in self.variants.variants:
            v.check_clash(self.info, seen)
            qtype = v.type.alternate_qtype()
            if not qtype:
                raise QAPISemError(
                    self.info,
                    "%s cannot use %s"
                    % (v.describe(self.info), v.type.describe()))
            conflicting = set([qtype])
            if qtype == 'QTYPE_QSTRING':
                # Strings can also parse as bools or numbers, so a
                # string branch may conflict with those QTypes too
                if isinstance(v.type, QAPISchemaEnumType):
                    for m in v.type.members:
                        if m.name in ['on', 'off']:
                            conflicting.add('QTYPE_QBOOL')
                        if re.match(r'[-+0-9.]', m.name):
                            # lazy, could be tightened
                            conflicting.add('QTYPE_QNUM')
                else:
                    conflicting.add('QTYPE_QNUM')
                    conflicting.add('QTYPE_QBOOL')
            for qt in conflicting:
                if qt in types_seen:
                    raise QAPISemError(
                        self.info,
                        "%s can't be distinguished from '%s'"
                        % (v.describe(self.info), types_seen[qt]))
                types_seen[qt] = v.name
    def connect_doc(self, doc=None):
        super().connect_doc(doc)
        doc = doc or self.doc
        for v in self.variants.variants:
            v.connect_doc(doc)
    def c_type(self):
        return c_name(self.name) + POINTER_SUFFIX
    def json_type(self):
        # An alternate accepts several JSON types
        return 'value'
    def visit(self, visitor):
        super().visit(visitor)
        visitor.visit_alternate_type(
            self.name, self.info, self.ifcond, self.features, self.variants)
class QAPISchemaVariants:
    """The variant part of a union or alternate type."""
    def __init__(self, tag_name, info, tag_member, variants):
        # Unions pass tag_name but not tag_member.
        # Alternates pass tag_member but not tag_name.
        # After check(), tag_member is always set.
        assert bool(tag_member) != bool(tag_name)
        assert (isinstance(tag_name, str) or
                isinstance(tag_member, QAPISchemaObjectTypeMember))
        for v in variants:
            assert isinstance(v, QAPISchemaVariant)
        self._tag_name = tag_name
        self.info = info
        self.tag_member = tag_member
        self.variants = variants
    def set_defined_in(self, name):
        for v in self.variants:
            v.set_defined_in(name)
    def check(self, schema, seen):
        # *seen* maps C names to members inherited so far (the union's
        # base members); empty for alternates
        if self._tag_name:    # union
            # Resolve the discriminator against the base's members
            self.tag_member = seen.get(c_name(self._tag_name))
            base = "'base'"
            # Pointing to the base type when not implicit would be
            # nice, but we don't know it here
            if not self.tag_member or self._tag_name != self.tag_member.name:
                raise QAPISemError(
                    self.info,
                    "discriminator '%s' is not a member of %s"
                    % (self._tag_name, base))
            # Here we do:
            base_type = schema.lookup_type(self.tag_member.defined_in)
            assert base_type
            if not base_type.is_implicit():
                base = "base type '%s'" % self.tag_member.defined_in
            if not isinstance(self.tag_member.type, QAPISchemaEnumType):
                raise QAPISemError(
                    self.info,
                    "discriminator member '%s' of %s must be of enum type"
                    % (self._tag_name, base))
            if self.tag_member.optional:
                raise QAPISemError(
                    self.info,
                    "discriminator member '%s' of %s must not be optional"
                    % (self._tag_name, base))
            if self.tag_member.ifcond.is_present():
                raise QAPISemError(
                    self.info,
                    "discriminator member '%s' of %s must not be conditional"
                    % (self._tag_name, base))
        else:                 # alternate
            # The implicit tag member was built by the schema itself,
            # so these can't fail
            assert isinstance(self.tag_member.type, QAPISchemaEnumType)
            assert not self.tag_member.optional
            assert not self.tag_member.ifcond.is_present()
        if self._tag_name:    # union
            # branches that are not explicitly covered get an empty type
            cases = {v.name for v in self.variants}
            for m in self.tag_member.type.members:
                if m.name not in cases:
                    v = QAPISchemaVariant(m.name, self.info,
                                          'q_empty', m.ifcond)
                    v.set_defined_in(self.tag_member.defined_in)
                    self.variants.append(v)
        if not self.variants:
            raise QAPISemError(self.info, "union has no branches")
        for v in self.variants:
            v.check(schema)
            # Union names must match enum values; alternate names are
            # checked separately. Use 'seen' to tell the two apart.
            if seen:
                if v.name not in self.tag_member.type.member_names():
                    raise QAPISemError(
                        self.info,
                        "branch '%s' is not a value of %s"
                        % (v.name, self.tag_member.type.describe()))
                if (not isinstance(v.type, QAPISchemaObjectType)
                        or v.type.variants):
                    raise QAPISemError(
                        self.info,
                        "%s cannot use %s"
                        % (v.describe(self.info), v.type.describe()))
                v.type.check(schema)
    def check_clash(self, info, seen):
        for v in self.variants:
            # Reset seen map for each variant, since qapi names from one
            # branch do not affect another branch
            v.type.check_clash(info, dict(seen))
class QAPISchemaMember:
    """ Represents object members, enum members and features """
    # Overridden by subclasses to tailor error messages
    role = 'member'
    def __init__(self, name, info, ifcond=None):
        assert isinstance(name, str)
        self.name = name
        self.info = info
        self.ifcond = ifcond or QAPISchemaIfCond()
        # Name of the entity defining this member; set via set_defined_in()
        self.defined_in = None
    def set_defined_in(self, name):
        assert not self.defined_in
        self.defined_in = name
    def check_clash(self, info, seen):
        # Collisions are detected on the mangled C name, not the QAPI name
        cname = c_name(self.name)
        if cname in seen:
            raise QAPISemError(
                info,
                "%s collides with %s"
                % (self.describe(info), seen[cname].describe(info)))
        seen[cname] = self
    def connect_doc(self, doc):
        if doc:
            doc.connect_member(self)
    def describe(self, info):
        """Return a human-readable description for error messages."""
        role = self.role
        defined_in = self.defined_in
        assert defined_in
        if defined_in.startswith('q_obj_'):
            # See QAPISchema._make_implicit_object_type() - reverse the
            # mapping there to create a nice human-readable description
            defined_in = defined_in[6:]
            if defined_in.endswith('-arg'):
                # Implicit type created for a command's dict 'data'
                assert role == 'member'
                role = 'parameter'
            elif defined_in.endswith('-base'):
                # Implicit type created for a union's dict 'base'
                role = 'base ' + role
            else:
                # Anything else would be a use of an implicit object type
                # we don't know how to describe
                assert False
        elif defined_in != info.defn_name:
            # Member inherited from another (named) type
            return "%s '%s' of type '%s'" % (role, self.name, defined_in)
        return "%s '%s'" % (role, self.name)
class QAPISchemaEnumMember(QAPISchemaMember):
    # A value of an enum type; 'role' tailors error messages
    role = 'value'
class QAPISchemaFeature(QAPISchemaMember):
    # A feature flag of an entity or member; 'role' tailors error messages
    role = 'feature'
class QAPISchemaObjectTypeMember(QAPISchemaMember):
    """A member (field) of an object type."""
    def __init__(self, name, info, typ, optional, ifcond=None, features=None):
        super().__init__(name, info, ifcond)
        assert isinstance(typ, str)
        assert isinstance(optional, bool)
        for f in features or []:
            assert isinstance(f, QAPISchemaFeature)
            f.set_defined_in(name)
        # The member's type is resolved lazily, in .check()
        self._type_name = typ
        self.type = None
        self.optional = optional
        self.features = features or []
    def check(self, schema):
        assert self.defined_in
        self.type = schema.resolve_type(self._type_name, self.info,
                                        self.describe)
        # Reject features whose names clash
        seen = {}
        for f in self.features:
            f.check_clash(self.info, seen)
    def connect_doc(self, doc):
        super().connect_doc(doc)
        if doc:
            for f in self.features:
                doc.connect_feature(f)
class QAPISchemaVariant(QAPISchemaObjectTypeMember):
    """A branch of a union or alternate type."""
    role = 'branch'
    def __init__(self, name, info, typ, ifcond=None):
        # Branches are never optional
        super().__init__(name, info, typ, False, ifcond)
class QAPISchemaCommand(QAPISchemaEntity):
    """A QMP command definition."""
    meta = 'command'
    def __init__(self, name, info, doc, ifcond, features,
                 arg_type, ret_type,
                 gen, success_response, boxed, allow_oob, allow_preconfig,
                 coroutine):
        super().__init__(name, info, doc, ifcond, features)
        assert not arg_type or isinstance(arg_type, str)
        assert not ret_type or isinstance(ret_type, str)
        # Argument and return types are resolved lazily, in .check()
        self._arg_type_name = arg_type
        self.arg_type = None
        self._ret_type_name = ret_type
        self.ret_type = None
        self.gen = gen
        self.success_response = success_response
        self.boxed = boxed
        self.allow_oob = allow_oob
        self.allow_preconfig = allow_preconfig
        self.coroutine = coroutine
    def check(self, schema):
        super().check(schema)
        if self._arg_type_name:
            self.arg_type = schema.resolve_type(
                self._arg_type_name, self.info, "command's 'data'")
            if not isinstance(self.arg_type, QAPISchemaObjectType):
                raise QAPISemError(
                    self.info,
                    "command's 'data' cannot take %s"
                    % self.arg_type.describe())
            # A union argument requires boxed marshalling
            if self.arg_type.variants and not self.boxed:
                raise QAPISemError(
                    self.info,
                    "command's 'data' can take %s only with 'boxed': true"
                    % self.arg_type.describe())
        if self._ret_type_name:
            self.ret_type = schema.resolve_type(
                self._ret_type_name, self.info, "command's 'returns'")
            # Returns must be an object (or array of objects), unless
            # the command is grandfathered by pragma
            if self.name not in self.info.pragma.command_returns_exceptions:
                typ = self.ret_type
                if isinstance(typ, QAPISchemaArrayType):
                    typ = self.ret_type.element_type
                    assert typ
                if not isinstance(typ, QAPISchemaObjectType):
                    raise QAPISemError(
                        self.info,
                        "command's 'returns' cannot take %s"
                        % self.ret_type.describe())
    def connect_doc(self, doc=None):
        super().connect_doc(doc)
        doc = doc or self.doc
        if doc:
            # Implicit argument type's members are the command's parameters
            if self.arg_type and self.arg_type.is_implicit():
                self.arg_type.connect_doc(doc)
    def visit(self, visitor):
        super().visit(visitor)
        visitor.visit_command(
            self.name, self.info, self.ifcond, self.features,
            self.arg_type, self.ret_type, self.gen, self.success_response,
            self.boxed, self.allow_oob, self.allow_preconfig,
            self.coroutine)
class QAPISchemaEvent(QAPISchemaEntity):
    """A QMP event definition."""
    meta = 'event'
    def __init__(self, name, info, doc, ifcond, features, arg_type, boxed):
        super().__init__(name, info, doc, ifcond, features)
        assert not arg_type or isinstance(arg_type, str)
        # Argument type is resolved lazily, in .check()
        self._arg_type_name = arg_type
        self.arg_type = None
        self.boxed = boxed
    def check(self, schema):
        super().check(schema)
        if self._arg_type_name:
            self.arg_type = schema.resolve_type(
                self._arg_type_name, self.info, "event's 'data'")
            if not isinstance(self.arg_type, QAPISchemaObjectType):
                raise QAPISemError(
                    self.info,
                    "event's 'data' cannot take %s"
                    % self.arg_type.describe())
            # A union argument requires boxed marshalling
            if self.arg_type.variants and not self.boxed:
                raise QAPISemError(
                    self.info,
                    "event's 'data' can take %s only with 'boxed': true"
                    % self.arg_type.describe())
    def connect_doc(self, doc=None):
        super().connect_doc(doc)
        doc = doc or self.doc
        if doc:
            # Implicit argument type's members are the event's arguments
            if self.arg_type and self.arg_type.is_implicit():
                self.arg_type.connect_doc(doc)
    def visit(self, visitor):
        super().visit(visitor)
        visitor.visit_event(
            self.name, self.info, self.ifcond, self.features,
            self.arg_type, self.boxed)
class QAPISchema:
    """
    The QAPI schema: parses schema file *fname*, defines all entities,
    and checks them.  After construction, use .visit() to walk them.
    """
    def __init__(self, fname):
        self.fname = fname
        try:
            parser = QAPISchemaParser(fname)
        except OSError as err:
            raise QAPIError(
                f"can't read schema file '{fname}': {err.strerror}"
            ) from err
        exprs = check_exprs(parser.exprs)
        self.docs = parser.docs
        self._entity_list = []
        self._entity_dict = {}
        self._module_dict = OrderedDict()
        self._schema_dir = os.path.dirname(fname)
        self._make_module(QAPISchemaModule.BUILTIN_MODULE_NAME)
        self._make_module(fname)
        # Builtins are defined first, with self._predefining set so
        # _def_entity() accepts their lack of info
        self._predefining = True
        self._def_predefineds()
        self._predefining = False
        self._def_exprs(exprs)
        self.check()
    def _def_entity(self, ent):
        """Define entity *ent*; reject duplicate names."""
        # Only the predefined types are allowed to not have info
        assert ent.info or self._predefining
        self._entity_list.append(ent)
        if ent.name is None:
            return
        # TODO reject names that differ only in '_' vs. '.' vs. '-',
        # because they're liable to clash in generated C.
        other_ent = self._entity_dict.get(ent.name)
        if other_ent:
            if other_ent.info:
                where = QAPISourceError(other_ent.info, "previous definition")
                raise QAPISemError(
                    ent.info,
                    "'%s' is already defined\n%s" % (ent.name, where))
            raise QAPISemError(
                ent.info, "%s is already defined" % other_ent.describe())
        self._entity_dict[ent.name] = ent
    def lookup_entity(self, name, typ=None):
        """Return the entity named *name* (of type *typ*, if given), or None."""
        ent = self._entity_dict.get(name)
        if typ and not isinstance(ent, typ):
            return None
        return ent
    def lookup_type(self, name):
        return self.lookup_entity(name, QAPISchemaType)
    def resolve_type(self, name, info, what):
        """Return the type named *name*; error out via *what* if unknown."""
        typ = self.lookup_type(name)
        if not typ:
            # *what* may be a callable producing the error description
            if callable(what):
                what = what(info)
            raise QAPISemError(
                info, "%s uses unknown type '%s'" % (what, name))
        return typ
    def _module_name(self, fname: str) -> str:
        # System module names are used as is; user module names are
        # relative to the main schema file's directory
        if QAPISchemaModule.is_system_module(fname):
            return fname
        return os.path.relpath(fname, self._schema_dir)
    def _make_module(self, fname):
        # Return the module for *fname*, creating it if necessary
        name = self._module_name(fname)
        if name not in self._module_dict:
            self._module_dict[name] = QAPISchemaModule(name)
        return self._module_dict[name]
    def module_by_fname(self, fname):
        name = self._module_name(fname)
        return self._module_dict[name]
    def _def_include(self, expr, info, doc):
        include = expr['include']
        assert doc is None
        self._def_entity(QAPISchemaInclude(self._make_module(include), info))
    def _def_builtin_type(self, name, json_type, c_type):
        self._def_entity(QAPISchemaBuiltinType(name, json_type, c_type))
        # Instantiating only the arrays that are actually used would
        # be nice, but we can't as long as their generated code
        # (qapi-builtin-types.[ch]) may be shared by some other
        # schema.
        self._make_array_type(name, None)
    def _def_predefineds(self):
        """Define the built-in types, q_empty, and the QType enum."""
        for t in [('str',    'string',  'char' + POINTER_SUFFIX),
                  ('number', 'number',  'double'),
                  ('int',    'int',     'int64_t'),
                  ('int8',   'int',     'int8_t'),
                  ('int16',  'int',     'int16_t'),
                  ('int32',  'int',     'int32_t'),
                  ('int64',  'int',     'int64_t'),
                  ('uint8',  'int',     'uint8_t'),
                  ('uint16', 'int',     'uint16_t'),
                  ('uint32', 'int',     'uint32_t'),
                  ('uint64', 'int',     'uint64_t'),
                  ('size',   'int',     'uint64_t'),
                  ('bool',   'boolean', 'bool'),
                  ('any',    'value',   'QObject' + POINTER_SUFFIX),
                  ('null',   'null',    'QNull' + POINTER_SUFFIX)]:
            self._def_builtin_type(*t)
        self.the_empty_object_type = QAPISchemaObjectType(
            'q_empty', None, None, None, None, None, [], None)
        self._def_entity(self.the_empty_object_type)
        qtypes = ['none', 'qnull', 'qnum', 'qstring', 'qdict', 'qlist',
                  'qbool']
        qtype_values = self._make_enum_members(
            [{'name': n} for n in qtypes], None)
        self._def_entity(QAPISchemaEnumType('QType', None, None, None, None,
                                            qtype_values, 'QTYPE'))
    def _make_features(self, features, info):
        if features is None:
            return []
        return [QAPISchemaFeature(f['name'], info,
                                  QAPISchemaIfCond(f.get('if')))
                for f in features]
    def _make_enum_members(self, values, info):
        return [QAPISchemaEnumMember(v['name'], info,
                                     QAPISchemaIfCond(v.get('if')))
                for v in values]
    def _make_array_type(self, element_type, info):
        # Define the array type on first use, and return its name
        name = element_type + 'List'   # reserved by check_defn_name_str()
        if not self.lookup_type(name):
            self._def_entity(QAPISchemaArrayType(name, info, element_type))
        return name
    def _make_implicit_object_type(self, name, info, ifcond, role, members):
        # Define the implicit object type for a command/event 'data'
        # or a union 'base', and return its name (None if memberless)
        if not members:
            return None
        # See also QAPISchemaObjectTypeMember.describe()
        name = 'q_obj_%s-%s' % (name, role)
        typ = self.lookup_entity(name, QAPISchemaObjectType)
        if typ:
            # The implicit object type has multiple users.  This can
            # only be a duplicate definition, which will be flagged
            # later.
            pass
        else:
            self._def_entity(QAPISchemaObjectType(
                name, info, None, ifcond, None, None, members, None))
        return name
    def _def_enum_type(self, expr, info, doc):
        name = expr['enum']
        data = expr['data']
        prefix = expr.get('prefix')
        ifcond = QAPISchemaIfCond(expr.get('if'))
        features = self._make_features(expr.get('features'), info)
        self._def_entity(QAPISchemaEnumType(
            name, info, doc, ifcond, features,
            self._make_enum_members(data, info), prefix))
    def _make_member(self, name, typ, ifcond, features, info):
        # A leading '*' marks the member optional
        optional = False
        if name.startswith('*'):
            name = name[1:]
            optional = True
        # A single-element list is array-of-that-element sugar
        if isinstance(typ, list):
            assert len(typ) == 1
            typ = self._make_array_type(typ[0], info)
        return QAPISchemaObjectTypeMember(name, info, typ, optional, ifcond,
                                          self._make_features(features, info))
    def _make_members(self, data, info):
        return [self._make_member(key, value['type'],
                                  QAPISchemaIfCond(value.get('if')),
                                  value.get('features'), info)
                for (key, value) in data.items()]
    def _def_struct_type(self, expr, info, doc):
        name = expr['struct']
        base = expr.get('base')
        data = expr['data']
        ifcond = QAPISchemaIfCond(expr.get('if'))
        features = self._make_features(expr.get('features'), info)
        self._def_entity(QAPISchemaObjectType(
            name, info, doc, ifcond, features, base,
            self._make_members(data, info),
            None))
    def _make_variant(self, case, typ, ifcond, info):
        return QAPISchemaVariant(case, info, typ, ifcond)
    def _def_union_type(self, expr, info, doc):
        name = expr['union']
        base = expr['base']
        tag_name = expr['discriminator']
        data = expr['data']
        ifcond = QAPISchemaIfCond(expr.get('if'))
        features = self._make_features(expr.get('features'), info)
        # An inline base dict becomes an implicit object type
        if isinstance(base, dict):
            base = self._make_implicit_object_type(
                name, info, ifcond,
                'base', self._make_members(base, info))
        variants = [
            self._make_variant(key, value['type'],
                               QAPISchemaIfCond(value.get('if')),
                               info)
            for (key, value) in data.items()]
        members = []
        self._def_entity(
            QAPISchemaObjectType(name, info, doc, ifcond, features,
                                 base, members,
                                 QAPISchemaVariants(
                                     tag_name, info, None, variants)))
    def _def_alternate_type(self, expr, info, doc):
        name = expr['alternate']
        data = expr['data']
        ifcond = QAPISchemaIfCond(expr.get('if'))
        features = self._make_features(expr.get('features'), info)
        variants = [
            self._make_variant(key, value['type'],
                               QAPISchemaIfCond(value.get('if')),
                               info)
            for (key, value) in data.items()]
        # Alternates get an implicit 'type' tag member of enum type QType
        tag_member = QAPISchemaObjectTypeMember('type', info, 'QType', False)
        self._def_entity(
            QAPISchemaAlternateType(name, info, doc, ifcond, features,
                                    QAPISchemaVariants(
                                        None, info, tag_member, variants)))
    def _def_command(self, expr, info, doc):
        name = expr['command']
        data = expr.get('data')
        rets = expr.get('returns')
        gen = expr.get('gen', True)
        success_response = expr.get('success-response', True)
        boxed = expr.get('boxed', False)
        allow_oob = expr.get('allow-oob', False)
        allow_preconfig = expr.get('allow-preconfig', False)
        coroutine = expr.get('coroutine', False)
        ifcond = QAPISchemaIfCond(expr.get('if'))
        features = self._make_features(expr.get('features'), info)
        # An inline 'data' dict becomes an implicit argument type
        if isinstance(data, OrderedDict):
            data = self._make_implicit_object_type(
                name, info, ifcond,
                'arg', self._make_members(data, info))
        # A single-element 'returns' list is array sugar
        if isinstance(rets, list):
            assert len(rets) == 1
            rets = self._make_array_type(rets[0], info)
        self._def_entity(QAPISchemaCommand(name, info, doc, ifcond, features,
                                           data, rets,
                                           gen, success_response,
                                           boxed, allow_oob, allow_preconfig,
                                           coroutine))
    def _def_event(self, expr, info, doc):
        name = expr['event']
        data = expr.get('data')
        boxed = expr.get('boxed', False)
        ifcond = QAPISchemaIfCond(expr.get('if'))
        features = self._make_features(expr.get('features'), info)
        # An inline 'data' dict becomes an implicit argument type
        if isinstance(data, OrderedDict):
            data = self._make_implicit_object_type(
                name, info, ifcond,
                'arg', self._make_members(data, info))
        self._def_entity(QAPISchemaEvent(name, info, doc, ifcond, features,
                                         data, boxed))
    def _def_exprs(self, exprs):
        # Dispatch each top-level expression on its defining key
        for expr_elem in exprs:
            expr = expr_elem['expr']
            info = expr_elem['info']
            doc = expr_elem.get('doc')
            if 'enum' in expr:
                self._def_enum_type(expr, info, doc)
            elif 'struct' in expr:
                self._def_struct_type(expr, info, doc)
            elif 'union' in expr:
                self._def_union_type(expr, info, doc)
            elif 'alternate' in expr:
                self._def_alternate_type(expr, info, doc)
            elif 'command' in expr:
                self._def_command(expr, info, doc)
            elif 'event' in expr:
                self._def_event(expr, info, doc)
            elif 'include' in expr:
                self._def_include(expr, info, doc)
            else:
                assert False
    def check(self):
        """Check all entities, then assign each to its module."""
        for ent in self._entity_list:
            ent.check(self)
            ent.connect_doc()
            ent.check_doc()
        for ent in self._entity_list:
            ent.set_module(self)
    def visit(self, visitor):
        """Visit all modules (and their entities) with *visitor*."""
        visitor.visit_begin(self)
        for mod in self._module_dict.values():
            mod.visit(visitor)
        visitor.visit_end()
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qapi/schema.py
|
# -*- coding: utf-8 -*-
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013-2021 Red Hat Inc.
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Markus Armbruster <armbru@redhat.com>
# Eric Blake <eblake@redhat.com>
# Marc-André Lureau <marcandre.lureau@redhat.com>
# John Snow <jsnow@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
"""
Normalize and validate (context-free) QAPI schema expression structures.
`QAPISchemaParser` parses a QAPI schema into abstract syntax trees
consisting of dict, list, str, bool, and int nodes. This module ensures
that these nested structures have the correct type(s) and key(s) where
appropriate for the QAPI context-free grammar.
The QAPI schema expression language allows for certain syntactic sugar;
this module also handles the normalization process of these nested
structures.
See `check_exprs` for the main entry point.
See `schema.QAPISchema` for processing into native Python data
structures and contextual semantic validation.
"""
import re
from typing import (
Collection,
Dict,
Iterable,
List,
Optional,
Union,
cast,
)
from .common import c_name
from .error import QAPISemError
from .parser import QAPIDoc
from .source import QAPISourceInfo
# Deserialized JSON objects as returned by the parser.
# The values of this mapping are not necessary to exhaustively type
# here (and also not practical as long as mypy lacks recursive
# types), because the purpose of this module is to interrogate that
# type.
_JSONObject = Dict[str, object]
# See check_name_str(), below.
# Group 1: optional downstream prefix of the form __RFQDN_
# Group 2: optional experimental prefix 'x-'
# Group 3: the name stem
valid_name = re.compile(r'(__[a-z0-9.-]+_)?'
                        r'(x-)?'
                        r'([a-z][a-z0-9_-]*)$', re.IGNORECASE)
def check_name_is_str(name: object,
                      info: QAPISourceInfo,
                      source: str) -> None:
    """
    Ensure that ``name`` is a ``str``.

    :raise QAPISemError: When ``name`` fails validation.
    """
    if isinstance(name, str):
        return
    raise QAPISemError(info, "%s requires a string name" % source)
def check_name_str(name: str, info: QAPISourceInfo, source: str) -> str:
    """
    Ensure that ``name`` is a valid QAPI name.

    A valid name is ASCII letters, digits, ``-`` and ``_``, starting
    with a letter, optionally preceded by a downstream ``__RFQDN_``
    prefix and/or the experimental ``x-`` prefix (in that order).

    :param name: Name to check.
    :param info: QAPI schema source file information.
    :param source: Error string describing what ``name`` belongs to.

    :raise QAPISemError: When ``name`` fails validation.
    :return: The stem of the valid name, with no prefixes.
    """
    # Reserve the entire 'q_' namespace for c_name(), and for 'q_empty'
    # and 'q_obj_*' implicit type names.
    match = valid_name.match(name)
    if match and not c_name(name, False).startswith('q_'):
        return match.group(3)
    raise QAPISemError(info, "%s has an invalid name" % source)
def check_name_upper(name: str, info: QAPISourceInfo, source: str) -> None:
    """
    Ensure that ``name`` is a valid event name.

    Valid QAPI name (see `check_name_str()`) whose stem contains
    neither lowercase characters nor ``-``.

    :param name: Name to check.
    :param info: QAPI schema source file information.
    :param source: Error string describing what ``name`` belongs to.

    :raise QAPISemError: When ``name`` fails validation.
    """
    stem = check_name_str(name, info, source)
    if not re.search(r'[a-z-]', stem):
        return
    raise QAPISemError(
        info, "name of %s must not use lowercase or '-'" % source)
def check_name_lower(name: str, info: QAPISourceInfo, source: str,
                     permit_upper: bool = False,
                     permit_underscore: bool = False) -> None:
    """
    Ensure that ``name`` is a valid command or member name.

    Valid QAPI name (see `check_name_str()`) whose stem contains
    neither uppercase characters nor ``_``, unless the corresponding
    ``permit_*`` flag allows them.

    :param name: Name to check.
    :param info: QAPI schema source file information.
    :param source: Error string describing what ``name`` belongs to.
    :param permit_upper: Additionally permit uppercase.
    :param permit_underscore: Additionally permit ``_``.

    :raise QAPISemError: When ``name`` fails validation.
    """
    stem = check_name_str(name, info, source)
    bad_upper = not permit_upper and re.search(r'[A-Z]', stem)
    bad_underscore = not permit_underscore and '_' in stem
    if bad_upper or bad_underscore:
        raise QAPISemError(
            info, "name of %s must not use uppercase or '_'" % source)
def check_name_camel(name: str, info: QAPISourceInfo, source: str) -> None:
    """
    Ensure that ``name`` is a valid user-defined type name.

    Type names must pass `check_name_str()`, and additionally their
    stem must be in CamelCase: leading uppercase, at least one
    lowercase character, alphanumerics only.

    :param name: Name to check.
    :param info: QAPI schema source file information.
    :param source: Error string describing what ``name`` belongs to.

    :raise QAPISemError: When ``name`` fails validation.
    """
    stem = check_name_str(name, info, source)
    camel = re.match(r'[A-Z][A-Za-z0-9]*[a-z][A-Za-z0-9]*$', stem)
    if camel is None:
        raise QAPISemError(info, "name of %s must use CamelCase" % source)
def check_defn_name_str(name: str, info: QAPISourceInfo, meta: str) -> None:
    """
    Ensure that ``name`` is a valid definition name.

    Depending on ``meta``:

    - 'event' names adhere to `check_name_upper()`.
    - 'command' names adhere to `check_name_lower()`.
    - Any other meta is a type, and must pass `check_name_camel()`.

    No definition name may end with ``List``.

    :param name: Name to check.
    :param info: QAPI schema source file information.
    :param meta: Meta-type name of the QAPI expression.

    :raise QAPISemError: When ``name`` fails validation.
    """
    if meta == 'event':
        check_name_upper(name, info, meta)
    elif meta == 'command':
        # Pragma may exempt specific commands from the no-underscore rule.
        permissive = name in info.pragma.command_name_exceptions
        check_name_lower(name, info, meta, permit_underscore=permissive)
    else:
        check_name_camel(name, info, meta)

    if name.endswith('List'):
        raise QAPISemError(
            info, "%s name should not end in 'List'" % meta)
def check_keys(value: _JSONObject,
               info: QAPISourceInfo,
               source: str,
               required: Collection[str],
               optional: Collection[str]) -> None:
    """
    Ensure that a dict has a specific set of keys.

    :param value: The dict to check.
    :param info: QAPI schema source file information.
    :param source: Error string describing this ``value``.
    :param required: Keys that *must* be present.
    :param optional: Keys that *may* be present.

    :raise QAPISemError: When required keys are missing or unknown
        keys are present.
    """
    def pprint(elems: Iterable[str]) -> str:
        # Render a key set as a stable, quoted, comma-separated list.
        return ', '.join("'{}'".format(e) for e in sorted(elems))

    missing = set(required) - set(value)
    if missing:
        plural = 's' if len(missing) > 1 else ''
        raise QAPISemError(
            info,
            "%s misses key%s %s" % (source, plural, pprint(missing)))

    allowed = set(required) | set(optional)
    unknown = set(value) - allowed
    if unknown:
        plural = 's' if len(unknown) > 1 else ''
        raise QAPISemError(
            info,
            "%s has unknown key%s %s\nValid keys are %s."
            % (source, plural, pprint(unknown), pprint(allowed)))
def check_flags(expr: _JSONObject, info: QAPISourceInfo) -> None:
    """
    Ensure flag members (if present) have valid values.

    :param expr: The expression to validate.
    :param info: QAPI schema source file information.

    :raise QAPISemError:
        When certain flags have an invalid value, or when
        incompatible flags are present.
    """
    # Each flag may only carry one specific boolean literal.
    for key, only in (('gen', False), ('success-response', False),
                      ('boxed', True), ('allow-oob', True),
                      ('allow-preconfig', True), ('coroutine', True)):
        if key in expr and expr[key] is not only:
            raise QAPISemError(
                info, "flag '%s' may only use %s value"
                % (key, 'true' if only else 'false'))

    if 'allow-oob' in expr and 'coroutine' in expr:
        # This is not necessarily a fundamental incompatibility, but
        # we don't have a use case and the desired semantics isn't
        # obvious.  The simplest solution is to forbid it until we get
        # a use case for it.
        raise QAPISemError(info, "flags 'allow-oob' and 'coroutine' "
                           "are incompatible")
def check_if(expr: _JSONObject, info: QAPISourceInfo, source: str) -> None:
    """
    Validate the ``if`` member of an object.

    The ``if`` member may be either a ``str`` or a dict built from the
    operators ``all``, ``any`` and ``not``.

    :param expr: The expression containing the ``if`` member to validate.
    :param info: QAPI schema source file information.
    :param source: Error string describing ``expr``.

    :raise QAPISemError:
        When the "if" member fails validation, or when there are no
        non-empty conditions.
    :return: None
    """
    def _check_if(cond: Union[str, object]) -> None:
        # A plain string must be a C-preprocessor-style identifier.
        if isinstance(cond, str):
            if not re.fullmatch(r'[A-Z][A-Z0-9_]*', cond):
                raise QAPISemError(
                    info,
                    "'if' condition '%s' of %s is not a valid identifier"
                    % (cond, source))
            return

        if not isinstance(cond, dict):
            raise QAPISemError(
                info,
                "'if' condition of %s must be a string or an object" % source)
        check_keys(cond, info, "'if' condition of %s" % source, [],
                   ["all", "any", "not"])
        # Exactly one operator key per dict level; check_keys() has
        # already rejected keys outside {'all', 'any', 'not'}.
        if len(cond) != 1:
            raise QAPISemError(
                info,
                "'if' condition of %s has conflicting keys" % source)

        # Recurse into the operand(s) of the single operator present.
        if 'not' in cond:
            _check_if(cond['not'])
        elif 'all' in cond:
            _check_infix('all', cond['all'])
        else:
            _check_infix('any', cond['any'])

    def _check_infix(operator: str, operands: object) -> None:
        # 'all' / 'any' take a non-empty list of sub-conditions.
        if not isinstance(operands, list):
            raise QAPISemError(
                info,
                "'%s' condition of %s must be an array"
                % (operator, source))
        if not operands:
            raise QAPISemError(
                info, "'if' condition [] of %s is useless" % source)
        for operand in operands:
            _check_if(operand)

    ifcond = expr.get('if')
    # Absent 'if' means unconditional; nothing to validate.
    if ifcond is None:
        return

    _check_if(ifcond)
def normalize_members(members: object) -> None:
    """
    Normalize a "members" value.

    If ``members`` is a dict, replace every value in that dict that is
    not itself a dict with ``{'type': value}``.  Any other ``members``
    value is left untouched.

    :forms:
      :sugared: ``Dict[str, Union[str, TypeRef]]``
      :canonical: ``Dict[str, TypeRef]``

    :param members: The members value to normalize.

    :return: None, ``members`` is normalized in-place as needed.
    """
    if not isinstance(members, dict):
        return
    for key, arg in members.items():
        if not isinstance(arg, dict):
            members[key] = {'type': arg}
def check_type(value: Optional[object],
               info: QAPISourceInfo,
               source: str,
               allow_array: bool = False,
               allow_dict: Union[bool, str] = False) -> None:
    """
    Normalize and validate the QAPI type of ``value``.

    Python types of ``str`` or ``None`` are always allowed.

    :param value: The value to check.
    :param info: QAPI schema source file information.
    :param source: Error string describing this ``value``.
    :param allow_array:
        Allow a ``List[str]`` of length 1, which indicates an array of
        the type named by the list element.
    :param allow_dict:
        Allow a dict.  Its members can be struct type members or union
        branches.  When the value of ``allow_dict`` is in pragma
        ``member-name-exceptions``, the dict's keys may violate the
        member naming rules.  The dict members are normalized in place.

    :raise QAPISemError: When ``value`` fails validation.
    :return: None, ``value`` is normalized in-place as needed.
    """
    if value is None:
        return

    # A bare string names an existing type.
    if isinstance(value, str):
        return

    # A single-element list names an array of the element type.
    if isinstance(value, list):
        if not allow_array:
            raise QAPISemError(info, "%s cannot be an array" % source)
        if len(value) != 1 or not isinstance(value[0], str):
            raise QAPISemError(info,
                               "%s: array type must contain single type name" %
                               source)
        return

    # Anything else must be an anonymous struct-like object.
    if not allow_dict:
        raise QAPISemError(info, "%s should be a type name" % source)
    if not isinstance(value, dict):
        raise QAPISemError(info,
                           "%s should be an object or type name" % source)

    permissive = (isinstance(allow_dict, str)
                  and allow_dict in info.pragma.member_name_exceptions)

    # value is a dictionary, check that each member is okay
    for (key, arg) in value.items():
        key_source = "%s member '%s'" % (source, key)
        # A leading '*' marks the member optional; strip it for checks.
        member_name = key[1:] if key.startswith('*') else key
        check_name_lower(member_name, info, key_source,
                         permit_upper=permissive,
                         permit_underscore=permissive)
        c_key = c_name(member_name, False)
        if c_key == 'u' or c_key.startswith('has_'):
            raise QAPISemError(info, "%s uses reserved name" % key_source)
        check_keys(arg, info, key_source, ['type'], ['if', 'features'])
        check_if(arg, info, key_source)
        check_features(arg.get('features'), info)
        check_type(arg['type'], info, key_source, allow_array=True)
def check_features(features: Optional[object],
                   info: QAPISourceInfo) -> None:
    """
    Normalize and validate the ``features`` member.

    ``features`` may be a ``list`` of either ``str`` or ``dict``.
    Any ``str`` element will be normalized to ``{'name': element}``.

    :forms:
      :sugared: ``List[Union[str, Feature]]``
      :canonical: ``List[Feature]``

    :param features: The features member value to validate.
    :param info: QAPI schema source file information.

    :raise QAPISemError: When ``features`` fails validation.
    :return: None, ``features`` is normalized in-place as needed.
    """
    if features is None:
        return
    if not isinstance(features, list):
        raise QAPISemError(info, "'features' must be an array")

    # Desugar: a bare string feature becomes {'name': string}.
    features[:] = [feat if isinstance(feat, dict) else {'name': feat}
                   for feat in features]

    for feat in features:
        source = "'features' member"
        assert isinstance(feat, dict)
        check_keys(feat, info, source, ['name'], ['if'])
        check_name_is_str(feat['name'], info, source)
        source = "%s '%s'" % (source, feat['name'])
        check_name_str(feat['name'], info, source)
        check_if(feat, info, source)
def check_enum(expr: _JSONObject, info: QAPISourceInfo) -> None:
    """
    Normalize and validate this expression as an ``enum`` definition.

    :param expr: The expression to validate.
    :param info: QAPI schema source file information.

    :raise QAPISemError: When ``expr`` is not a valid ``enum``.
    :return: None, ``expr`` is normalized in-place as needed.
    """
    name = expr['enum']
    members = expr['data']
    prefix = expr.get('prefix')

    if not isinstance(members, list):
        raise QAPISemError(info, "'data' must be an array")
    if prefix is not None and not isinstance(prefix, str):
        raise QAPISemError(info, "'prefix' must be a string")

    permissive = name in info.pragma.member_name_exceptions

    # Desugar: a bare string member becomes {'name': string}.
    members[:] = [memb if isinstance(memb, dict) else {'name': memb}
                  for memb in members]

    for memb in members:
        source = "'data' member"
        check_keys(memb, info, source, ['name'], ['if'])
        member_name = memb['name']
        check_name_is_str(member_name, info, source)
        source = "%s '%s'" % (source, member_name)
        # Enum members may start with a digit
        if member_name[0].isdigit():
            member_name = 'd' + member_name  # Hack: hide the digit
        check_name_lower(member_name, info, source,
                         permit_upper=permissive,
                         permit_underscore=permissive)
        check_if(memb, info, source)
def check_struct(expr: _JSONObject, info: QAPISourceInfo) -> None:
    """
    Normalize and validate this expression as a ``struct`` definition.

    :param expr: The expression to validate.
    :param info: QAPI schema source file information.

    :raise QAPISemError: When ``expr`` is not a valid ``struct``.
    :return: None, ``expr`` is normalized in-place as needed.
    """
    name = cast(str, expr['struct'])  # Checked in check_exprs
    # Member dicts of this struct may use the member-name-exceptions
    # pragma, hence allow_dict=name.
    check_type(expr['data'], info, "'data'", allow_dict=name)
    check_type(expr.get('base'), info, "'base'")
def check_union(expr: _JSONObject, info: QAPISourceInfo) -> None:
    """
    Normalize and validate this expression as a ``union`` definition.

    :param expr: The expression to validate.
    :param info: QAPI schema source file information.

    :raise QAPISemError: when ``expr`` is not a valid ``union``.
    :return: None, ``expr`` is normalized in-place as needed.
    """
    name = cast(str, expr['union'])  # Checked in check_exprs
    base = expr['base']
    discriminator = expr['discriminator']
    members = expr['data']

    check_type(base, info, "'base'", allow_dict=name)
    check_name_is_str(discriminator, info, "'discriminator'")

    if not isinstance(members, dict):
        raise QAPISemError(info, "'data' must be an object")

    for (key, branch) in members.items():
        source = "'data' member '%s'" % key
        check_keys(branch, info, source, ['type'], ['if'])
        check_if(branch, info, source)
        # Only unions with an anonymous base may use array branches.
        check_type(branch['type'], info, source, allow_array=not base)
def check_alternate(expr: _JSONObject, info: QAPISourceInfo) -> None:
    """
    Normalize and validate this expression as an ``alternate`` definition.

    :param expr: The expression to validate.
    :param info: QAPI schema source file information.

    :raise QAPISemError: When ``expr`` is not a valid ``alternate``.
    :return: None, ``expr`` is normalized in-place as needed.
    """
    members = expr['data']

    if not members:
        raise QAPISemError(info, "'data' must not be empty")
    if not isinstance(members, dict):
        raise QAPISemError(info, "'data' must be an object")

    for (key, branch) in members.items():
        source = "'data' member '%s'" % key
        check_name_lower(key, info, source)
        check_keys(branch, info, source, ['type'], ['if'])
        check_if(branch, info, source)
        check_type(branch['type'], info, source)
def check_command(expr: _JSONObject, info: QAPISourceInfo) -> None:
    """
    Normalize and validate this expression as a ``command`` definition.

    :param expr: The expression to validate.
    :param info: QAPI schema source file information.

    :raise QAPISemError: When ``expr`` is not a valid ``command``.
    :return: None, ``expr`` is normalized in-place as needed.
    """
    boxed = expr.get('boxed', False)
    args = expr.get('data')

    if boxed and args is None:
        raise QAPISemError(info, "'boxed': true requires 'data'")
    # A boxed command names its argument type; an unboxed one may
    # define the arguments inline as a dict.
    check_type(args, info, "'data'", allow_dict=not boxed)
    check_type(expr.get('returns'), info, "'returns'", allow_array=True)
def check_event(expr: _JSONObject, info: QAPISourceInfo) -> None:
    """
    Normalize and validate this expression as an ``event`` definition.

    :param expr: The expression to validate.
    :param info: QAPI schema source file information.

    :raise QAPISemError: When ``expr`` is not a valid ``event``.
    :return: None, ``expr`` is normalized in-place as needed.
    """
    boxed = expr.get('boxed', False)
    args = expr.get('data')

    if boxed and args is None:
        raise QAPISemError(info, "'boxed': true requires 'data'")
    # A boxed event names its argument type; an unboxed one may define
    # the arguments inline as a dict.
    check_type(args, info, "'data'", allow_dict=not boxed)
def check_exprs(exprs: List[_JSONObject]) -> List[_JSONObject]:
    """
    Validate and normalize a list of parsed QAPI schema expressions.

    This function accepts a list of expressions and metadata as returned
    by the parser.  It destructively normalizes the expressions in-place.

    :param exprs: The list of expressions to normalize and validate.

    :raise QAPISemError: When any expression fails validation.
    :return: The same list of expressions (now modified).
    """
    for expr_elem in exprs:
        # Expression
        assert isinstance(expr_elem['expr'], dict)
        for key in expr_elem['expr'].keys():
            assert isinstance(key, str)
        expr: _JSONObject = expr_elem['expr']
        # QAPISourceInfo
        assert isinstance(expr_elem['info'], QAPISourceInfo)
        info: QAPISourceInfo = expr_elem['info']
        # Optional[QAPIDoc]
        tmp = expr_elem.get('doc')
        assert tmp is None or isinstance(tmp, QAPIDoc)
        doc: Optional[QAPIDoc] = tmp
        # Include directives carry no definition; nothing to validate.
        if 'include' in expr:
            continue
        # Exactly one meta-type key identifies what kind of definition
        # this expression is.
        metas = expr.keys() & {'enum', 'struct', 'union', 'alternate',
                               'command', 'event'}
        if len(metas) != 1:
            raise QAPISemError(
                info,
                "expression must have exactly one key"
                " 'enum', 'struct', 'union', 'alternate',"
                " 'command', 'event'")
        meta = metas.pop()
        # The value of the meta key is the definition's name.
        check_name_is_str(expr[meta], info, "'%s'" % meta)
        name = cast(str, expr[meta])
        info.set_defn(meta, name)
        check_defn_name_str(name, info, meta)
        # Cross-check the doc comment (or enforce its presence).
        if doc:
            if doc.symbol != name:
                raise QAPISemError(
                    info, "documentation comment is for '%s'" % doc.symbol)
            doc.check_expr(expr)
        elif info.pragma.doc_required:
            raise QAPISemError(info,
                               "documentation comment required")
        # Per-meta-type key schema, sugar normalization and validation.
        if meta == 'enum':
            check_keys(expr, info, meta,
                       ['enum', 'data'], ['if', 'features', 'prefix'])
            check_enum(expr, info)
        elif meta == 'union':
            check_keys(expr, info, meta,
                       ['union', 'base', 'discriminator', 'data'],
                       ['if', 'features'])
            normalize_members(expr.get('base'))
            normalize_members(expr['data'])
            check_union(expr, info)
        elif meta == 'alternate':
            check_keys(expr, info, meta,
                       ['alternate', 'data'], ['if', 'features'])
            normalize_members(expr['data'])
            check_alternate(expr, info)
        elif meta == 'struct':
            check_keys(expr, info, meta,
                       ['struct', 'data'], ['base', 'if', 'features'])
            normalize_members(expr['data'])
            check_struct(expr, info)
        elif meta == 'command':
            check_keys(expr, info, meta,
                       ['command'],
                       ['data', 'returns', 'boxed', 'if', 'features',
                        'gen', 'success-response', 'allow-oob',
                        'allow-preconfig', 'coroutine'])
            normalize_members(expr.get('data'))
            check_command(expr, info)
        elif meta == 'event':
            check_keys(expr, info, meta,
                       ['event'], ['data', 'boxed', 'if', 'features'])
            normalize_members(expr.get('data'))
            check_event(expr, info)
        else:
            assert False, 'unexpected meta type'
        # Checks common to all definition kinds.
        check_if(expr, info, meta)
        check_features(expr.get('features'), info)
        check_flags(expr, info)
    return exprs
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qapi/expr.py
|
# -*- coding: utf-8 -*-
"""
Generic management for the 'vcpu' property.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2016, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import Arguments, try_import
def transform_event(event):
    """Transform event to comply with the 'vcpu' property (if present).

    Prepends the implicit CPU pointer argument and a "cpu=%p" prefix to
    the format string(s) of events carrying the 'vcpu' property.
    """
    if "vcpu" not in event.properties:
        return event

    # events with 'tcg-trans' and 'tcg-exec' are auto-generated from
    # already-patched events
    assert "tcg-trans" not in event.properties
    assert "tcg-exec" not in event.properties

    event.args = Arguments([("void *", "__cpu"), event.args])
    prefix = "\"cpu=%p \""
    if "tcg" in event.properties:
        # Two format strings: translation-time and execution-time.
        event.fmt = [prefix + event.fmt[0],
                     prefix + event.fmt[1]]
    else:
        event.fmt = prefix + event.fmt
    return event
def transform_args(format, event, *args, **kwargs):
    """Transforms the arguments to suit the specified format.

    The format module must implement function 'vcpu_transform_args',
    which receives the implicit arguments added by the 'vcpu' property,
    and must return suitable arguments for the given format.

    The function is only called for events with the 'vcpu' property.

    Parameters
    ==========
    format : str
        Format module name.
    event : Event
    args, kwargs
        Passed to 'vcpu_transform_args'.

    Returns
    =======
    Arguments
        The transformed arguments, including the non-implicit ones.
    """
    if "vcpu" not in event.properties:
        return event.args

    ok, func = try_import("tracetool.format." + format,
                          "vcpu_transform_args")
    assert ok
    assert func
    # The first argument is the implicit one added by transform_event().
    transformed = func(event.args[:1], *args, **kwargs)
    return Arguments([transformed, event.args[1:]])
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/vcpu.py
|
# -*- coding: utf-8 -*-
"""
Machinery for generating tracing-related intermediate files.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
import re
import sys
import weakref
import tracetool.format
import tracetool.backend
import tracetool.transform
def error_write(*lines):
    """Write a set of error lines to stderr, newline-terminated.

    Note: the original used sys.stderr.writelines() on a single joined
    string, which iterates it character by character; write() emits the
    same output in one call.
    """
    sys.stderr.write("\n".join(lines) + "\n")


def error(*lines):
    """Write a set of error lines and exit with status 1."""
    error_write(*lines)
    sys.exit(1)
# Shared output state used by out_open() and out(): the current 1-based
# output line number, the destination filename, and the open file
# object (stdout until out_open() redirects it).
out_lineno = 1
out_filename = '<none>'
out_fobj = sys.stdout
def out_open(filename):
    """Redirect subsequent out() calls to *filename* (truncating it)."""
    global out_filename, out_fobj
    out_filename = filename
    # NOTE(review): the file object is deliberately kept open for the
    # lifetime of the generator process; it is never closed explicitly.
    out_fobj = open(filename, 'wt')
def out(*lines, **kwargs):
    """Write a set of output lines.

    You can use kwargs as a shorthand for mapping variables when
    formatting all the strings in lines.

    The 'out_lineno' kwarg is automatically added to reflect the current
    output file line number.  The 'out_next_lineno' kwarg is also
    automatically added with the next output line number.  The
    'out_filename' kwarg is automatically added with the output
    filename.
    """
    global out_lineno
    output = []
    for l in lines:
        # Refresh the automatic kwargs for every line so %-expansion
        # sees the line number that line will land on.
        kwargs['out_lineno'] = out_lineno
        kwargs['out_next_lineno'] = out_lineno + 1
        kwargs['out_filename'] = out_filename
        output.append(l % kwargs)
        out_lineno += 1

    # write(), not writelines(): writelines() on a single joined string
    # iterates it character by character.
    out_fobj.write("\n".join(output) + "\n")
# We only want to allow standard C types or fixed sized
# integer types. We don't want QEMU specific types
# as we can't assume trace backends can resolve all the
# typedefs
ALLOWED_TYPES = [
    "int",
    "long",
    "short",
    "char",
    "bool",
    "unsigned",
    "signed",
    "int8_t",
    "uint8_t",
    "int16_t",
    "uint16_t",
    "int32_t",
    "uint32_t",
    "int64_t",
    "uint64_t",
    "void",
    "size_t",
    "ssize_t",
    "uintptr_t",
    "ptrdiff_t",
    # Magic substitution is done by tracetool
    "TCGv",
]


def validate_type(name):
    """Raise ValueError unless *name* is an allowed C type for trace arguments.

    Pointer markers and 'const' qualifiers are ignored; every remaining
    word of the type must appear in ALLOWED_TYPES.
    """
    bits = name.split(" ")
    for bit in bits:
        # Raw string: "\*" was an invalid escape sequence (SyntaxWarning
        # on modern Python); the regex itself is unchanged.
        bit = re.sub(r"\*", "", bit)
        if bit == "":
            continue
        if bit == "const":
            continue
        if bit not in ALLOWED_TYPES:
            raise ValueError("Argument type '%s' is not allowed. "
                             "Only standard C types and fixed size integer "
                             "types should be used. struct, union, and "
                             "other complex pointer types should be "
                             "declared as 'void *'" % name)
class Arguments:
    """Event arguments description.

    Wraps an ordered list of (type, name) pairs and offers list-like
    access plus helpers for rendering C argument lists.
    """

    def __init__(self, args):
        """
        Parameters
        ----------
        args :
            List of (type, name) tuples or Arguments objects.
        """
        # Flatten nested Arguments objects into a single pair list.
        self._args = []
        for arg in args:
            self._args.extend(arg._args if isinstance(arg, Arguments)
                              else [arg])

    def copy(self):
        """Create a new copy."""
        return Arguments(list(self._args))

    @staticmethod
    def build(arg_str):
        """Build an Arguments instance from an argument string.

        Parameters
        ----------
        arg_str : str
            String describing the event arguments.
        """
        parsed = []
        for raw in arg_str.split(","):
            raw = raw.strip()
            if not raw:
                raise ValueError("Empty argument (did you forget to use 'void'?)")
            if raw == 'void':
                continue

            if '*' in raw:
                # Split at the last '*' so the pointer stays with the type.
                arg_type, identifier = raw.rsplit('*', 1)
                arg_type += '*'
                identifier = identifier.strip()
            else:
                arg_type, identifier = raw.rsplit(None, 1)

            validate_type(arg_type)
            parsed.append((arg_type, identifier))
        return Arguments(parsed)

    def __getitem__(self, index):
        # Slicing yields another Arguments; plain indexing yields a pair.
        if isinstance(index, slice):
            return Arguments(self._args[index])
        return self._args[index]

    def __iter__(self):
        """Iterate over the (type, name) pairs."""
        return iter(self._args)

    def __len__(self):
        """Number of arguments."""
        return len(self._args)

    def __str__(self):
        """String suitable for declaring function arguments."""
        if not self._args:
            return "void"
        return ", ".join("%s %s" % (t, n) for t, n in self._args)

    def __repr__(self):
        """Evaluable string representation for this object."""
        return "Arguments(\"%s\")" % str(self)

    def names(self):
        """List of argument names."""
        return [name for _, name in self._args]

    def types(self):
        """List of argument types."""
        return [type_ for type_, _ in self._args]

    def casted(self):
        """List of argument names casted to their type."""
        return ["(%s)%s" % (type_, name) for type_, name in self._args]

    def transform(self, *trans):
        """Return a new Arguments instance with transformed types.

        The types in the resulting Arguments instance are transformed
        according to tracetool.transform.transform_type.
        """
        return Arguments(
            [(tracetool.transform.transform_type(type_, *trans), name)
             for type_, name in self._args])
class Event(object):
    """Event description.

    Attributes
    ----------
    name : str
        The event name.
    fmt : str
        The event format string.
    properties : set(str)
        Properties of the event.
    args : Arguments
        The event arguments.
    lineno : int
        The line number in the input file.
    filename : str
        The path to the input file.
    """

    # Raw strings: the previous non-raw literals made \w, \s, \( etc.
    # invalid Python escape sequences (SyntaxWarning on 3.12+); the
    # regular expressions themselves are unchanged.
    _CRE = re.compile(r'((?P<props>[\w\s]+)\s+)?'
                      r'(?P<name>\w+)'
                      r'\((?P<args>[^)]*)\)'
                      r'\s*'
                      r'(?:(?:(?P<fmt_trans>".+),)?\s*(?P<fmt>".+))?'
                      r'\s*')

    _VALID_PROPS = set(["disable", "tcg", "tcg-trans", "tcg-exec", "vcpu"])

    def __init__(self, name, props, fmt, args, lineno, filename, orig=None,
                 event_trans=None, event_exec=None):
        """
        Parameters
        ----------
        name : string
            Event name.
        props : list of str
            Property names.
        fmt : str, list of str
            Event printing format string(s).
        args : Arguments
            Event arguments.
        lineno : int
            The line number in the input file.
        filename : str
            The path to the input file.
        orig : Event or None
            Original Event before transformation/generation.
        event_trans : Event or None
            Generated translation-time event ("tcg" property).
        event_exec : Event or None
            Generated execution-time event ("tcg" property).
        """
        self.name = name
        self.properties = props
        self.fmt = fmt
        self.args = args
        self.lineno = int(lineno)
        self.filename = str(filename)
        self.event_trans = event_trans
        self.event_exec = event_exec

        if len(args) > 10:
            raise ValueError("Event '%s' has more than maximum permitted "
                             "argument count" % name)

        # Keep a reference to the pre-transformation event (self if none).
        if orig is None:
            self.original = weakref.ref(self)
        else:
            self.original = orig

        unknown_props = set(self.properties) - self._VALID_PROPS
        if len(unknown_props) > 0:
            raise ValueError("Unknown properties: %s"
                             % ", ".join(unknown_props))
        # 'tcg' events carry a [trans_fmt, exec_fmt] pair instead of a str.
        assert isinstance(self.fmt, str) or len(self.fmt) == 2

    def copy(self):
        """Create a new copy."""
        return Event(self.name, list(self.properties), self.fmt,
                     self.args.copy(), self.lineno, self.filename,
                     self, self.event_trans, self.event_exec)

    @staticmethod
    def build(line_str, lineno, filename):
        """Build an Event instance from a string.

        Parameters
        ----------
        line_str : str
            Line describing the event.
        lineno : int
            Line number in input file.
        filename : str
            Path to input file.
        """
        m = Event._CRE.match(line_str)
        assert m is not None
        groups = m.groupdict('')

        name = groups["name"]
        props = groups["props"].split()
        fmt = groups["fmt"]
        fmt_trans = groups["fmt_trans"]

        if fmt.find("%m") != -1 or fmt_trans.find("%m") != -1:
            raise ValueError("Event format '%m' is forbidden, pass the error "
                             "as an explicit trace argument")
        if fmt.endswith(r'\n"'):
            raise ValueError("Event format must not end with a newline "
                             "character")

        if len(fmt_trans) > 0:
            fmt = [fmt_trans, fmt]

        args = Arguments.build(groups["args"])

        # 'tcg-trans'/'tcg-exec' are generated internally, never written.
        if "tcg-trans" in props:
            raise ValueError("Invalid property 'tcg-trans'")
        if "tcg-exec" in props:
            raise ValueError("Invalid property 'tcg-exec'")
        if "tcg" not in props and not isinstance(fmt, str):
            raise ValueError("Only events with 'tcg' property can have two format strings")
        if "tcg" in props and isinstance(fmt, str):
            raise ValueError("Events with 'tcg' property must have two format strings")

        event = Event(name, props, fmt, args, lineno, filename)

        # add implicit arguments when using the 'vcpu' property
        import tracetool.vcpu
        event = tracetool.vcpu.transform_event(event)

        return event

    def __repr__(self):
        """Evaluable string representation for this object."""
        if isinstance(self.fmt, str):
            fmt = self.fmt
        else:
            fmt = "%s, %s" % (self.fmt[0], self.fmt[1])
        return "Event('%s %s(%s) %s')" % (" ".join(self.properties),
                                          self.name,
                                          self.args,
                                          fmt)

    # Star matching on PRI is dangerous as one might have multiple
    # arguments with that format, hence the non-greedy version of it.
    _FMT = re.compile(r"(%[\d\.]*\w+|%.*?PRI\S+)")

    def formats(self):
        """List conversion specifiers in the argument print format string."""
        assert not isinstance(self.fmt, list)
        return self._FMT.findall(self.fmt)

    # Templates for the C identifiers derived from an event name.
    QEMU_TRACE = "trace_%(name)s"
    QEMU_TRACE_NOCHECK = "_nocheck__" + QEMU_TRACE
    QEMU_TRACE_TCG = QEMU_TRACE + "_tcg"
    QEMU_DSTATE = "_TRACE_%(NAME)s_DSTATE"
    QEMU_BACKEND_DSTATE = "TRACE_%(NAME)s_BACKEND_DSTATE"
    QEMU_EVENT = "_TRACE_%(NAME)s_EVENT"

    def api(self, fmt=None):
        """Return the C identifier for this event per template *fmt*."""
        if fmt is None:
            fmt = Event.QEMU_TRACE
        return fmt % {"name": self.name, "NAME": self.name.upper()}

    def transform(self, *trans):
        """Return a new Event with transformed Arguments."""
        return Event(self.name,
                     list(self.properties),
                     self.fmt,
                     self.args.transform(*trans),
                     self.lineno,
                     self.filename,
                     self)
def read_events(fobj, fname):
    """Read and parse trace events from an event description file.

    Parameters
    ----------
    fobj : file
        Event description file.
    fname : str
        Name of event file

    Returns a list of Event objects
    """
    events = []
    for lineno, line in enumerate(fobj, 1):
        # Insist on newline-terminated lines so a truncated file fails
        # loudly instead of silently dropping the last event.
        if line[-1] != '\n':
            raise ValueError("%s does not end with a new line" % fname)
        if not line.strip():
            continue
        if line.lstrip().startswith('#'):
            continue

        try:
            event = Event.build(line, lineno, fname)
        except ValueError as e:
            # Prefix the parse error with file/line context, preserving
            # any further exception arguments.
            arg0 = 'Error at %s:%d: %s' % (fname, lineno, e.args[0])
            e.args = (arg0,) + e.args[1:]
            raise

        # transform TCG-enabled events
        if "tcg" not in event.properties:
            events.append(event)
        else:
            # A 'tcg' event expands into a translation-time event and an
            # execution-time event, each with its own format string.
            event_trans = event.copy()
            event_trans.name += "_trans"
            event_trans.properties += ["tcg-trans"]
            event_trans.fmt = event.fmt[0]
            # ignore TCG arguments: keep only those left unchanged by
            # the TCG->host type mapping
            args_trans = []
            for atrans, aorig in zip(
                    event_trans.transform(tracetool.transform.TCG_2_HOST).args,
                    event.args):
                if atrans == aorig:
                    args_trans.append(atrans)
            event_trans.args = Arguments(args_trans)

            event_exec = event.copy()
            event_exec.name += "_exec"
            event_exec.properties += ["tcg-exec"]
            event_exec.fmt = event.fmt[1]
            event_exec.args = event_exec.args.transform(tracetool.transform.TCG_2_HOST)

            # Link the generated events back to their origin.
            new_event = [event_trans, event_exec]
            event.event_trans, event.event_exec = new_event

            events.extend(new_event)

    return events
class TracetoolError(Exception):
    """Exception for calls to generate."""
def try_import(mod_name, attr_name=None, attr_default=None):
    """Try to import a module and get an attribute from it.

    Parameters
    ----------
    mod_name : str
        Module name.
    attr_name : str, optional
        Name of an attribute in the module.
    attr_default : optional
        Default value if the attribute does not exist in the module.

    Returns
    -------
    A pair indicating whether the module could be imported and the module or
    object or attribute value.
    """
    # Keep the try body minimal: only the import itself can raise
    # ImportError here.
    try:
        module = __import__(mod_name, globals(), locals(), ["__package__"])
    except ImportError:
        return False, None
    if attr_name is None:
        return True, module
    return True, getattr(module, str(attr_name), attr_default)
def generate(events, group, format, backends,
             binary=None, probe_prefix=None):
    """Generate the output for the given (format, backends) pair.

    Parameters
    ----------
    events : list
        list of Event objects to generate for
    group: str
        Name of the tracing group
    format : str
        Output format name.
    backends : list
        Output backend names.
    binary : str or None
        See tracetool.backend.dtrace.BINARY.
    probe_prefix : str or None
        See tracetool.backend.dtrace.PROBEPREFIX.
    """
    # fix strange python error (UnboundLocalError tracetool)
    import tracetool

    format = str(format)
    if not format:
        raise TracetoolError("format not set")
    if not tracetool.format.exists(format):
        raise TracetoolError("unknown format: %s" % format)

    if not backends:
        raise TracetoolError("no backends specified")
    for backend in backends:
        if not tracetool.backend.exists(backend):
            raise TracetoolError("unknown backend: %s" % backend)
    backend = tracetool.backend.Wrapper(backends, format)

    import tracetool.backend.dtrace
    tracetool.backend.dtrace.BINARY = binary
    tracetool.backend.dtrace.PROBEPREFIX = probe_prefix

    tracetool.format.generate(events, format, backend, group)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/__init__.py
|
# -*- coding: utf-8 -*-
"""
Type-transformation rules.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2016, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
def _transform_type(type_, trans):
    """Apply a single transformation rule (see transform_type())."""
    if isinstance(trans, str):
        return trans
    elif isinstance(trans, dict):
        if type_ in trans:
            # The mapped value may itself be any rule form; recurse.
            return _transform_type(type_, trans[type_])
        elif None in trans:
            # None acts as the dict's catch-all rule.
            return _transform_type(type_, trans[None])
        else:
            return type_
    elif callable(trans):
        return trans(type_)
    else:
        raise ValueError("Invalid type transformation rule: %s" % trans)


def transform_type(type_, *trans):
    """Return a new type transformed according to the given rules.

    Applies each of the transformation rules in trans in order.

    If an element of trans is a string, return it.

    If an element of trans is a function, call it with type_ as its only
    argument.

    If an element of trans is a dict, search type_ in its keys. If type_ is
    a key, use the value as a transformation rule for type_. Otherwise, if
    None is a key use the value as a transformation rule for type_.

    Otherwise, return type_.

    Parameters
    ----------
    type_ : str
        Type to transform.
    trans : list of function or dict
        Type transformation rules.

    Raises
    ------
    ValueError
        If no transformation rules are given, or a rule is invalid.
    """
    if len(trans) == 0:
        # Previously a bare, message-less ValueError; say what's wrong.
        raise ValueError("no type transformation rules given")
    res = type_
    for t in trans:
        res = _transform_type(res, t)
    return res
##################################################
# tcg -> host
def _tcg_2_host(type_):
if type_ == "TCGv":
# force a fixed-size type (target-independent)
return "uint64_t"
else:
return type_
TCG_2_HOST = {
"TCGv_i32": "uint32_t",
"TCGv_i64": "uint64_t",
"TCGv_ptr": "void *",
None: _tcg_2_host,
}
##################################################
# host -> host compatible with tcg sizes

# Map narrow host integer types to their 32-bit equivalents.
HOST_2_TCG_COMPAT = {
    "uint8_t": "uint32_t",
    "uint16_t": "uint32_t",
}
##################################################
# host/tcg -> tcg
def _host_2_tcg(type_):
if type_.startswith("TCGv"):
return type_
raise ValueError("Don't know how to translate '%s' into a TCG type\n" % type_)
# transform_type() rules mapping host C types to TCG types; the None entry
# rejects anything that is neither listed nor already a TCG type.
HOST_2_TCG = {
    "uint32_t": "TCGv_i32",
    "uint64_t": "TCGv_i64",
    "void *" : "TCGv_ptr",
    "CPUArchState *": "TCGv_env",
    None: _host_2_tcg,
}
##################################################
# tcg -> tcg helper definition
def _tcg_2_helper_def(type_):
if type_ == "TCGv":
return "target_ulong"
else:
return type_
# transform_type() rules mapping TCG types to the C types used in TCG
# helper *definitions*; None falls back to _tcg_2_helper_def.
TCG_2_TCG_HELPER_DEF = {
    "TCGv_i32": "uint32_t",
    "TCGv_i64": "uint64_t",
    "TCGv_ptr": "void *",
    None: _tcg_2_helper_def,
}
##################################################
# tcg -> tcg helper declaration
def _tcg_2_tcg_helper_decl_error(type_):
raise ValueError("Don't know how to translate type '%s' into a TCG helper declaration type\n" % type_)
# transform_type() rules mapping TCG types to the short mnemonics used in
# DEF_HELPER_* declarations; unlisted types raise via the None fallback.
TCG_2_TCG_HELPER_DECL = {
    "TCGv" : "tl",
    "TCGv_ptr": "ptr",
    "TCGv_i32": "i32",
    "TCGv_i64": "i64",
    "TCGv_env": "env",
    None: _tcg_2_tcg_helper_decl_error,
}
##################################################
# host/tcg -> tcg temporal constant allocation
def _host_2_tcg_tmp_new(type_):
if type_.startswith("TCGv"):
return "tcg_temp_new_nop"
raise ValueError("Don't know how to translate type '%s' into a TCG temporal allocation" % type_)
# transform_type() rules mapping host types to the function allocating a
# TCG temporary constant; TCG types map to a no-op via the None fallback.
HOST_2_TCG_TMP_NEW = {
    "uint32_t": "tcg_const_i32",
    "uint64_t": "tcg_const_i64",
    "void *" : "tcg_const_ptr",
    None: _host_2_tcg_tmp_new,
}
##################################################
# host/tcg -> tcg temporal constant deallocation
def _host_2_tcg_tmp_free(type_):
if type_.startswith("TCGv"):
return "tcg_temp_free_nop"
raise ValueError("Don't know how to translate type '%s' into a TCG temporal deallocation" % type_)
# transform_type() rules mapping host types to the function freeing the
# temporary allocated per HOST_2_TCG_TMP_NEW; None maps TCG types to a no-op.
HOST_2_TCG_TMP_FREE = {
    "uint32_t": "tcg_temp_free_i32",
    "uint64_t": "tcg_temp_free_i64",
    "void *" : "tcg_temp_free_ptr",
    None: _host_2_tcg_tmp_free,
}
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/transform.py
|
# -*- coding: utf-8 -*-
"""
DTrace/SystemTAP backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
# Mark this backend as user-selectable (listed by the tracetool frontend).
PUBLIC = True
# Probe-name prefix; must be assigned by the frontend before generation.
PROBEPREFIX = None
def probeprefix():
    """Return the configured probe prefix; raise if PROBEPREFIX is unset."""
    if PROBEPREFIX is None:
        raise ValueError("you must set PROBEPREFIX")
    return PROBEPREFIX
# Path of the traced binary; must be assigned by the frontend before generation.
BINARY = None
def binary():
    """Return the configured binary path; raise if BINARY is unset."""
    if BINARY is None:
        raise ValueError("you must set BINARY")
    return BINARY
def generate_h_begin(events, group):
    """Emit the trace.h prologue for the dtrace backend: include the
    dtrace-generated header and supply fallback *_ENABLED() macros."""
    if group == "root":
        header = "trace-dtrace-root.h"
    else:
        header = "trace-dtrace-%s.h" % group
    # Workaround for ust backend, which also includes <sys/sdt.h> and may
    # require SDT_USE_VARIADIC to be defined. If dtrace includes <sys/sdt.h>
    # first without defining SDT_USE_VARIADIC then ust breaks because the
    # STAP_PROBEV() macro is not defined.
    out('#ifndef SDT_USE_VARIADIC')
    out('#define SDT_USE_VARIADIC 1')
    out('#endif')
    out('#include "%s"' % header,
        '')
    out('#undef SDT_USE_VARIADIC')
    # SystemTap defines <provider>_<name>_ENABLED() but other DTrace
    # implementations might not.
    for e in events:
        out('#ifndef QEMU_%(uppername)s_ENABLED',
            '#define QEMU_%(uppername)s_ENABLED() true',
            '#endif',
            uppername=e.name.upper())
def generate_h(event, group):
    """Emit the dtrace probe invocation for one event."""
    out(' QEMU_%(uppername)s(%(argnames)s);',
        uppername=event.name.upper(),
        argnames=", ".join(event.args.names()))
def generate_h_backend_dstate(event, group):
    """Emit one ||-chained term of the per-event dynamic-state expression."""
    out(' QEMU_%(uppername)s_ENABLED() || \\',
        uppername=event.name.upper())
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/backend/dtrace.py
|
# -*- coding: utf-8 -*-
"""
Stderr built-in backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
# Mark this backend as user-selectable (listed by the tracetool frontend).
PUBLIC = True
def generate_h_begin(events, group):
    """Emit the includes needed by the log backend's generated code."""
    out('#include "qemu/log-for-trace.h"',
        '#include "qemu/error-report.h"',
        '')
def generate_h(event, group):
    """Emit code that logs *event* via qemu_log() when tracing is enabled,
    with or without a thread-id/timestamp prefix."""
    argnames = ", ".join(event.args.names())
    if len(event.args) > 0:
        argnames = ", " + argnames
    if "vcpu" in event.properties:
        # already checked on the generic format code
        cond = "true"
    else:
        cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper())
    # The #line directives make compiler diagnostics for the format string
    # point at the trace-events source file rather than the generated header.
    out(' if (%(cond)s && qemu_loglevel_mask(LOG_TRACE)) {',
        ' if (message_with_timestamp) {',
        ' struct timeval _now;',
        ' gettimeofday(&_now, NULL);',
        '#line %(event_lineno)d "%(event_filename)s"',
        ' qemu_log("%%d@%%zu.%%06zu:%(name)s " %(fmt)s "\\n",',
        ' qemu_get_thread_id(),',
        ' (size_t)_now.tv_sec, (size_t)_now.tv_usec',
        ' %(argnames)s);',
        '#line %(out_next_lineno)d "%(out_filename)s"',
        ' } else {',
        '#line %(event_lineno)d "%(event_filename)s"',
        ' qemu_log("%(name)s " %(fmt)s "\\n"%(argnames)s);',
        '#line %(out_next_lineno)d "%(out_filename)s"',
        ' }',
        ' }',
        cond=cond,
        event_lineno=event.lineno,
        event_filename=event.filename,
        name=event.name,
        fmt=event.fmt.rstrip("\n"),
        argnames=argnames)
def generate_h_backend_dstate(event, group):
    """Emit one ||-chained term of the per-event dynamic-state expression."""
    out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\',
        event_id="TRACE_" + event.name.upper())
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/backend/log.py
|
# -*- coding: utf-8 -*-
"""
Backend management.
Creating new backends
---------------------
A new backend named 'foo-bar' corresponds to Python module
'tracetool/backend/foo_bar.py'.
A backend module should provide a docstring, whose first non-empty line will be
considered its short description.
All backends must generate their contents through the 'tracetool.out' routine.
Backend attributes
------------------
========= ====================================================================
Attribute Description
========= ====================================================================
PUBLIC If exists and is set to 'True', the backend is considered "public".
========= ====================================================================
Backend functions
-----------------
All the following functions are optional, and no output will be generated if
they do not exist.
=============================== ==============================================
Function Description
=============================== ==============================================
generate_<format>_begin(events) Generate backend- and format-specific file
header contents.
generate_<format>_end(events) Generate backend- and format-specific file
footer contents.
generate_<format>(event) Generate backend- and format-specific contents
for the given event.
=============================== ==============================================
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
import os
import tracetool
def get_list(only_public=False):
    """Get a list of (name, description) pairs."""
    # The built-in "nop" backend is always available.
    res = [("nop", "Tracing disabled.")]
    backend_dir = tracetool.backend.__path__[0]
    modnames = sorted(fn.rsplit('.', 1)[0]
                      for fn in os.listdir(backend_dir)
                      if fn.endswith('.py') and fn != '__init__.py')
    for modname in modnames:
        ok, module = tracetool.try_import("tracetool.backend." + modname)
        # just in case; should never fail unless non-module files are put there
        if not ok:
            continue
        if only_public and not getattr(module, "PUBLIC", False):
            continue
        # the first line of the module docstring is its short description
        doc = (module.__doc__ or "").strip().split("\n")[0]
        res.append((modname.replace("_", "-"), doc))
    return res
def exists(name):
    """Return whether the given backend exists."""
    if not name:
        return False
    if name == "nop":
        return True
    modname = "tracetool.backend." + name.replace("-", "_")
    return tracetool.try_import(modname)[1]
class Wrapper:
    """Fan generation calls out to a list of backends for one format."""

    def __init__(self, backends, format):
        self._backends = [b.replace("-", "_") for b in backends]
        self._format = format.replace("-", "_")
        for b in self._backends:
            assert exists(b)
        assert tracetool.format.exists(self._format)

    def _run_function(self, name, *args, **kwargs):
        # Invoke the hook on every backend module that defines it.
        for b in self._backends:
            hook = tracetool.try_import("tracetool.backend." + b,
                                        name % self._format, None)[1]
            if hook is not None:
                hook(*args, **kwargs)

    def generate_begin(self, events, group):
        self._run_function("generate_%s_begin", events, group)

    def generate(self, event, group):
        self._run_function("generate_%s", event, group)

    def generate_backend_dstate(self, event, group):
        self._run_function("generate_%s_backend_dstate", event, group)

    def generate_end(self, events, group):
        self._run_function("generate_%s_end", events, group)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/backend/__init__.py
|
# -*- coding: utf-8 -*-
"""
Ftrace built-in backend.
"""
__author__ = "Eiichi Tsukata <eiichi.tsukata.xh@hitachi.com>"
__copyright__ = "Copyright (C) 2013 Hitachi, Ltd."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
# Mark this backend as user-selectable (listed by the tracetool frontend).
PUBLIC = True
def generate_h_begin(events, group):
    """Emit the include needed by the ftrace backend's generated code."""
    out('#include "trace/ftrace.h"',
        '')
def generate_h(event, group):
    """Emit code that formats *event* into a buffer and writes it to the
    ftrace trace_marker file when the event is enabled."""
    argnames = ", ".join(event.args.names())
    if len(event.args) > 0:
        argnames = ", " + argnames
    out(' {',
        ' char ftrace_buf[MAX_TRACE_STRLEN];',
        ' int unused __attribute__ ((unused));',
        ' int trlen;',
        ' if (trace_event_get_state(%(event_id)s)) {',
        '#line %(event_lineno)d "%(event_filename)s"',
        ' trlen = snprintf(ftrace_buf, MAX_TRACE_STRLEN,',
        ' "%(name)s " %(fmt)s "\\n" %(argnames)s);',
        '#line %(out_next_lineno)d "%(out_filename)s"',
        ' trlen = MIN(trlen, MAX_TRACE_STRLEN - 1);',
        ' unused = write(trace_marker_fd, ftrace_buf, trlen);',
        ' }',
        ' }',
        name=event.name,
        # NOTE(review): 'args' is not referenced by the template above —
        # presumably a harmless leftover; confirm out() ignores extra kwargs.
        args=event.args,
        event_id="TRACE_" + event.name.upper(),
        event_lineno=event.lineno,
        event_filename=event.filename,
        fmt=event.fmt.rstrip("\n"),
        argnames=argnames)
def generate_h_backend_dstate(event, group):
    """Emit one ||-chained term of the per-event dynamic-state expression."""
    out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\',
        event_id="TRACE_" + event.name.upper())
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/backend/ftrace.py
|
# -*- coding: utf-8 -*-
"""
LTTng User Space Tracing backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
# Mark this backend as user-selectable (listed by the tracetool frontend).
PUBLIC = True
def generate_h_begin(events, group):
    """Emit UST includes plus a tracepoint_enabled() fallback for LTTng
    UST releases older than 2.7."""
    header = 'trace-ust-' + group + '.h'
    out('#include <lttng/tracepoint.h>',
        '#include "%s"' % header,
        '',
        '/* tracepoint_enabled() was introduced in LTTng UST 2.7 */',
        '#ifndef tracepoint_enabled',
        '#define tracepoint_enabled(a, b) true',
        '#endif',
        '')
def generate_h(event, group):
    """Emit the LTTng UST tracepoint invocation for one event."""
    argnames = ", ".join(event.args.names())
    if len(event.args) > 0:
        argnames = ", " + argnames
    out(' tracepoint(qemu, %(name)s%(tp_args)s);',
        name=event.name,
        tp_args=argnames)
def generate_h_backend_dstate(event, group):
    """Emit one ||-chained term of the per-event dynamic-state expression."""
    out(' tracepoint_enabled(qemu, %(name)s) || \\',
        name=event.name)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/backend/ust.py
|
# -*- coding: utf-8 -*-
"""
Syslog built-in backend.
"""
__author__ = "Paul Durrant <paul.durrant@citrix.com>"
__copyright__ = "Copyright 2016, Citrix Systems Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
# Mark this backend as user-selectable (listed by the tracetool frontend).
PUBLIC = True
def generate_h_begin(events, group):
    """Emit the include needed by the syslog backend's generated code."""
    out('#include <syslog.h>',
        '')
def generate_h(event, group):
    """Emit code that sends *event* to syslog(3) when it is enabled."""
    argnames = ", ".join(event.args.names())
    if len(event.args) > 0:
        argnames = ", " + argnames
    if "vcpu" in event.properties:
        # already checked on the generic format code
        cond = "true"
    else:
        cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper())
    # #line directives map format-string diagnostics back to trace-events.
    out(' if (%(cond)s) {',
        '#line %(event_lineno)d "%(event_filename)s"',
        ' syslog(LOG_INFO, "%(name)s " %(fmt)s %(argnames)s);',
        '#line %(out_next_lineno)d "%(out_filename)s"',
        ' }',
        cond=cond,
        event_lineno=event.lineno,
        event_filename=event.filename,
        name=event.name,
        fmt=event.fmt.rstrip("\n"),
        argnames=argnames)
def generate_h_backend_dstate(event, group):
    """Emit one ||-chained term of the per-event dynamic-state expression."""
    out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\',
        event_id="TRACE_" + event.name.upper())
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/backend/syslog.py
|
# -*- coding: utf-8 -*-
"""
Simple built-in backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
PUBLIC = True
def is_string(arg):
    """Return True if the C declaration *arg* is a plain char-pointer
    (single '*', optionally const) — i.e. a string argument."""
    strtype = ('const char*', 'char*', 'const char *', 'char *')
    stripped = arg.lstrip()
    # char** and friends are pointers, not strings, hence the '*' count check
    return stripped.startswith(strtype) and stripped.count('*') == 1
def generate_h_begin(events, group):
    """Forward-declare the per-event _simple_*() recording functions."""
    for event in events:
        out('void _simple_%(api)s(%(args)s);',
            api=event.api(),
            args=event.args)
    out('')
def generate_h(event, group):
    """Emit the call into the per-event _simple_*() recording function."""
    out(' _simple_%(api)s(%(args)s);',
        api=event.api(),
        args=", ".join(event.args.names()))
def generate_h_backend_dstate(event, group):
    """Emit one ||-chained term of the per-event dynamic-state expression."""
    out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\',
        event_id="TRACE_" + event.name.upper())
def generate_c_begin(events, group):
    """Emit the includes needed by the generated simpletrace .c file."""
    out('#include "qemu/osdep.h"',
        '#include "trace/control.h"',
        '#include "trace/simple.h"',
        '')
def generate_c(event, group):
    """Emit the _simple_<api>() function body that serializes *event*
    into a simpletrace binary record."""
    out('void _simple_%(api)s(%(args)s)',
        '{',
        ' TraceBufferRecord rec;',
        api=event.api(),
        args=event.args)
    # Compute the record payload size as a C expression.
    sizes = []
    for type_, name in event.args:
        if is_string(type_):
            out(' size_t arg%(name)s_len = %(name)s ? MIN(strlen(%(name)s), MAX_TRACE_STRLEN) : 0;',
                name=name)
            # strings: 4-byte length prefix plus the clamped payload
            strsizeinfo = "4 + arg%s_len" % name
            sizes.append(strsizeinfo)
        else:
            # scalars and pointers are always recorded as 64-bit values
            sizes.append("8")
    sizestr = " + ".join(sizes)
    if len(event.args) == 0:
        sizestr = '0'
    event_id = 'TRACE_' + event.name.upper()
    if "vcpu" in event.properties:
        # already checked on the generic format code
        cond = "true"
    else:
        cond = "trace_event_get_state(%s)" % event_id
    out('',
        ' if (!%(cond)s) {',
        ' return;',
        ' }',
        '',
        ' if (trace_record_start(&rec, %(event_obj)s.id, %(size_str)s)) {',
        ' return; /* Trace Buffer Full, Event Dropped ! */',
        ' }',
        cond=cond,
        event_obj=event.api(event.QEMU_EVENT),
        size_str=sizestr)
    if len(event.args) > 0:
        for type_, name in event.args:
            # string
            if is_string(type_):
                out(' trace_record_write_str(&rec, %(name)s, arg%(name)s_len);',
                    name=name)
            # pointer var (not string)
            elif type_.endswith('*'):
                out(' trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)%(name)s);',
                    name=name)
            # primitive data type
            else:
                out(' trace_record_write_u64(&rec, (uint64_t)%(name)s);',
                    name=name)
    out(' trace_record_finish(&rec);',
        '}',
        '')
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/backend/simple.py
|
# -*- coding: utf-8 -*-
"""
trace/generated-ust-provider.h
"""
__author__ = "Mohamad Gebai <mohamad.gebai@polymtl.ca>"
__copyright__ = "Copyright 2012, Mohamad Gebai <mohamad.gebai@polymtl.ca>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
def generate(events, backend, group):
    """Emit the LTTng UST tracepoint provider header for *group*.

    Events carrying the "disabled" property are skipped entirely.
    """
    events = [e for e in events
              if "disabled" not in e.properties]
    if group == "all":
        include = "trace-ust-all.h"
    else:
        include = "trace-ust.h"
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        '#undef TRACEPOINT_PROVIDER',
        '#define TRACEPOINT_PROVIDER qemu',
        '',
        '#undef TRACEPOINT_INCLUDE_FILE',
        '#define TRACEPOINT_INCLUDE_FILE ./%s' % include,
        '',
        '#if !defined (TRACE_%s_GENERATED_UST_H) || \\' % group.upper(),
        ' defined(TRACEPOINT_HEADER_MULTI_READ)',
        '#define TRACE_%s_GENERATED_UST_H' % group.upper(),
        '',
        '#include <lttng/tracepoint.h>',
        '',
        '/*',
        ' * LTTng ust 2.0 does not allow you to use TP_ARGS(void) for tracepoints',
        # Fixed: the trailing comma below was missing, so Python implicitly
        # concatenated this literal with the next one and the two comment
        # lines came out merged in the generated header.
        ' * requiring no arguments. We define these macros introduced in more recent',
        ' * versions of LTTng ust as a workaround',
        ' */',
        '#ifndef _TP_EXPROTO1',
        '#define _TP_EXPROTO1(a) void',
        '#endif',
        '#ifndef _TP_EXDATA_PROTO1',
        '#define _TP_EXDATA_PROTO1(a) void *__tp_data',
        '#endif',
        '#ifndef _TP_EXDATA_VAR1',
        '#define _TP_EXDATA_VAR1(a) __tp_data',
        '#endif',
        '#ifndef _TP_EXVAR1',
        '#define _TP_EXVAR1(a)',
        '#endif',
        '')
    for e in events:
        if len(e.args) > 0:
            out('TRACEPOINT_EVENT(',
                ' qemu,',
                ' %(name)s,',
                ' TP_ARGS(%(args)s),',
                ' TP_FIELDS(',
                name=e.name,
                args=", ".join(", ".join(i) for i in e.args))
            types = e.args.types()
            names = e.args.names()
            fmts = e.formats()
            # Pick the CTF field macro from the C type / printf format.
            for t, n, f in zip(types, names, fmts):
                if ('char *' in t) or ('char*' in t):
                    out(' ctf_string(' + n + ', ' + n + ')')
                elif ("%p" in f) or ("x" in f) or ("PRIx" in f):
                    out(' ctf_integer_hex('+ t + ', ' + n + ', ' + n + ')')
                elif ("ptr" in t) or ("*" in t):
                    out(' ctf_integer_hex('+ t + ', ' + n + ', ' + n + ')')
                elif ('int' in t) or ('long' in t) or ('unsigned' in t) \
                        or ('size_t' in t) or ('bool' in t):
                    out(' ctf_integer(' + t + ', ' + n + ', ' + n + ')')
                elif ('double' in t) or ('float' in t):
                    out(' ctf_float(' + t + ', ' + n + ', ' + n + ')')
                elif ('void *' in t) or ('void*' in t):
                    out(' ctf_integer_hex(unsigned long, ' + n + ', ' + n + ')')
            out(' )',
                ')',
                '')
        else:
            out('TRACEPOINT_EVENT(',
                ' qemu,',
                ' %(name)s,',
                ' TP_ARGS(void),',
                ' TP_FIELDS()',
                ')',
                '',
                name=e.name)
    out('#endif /* TRACE_%s_GENERATED_UST_H */' % group.upper(),
        '',
        '/* This part must be outside ifdef protection */',
        '#include <lttng/tracepoint-event.h>')
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/ust_events_h.py
|
# -*- coding: utf-8 -*-
"""
trace/generated-ust.c
"""
__author__ = "Mohamad Gebai <mohamad.gebai@polymtl.ca>"
__copyright__ = "Copyright 2012, Mohamad Gebai <mohamad.gebai@polymtl.ca>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
def generate(events, backend, group):
    """Emit the .c file that instantiates the UST tracepoint probes."""
    events = [e for e in events
              if "disabled" not in e.properties]
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        '#include "qemu/osdep.h"',
        '',
        '#define TRACEPOINT_DEFINE',
        '#define TRACEPOINT_CREATE_PROBES',
        '',
        '/* If gcc version 4.7 or older is used, LTTng ust gives a warning when compiling with',
        ' -Wredundant-decls.',
        ' */',
        '#pragma GCC diagnostic ignored "-Wredundant-decls"',
        '',
        '#include "trace-ust-all.h"')
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/ust_events_c.py
|
# -*- coding: utf-8 -*-
"""
Format management.
Creating new formats
--------------------
A new format named 'foo-bar' corresponds to Python module
'tracetool/format/foo_bar.py'.
A format module should provide a docstring, whose first non-empty line will be
considered its short description.
All formats must generate their contents through the 'tracetool.out' routine.
Format functions
----------------
======== ==================================================================
Function Description
======== ==================================================================
generate Called to generate a format-specific file.
======== ==================================================================
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
import os
import tracetool
def get_list():
    """Get a list of (name, description) pairs."""
    res = []
    fmt_dir = tracetool.format.__path__[0]
    modnames = sorted(fn.rsplit('.', 1)[0]
                      for fn in os.listdir(fmt_dir)
                      if fn.endswith('.py') and fn != '__init__.py')
    for modname in modnames:
        ok, module = tracetool.try_import("tracetool.format." + modname)
        # just in case; should never fail unless non-module files are put there
        if not ok:
            continue
        # the first line of the module docstring is its short description
        doc = (module.__doc__ or "").strip().split("\n")[0]
        res.append((modname.replace("_", "-"), doc))
    return res
def exists(name):
    """Return whether the given format exists."""
    if not name:
        return False
    modname = "tracetool.format." + name.replace("-", "_")
    return tracetool.try_import(modname)[1]
def generate(events, format, backend, group):
    """Run the named format's 'generate' hook over *events*."""
    if not exists(format):
        raise ValueError("unknown format: %s" % format)
    # module names use underscores even though format names use dashes
    format = format.replace("-", "_")
    func = tracetool.try_import("tracetool.format." + format,
                                "generate")[1]
    if func is None:
        raise AttributeError("format has no 'generate': %s" % format)
    func(events, backend, group)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/__init__.py
|
# -*- coding: utf-8 -*-
"""
trace/generated-tracers.dtrace (DTrace only).
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
from sys import platform
# Reserved keywords from
# https://wikis.oracle.com/display/DTrace/Types,+Operators+and+Expressions
# Probe argument names colliding with these D keywords get a '_' suffix.
RESERVED_WORDS = (
    'auto', 'goto', 'sizeof', 'break', 'if', 'static', 'case', 'import',
    'string', 'char', 'inline', 'stringof', 'const', 'int', 'struct',
    'continue', 'long', 'switch', 'counter', 'offsetof', 'this',
    'default', 'probe', 'translator', 'do', 'provider', 'typedef',
    'double', 'register', 'union', 'else', 'restrict', 'unsigned',
    'enum', 'return', 'void', 'extern', 'self', 'volatile', 'float',
    'short', 'while', 'for', 'signed', 'xlate',
    )
def generate(events, backend, group):
    """Emit the DTrace provider definition (.d) for *events*, adapting C
    types to what the dtrace(1) implementations accept."""
    events = [e for e in events
              if "disable" not in e.properties]
    # SystemTap's dtrace(1) warns about empty "provider qemu {}" but is happy
    # with an empty file. Avoid the warning.
    # But dtrace on macOS can't deal with empty files.
    if not events and platform != "darwin":
        return
    # Fixed: a missing comma after the header string made Python concatenate
    # it with the following '' literal, swallowing the intended blank line
    # (the other format emitters all separate the header with a blank line).
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        'provider qemu {')
    for e in events:
        args = []
        for type_, name in e.args:
            if platform == "darwin":
                # macOS dtrace accepts only C99 _Bool
                if type_ == 'bool':
                    type_ = '_Bool'
                if type_ == 'bool *':
                    type_ = '_Bool *'
                # It converts int8_t * in probe points to char * in header
                # files and introduces [-Wpointer-sign] warning.
                # Avoid it by changing probe type to signed char * beforehand.
                if type_ == 'int8_t *':
                    type_ = 'signed char *'
            # SystemTap dtrace(1) emits a warning when long long is used
            type_ = type_.replace('unsigned long long', 'uint64_t')
            type_ = type_.replace('signed long long', 'int64_t')
            type_ = type_.replace('long long', 'int64_t')
            if name in RESERVED_WORDS:
                name += '_'
            args.append(type_ + ' ' + name)
        # Define prototype for probe arguments
        out('',
            'probe %(name)s(%(args)s);',
            name=e.name,
            args=','.join(args))
    out('',
        '};')
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/d.py
|
# -*- coding: utf-8 -*-
"""
Generate trace/generated-helpers-wrappers.h.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2016, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
from tracetool.transform import *
import tracetool.vcpu
def generate(events, backend, group):
    """Emit inline gen_helper_* wrappers that convert mixed host/TCG
    arguments into TCG temporaries before calling the helper proxy."""
    events = [e for e in events
              if "disable" not in e.properties]
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        '#define tcg_temp_new_nop(v) (v)',
        '#define tcg_temp_free_nop(v)',
        '',
        )
    for e in events:
        if "tcg-exec" not in e.properties:
            continue
        # tracetool.generate always transforms types to host
        e_args = tracetool.vcpu.transform_args("tcg_helper_c", e.original, "wrapper")
        # mixed-type to TCG helper bridge
        args_tcg_compat = e_args.transform(HOST_2_TCG_COMPAT)
        # declarations that wrap each host value in a TCG temporary constant
        code_new = [
            "%(tcg_type)s __%(name)s = %(tcg_func)s(%(name)s);" %
            {"tcg_type": transform_type(type_, HOST_2_TCG),
             "tcg_func": transform_type(type_, HOST_2_TCG_TMP_NEW),
             "name": name}
            for (type_, name) in args_tcg_compat
        ]
        # matching deallocations, emitted after the proxy call
        code_free = [
            "%(tcg_func)s(__%(name)s);" %
            {"tcg_func": transform_type(type_, HOST_2_TCG_TMP_FREE),
             "name": name}
            for (type_, name) in args_tcg_compat
        ]
        gen_name = "gen_helper_" + e.api()
        out('static inline void %(name)s(%(args)s)',
            '{',
            ' %(code_new)s',
            ' %(proxy_name)s(%(tmp_names)s);',
            ' %(code_free)s',
            '}',
            name=gen_name,
            args=e_args,
            proxy_name=gen_name + "_proxy",
            code_new="\n ".join(code_new),
            code_free="\n ".join(code_free),
            tmp_names=", ".join(["__%s" % name for _, name in e_args]),
            )
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/tcg_helper_wrapper_h.py
|
# -*- coding: utf-8 -*-
"""
Generate trace/generated-helpers.h.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2016, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
from tracetool.transform import *
import tracetool.vcpu
def generate(events, backend, group):
    """Emit DEF_HELPER_FLAGS_* declarations for the TCG tracing helper
    proxies of every tcg-exec event."""
    events = [e for e in events
              if "disable" not in e.properties]
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        )
    for e in events:
        if "tcg-exec" not in e.properties:
            continue
        # TCG helper proxy declaration
        fmt = "DEF_HELPER_FLAGS_%(argc)d(%(name)s, %(flags)svoid%(types)s)"
        e_args = tracetool.vcpu.transform_args("tcg_helper_c", e.original, "header")
        # host types -> TCG-compatible sizes -> TCG types -> decl mnemonics
        args = e_args.transform(HOST_2_TCG_COMPAT, HOST_2_TCG,
                                TCG_2_TCG_HELPER_DECL)
        types = ", ".join(args.types())
        if types != "":
            types = ", " + types
        flags = "TCG_CALL_NO_RWG, "
        out(fmt,
            flags=flags,
            argc=len(args),
            name=e.api() + "_proxy",
            types=types,
            )
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/tcg_helper_h.py
|
# -*- coding: utf-8 -*-
"""
trace/generated-tracers.c
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
def generate(events, backend, group):
    """Emit the trace-<group>.c file: per-event TraceEvent objects, the
    group's event table, its registration hook, and the backend bodies."""
    active_events = [e for e in events
                     if "disable" not in e.properties]
    header = "trace-" + group + ".h"
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        '#include "qemu/osdep.h"',
        '#include "qemu/module.h"',
        '#include "%s"' % header,
        '')
    # dynamic-state storage for every event, including disabled ones
    for e in events:
        out('uint16_t %s;' % e.api(e.QEMU_DSTATE))
    for e in events:
        if "vcpu" in e.properties:
            vcpu_id = 0
        else:
            vcpu_id = "TRACE_VCPU_EVENT_NONE"
        out('TraceEvent %(event)s = {',
            ' .id = 0,',
            ' .vcpu_id = %(vcpu_id)s,',
            ' .name = \"%(name)s\",',
            ' .sstate = %(sstate)s,',
            ' .dstate = &%(dstate)s ',
            '};',
            event = e.api(e.QEMU_EVENT),
            vcpu_id = vcpu_id,
            name = e.name,
            sstate = "TRACE_%s_ENABLED" % e.name.upper(),
            dstate = e.api(e.QEMU_DSTATE))
    # NULL-terminated table of all events in this group
    out('TraceEvent *%(group)s_trace_events[] = {',
        group = group.lower())
    for e in events:
        out(' &%(event)s,', event = e.api(e.QEMU_EVENT))
    out(' NULL,',
        '};',
        '')
    out('static void trace_%(group)s_register_events(void)',
        '{',
        ' trace_event_register_group(%(group)s_trace_events);',
        '}',
        'trace_init(trace_%(group)s_register_events)',
        group = group.lower())
    # backend-specific generated bodies only cover enabled events
    backend.generate_begin(active_events, group)
    for event in active_events:
        backend.generate(event, group)
    backend.generate_end(active_events, group)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/c.py
|
# -*- coding: utf-8 -*-
"""
Generate .h file for TCG code generation.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out, Arguments
import tracetool.vcpu
def vcpu_transform_args(args):
    """Append the TCGv_env companion argument for a single vcpu argument."""
    assert len(args) == 1
    return Arguments([
        args,
        # NOTE: this name must be kept in sync with the one in "tcg_h"
        # NOTE: Current helper code uses TCGv_env (CPUArchState*)
        ("TCGv_env", "__tcg_" + args.names()[0]),
    ])
def generate(events, backend, group):
    """Emit inline trace_*_tcg() functions that record the translation-time
    event and generate the execution-time helper call."""
    if group == "root":
        header = "trace/trace-root.h"
    else:
        header = "trace.h"
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '/* You must include this file after the inclusion of helper.h */',
        '',
        '#ifndef TRACE_%s_GENERATED_TCG_TRACERS_H' % group.upper(),
        '#define TRACE_%s_GENERATED_TCG_TRACERS_H' % group.upper(),
        '',
        '#include "exec/helper-proto.h"',
        '#include "%s"' % header,
        '',
        )
    for e in events:
        # just keep one of them
        if "tcg-exec" not in e.properties:
            continue
        out('static inline void %(name_tcg)s(%(args)s)',
            '{',
            name_tcg=e.original.api(e.QEMU_TRACE_TCG),
            args=tracetool.vcpu.transform_args("tcg_h", e.original))
        if "disable" not in e.properties:
            args_trans = e.original.event_trans.args
            args_exec = tracetool.vcpu.transform_args(
                "tcg_helper_c", e.original.event_exec, "wrapper")
            if "vcpu" in e.properties:
                trace_cpu = e.args.names()[0]
                cond = "trace_event_get_vcpu_state(%(cpu)s,"\
                       " TRACE_%(id)s)"\
                       % dict(
                           cpu=trace_cpu,
                           id=e.original.event_exec.name.upper())
            else:
                cond = "true"
            # record at translation time, then emit the runtime helper call
            out(' %(name_trans)s(%(argnames_trans)s);',
                ' if (%(cond)s) {',
                ' gen_helper_%(name_exec)s(%(argnames_exec)s);',
                ' }',
                name_trans=e.original.event_trans.api(e.QEMU_TRACE),
                name_exec=e.original.event_exec.api(e.QEMU_TRACE),
                argnames_trans=", ".join(args_trans.names()),
                argnames_exec=", ".join(args_exec.names()),
                cond=cond)
        out('}')
    out('',
        '#endif /* TRACE_%s_GENERATED_TCG_TRACERS_H */' % group.upper())
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/tcg_h.py
|
# -*- coding: utf-8 -*-
"""
Generate .stp file (DTrace with SystemTAP only).
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
from tracetool.backend.dtrace import binary, probeprefix
# Technically 'self' is not used by systemtap yet, but
# they recommended we keep it in the reserved list anyway
RESERVED_WORDS = (
    'break', 'catch', 'continue', 'delete', 'else', 'for',
    'foreach', 'function', 'global', 'if', 'in', 'limit',
    'long', 'next', 'probe', 'return', 'self', 'string',
    'try', 'while'
)
def stap_escape(identifier):
    """Return *identifier*, suffixed with '_' if it is a stap keyword."""
    return identifier + '_' if identifier in RESERVED_WORDS else identifier
def generate(events, backend, group):
    """Emit the .stp file binding each dtrace mark to named stap probe
    variables ($arg1, $arg2, ...)."""
    events = [e for e in events
              if "disable" not in e.properties]
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '')
    for e in events:
        # Define prototype for probe arguments
        out('probe %(probeprefix)s.%(name)s = process("%(binary)s").mark("%(name)s")',
            '{',
            probeprefix=probeprefix(),
            name=e.name,
            binary=binary())
        i = 1
        if len(e.args) > 0:
            for name in e.args.names():
                # rename arguments that collide with stap keywords
                name = stap_escape(name)
                out(' %s = $arg%d;' % (name, i))
                i += 1
        out('}')
        out()
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/stap.py
|
# -*- coding: utf-8 -*-
"""
Generate .stp file that printfs log messages (DTrace with SystemTAP only).
"""
__author__ = "Daniel P. Berrange <berrange@redhat.com>"
__copyright__ = "Copyright (C) 2014-2019, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Daniel Berrange"
__email__ = "berrange@redhat.com"
import re
from tracetool import out
from tracetool.backend.dtrace import binary, probeprefix
from tracetool.backend.simple import is_string
from tracetool.format.stap import stap_escape
def global_var_name(name):
    """Build a SystemTap global variable name scoped by the probe prefix."""
    scoped_prefix = probeprefix().replace(".", "_")
    return scoped_prefix + "_" + name
# Parser states for c_fmt_to_stap():
STATE_SKIP = 0     # between tokens (outside any literal or macro)
STATE_LITERAL = 1  # inside a double-quoted C string literal
STATE_MACRO = 2    # inside a bare identifier such as PRId64
def c_macro_to_format(macro):
    """Map a C size macro such as PRId64 to its printf conversion letter.

    Only the PRIxNN family is handled: the conversion character is the
    fourth character of the macro name ('d' for PRId64).

    Raises ValueError for any other macro name.
    """
    if macro.startswith("PRI"):
        return macro[3]

    # ValueError is more precise than a bare Exception; callers catching
    # Exception still work since ValueError is a subclass.
    raise ValueError("Unhandled macro '%s'" % macro)
def c_fmt_to_stap(fmt):
    """Translate a C printf format token sequence into a SystemTap one.

    *fmt* is the raw C source expression: double-quoted string literals
    optionally interleaved with PRIxNN macros, e.g. '"addr=0x%" PRIx64'.
    Literal contents and macro conversion letters are concatenated, then
    integer size qualifiers are stripped (SystemTap integers are 64-bit).

    Raises an exception on a backslash escape outside a string literal
    or an unknown macro name.
    """
    state = STATE_SKIP  # was the magic number 0; same value, clearer intent
    bits = []
    literal = ""
    macro = ""
    escape = 0  # dropped stray C-style trailing semicolon
    for i in range(len(fmt)):
        if fmt[i] == '\\':
            # A backslash toggles the escape flag so '\"' does not
            # terminate the literal.
            if escape:
                escape = 0
            else:
                escape = 1
            if state != STATE_LITERAL:
                raise Exception("Unexpected escape outside string literal")
            literal = literal + fmt[i]
        elif fmt[i] == '"' and not escape:
            if state == STATE_LITERAL:
                # Closing quote: flush the accumulated literal.
                state = STATE_SKIP
                bits.append(literal)
                literal = ""
            else:
                # Opening quote; a pending macro ends here.
                if state == STATE_MACRO:
                    bits.append(c_macro_to_format(macro))
                    macro = ""
                state = STATE_LITERAL
        elif fmt[i] == ' ' or fmt[i] == '\t':
            # Whitespace terminates a macro but is kept inside literals.
            if state == STATE_MACRO:
                bits.append(c_macro_to_format(macro))
                macro = ""
                state = STATE_SKIP
            elif state == STATE_LITERAL:
                literal = literal + fmt[i]
        else:
            escape = 0
            if state == STATE_SKIP:
                state = STATE_MACRO

            if state == STATE_LITERAL:
                literal = literal + fmt[i]
            else:
                macro = macro + fmt[i]
    # Flush whatever token was still open at end of input.
    if state == STATE_MACRO:
        bits.append(c_macro_to_format(macro))
    elif state == STATE_LITERAL:
        bits.append(literal)

    # All variables in systemtap are 64-bit in size
    # The "%l" integer size qualifier is thus redundant
    # and "%ll" is not valid at all. Similarly the size_t
    # based "%z" size qualifier is not valid. We just
    # strip all size qualifiers for sanity.
    # Raw strings fix the invalid "\d" escape sequence, which newer
    # Python versions flag (DeprecationWarning / SyntaxWarning).
    fmt = re.sub(r"%(\d*)(l+|z)(x|u|d)", r"%\1\3", "".join(bits))
    return fmt
def generate(events, backend, group):
    """Emit a .log probe per enabled event that printf()s the event."""
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '')
    for event_id, e in enumerate(events):
        if 'disable' in e.properties:
            continue
        out('probe %(probeprefix)s.log.%(name)s = %(probeprefix)s.%(name)s ?',
            '{',
            probeprefix=probeprefix(),
            name=e.name)

        # Get references to userspace strings
        for type_, name in e.args:
            name = stap_escape(name)
            if is_string(type_):
                out(' try {',
                    ' arg%(name)s_str = %(name)s ? ' +
                    'user_string_n(%(name)s, 512) : "<null>"',
                    ' } catch {}',
                    name=name)

        # Determine systemtap's view of variable names
        fields = ["pid()", "gettimeofday_ns()"]
        for type_, name in e.args:
            name = stap_escape(name)
            if is_string(type_):
                fields.append("arg" + name + "_str")
            else:
                fields.append(name)

        # Emit the entire record in a single SystemTap printf()
        # join() takes the list directly; the redundant generator
        # expression (', '.join(arg for arg in fields)) was dropped.
        arg_str = ', '.join(fields)
        fmt_str = "%d@%d " + e.name + " " + c_fmt_to_stap(e.fmt) + "\\n"
        out(' printf("%(fmt_str)s", %(arg_str)s)',
            fmt_str=fmt_str, arg_str=arg_str)
        out('}')
        out()
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/log_stap.py
|
# -*- coding: utf-8 -*-
"""
Generate .stp file that outputs simpletrace binary traces (DTrace with SystemTAP only).
"""
__author__ = "Stefan Hajnoczi <redhat.com>"
__copyright__ = "Copyright (C) 2014, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
from tracetool.backend.dtrace import probeprefix
from tracetool.backend.simple import is_string
from tracetool.format.stap import stap_escape
def global_var_name(name):
    """Return a globally-unique SystemTap variable name for *name*."""
    prefix = probeprefix().replace(".", "_")
    return "_".join((prefix, name))
def generate(events, backend, group):
    # Emit one .simpletrace probe per enabled event; each probe prints a
    # binary simpletrace record (header + arguments) with one printf().
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '')
    for event_id, e in enumerate(events):
        if 'disable' in e.properties:
            continue
        out('probe %(probeprefix)s.simpletrace.%(name)s = %(probeprefix)s.%(name)s ?',
            '{',
            probeprefix=probeprefix(),
            name=e.name)
        # Calculate record size
        sizes = ['24'] # sizeof(TraceRecord)
        for type_, name in e.args:
            name = stap_escape(name)
            if is_string(type_):
                # Strings are copied from userspace (capped at 512 bytes)
                # and encoded as a 4-byte length followed by the bytes.
                out(' try {',
                    ' arg%(name)s_str = %(name)s ? user_string_n(%(name)s, 512) : "<null>"',
                    ' } catch {}',
                    ' arg%(name)s_len = strlen(arg%(name)s_str)',
                    name=name)
                sizes.append('4 + arg%s_len' % name)
            else:
                # Non-string arguments are stored as 8-byte integers.
                sizes.append('8')
        sizestr = ' + '.join(sizes)
        # Generate format string and value pairs for record header and arguments
        fields = [('8b', str(event_id)),
                  ('8b', 'gettimeofday_ns()'),
                  ('4b', sizestr),
                  ('4b', 'pid()')]
        for type_, name in e.args:
            name = stap_escape(name)
            if is_string(type_):
                fields.extend([('4b', 'arg%s_len' % name),
                               ('.*s', 'arg%s_len, arg%s_str' % (name, name))])
            else:
                fields.append(('8b', name))
        # Emit the entire record in a single SystemTap printf()
        # '%'.join supplies the '%' between conversions; the leading
        # '%%8b' + literal 1 is the record-type marker.
        fmt_str = '%'.join(fmt for fmt, _ in fields)
        arg_str = ', '.join(arg for _, arg in fields)
        out(' printf("%%8b%%%(fmt_str)s", 1, %(arg_str)s)',
            fmt_str=fmt_str, arg_str=arg_str)
        out('}')
        out()
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/simpletrace_stap.py
|
# -*- coding: utf-8 -*-
"""
Generate trace/generated-helpers.c.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import Arguments, out
from tracetool.transform import *
import tracetool.vcpu
def vcpu_transform_args(args, mode):
    """Transform the single vcpu argument for TCG helper generation.

    *args* must hold exactly one (type, name) pair; *mode* is one of
    "code", "header" or "wrapper".  Returns a new Arguments object.
    """
    assert len(args) == 1
    # NOTE: this name must be kept in sync with the one in "tcg_h"
    args = Arguments([(args.types()[0], "__tcg_" + args.names()[0])])
    if mode == "code":
        return Arguments([
            # Does cast from helper requirements to tracing types
            ("CPUState *", "env_cpu(%s)" % args.names()[0]),
        ])
    else:
        args = Arguments([
            # NOTE: Current helper code uses TCGv_env (CPUArchState*)
            ("CPUArchState *", args.names()[0]),
        ])
        if mode == "header":
            return args
        elif mode == "wrapper":
            return args.transform(HOST_2_TCG)
        else:
            # Unknown mode: programming error in the caller.
            assert False
def generate(events, backend, group):
    # Emit trace/generated-helpers.c: one proxy helper per "tcg-exec"
    # event, forwarding to the no-check tracing function (the enablement
    # check already happened at TCG generation time).
    if group == "root":
        header = "trace/trace-root.h"
    else:
        header = "trace.h"

    events = [e for e in events
              if "disable" not in e.properties]

    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        '#include "qemu/osdep.h"',
        '#include "cpu.h"',
        '#include "exec/helper-proto.h"',
        '#include "%s"' % header,
        '',
        )

    for e in events:
        if "tcg-exec" not in e.properties:
            continue

        # Signature as seen from the TCG helper (host-side types) ...
        e_args_api = tracetool.vcpu.transform_args(
            "tcg_helper_c", e.original, "header").transform(
                HOST_2_TCG_COMPAT, TCG_2_TCG_HELPER_DEF)
        # ... and the argument list used to call the tracing function.
        e_args_call = tracetool.vcpu.transform_args(
            "tcg_helper_c", e, "code")

        out('void %(name_tcg)s(%(args_api)s)',
            '{',
            # NOTE: the check was already performed at TCG-generation time
            ' %(name)s(%(args_call)s);',
            '}',
            name_tcg="helper_%s_proxy" % e.api(),
            name=e.api(e.QEMU_TRACE_NOCHECK),
            args_api=e_args_api,
            args_call=", ".join(e_args_call.casted()),
            )
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/tcg_helper_c.py
|
# -*- coding: utf-8 -*-
"""
trace/generated-tracers.h
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@redhat.com"
from tracetool import out
def generate(events, backend, group):
    # Emit trace/generated-tracers.h: per-event TraceEvent/dstate externs,
    # TRACE_*_ENABLED compile-time defines, and the static inline tracing
    # functions (with and without the per-vCPU enablement check).
    if group == "root":
        header = "trace/control-vcpu.h"
    else:
        header = "trace/control.h"

    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        '#ifndef TRACE_%s_GENERATED_TRACERS_H' % group.upper(),
        '#define TRACE_%s_GENERATED_TRACERS_H' % group.upper(),
        '',
        '#include "%s"' % header,
        '')

    for e in events:
        out('extern TraceEvent %(event)s;',
            event = e.api(e.QEMU_EVENT))

    for e in events:
        out('extern uint16_t %s;' % e.api(e.QEMU_DSTATE))

    # static state
    for e in events:
        if 'disable' in e.properties:
            enabled = 0
        else:
            enabled = 1
        if "tcg-exec" in e.properties:
            # a single define for the two "sub-events"
            out('#define TRACE_%(name)s_ENABLED %(enabled)d',
                name=e.original.name.upper(),
                enabled=enabled)
        out('#define TRACE_%s_ENABLED %d' % (e.name.upper(), enabled))

    backend.generate_begin(events, group)

    for e in events:
        # tracer-specific dstate: backend emits its own condition(s),
        # the trailing 'false)' closes the '( \' opened above.
        out('',
            '#define %(api)s() ( \\',
            api=e.api(e.QEMU_BACKEND_DSTATE))
        if "disable" not in e.properties:
            backend.generate_backend_dstate(e, group)
        out(' false)')

        # tracer without checks
        out('',
            'static inline void %(api)s(%(args)s)',
            '{',
            api=e.api(e.QEMU_TRACE_NOCHECK),
            args=e.args)
        if "disable" not in e.properties:
            backend.generate(e, group)
        out('}')

        # tracer wrapper with checks (per-vCPU tracing)
        if "vcpu" in e.properties:
            # The first event argument names the CPUState pointer.
            trace_cpu = next(iter(e.args))[1]
            cond = "trace_event_get_vcpu_state(%(cpu)s,"\
                   " TRACE_%(id)s)"\
                   % dict(
                       cpu=trace_cpu,
                       id=e.name.upper())
        else:
            cond = "true"

        out('',
            'static inline void %(api)s(%(args)s)',
            '{',
            ' if (%(cond)s) {',
            ' %(api_nocheck)s(%(names)s);',
            ' }',
            '}',
            api=e.api(),
            api_nocheck=e.api(e.QEMU_TRACE_NOCHECK),
            args=e.args,
            names=", ".join(e.args.names()),
            cond=cond)

    backend.generate_end(events, group)

    out('#endif /* TRACE_%s_GENERATED_TRACERS_H */' % group.upper())
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/tracetool/format/h.py
|
#!/usr/bin/env python3
# QEMU library
#
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <ehabkost@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
import sys
import argparse
import os
import os.path
import re
from typing import *
from codeconverter.patching import FileInfo, match_class_dict, FileList
import codeconverter.qom_macros
from codeconverter.qom_type_info import TI_FIELDS, type_infos, TypeInfoVar
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
def process_all_files(parser: argparse.ArgumentParser, args: argparse.Namespace) -> None:
    """Load every named file and either dump a TSV table (--table) or
    run the selected patterns and emit the patched result.

    Exits with status 1 on an invalid --pattern name.
    """
    DBG("filenames: %r", args.filenames)

    files = FileList()
    files.extend(FileInfo(files, fn, args.force) for fn in args.filenames)
    for f in files:
        DBG('opening %s', f.filename)
        f.load()

    if args.table:
        fields = ['filename', 'variable_name'] + TI_FIELDS
        print('\t'.join(fields))
        for f in files:
            for t in f.matches_of_type(TypeInfoVar):
                assert isinstance(t, TypeInfoVar)
                # The comprehension variable was named 'f', shadowing the
                # enclosing file variable; renamed to 'field' for clarity.
                values = [f.filename, t.name] + \
                         [t.get_raw_initializer_value(field)
                          for field in TI_FIELDS]
                DBG('values: %r', values)
                assert all('\t' not in v for v in values)
                values = [v.replace('\n', ' ').replace('"', '') for v in values]
                print('\t'.join(values))
        return

    match_classes = match_class_dict()
    if not args.patterns:
        parser.error("--pattern is required")

    classes = [p for arg in args.patterns
               for p in re.split(r'[\s,]', arg)
               if p.strip()]
    for c in classes:
        if c not in match_classes \
           or not match_classes[c].regexp:
            print("Invalid pattern name: %s" % (c), file=sys.stderr)
            print("Valid patterns:", file=sys.stderr)
            print(PATTERN_HELP, file=sys.stderr)
            sys.exit(1)

    DBG("classes: %r", classes)

    files.patch_content(max_passes=args.passes, class_names=classes)

    for f in files:
        if not args.dry_run:
            if args.inplace:
                f.patch_inplace()
            if args.diff:
                f.show_diff()
            if not args.diff and not args.inplace:
                f.write_to_file(sys.stdout)
                sys.stdout.flush()
# Help text listing every match class that can actually rewrite code;
# shown in --help and when an invalid --pattern name is given.
PATTERN_HELP = ('\n'.join(" %s: %s" % (n, str(c.__doc__).strip())
                          for (n,c) in sorted(match_class_dict().items())
                          if c.has_replacement_rule()))
def main() -> None:
    """Command-line entry point: parse options, set up logging, convert."""
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('filenames', nargs='+')
    parser.add_argument('--passes', type=int, default=1,
                        help="Number of passes (0 means unlimited)")
    parser.add_argument('--pattern', required=True, action='append',
                        default=[], dest='patterns',
                        help="Pattern to scan for")
    parser.add_argument('--inplace', '-i', action='store_true',
                        help="Patch file in place")
    parser.add_argument('--dry-run', action='store_true',
                        help="Don't patch files or print patching results")
    parser.add_argument('--force', '-f', action='store_true',
                        help="Perform changes even if not completely safe")
    parser.add_argument('--diff', action='store_true',
                        help="Print diff output on stdout")
    parser.add_argument('--debug', '-d', action='store_true',
                        help="Enable debugging")
    parser.add_argument('--verbose', '-v', action='store_true',
                        help="Verbose logging on stderr")
    parser.add_argument('--table', action='store_true',
                        help="Print CSV table of type information")
    parser.add_argument_group("Valid pattern names",
                              PATTERN_HELP)
    args = parser.parse_args()

    # Most verbose flag wins: --debug > --verbose > default (WARN).
    if args.debug:
        loglevel = logging.DEBUG
    elif args.verbose:
        loglevel = logging.INFO
    else:
        loglevel = logging.WARN
    logging.basicConfig(format='%(levelname)s: %(message)s', level=loglevel)

    DBG("args: %r", args)
    process_all_files(parser, args)


if __name__ == '__main__':
    main()
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/codeconverter/converter.py
|
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <ehabkost@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import re
from .regexps import *
from .patching import *
from .utils import *
from .qom_macros import *
# Field names accepted inside a TypeInfo initializer, in canonical order.
TI_FIELDS = [ 'name', 'parent', 'abstract', 'interfaces',
    'instance_size', 'instance_init', 'instance_post_init', 'instance_finalize',
    'class_size', 'class_init', 'class_base_init', 'class_data']

# Matches any one of the known TypeInfo field names.
RE_TI_FIELD_NAME = OR(*TI_FIELDS)

# One ".field = value," initializer line, with optional leading comments.
RE_TI_FIELD_INIT = S(r'[ \t]*', NAMED('comments', RE_COMMENTS),
                     r'\.', NAMED('field', RE_TI_FIELD_NAME), r'\s*=\s*',
                     NAMED('value', RE_EXPRESSION), r'[ \t]*,?[ \t]*\n')
# Zero or more field-initializer lines.
RE_TI_FIELDS = M(RE_TI_FIELD_INIT)

# Start of a "static/const TypeInfo <name> = {" variable definition.
RE_TYPEINFO_START = S(r'^[ \t]*', M(r'(static|const)\s+', name='modifiers'), r'TypeInfo\s+',
                      NAMED('name', RE_IDENTIFIER), r'\s*=\s*{[ \t]*\n')
# A parsed C array initializer: one raw string per element.
ParsedArray = List[str]
# Either a raw expression string or a parsed array of elements.
ParsedInitializerValue = Union[str, ParsedArray]

class InitializerValue(NamedTuple):
    """A field initializer value: raw source, optional parsed form, match."""
    raw: str
    parsed: Optional[ParsedInitializerValue]
    match: Optional[Match]
class ArrayItem(FileMatch):
    """One element inside a C array initializer."""
    regexp = RE_ARRAY_ITEM
class ArrayInitializer(FileMatch):
    """A complete C array initializer ("{ item, item, ... }")."""
    regexp = RE_ARRAY

    def parsed(self) -> ParsedArray:
        """Return the raw source text of each array element."""
        return [m.group('arrayitem') for m in self.group_finditer(ArrayItem, 'arrayitems')]
class FieldInitializer(FileMatch):
    """Match for a single ".field = value" TypeInfo initializer line."""
    regexp = RE_TI_FIELD_INIT

    @property
    def raw(self) -> str:
        # Raw source text of the right-hand-side expression.
        return self.group('value')

    @property
    def parsed(self) -> ParsedInitializerValue:
        # Array values are parsed element-wise; anything else stays raw.
        parsed: ParsedInitializerValue = self.raw
        array = self.try_group_match(ArrayInitializer, 'value')
        if array:
            assert isinstance(array, ArrayInitializer)
            return array.parsed()
        return parsed

# Maps a TypeInfo field name to its FieldInitializer match.
TypeInfoInitializers = Dict[str, FieldInitializer]
class TypeDefinition(FileMatch):
    """
    Common base class for type definitions (TypeInfo variables or OBJECT_DEFINE* macros)
    """
    @property
    def instancetype(self) -> Optional[str]:
        return self.group('instancetype')

    @property
    def classtype(self) -> Optional[str]:
        return self.group('classtype')

    @property
    def uppercase(self) -> Optional[str]:
        return self.group('uppercase')

    @property
    def parent_uppercase(self) -> str:
        return self.group('parent_uppercase')

    @property
    def initializers(self) -> Optional[TypeInfoInitializers]:
        """Field initializers keyed by field name, or None when the match
        has no 'fields' group.  The result is cached on first access.
        """
        # BUG FIX: the cache attribute was misspelled ('_inititalizers'),
        # so the cached dict was never found and the field matches were
        # re-scanned on every access.
        if getattr(self, '_initializers', None):
            self._initializers: TypeInfoInitializers
            return self._initializers
        fields = self.group('fields')
        if fields is None:
            return None
        d = dict((fm.group('field'), fm)
                 for fm in self.group_finditer(FieldInitializer, 'fields'))
        self._initializers = d  # type: ignore
        return self._initializers
class TypeInfoVar(TypeDefinition):
    """TypeInfo variable declaration with initializer.

    (A large commented-out draft of an extract_identifiers() method was
    removed as dead code; see version control history if needed.)
    """
    regexp = S(NAMED('begin', RE_TYPEINFO_START),
               M(NAMED('fields', RE_TI_FIELDS),
                 NAMED('endcomments', SP, RE_COMMENTS),
                 NAMED('end', r'};?\n'),
                 n='?', name='fullspec'))

    def is_static(self) -> bool:
        return 'static' in self.group('modifiers')

    def is_const(self) -> bool:
        return 'const' in self.group('modifiers')

    def is_full(self) -> bool:
        """True when the whole initializer body (fields + '};') matched."""
        return bool(self.group('fullspec'))

    def get_initializers(self) -> TypeInfoInitializers:
        """Helper for code that needs to deal with missing initializer info"""
        if self.initializers is None:
            return {}
        return self.initializers

    def get_raw_initializer_value(self, field: str, default: str = '') -> str:
        """Return the raw source text of a field initializer, or *default*."""
        initializers = self.get_initializers()
        if field in initializers:
            return initializers[field].raw
        else:
            return default

    @property
    def typename(self) -> Optional[str]:
        # Raw value of the .name field (usually a TYPE_* macro).
        return self.get_raw_initializer_value('name')

    @property
    def uppercase(self) -> Optional[str]:
        # Derived from .name only when it follows the TYPE_* convention.
        typename = self.typename
        if not typename:
            return None
        if not typename.startswith('TYPE_'):
            return None
        return typename[len('TYPE_'):]

    @property
    def classtype(self) -> Optional[str]:
        # Extracted from ".class_size = sizeof(SomeClass)".
        class_size = self.get_raw_initializer_value('class_size')
        if not class_size:
            return None
        m = re.fullmatch(RE_SIZEOF, class_size)
        if not m:
            return None
        return m.group('sizeoftype')

    @property
    def instancetype(self) -> Optional[str]:
        # Extracted from ".instance_size = sizeof(SomeState)".
        instance_size = self.get_raw_initializer_value('instance_size')
        if not instance_size:
            return None
        m = re.fullmatch(RE_SIZEOF, instance_size)
        if not m:
            return None
        return m.group('sizeoftype')

    def append_field(self, field: str, value: str) -> Patch:
        """Generate patch appending a field initializer"""
        content = f' .{field} = {value},\n'
        fm = self.group_match('fields')
        assert fm
        return fm.append(content)

    def patch_field(self, field: str, replacement: str) -> Patch:
        """Generate patch replacing a field initializer"""
        initializers = self.initializers
        assert initializers
        value = initializers.get(field)
        assert value
        return value.make_patch(replacement)

    def remove_field(self, field: str) -> Iterable[Patch]:
        """Yield a patch deleting *field*'s initializer line, if present."""
        initializers = self.initializers
        assert initializers
        if field in initializers:
            yield self.patch_field(field, '')

    def remove_fields(self, *fields: str) -> Iterable[Patch]:
        """Yield patches deleting every listed field initializer."""
        for f in fields:
            yield from self.remove_field(f)

    def patch_field_value(self, field: str, replacement: str) -> Patch:
        """Replace just the value of a field initializer"""
        initializers = self.initializers
        assert initializers
        value = initializers.get(field)
        assert value
        vm = value.group_match('value')
        assert vm
        return vm.make_patch(replacement)
class RemoveRedundantClassSize(TypeInfoVar):
    """Remove class_size when using OBJECT_DECLARE_SIMPLE_TYPE"""
    def gen_patches(self) -> Iterable[Patch]:
        """Yield a patch dropping .class_size when the instance type has an
        OBJECT_DECLARE_SIMPLE_TYPE declaration (which implies it)."""
        initializers = self.initializers
        if initializers is None:
            return
        if 'class_size' not in initializers:
            return

        self.debug("Handling %s", self.name)
        m = re.fullmatch(RE_SIZEOF, initializers['class_size'].raw)
        if not m:
            self.warn("%s class_size is not sizeof?", self.name)
            return
        classtype = m.group('sizeoftype')
        if not classtype.endswith('Class'):
            self.warn("%s class size type (%s) is not *Class?", self.name, classtype)
            return
        self.debug("classtype is %s", classtype)
        # By convention the instance type is the class type minus "Class".
        instancetype = classtype[:-len('Class')]
        # Fixed garbled debug message (was "intanceypte is %s").
        self.debug("instancetype is %s", instancetype)
        self.debug("searching for simpletype declaration using %s as InstanceType", instancetype)
        decl = self.allfiles.find_match(OldStyleObjectDeclareSimpleType,
                                        instancetype, 'instancetype')
        if not decl:
            self.debug("No simpletype declaration found for %s", instancetype)
            return
        self.debug("Found simple type declaration")
        decl.debug("declaration is here")
        yield from self.remove_field('class_size')
class RemoveDeclareSimpleTypeArg(OldStyleObjectDeclareSimpleType):
    """Rewrite old-style OBJECT_DECLARE_SIMPLE_TYPE uses to the three-argument
    form (instancetype, lowercase, uppercase), dropping the extra argument.

    NOTE(review): the previous docstring ("Remove class_size when using
    OBJECT_DECLARE_SIMPLE_TYPE") was copy-pasted from RemoveRedundantClassSize
    and did not describe this class.
    """
    def gen_patches(self) -> Iterable[Patch]:
        # Re-emit the macro call with only the three surviving arguments.
        c = (f'OBJECT_DECLARE_SIMPLE_TYPE({self.group("instancetype")}, {self.group("lowercase")},\n'
             f' {self.group("uppercase")})\n')
        yield self.make_patch(c)
class UseDeclareTypeExtended(TypeInfoVar):
    """Replace TypeInfo variable with OBJECT_DEFINE_TYPE_EXTENDED.

    (Roughly 80 lines of commented-out exploratory code — type-checker
    cross-validation and identifier extraction drafts — were removed as
    dead code, along with the unused local `ok`.)
    """
    def gen_patches(self) -> Iterable[Patch]:
        """Yield patches replacing a static TypeInfo variable plus its
        TYPE_INFO() line with one OBJECT_DEFINE_TYPE_EXTENDED() call."""
        # this will just ensure the caches for find_match() and matches_for_type()
        # will be loaded in advance:
        find_type_checkers(self.allfiles, 'xxxxxxxxxxxxxxxxx')

        if not self.is_static():
            self.info("Skipping non-static TypeInfo variable")
            return

        type_info_macro = self.file.find_match(TypeInfoMacro, self.name)
        if not type_info_macro:
            self.warn("TYPE_INFO(%s) line not found", self.name)
            return

        values = self.initializers
        if values is None:
            return
        if 'name' not in values:
            self.warn("name not set in TypeInfo variable %s", self.name)
            return
        typename = values['name'].raw
        if 'parent' not in values:
            self.warn("parent not set in TypeInfo variable %s", self.name)
            return
        parent_typename = values['parent'].raw

        # instance/class types are only usable when written as sizeof(T):
        instancetype = None
        if 'instance_size' in values:
            m = re.fullmatch(RE_SIZEOF, values['instance_size'].raw)
            if m:
                instancetype = m.group('sizeoftype')
            else:
                self.warn("can't extract instance type in TypeInfo variable %s", self.name)
                self.warn("instance_size is set to: %r", values['instance_size'].raw)
                return

        classtype = None
        if 'class_size' in values:
            m = re.fullmatch(RE_SIZEOF, values['class_size'].raw)
            if m:
                classtype = m.group('sizeoftype')
            else:
                self.warn("can't extract class type in TypeInfo variable %s", self.name)
                self.warn("class_size is set to: %r", values['class_size'].raw)
                return

        # Now, the challenge is to find out the right MODULE_OBJ_NAME for the
        # type and for the parent type
        self.info("TypeInfo variable for %s is here", typename)
        uppercase = find_typename_uppercase(self.allfiles, typename)
        if not uppercase:
            self.info("Can't find right uppercase name for %s", typename)
            if instancetype or classtype:
                self.warn("Can't find right uppercase name for %s", typename)
                self.warn("This will make type validation difficult in the future")
                return

        parent_uppercase = find_typename_uppercase(self.allfiles, parent_typename)
        if not parent_uppercase:
            self.info("Can't find right uppercase name for parent type (%s)", parent_typename)
            if instancetype or classtype:
                self.warn("Can't find right uppercase name for parent type (%s)", parent_typename)
                self.warn("This will make type validation difficult in the future")
                return

        # The macro requires explicit instance/class types; default to void.
        if not instancetype:
            instancetype = 'void'
        if not classtype:
            classtype = 'void'

        self.info("%s can be patched!", self.name)
        replaced_fields = ['name', 'parent', 'instance_size', 'class_size']
        begin = self.group_match('begin')
        newbegin = f'OBJECT_DEFINE_TYPE_EXTENDED({self.name},\n'
        newbegin += f' {instancetype}, {classtype},\n'
        newbegin += f' {uppercase}, {parent_uppercase}'
        # Keep a trailing comma only if other fields remain in the body.
        if set(values.keys()) - set(replaced_fields):
            newbegin += ',\n'
        yield begin.make_patch(newbegin)
        yield from self.remove_fields(*replaced_fields)
        end = self.group_match('end')
        yield end.make_patch(')\n')
        yield type_info_macro.make_removal_patch()
class ObjectDefineTypeExtended(TypeDefinition):
    """OBJECT_DEFINE_TYPE_EXTENDED usage"""
    # Captures name, instance/class types, uppercase names, and optional
    # extra field initializers after the fixed arguments.
    regexp = S(r'^[ \t]*OBJECT_DEFINE_TYPE_EXTENDED\s*\(\s*',
               NAMED('name', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('instancetype', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('classtype', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('parent_uppercase', RE_IDENTIFIER),
               M(r',\s*\n',
                 NAMED('fields', RE_TI_FIELDS),
                 n='?'),
               r'\s*\);?\n?')
class ObjectDefineType(TypeDefinition):
    """OBJECT_DEFINE_TYPE usage"""
    # Captures lowercase/uppercase/parent names plus optional extra
    # field initializers after the fixed arguments.
    regexp = S(r'^[ \t]*OBJECT_DEFINE_TYPE\s*\(\s*',
               NAMED('lowercase', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('parent_uppercase', RE_IDENTIFIER),
               M(r',\s*\n',
                 NAMED('fields', RE_TI_FIELDS),
                 n='?'),
               r'\s*\);?\n?')
def find_type_definitions(files: FileList, uppercase: str) -> Iterable[TypeDefinition]:
    """Yield every type definition whose uppercase name matches *uppercase*.

    Searches TypeInfo variables and OBJECT_DEFINE_TYPE{,_EXTENDED} uses.
    """
    types: List[Type[TypeDefinition]] = [TypeInfoVar, ObjectDefineType, ObjectDefineTypeExtended]
    # Single scan per match type: the original iterated every match list
    # twice (once only to log, then again to filter).  Yield order is
    # unchanged; debug lines are now interleaved with the yields.
    for t in types:
        for m in files.matches_of_type(t):
            m.debug("uppercase: %s", m.uppercase)
            if m.uppercase == uppercase:
                yield m
class AddDeclareVoidClassType(TypeDeclarationFixup):
    """Will add DECLARE_CLASS_TYPE(..., void) if possible"""
    def gen_patches_for_type(self, uppercase: str,
                             checkers: List[TypeDeclaration],
                             fields: Dict[str, Optional[str]]) -> Iterable[Patch]:
        """Append DECLARE_CLASS_TYPE(uppercase, void) after the last type
        checker when exactly one definition exists and no class-type
        checker is already declared."""
        defs = list(find_type_definitions(self.allfiles, uppercase))
        if len(defs) > 1:
            self.warn("multiple definitions for %s", uppercase)
            for d in defs:
                d.warn("definition found here")
            return
        elif len(defs) == 0:
            self.warn("type definition for %s not found", uppercase)
            return
        d = defs[0]
        # NOTE(review): the condition and message disagree — the message says
        # "has classtype" but the test is `classtype is None`.  Behavior kept
        # as-is pending confirmation of which is intended.
        if d.classtype is None:
            d.info("definition for %s has classtype, skipping", uppercase)
            return
        class_type_checkers = [c for c in checkers
                               if c.classtype is not None]
        if class_type_checkers:
            for c in class_type_checkers:
                c.warn("class type checker for %s is present here", uppercase)
            return

        # Key-based max avoids comparing TypeDeclaration objects directly:
        # the original max() over (start, match) tuples would fall back to
        # comparing the matches (and raise) if two starts were equal.
        last_checker = max(checkers, key=lambda m: m.start())
        s = f'DECLARE_CLASS_TYPE({uppercase}, void)\n'
        yield last_checker.append(s)
class AddDeclareVoidInstanceType(FileMatch):
    """Will add DECLARE_INSTANCE_TYPE(..., void) if possible"""
    # Matches "#define TYPE_FOO ..." macro definitions.
    regexp = S(r'^[ \t]*#[ \t]*define', CPP_SPACE,
               NAMED('name', r'TYPE_[a-zA-Z0-9_]+\b'),
               CPP_SPACE, r'.*\n')

    def gen_patches(self) -> Iterable[Patch]:
        """Append DECLARE_INSTANCE_TYPE(uppercase, void) after this macro
        when a single definition exists with no concrete instance type and
        no instance checker is already declared."""
        assert self.name.startswith('TYPE_')
        uppercase = self.name[len('TYPE_'):]
        defs = list(find_type_definitions(self.allfiles, uppercase))
        if len(defs) > 1:
            # Ambiguous: refuse to patch and point at every definition.
            self.warn("multiple definitions for %s", uppercase)
            for d in defs:
                d.warn("definition found here")
            return
        elif len(defs) == 0:
            self.warn("type definition for %s not found", uppercase)
            return
        d = defs[0]
        instancetype = d.instancetype
        # Only applicable when the instance type is absent or already void.
        if instancetype is not None and instancetype != 'void':
            return
        instance_checkers = [c for c in find_type_checkers(self.allfiles, uppercase)
                             if c.instancetype]
        if instance_checkers:
            d.warn("instance type checker for %s already declared", uppercase)
            for c in instance_checkers:
                c.warn("instance checker for %s is here", uppercase)
            return

        s = f'DECLARE_INSTANCE_TYPE({uppercase}, void)\n'
        yield self.append(s)
class AddObjectDeclareType(DeclareObjCheckers):
    """Will add OBJECT_DECLARE_TYPE(...) if possible"""
    def gen_patches(self) -> Iterable[Patch]:
        # Replace a DECLARE_OBJ_CHECKERS() use (plus now-redundant struct
        # typedefs) with a single OBJECT_DECLARE_TYPE() invocation.
        uppercase = self.uppercase
        typename = self.group('typename')
        instancetype = self.group('instancetype')
        classtype = self.group('classtype')
        if typename != f'TYPE_{uppercase}':
            self.warn("type name mismatch: %s vs %s", typename, uppercase)
            return

        # Both the instance and class types must come from plain
        # "typedef struct X X;" declarations.
        typedefs = [(t,self.allfiles.find_matches(SimpleTypedefMatch, t))
                    for t in (instancetype, classtype)]
        for t,tds in typedefs:
            if not tds:
                self.warn("typedef %s not found", t)
                return
            for td in tds:
                td_type = td.group('typedef_type')
                if td_type != f'struct {t}':
                    self.warn("typedef mismatch: %s is defined as %s", t, td_type)
                    td.warn("typedef is here")
                    return

        # look for reuse of same struct type
        other_instance_checkers = [c for c in find_type_checkers(self.allfiles, instancetype, 'instancetype')
                                   if c.uppercase != uppercase]
        if other_instance_checkers:
            self.warn("typedef %s is being reused", instancetype)
            for ic in other_instance_checkers:
                ic.warn("%s is reused here", instancetype)
            if not self.file.force:
                return

        decl_types: List[Type[TypeDeclaration]] = [DeclareClassCheckers, DeclareObjCheckers]
        # NOTE(review): class_decls is computed but never used below;
        # possibly leftover (AddObjectDeclareSimpleType has the matching
        # check).  Kept as-is — find_matches may warm internal caches.
        class_decls = [m for t in decl_types
                       for m in self.allfiles.find_matches(t, uppercase, 'uppercase')]

        # Cross-check the declared types against the single definition.
        defs = list(find_type_definitions(self.allfiles, uppercase))
        if len(defs) > 1:
            self.warn("multiple definitions for %s", uppercase)
            for d in defs:
                d.warn("definition found here")
            if not self.file.force:
                return
        elif len(defs) == 0:
            self.warn("type definition for %s not found", uppercase)
            if not self.file.force:
                return
        else:
            d = defs[0]
            if d.instancetype != instancetype:
                self.warn("mismatching instance type for %s (%s)", uppercase, instancetype)
                d.warn("instance type declared here (%s)", d.instancetype)
                if not self.file.force:
                    return
            if d.classtype != classtype:
                self.warn("mismatching class type for %s (%s)", uppercase, classtype)
                d.warn("class type declared here (%s)", d.classtype)
                if not self.file.force:
                    return

        assert self.file.original_content
        for t,tds in typedefs:
            assert tds
            for td in tds:
                if td.file is not self.file:
                    continue

                # delete typedefs that are truly redundant:
                # 1) defined after DECLARE_OBJ_CHECKERS
                if td.start() > self.start():
                    yield td.make_removal_patch()
                # 2) defined before DECLARE_OBJ_CHECKERS, but unused
                elif not re.search(r'\b'+t+r'\b', self.file.original_content[td.end():self.start()]):
                    yield td.make_removal_patch()

        c = (f'OBJECT_DECLARE_TYPE({instancetype}, {classtype}, {uppercase})\n')
        yield self.make_patch(c)
class AddObjectDeclareSimpleType(DeclareInstanceChecker):
    """Will add OBJECT_DECLARE_SIMPLE_TYPE(...) if possible

    Converts a DECLARE_INSTANCE_CHECKER use (plus any now-redundant
    instance-struct typedef) into a single OBJECT_DECLARE_SIMPLE_TYPE(...)
    line, after verifying that no separate class struct or class checker
    exists for the type.  Most inconsistencies abort patching unless the
    file was processed with --force.
    """
    def gen_patches(self) -> Iterable[Patch]:
        uppercase = self.uppercase
        typename = self.group('typename')
        instancetype = self.group('instancetype')
        # OBJECT_DECLARE_SIMPLE_TYPE assumes the TYPE_* constant follows
        # the standard TYPE_<UPPERCASE> naming convention:
        if typename != f'TYPE_{uppercase}':
            self.warn("type name mismatch: %s vs %s", typename, uppercase)
            return
        # collect the typedefs for the instance struct (kept as a list of
        # (name, matches) pairs so the deletion loop below can reuse it):
        typedefs = [(t,self.allfiles.find_matches(SimpleTypedefMatch, t))
                    for t in (instancetype,)]
        for t,tds in typedefs:
            if not tds:
                self.warn("typedef %s not found", t)
                return
            for td in tds:
                td_type = td.group('typedef_type')
                if td_type != f'struct {t}':
                    self.warn("typedef mismatch: %s is defined as %s", t, td_type)
                    td.warn("typedef is here")
                    return
        # look for reuse of same struct type
        other_instance_checkers = [c for c in find_type_checkers(self.allfiles, instancetype, 'instancetype')
                                   if c.uppercase != uppercase]
        if other_instance_checkers:
            self.warn("typedef %s is being reused", instancetype)
            for ic in other_instance_checkers:
                ic.warn("%s is reused here", instancetype)
            if not self.file.force:
                return
        decl_types: List[Type[TypeDeclaration]] = [DeclareClassCheckers, DeclareObjCheckers]
        class_decls = [m for t in decl_types
                       for m in self.allfiles.find_matches(t, uppercase, 'uppercase')]
        # OBJECT_DECLARE_SIMPLE_TYPE is only valid for types without a
        # separate class struct, so any class checker disqualifies it:
        if class_decls:
            self.warn("class type declared for %s", uppercase)
            for cd in class_decls:
                cd.warn("class declaration found here")
            return
        defs = list(find_type_definitions(self.allfiles, uppercase))
        if len(defs) > 1:
            self.warn("multiple definitions for %s", uppercase)
            for d in defs:
                d.warn("definition found here")
            if not self.file.force:
                return
        elif len(defs) == 0:
            self.warn("type definition for %s not found", uppercase)
            if not self.file.force:
                return
        else:
            d = defs[0]
            if d.instancetype != instancetype:
                self.warn("mismatching instance type for %s (%s)", uppercase, instancetype)
                d.warn("instance type declared here (%s)", d.instancetype)
                if not self.file.force:
                    return
            if d.classtype:
                self.warn("class type set for %s", uppercase)
                d.warn("class type declared here")
                if not self.file.force:
                    return
        assert self.file.original_content
        for t,tds in typedefs:
            assert tds
            for td in tds:
                if td.file is not self.file:
                    continue
                # delete typedefs that are truly redundant:
                # 1) defined after DECLARE_OBJ_CHECKERS
                if td.start() > self.start():
                    yield td.make_removal_patch()
                # 2) defined before DECLARE_OBJ_CHECKERS, but unused
                elif not re.search(r'\b'+t+r'\b', self.file.original_content[td.end():self.start()]):
                    yield td.make_removal_patch()
        c = (f'OBJECT_DECLARE_SIMPLE_TYPE({instancetype}, {uppercase})\n')
        yield self.make_patch(c)
class TypeInfoStringName(TypeInfoVar):
    """Replace hardcoded type names with TYPE_ constant

    Looks at the .name initializer of a TypeInfo variable; when it is a
    string literal (not an identifier), tries to find the one #define
    whose value is that exact literal and patches the field to use the
    macro name instead.
    """
    def gen_patches(self) -> Iterable[Patch]:
        values = self.initializers
        if values is None:
            return
        if 'name' not in values:
            self.warn("name not set in TypeInfo variable %s", self.name)
            return
        typename = values['name'].raw
        # already an identifier (e.g. TYPE_FOO): nothing to patch
        if re.fullmatch(RE_IDENTIFIER, typename):
            return
        self.warn("name %s is not an identifier", typename)
        #all_defines = [m for m in self.allfiles.matches_of_type(ExpressionDefine)]
        #self.debug("all_defines: %r", all_defines)
        # look for a #define whose value matches the raw literal exactly:
        constants = [m for m in self.allfiles.matches_of_type(ExpressionDefine)
                     if m.group('value').strip() == typename.strip()]
        if not constants:
            self.warn("No macro for %s found", typename)
            return
        # ambiguous: more than one macro has the same value; don't guess
        if len(constants) > 1:
            self.warn("I don't know which macro to use: %r", constants)
            return
        yield self.patch_field_value('name', constants[0].name)
class RedundantTypeSizes(TypeInfoVar):
    """Remove redundant instance_size/class_size from TypeInfo vars

    A size field is considered redundant when it is byte-identical to the
    parent type's corresponding field and the type has no type-checker
    declarations of its own (which would pin the struct types).
    """
    def gen_patches(self) -> Iterable[Patch]:
        values = self.initializers
        if values is None:
            return
        if 'name' not in values:
            self.warn("name not set in TypeInfo variable %s", self.name)
            return
        typename = values['name'].raw
        if 'parent' not in values:
            self.warn("parent not set in TypeInfo variable %s", self.name)
            return
        parent_typename = values['parent'].raw
        # nothing to delete if neither size field is present:
        if 'instance_size' not in values and 'class_size' not in values:
            self.debug("no need to validate %s", self.name)
            return
        # type checkers imply the sizes are meaningful for this type:
        instance_decls = find_type_checkers(self.allfiles, typename)
        if instance_decls:
            self.debug("won't touch TypeInfo var that has type checkers")
            return
        parent = find_type_info(self.allfiles, parent_typename)
        if not parent:
            self.warn("Can't find TypeInfo for %s", parent_typename)
            return
        # only delete fields whose raw text matches the parent's exactly:
        if 'instance_size' in values and parent.get_raw_initializer_value('instance_size') != values['instance_size'].raw:
            self.info("instance_size mismatch")
            parent.info("parent type declared here")
            return
        if 'class_size' in values and parent.get_raw_initializer_value('class_size') != values['class_size'].raw:
            self.info("class_size mismatch")
            parent.info("parent type declared here")
            return
        self.debug("will patch variable %s", self.name)
        if 'instance_size' in values:
            self.debug("deleting instance_size")
            yield self.patch_field('instance_size', '')
        if 'class_size' in values:
            self.debug("deleting class_size")
            yield self.patch_field('class_size', '')
#class TypeInfoVarInitFuncs(TypeInfoVar):
# """TypeInfo variable
# Will create missing init functions
# """
# def gen_patches(self) -> Iterable[Patch]:
# values = self.initializers
# if values is None:
# self.warn("type not parsed completely: %s", self.name)
# return
#
# macro = self.file.find_match(TypeInfoVar, self.name)
# if macro is None:
# self.warn("No TYPE_INFO macro for %s", self.name)
# return
#
# ids = self.extract_identifiers()
# if ids is None:
# return
#
# DBG("identifiers extracted: %r", ids)
# fields = set(values.keys())
# if ids.lowercase:
# if 'instance_init' not in fields:
# yield self.prepend(('static void %s_init(Object *obj)\n'
# '{\n'
# '}\n\n') % (ids.lowercase))
# yield self.append_field('instance_init', ids.lowercase+'_init')
#
# if 'instance_finalize' not in fields:
# yield self.prepend(('static void %s_finalize(Object *obj)\n'
# '{\n'
# '}\n\n') % (ids.lowercase))
# yield self.append_field('instance_finalize', ids.lowercase+'_finalize')
#
#
# if 'class_init' not in fields:
# yield self.prepend(('static void %s_class_init(ObjectClass *oc, void *data)\n'
# '{\n'
# '}\n\n') % (ids.lowercase))
# yield self.append_field('class_init', ids.lowercase+'_class_init')
class TypeInitMacro(FileMatch):
    """Use of type_init(...) macro

    Captures the registration-function name as group 'name'.
    """
    regexp = S(r'^[ \t]*type_init\s*\(\s*', NAMED('name', RE_IDENTIFIER), r'\s*\);?[ \t]*\n')
class DeleteEmptyTypeInitFunc(TypeInitMacro):
    """Delete empty function declared using type_init(...)

    After type_register_static() calls have been converted to TYPE_INFO()
    declarations, the registration function may be left empty; delete both
    the function and its type_init() line in that case.
    """
    def gen_patches(self) -> Iterable[Patch]:
        fn = self.file.find_match(StaticVoidFunction, self.name)
        DBG("function for %s: %s", self.name, fn)
        # only delete if the function body is now completely empty:
        if fn and fn.body == '':
            yield fn.make_patch('')
            yield self.make_patch('')
class StaticVoidFunction(FileMatch):
    """simple static void function
    (no replacement rules)
    """
    #NOTE: just like RE_FULL_STRUCT, this doesn't parse any of the body contents
    #      of the function. It will just look for "}" in the beginning of a line
    regexp = S(r'static\s+void\s+', NAMED('name', RE_IDENTIFIER), r'\s*\(\s*void\s*\)\n',
               r'{\n',
               NAMED('body',
                     # acceptable inside the function body:
                     # - lines starting with space or tab
                     # - empty lines
                     # - preprocessor directives
                     OR(r'[ \t][^\n]*\n',
                        r'#[^\n]*\n',
                        r'\n',
                        repeat='*')),
               r'};?\n')

    @property
    def body(self) -> str:
        """Raw text of the function body (everything between the braces)."""
        return self.group('body')

    def has_preprocessor_directive(self) -> bool:
        """True if any body line starts with '#' (after optional blanks)."""
        return bool(re.search(r'^[ \t]*#', self.body, re.MULTILINE))
def find_containing_func(m: FileMatch) -> Optional['StaticVoidFunction']:
    """Return the StaticVoidFunction whose span contains *m*, or None."""
    candidates = (fn for fn in m.file.matches_of_type(StaticVoidFunction)
                  if fn.contains(m))
    return next(candidates, None)
class TypeRegisterStaticCall(FileMatch):
    """type_register_static() call
    Will be replaced by TYPE_INFO() macro
    """
    # group 'name' is the TypeInfo variable passed by address
    regexp = S(r'^[ \t]*', NAMED('func_name', 'type_register_static'),
               r'\s*\(&\s*', NAMED('name', RE_IDENTIFIER), r'\s*\);[ \t]*\n')
class UseTypeInfo(TypeRegisterStaticCall):
    """Replace type_register_static() call with TYPE_INFO declaration

    Deletes the call inside the registration function and appends a
    TYPE_INFO(...) line right after the TypeInfo variable declaration.
    """
    def gen_patches(self) -> Iterable[Patch]:
        fn = find_containing_func(self)
        if fn:
            DBG("%r is inside %r", self, fn)
            type_init = self.file.find_match(TypeInitMacro, fn.name)
            if type_init is None:
                self.warn("can't find type_init(%s) line", fn.name)
                if not self.file.force:
                    return
        else:
            self.warn("can't identify the function where type_register_static(&%s) is called", self.name)
            if not self.file.force:
                return
        #if fn.has_preprocessor_directive() and not self.file.force:
        #    self.warn("function %s has preprocessor directives, this requires --force", fn.name)
        #    return
        var = self.file.find_match(TypeInfoVar, self.name)
        if var is None:
            self.warn("can't find TypeInfo var declaration for %s", self.name)
            return
        if not var.is_full():
            self.warn("variable declaration %s wasn't parsed fully", var.name)
            if not self.file.force:
                return
        # TYPE_INFO() must be at file scope; a TypeInfo variable declared
        # inside a function can't be converted:
        if fn and fn.contains(var):
            self.warn("TypeInfo %s variable is inside a function", self.name)
            if not self.file.force:
                return
        # delete type_register_static() call:
        yield self.make_patch('')
        # append TYPE_REGISTER(...) after variable declaration:
        yield var.append(f'TYPE_INFO({self.name})\n')
class TypeRegisterCall(FileMatch):
    """type_register() call

    (The regexp matches plain type_register(), not type_register_static();
    the previous docstring was misleading.)
    """
    regexp = S(r'^[ \t]*', NAMED('func_name', 'type_register'),
               r'\s*\(&\s*', NAMED('name', RE_IDENTIFIER), r'\s*\);[ \t]*\n')
class MakeTypeRegisterStatic(TypeRegisterCall):
    """Make type_register() call static if variable is static const

    Rewrites the function name to type_register_static() when the TypeInfo
    variable being registered is declared 'static const'.
    """
    def gen_patches(self):
        var = self.file.find_match(TypeInfoVar, self.name)
        if var is None:
            self.warn("can't find TypeInfo var declaration for %s", self.name)
            return
        if var.is_static() and var.is_const():
            yield self.group_match('func_name').make_patch('type_register_static')
class MakeTypeRegisterNotStatic(TypeRegisterStaticCall):
    """Make type_register_static() call non-static if variable isn't static const

    Inverse of MakeTypeRegisterStatic: rewrites type_register_static() back
    to type_register() when the TypeInfo variable is not 'static const'.
    (The previous docstring was copy-pasted from MakeTypeRegisterStatic.)
    """
    def gen_patches(self):
        var = self.file.find_match(TypeInfoVar, self.name)
        if var is None:
            self.warn("can't find TypeInfo var declaration for %s", self.name)
            return
        if not var.is_static() or not var.is_const():
            yield self.group_match('func_name').make_patch('type_register')
class TypeInfoMacro(FileMatch):
    """TYPE_INFO macro usage

    Captures the TypeInfo variable name as group 'name'.
    """
    regexp = S(r'^[ \t]*TYPE_INFO\s*\(\s*', NAMED('name', RE_IDENTIFIER), r'\s*\)[ \t]*;?[ \t]*\n')
def find_type_info(files: RegexpScanner, name: str) -> Optional[TypeInfoVar]:
    """Find the single TypeInfo variable whose .name initializer is *name*.

    Returns None when the lookup is ambiguous (several candidates) or empty.
    """
    candidates = [var for var in files.matches_of_type(TypeInfoVar)
                  if var.get_raw_initializer_value('name') == name]
    DBG("type info vars: %r", candidates)
    if len(candidates) > 1:
        DBG("multiple TypeInfo vars found for %s", name)
        return None
    if not candidates:
        DBG("no TypeInfo var found for %s", name)
        return None
    return candidates[0]
class CreateClassStruct(DeclareInstanceChecker):
    """Replace DECLARE_INSTANCE_CHECKER with OBJECT_DECLARE_SIMPLE_TYPE

    NOTE: the conversion itself is not implemented yet; after the sanity
    checks this generator always bails out (see the unconditional `return`
    below).
    """
    def gen_patches(self) -> Iterable[Patch]:
        typename = self.group('typename')
        DBG("looking for TypeInfo variable for %s", typename)
        var = find_type_info(self.allfiles, typename)
        if var is None:
            self.warn("no TypeInfo var found for %s", typename)
            return
        assert var.initializers
        if 'class_size' in var.initializers:
            self.warn("class size already set for TypeInfo %s", var.name)
            return
        classtype = self.group('instancetype')+'Class'
        # unconditional bail-out: conversion not implemented.  The
        # unreachable bare `yield` below keeps this function a generator,
        # which is what callers expect.
        return
        yield
        #TODO: need to find out what's the parent class type...
        #yield var.append_field('class_size', f'sizeof({classtype})')
        #c = (f'OBJECT_DECLARE_SIMPLE_TYPE({instancetype}, {lowercase},\n'
        #     f'                           MODULE_OBJ_NAME, ParentClassType)\n')
        #yield self.make_patch(c)
def type_infos(file: FileInfo) -> Iterable[TypeInfoVar]:
    """All TypeInfo variable matches in *file*."""
    return file.matches_of_type(TypeInfoVar)
def full_types(file: FileInfo) -> Iterable[TypeInfoVar]:
    """TypeInfo variables in *file* whose initializers parsed completely."""
    return [t for t in type_infos(file) if t.is_full()]
def partial_types(file: FileInfo) -> Iterable[TypeInfoVar]:
    """TypeInfo variables in *file* that were only partially parsed."""
    return [t for t in type_infos(file) if not t.is_full()]
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/codeconverter/codeconverter/qom_type_info.py
|
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <ehabkost@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from tempfile import NamedTemporaryFile
from .patching import FileInfo, FileMatch, Patch, FileList
from .regexps import *
class BasicPattern(FileMatch):
    """Trivial three-letter pattern used to exercise the patching machinery."""
    regexp = '[abc]{3}'

    @property
    def name(self):
        # the matched text itself serves as the match's name
        return self.group(0)

    def replacement(self) -> str:
        # replace match with the middle character repeated 5 times
        return self.group(0)[1].upper()*5
def test_pattern_patching():
    """End-to-end check: match, automatic replacement, and manual Patch append."""
    of = NamedTemporaryFile('wt')
    of.writelines(['one line\n',
                   'this pattern will be patched: defbbahij\n',
                   'third line\n',
                   'another pattern: jihaabfed'])
    of.flush()
    files = FileList()
    f = FileInfo(files, of.name)
    f.load()
    matches = f.matches_of_type(BasicPattern)
    assert len(matches) == 2
    p2 = matches[1]
    # manually add patch, to see if .append() works:
    f.patches.append(p2.append('XXX'))
    # apply all patches:
    f.gen_patches(matches)
    patched = f.get_patched_content()
    # 'bba' -> 'BBBBB', 'aab' -> 'AAAAA' (+ manually appended 'XXX')
    assert patched == ('one line\n'+
                       'this pattern will be patched: defBBBBBhij\n'+
                       'third line\n'+
                       'another pattern: jihAAAAAXXXfed')
class Function(FileMatch):
    """Matches a 'BEGIN <name> ... END' pseudo-function block (non-greedy body)."""
    regexp = S(r'BEGIN\s+', NAMED('name', RE_IDENTIFIER), r'\n',
               r'(.*\n)*?END\n')
class Statement(FileMatch):
    """Matches a single '<name>()' pseudo-statement line."""
    regexp = S(r'^\s*', NAMED('name', RE_IDENTIFIER), r'\(\)\n')
def test_container_match():
    """Check FileMatch.contains(): statements inside/outside Function spans."""
    of = NamedTemporaryFile('wt')
    of.writelines(['statement1()\n',
                   'statement2()\n',
                   'BEGIN function1\n',
                   ' statement3()\n',
                   ' statement4()\n',
                   'END\n',
                   'BEGIN function2\n',
                   ' statement5()\n',
                   ' statement6()\n',
                   'END\n',
                   'statement7()\n'])
    of.flush()
    files = FileList()
    f = FileInfo(files, of.name)
    f.load()
    assert len(f.matches_of_type(Function)) == 2
    print(' '.join(m.name for m in f.matches_of_type(Statement)))
    assert len(f.matches_of_type(Statement)) == 7
    f1 = f.find_match(Function, 'function1')
    f2 = f.find_match(Function, 'function2')
    st1 = f.find_match(Statement, 'statement1')
    st2 = f.find_match(Statement, 'statement2')
    st3 = f.find_match(Statement, 'statement3')
    st4 = f.find_match(Statement, 'statement4')
    st5 = f.find_match(Statement, 'statement5')
    st6 = f.find_match(Statement, 'statement6')
    st7 = f.find_match(Statement, 'statement7')
    assert not f1.contains(st1)
    assert not f1.contains(st2)
    # NOTE(review): duplicated assertion (st2 checked twice) — harmless,
    # but looks like a copy/paste slip
    assert not f1.contains(st2)
    assert f1.contains(st3)
    assert f1.contains(st4)
    assert not f1.contains(st5)
    assert not f1.contains(st6)
    assert not f1.contains(st7)
    assert not f2.contains(st1)
    assert not f2.contains(st2)
    # NOTE(review): duplicated assertion (st2 checked twice) — harmless,
    # but looks like a copy/paste slip
    assert not f2.contains(st2)
    assert not f2.contains(st3)
    assert not f2.contains(st4)
    assert f2.contains(st5)
    assert f2.contains(st6)
    assert not f2.contains(st7)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/codeconverter/codeconverter/test_patching.py
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/codeconverter/codeconverter/__init__.py
|
|
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <ehabkost@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import re
from itertools import chain
from typing import *
from .regexps import *
from .patching import *
from .utils import *
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
# simple expressions:
# a "constant" for our purposes is a string or numeric literal
RE_CONSTANT = OR(RE_STRING, RE_NUMBER)
class DefineDirective(FileMatch):
    """Match any #define directive (only the macro name is captured)."""
    regexp = S(r'^[ \t]*#[ \t]*define', CPP_SPACE, NAMED('name', RE_IDENTIFIER), r'\b')
class ExpressionDefine(FileMatch):
    """Simple #define preprocessor directive for an expression"""
    regexp = S(r'^[ \t]*#[ \t]*define', CPP_SPACE, NAMED('name', RE_IDENTIFIER),
               CPP_SPACE, NAMED('value', RE_EXPRESSION), r'[ \t]*\n')

    def provided_identifiers(self) -> Iterable[RequiredIdentifier]:
        """The macro name acts as a 'constant' for dependency ordering."""
        yield RequiredIdentifier('constant', self.group('name'))
class ConstantDefine(ExpressionDefine):
    """Simple #define preprocessor directive for a number or string constant"""
    regexp = S(r'^[ \t]*#[ \t]*define', CPP_SPACE, NAMED('name', RE_IDENTIFIER),
               CPP_SPACE, NAMED('value', RE_CONSTANT), r'[ \t]*\n')
class TypeIdentifiers(NamedTuple):
    """Bundle of related identifiers extracted from QOM type declarations."""
    # TYPE_MYDEVICE
    typename: Optional[str]
    # MYDEVICE
    uppercase: Optional[str] = None
    # MyDevice
    instancetype: Optional[str] = None
    # MyDeviceClass
    classtype: Optional[str] = None
    # my_device
    lowercase: Optional[str] = None

    def allfields(self):
        """Every field value as a tuple, in declaration order."""
        return tuple(self)

    def merge(self, other: 'TypeIdentifiers') -> Optional['TypeIdentifiers']:
        """Merge with *other*; None when any pair of fields conflicts."""
        pairs = list(zip(self, other))
        if not all(opt_compare(mine, theirs) for mine, theirs in pairs):
            return None
        return TypeIdentifiers(*(merge(mine, theirs) for mine, theirs in pairs))

    def __str__(self) -> str:
        shown = ['%s=%s' % (field, value)
                 for field, value in zip(self._fields, self)
                 if value is not None]
        return ', '.join(shown)

    def check_consistency(self) -> List[str]:
        """Check if identifiers are consistent with each other,
        return list of problems (or empty list if everything seems consistent)
        """
        problems = []
        if self.typename is None:
            problems.append("typename (TYPE_MYDEVICE) is unavailable")
        if self.uppercase is None:
            problems.append("uppercase name is unavailable")
        if (self.instancetype is not None
                and self.classtype is not None
                and self.classtype != f'{self.instancetype}Class'):
            problems.append("class typedef %s doesn't match instance typedef %s" %
                            (self.classtype, self.instancetype))
        if (self.uppercase is not None
                and self.typename is not None
                and f'TYPE_{self.uppercase}' != self.typename):
            problems.append("uppercase name (%s) doesn't match type name (%s)" %
                            (self.uppercase, self.typename))
        return problems
class TypedefMatch(FileMatch):
    """typedef declaration"""
    def provided_identifiers(self) -> Iterable[RequiredIdentifier]:
        """The typedef'd name acts as a 'type' for dependency ordering."""
        yield RequiredIdentifier('type', self.group('name'))
class SimpleTypedefMatch(TypedefMatch):
    """Simple typedef declaration
    (no replacement rules)"""
    # e.g.: "typedef struct Foo Foo;" — target type in 'typedef_type',
    # new name in 'name'
    regexp = S(r'^[ \t]*typedef', SP,
               NAMED('typedef_type', RE_TYPE), SP,
               NAMED('name', RE_IDENTIFIER), r'\s*;[ \t]*\n')
# function-like macro definition: "#define NAME(arg)"
RE_MACRO_DEFINE = S(r'^[ \t]*#\s*define\s+', NAMED('name', RE_IDENTIFIER),
                    r'\s*\(\s*', RE_IDENTIFIER, r'\s*\)', CPP_SPACE)

RE_STRUCT_ATTRIBUTE = r'QEMU_PACKED'

# This doesn't parse the struct definitions completely, it just assumes
# the closing brackets are going to be in an unindented line:
RE_FULL_STRUCT = S('struct', SP, M(RE_IDENTIFIER, n='?', name='structname'), SP,
                   NAMED('body', r'{\n',
                         # acceptable inside the struct body:
                         # - lines starting with space or tab
                         # - empty lines
                         # - preprocessor directives
                         # - comments
                         OR(r'[ \t][^\n]*\n',
                            r'#[^\n]*\n',
                            r'\n',
                            S(r'[ \t]*', RE_COMMENT, r'[ \t]*\n'),
                            repeat='*?'),
                         r'}', M(RE_STRUCT_ATTRIBUTE, SP, n='*')))
# "typedef struct [Name] { ... } Name;" in one declaration
RE_STRUCT_TYPEDEF = S(r'^[ \t]*typedef', SP, RE_FULL_STRUCT, SP,
                      NAMED('name', RE_IDENTIFIER), r'\s*;[ \t]*\n')
class FullStructTypedefMatch(TypedefMatch):
    """typedef struct [SomeStruct] { ...} SomeType

    Will be replaced by separate struct declaration + typedef
    """
    regexp = RE_STRUCT_TYPEDEF

    def make_structname(self) -> str:
        """Make struct name for struct+typedef split

        Falls back to the typedef name when the struct is anonymous.
        """
        return self.group('structname') or self.name

    def strip_typedef(self) -> Patch:
        """Generate a patch reducing the match to a plain struct declaration.

        The caller is responsible for re-adding the typedef somewhere else.
        """
        structname = self.make_structname()
        structbody = self.group('body')
        return self.make_patch(f'struct {structname} {structbody};\n')

    def make_simple_typedef(self) -> str:
        """One-line typedef text referring to the split-out struct."""
        return f'typedef struct {self.make_structname()} {self.name};\n'

    def move_typedef(self, position) -> Iterator[Patch]:
        """Generate patches moving the typedef to *position*."""
        yield self.strip_typedef()
        yield Patch(position, position, self.make_simple_typedef())

    def split_typedef(self) -> Iterator[Patch]:
        """Split into struct definition + typedef in place."""
        yield self.strip_typedef()
        yield self.append(self.make_simple_typedef())
class StructTypedefSplit(FullStructTypedefMatch):
    """split struct+typedef declaration"""
    def gen_patches(self) -> Iterator[Patch]:
        # only named structs can be split; anonymous ones are left alone
        if self.group('structname'):
            yield from self.split_typedef()
class DuplicatedTypedefs(SimpleTypedefMatch):
    """Delete ALL duplicate typedefs (unsafe)"""
    def gen_patches(self) -> Iterable[Patch]:
        # all typedefs in this file (simple or struct+typedef) sharing
        # this typedef's name; includes self
        other_td = [td for td in chain(self.file.matches_of_type(SimpleTypedefMatch),
                                       self.file.matches_of_type(FullStructTypedefMatch))
                    if td.name == self.name]
        DBG("other_td: %r", other_td)
        if any(td.start() < self.start() for td in other_td):
            # patch only if handling the first typedef
            return
        for td in other_td:
            if isinstance(td, SimpleTypedefMatch):
                DBG("other td: %r", td.match.groupdict())
                # NOTE(review): only typedefs whose target type text DIFFERS
                # are removed here; byte-identical duplicates are kept —
                # confirm this is the intended behavior
                if td.group('typedef_type') != self.group('typedef_type'):
                    yield td.make_removal_patch()
            elif isinstance(td, FullStructTypedefMatch):
                DBG("other td: %r", td.match.groupdict())
                # if this typedef refers to the struct defined by the other
                # match, keep the struct but drop its embedded typedef
                if self.group('typedef_type') == 'struct '+td.group('structname'):
                    yield td.strip_typedef()
class QOMDuplicatedTypedefs(DuplicatedTypedefs):
    """Delete duplicate typedefs if used by QOM type"""
    def gen_patches(self) -> Iterable[Patch]:
        # only act on typedefs referenced by a QOM checker macro/declaration:
        qom_macros = [TypeCheckMacro, DeclareInstanceChecker, DeclareClassCheckers, DeclareObjCheckers]
        qom_matches = chain(*(self.file.matches_of_type(t) for t in qom_macros))
        in_use = any(RequiredIdentifier('type', self.name) in m.required_identifiers()
                     for m in qom_matches)
        if in_use:
            yield from DuplicatedTypedefs.gen_patches(self)
class QOMStructTypedefSplit(FullStructTypedefMatch):
    """split struct+typedef declaration if used by QOM type"""
    def gen_patches(self) -> Iterator[Patch]:
        # only split typedefs referenced by a QOM checker macro/declaration:
        qom_macros = [TypeCheckMacro, DeclareInstanceChecker, DeclareClassCheckers, DeclareObjCheckers]
        qom_matches = chain(*(self.file.matches_of_type(t) for t in qom_macros))
        in_use = any(RequiredIdentifier('type', self.name) in m.required_identifiers()
                     for m in qom_matches)
        if in_use:
            yield from self.split_typedef()
def typedefs(file: FileInfo) -> Iterable[TypedefMatch]:
    """Iterate over all simple and struct+typedef matches in *file*."""
    return (cast(TypedefMatch, m)
            for m in chain(file.matches_of_type(SimpleTypedefMatch),
                           file.matches_of_type(FullStructTypedefMatch)))
def find_typedef(f: FileInfo, name: Optional[str]) -> Optional[TypedefMatch]:
    """Look up the typedef of *name* in *f*; None when absent or name empty."""
    if not name:
        return None
    return next((td for td in typedefs(f) if td.name == name), None)
# legacy type-checking macros that DECLARE_*_CHECKERS replaces:
CHECKER_MACROS = ['OBJECT_CHECK', 'OBJECT_CLASS_CHECK', 'OBJECT_GET_CLASS']
CheckerMacroName = Literal['OBJECT_CHECK', 'OBJECT_CLASS_CHECK', 'OBJECT_GET_CLASS']

# "#define FOO(obj) OBJECT_CHECK(FooState, (obj), TYPE_FOO)" and friends;
# 'check_args' is optional so partially-parsed macros still match
RE_CHECK_MACRO = \
    S(RE_MACRO_DEFINE,
      OR(*CHECKER_MACROS, name='checker'),
      M(r'\s*\(\s*', OR(NAMED('typedefname', RE_IDENTIFIER), RE_TYPE, name='c_type'), r'\s*,', CPP_SPACE,
        OPTIONAL_PARS(RE_IDENTIFIER), r',', CPP_SPACE,
        NAMED('qom_typename', RE_IDENTIFIER), r'\s*\)\n',
        n='?', name='check_args'))

# macro-name suffix expected for each class-related checker macro:
EXPECTED_CHECKER_SUFFIXES: List[Tuple[CheckerMacroName, str]] = [
    ('OBJECT_GET_CLASS', '_GET_CLASS'),
    ('OBJECT_CLASS_CHECK', '_CLASS'),
]
class TypeCheckMacro(FileMatch):
    """OBJECT_CHECK/OBJECT_CLASS_CHECK/OBJECT_GET_CLASS macro definitions
    Will be replaced by DECLARE_*_CHECKERS macro
    """
    regexp = RE_CHECK_MACRO

    @property
    def checker(self) -> CheckerMacroName:
        """Name of checker macro being used"""
        return self.group('checker') # type: ignore

    @property
    def typedefname(self) -> Optional[str]:
        """Name of the typedef used as C type, if the C type is an identifier."""
        return self.group('typedefname')

    def find_typedef(self) -> Optional[TypedefMatch]:
        """Typedef match for our C type, if present in this file."""
        return find_typedef(self.file, self.typedefname)

    def sanity_check(self) -> None:
        """Emit warnings for incompletely-parsed or inconsistent macros."""
        DBG("groups: %r", self.match.groups())
        if not self.group('check_args'):
            self.warn("type check macro not parsed completely: %s", self.name)
            return
        DBG("type identifiers: %r", self.type_identifiers)
        if self.typedefname and self.find_typedef() is None:
            self.warn("typedef used by %s not found", self.name)

    def find_matching_macros(self) -> List['TypeCheckMacro']:
        """Find other check macros that generate the same macro names

        The returned list will always be sorted.
        """
        my_ids = self.type_identifiers
        assert my_ids
        return [m for m in self.file.matches_of_type(TypeCheckMacro)
                if m.type_identifiers is not None
                and my_ids.uppercase is not None
                and (my_ids.uppercase == m.type_identifiers.uppercase
                     or my_ids.typename == m.type_identifiers.typename)]

    def merge_ids(self, matches: List['TypeCheckMacro']) -> Optional[TypeIdentifiers]:
        """Try to merge info about type identifiers from all matches in a list"""
        if not matches:
            return None
        r = matches[0].type_identifiers
        if r is None:
            return None
        for m in matches[1:]:
            assert m.type_identifiers
            new = r.merge(m.type_identifiers)
            if new is None:
                self.warn("macro %s identifiers (%s) don't match macro %s (%s)",
                          matches[0].name, r, m.name, m.type_identifiers)
                return None
            r = new
        return r

    def required_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Identifiers that must be declared before the replacement macro."""
        yield RequiredIdentifier('include', '"qom/object.h"')
        if self.type_identifiers is None:
            return
        # to make sure typedefs will be moved above all related macros,
        # return dependencies from all of them, not just this match
        for m in self.find_matching_macros():
            yield RequiredIdentifier('type', m.group('c_type'))
            yield RequiredIdentifier('constant', m.group('qom_typename'))

    @property
    def type_identifiers(self) -> Optional[TypeIdentifiers]:
        """Extract type identifier information from match"""
        typename = self.group('qom_typename')
        c_type = self.group('c_type')
        if not typename or not c_type:
            return None
        typedef = self.group('typedefname')
        classtype = None
        instancetype = None
        uppercase = None
        expected_suffix = dict(EXPECTED_CHECKER_SUFFIXES).get(self.checker)
        # here the available data depends on the checker macro being called:
        # - we need to remove the suffix from the macro name
        # - depending on the macro type, we know the class type name, or
        #   the instance type name
        if self.checker in ('OBJECT_GET_CLASS', 'OBJECT_CLASS_CHECK'):
            classtype = c_type
        elif self.checker == 'OBJECT_CHECK':
            instancetype = c_type
            uppercase = self.name
        else:
            assert False
        if expected_suffix and self.name.endswith(expected_suffix):
            uppercase = self.name[:-len(expected_suffix)]
        return TypeIdentifiers(typename=typename, classtype=classtype,
                               instancetype=instancetype, uppercase=uppercase)

    def gen_patches(self) -> Iterable[Patch]:
        # the implementation is a bit tricky because we need to group
        # macros dealing with the same type into a single declaration
        if self.type_identifiers is None:
            self.warn("couldn't extract type information from macro %s", self.name)
            return
        if self.name == 'INTERFACE_CLASS':
            # INTERFACE_CLASS is special and won't be patched
            return
        for checker,suffix in EXPECTED_CHECKER_SUFFIXES:
            if self.name.endswith(suffix):
                if self.checker != checker:
                    self.warn("macro %s is using macro %s instead of %s", self.name, self.checker, checker)
                    return
                break
        matches = self.find_matching_macros()
        DBG("found %d matching macros: %s", len(matches), ' '.join(m.name for m in matches))
        # we will generate patches only when processing the first macro:
        # (comparing the bound .start methods is equivalent to comparing
        # the match objects themselves)
        if matches[0].start != self.start:
            DBG("skipping %s (will patch when handling %s)", self.name, matches[0].name)
            return

        ids = self.merge_ids(matches)
        if ids is None:
            DBG("type identifier mismatch, won't patch %s", self.name)
            return
        if not ids.uppercase:
            self.warn("macro %s doesn't follow the expected name pattern", self.name)
            return
        if not ids.typename:
            self.warn("macro %s: couldn't extract type name", self.name)
            return
        #issues = ids.check_consistency()
        #if issues:
        #    for i in issues:
        #        self.warn("inconsistent identifiers: %s", i)
        # all four names must be distinct to generate a sane declaration:
        names = [n for n in (ids.instancetype, ids.classtype, ids.uppercase, ids.typename)
                 if n is not None]
        if len(set(names)) != len(names):
            self.warn("duplicate names used by macro: %r", ids)
            return
        assert ids.classtype or ids.instancetype
        assert ids.typename
        assert ids.uppercase
        # pick the replacement macro based on which struct types we know:
        if ids.classtype and ids.instancetype:
            new_decl = (f'DECLARE_OBJ_CHECKERS({ids.instancetype}, {ids.classtype},\n'
                        f'                     {ids.uppercase}, {ids.typename})\n')
        elif ids.classtype:
            new_decl = (f'DECLARE_CLASS_CHECKERS({ids.classtype}, {ids.uppercase},\n'
                        f'                       {ids.typename})\n')
        elif ids.instancetype:
            new_decl = (f'DECLARE_INSTANCE_CHECKER({ids.instancetype}, {ids.uppercase},\n'
                        f'                         {ids.typename})\n')
        else:
            assert False

        # we need to ensure the typedefs are already available
        issues = []
        for t in [ids.instancetype, ids.classtype]:
            if not t:
                continue
            if re.fullmatch(RE_STRUCT_TYPE, t):
                self.info("type %s is not a typedef", t)
                continue
            td = find_typedef(self.file, t)
            #if not td and self.allfiles.find_file('include/qemu/typedefs.h'):
            #
            if not td:
                # it is OK if the typedef is in typedefs.h
                f = self.allfiles.find_file('include/qemu/typedefs.h')
                if f and find_typedef(f, t):
                    self.info("typedef %s found in typedefs.h", t)
                    continue
                issues.append("couldn't find typedef %s" % (t))
            elif td.start() > self.start():
                issues.append("typedef %s need to be moved earlier in the file" % (td.name))

        for issue in issues:
            self.warn(issue)

        if issues and not self.file.force:
            return

        # delete all matching macros and add new declaration:
        for m in matches:
            yield m.make_patch('')
        for issue in issues:
            yield self.prepend("/* FIXME: %s */\n" % (issue))
        yield self.append(new_decl)
class InterfaceCheckMacro(FileMatch):
    """Type checking macro using INTERFACE_CHECK
    Will be replaced by DECLARE_INTERFACE_CHECKER
    """
    regexp = S(RE_MACRO_DEFINE,
               'INTERFACE_CHECK',
               r'\s*\(\s*', OR(NAMED('instancetype', RE_IDENTIFIER), RE_TYPE, name='c_type'),
               r'\s*,', CPP_SPACE,
               OPTIONAL_PARS(RE_IDENTIFIER), r',', CPP_SPACE,
               NAMED('qom_typename', RE_IDENTIFIER), r'\s*\)\n')

    def required_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Names that must be declared before the replacement macro."""
        yield RequiredIdentifier('include', '"qom/object.h"')
        yield RequiredIdentifier('type', self.group('instancetype'))
        yield RequiredIdentifier('constant', self.group('qom_typename'))

    def gen_patches(self) -> Iterable[Patch]:
        # object.h defines INTERFACE_CHECK itself; leave it untouched
        if self.file.filename_matches('qom/object.h'):
            self.debug("skipping object.h")
            return

        typename = self.group('qom_typename')
        uppercase = self.name
        instancetype = self.group('instancetype')
        c = f"DECLARE_INTERFACE_CHECKER({instancetype}, {uppercase},\n"+\
            f"                          {typename})\n"
        yield self.make_patch(c)
class TypeDeclaration(FileMatch):
    """Parent class to all type declarations"""
    # each property returns None when the subclass regexp has no such group
    @property
    def instancetype(self) -> Optional[str]:
        return self.getgroup('instancetype')

    @property
    def classtype(self) -> Optional[str]:
        return self.getgroup('classtype')

    @property
    def typename(self) -> Optional[str]:
        return self.getgroup('typename')
class TypeCheckerDeclaration(TypeDeclaration):
    """Parent class to all type checker declarations"""
    # checker declarations always carry typename + uppercase groups,
    # so these narrow the Optional[str] parent properties to str
    @property
    def typename(self) -> str:
        return self.group('typename')

    @property
    def uppercase(self) -> str:
        return self.group('uppercase')
class DeclareInstanceChecker(TypeCheckerDeclaration):
    """DECLARE_INSTANCE_CHECKER use"""
    #TODO: replace lonely DECLARE_INSTANCE_CHECKER with DECLARE_OBJ_CHECKERS
    #      if all types are found.
    #      This will require looking up the correct class type in the TypeInfo
    #      structs in another file
    regexp = S(r'^[ \t]*DECLARE_INSTANCE_CHECKER\s*\(\s*',
               NAMED('instancetype', RE_TYPE), r'\s*,\s*',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'), SP,
               r'\)[ \t]*;?[ \t]*\n')

    def required_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Names that must be declared before this declaration."""
        yield RequiredIdentifier('include', '"qom/object.h"')
        yield RequiredIdentifier('constant', self.group('typename'))
        yield RequiredIdentifier('type', self.group('instancetype'))
class DeclareInterfaceChecker(TypeCheckerDeclaration):
    """DECLARE_INTERFACE_CHECKER use"""
    regexp = S(r'^[ \t]*DECLARE_INTERFACE_CHECKER\s*\(\s*',
               NAMED('instancetype', RE_TYPE), r'\s*,\s*',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'), SP,
               r'\)[ \t]*;?[ \t]*\n')

    def required_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Names that must be declared before this declaration."""
        yield RequiredIdentifier('include', '"qom/object.h"')
        yield RequiredIdentifier('constant', self.group('typename'))
        yield RequiredIdentifier('type', self.group('instancetype'))
class DeclareInstanceType(TypeDeclaration):
    """DECLARE_INSTANCE_TYPE use"""
    regexp = S(r'^[ \t]*DECLARE_INSTANCE_TYPE\s*\(\s*',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('instancetype', RE_TYPE), SP,
               r'\)[ \t]*;?[ \t]*\n')

    def required_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Names that must be declared before this declaration."""
        yield RequiredIdentifier('include', '"qom/object.h"')
        yield RequiredIdentifier('type', self.group('instancetype'))
class DeclareClassType(TypeDeclaration):
    """DECLARE_CLASS_TYPE use"""
    # Matches: DECLARE_CLASS_TYPE(UPPERCASE, ClassType)
    regexp = S(r'^[ \t]*DECLARE_CLASS_TYPE\s*\(\s*',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('classtype', RE_TYPE), SP,
               r'\)[ \t]*;?[ \t]*\n')
    def required_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Identifiers that must be declared before this macro use"""
        yield RequiredIdentifier('include', '"qom/object.h"')
        yield RequiredIdentifier('type', self.group('classtype'))
class DeclareClassCheckers(TypeCheckerDeclaration):
    """DECLARE_CLASS_CHECKERS use"""
    # Matches: DECLARE_CLASS_CHECKERS(ClassType, UPPERCASE, TYPE_NAME)
    regexp = S(r'^[ \t]*DECLARE_CLASS_CHECKERS\s*\(\s*',
               NAMED('classtype', RE_TYPE), r'\s*,\s*',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'), SP,
               r'\)[ \t]*;?[ \t]*\n')
    def required_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Identifiers that must be declared before this macro use"""
        yield RequiredIdentifier('include', '"qom/object.h"')
        yield RequiredIdentifier('constant', self.group('typename'))
        yield RequiredIdentifier('type', self.group('classtype'))
class DeclareObjCheckers(TypeCheckerDeclaration):
    """DECLARE_OBJ_CHECKERS use"""
    #TODO: detect when OBJECT_DECLARE_SIMPLE_TYPE can be used
    # Matches: DECLARE_OBJ_CHECKERS(InstanceType, ClassType, UPPERCASE, TYPE_NAME)
    regexp = S(r'^[ \t]*DECLARE_OBJ_CHECKERS\s*\(\s*',
               NAMED('instancetype', RE_TYPE), r'\s*,\s*',
               NAMED('classtype', RE_TYPE), r'\s*,\s*',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'), SP,
               r'\)[ \t]*;?[ \t]*\n')
    def required_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Identifiers that must be declared before this macro use"""
        yield RequiredIdentifier('include', '"qom/object.h"')
        yield RequiredIdentifier('constant', self.group('typename'))
        yield RequiredIdentifier('type', self.group('classtype'))
        yield RequiredIdentifier('type', self.group('instancetype'))
class TypeDeclarationFixup(FileMatch):
    """Common base class for code that will look at a set of type declarations"""
    regexp = RE_FILE_BEGIN
    def gen_patches(self) -> Iterable[Patch]:
        """Group all type-checker declarations by MODULE_OBJ_NAME and
        delegate consistent groups to gen_patches_for_type()."""
        if self.file.filename_matches('qom/object.h'):
            self.debug("skipping object.h")
            return
        # group checkers by uppercase name:
        decl_types: List[Type[TypeDeclaration]] = [DeclareInstanceChecker, DeclareInstanceType,
                                                   DeclareClassCheckers, DeclareClassType,
                                                   DeclareObjCheckers]
        checker_dict: Dict[str, List[TypeDeclaration]] = {}
        for t in decl_types:
            for m in self.file.matches_of_type(t):
                checker_dict.setdefault(m.group('uppercase'), []).append(m)
        self.debug("checker_dict: %r", checker_dict)
        for uppercase,checkers in checker_dict.items():
            # for each field, collect the set of distinct non-None values
            # seen across all checkers of this group
            fields = ('instancetype', 'classtype', 'uppercase', 'typename')
            fvalues = dict((field, set(getattr(m, field) for m in checkers
                                       if getattr(m, field, None) is not None))
                           for field in fields)
            for field,values in fvalues.items():
                if len(values) > 1:
                    for c in checkers:
                        c.warn("%s mismatch (%s)", field, ' '.join(values))
                    # NOTE: aborts the whole generator, not just this group
                    return
            field_dict = dict((f, v.pop() if v else None) for f,v in fvalues.items())
            yield from self.gen_patches_for_type(uppercase, checkers, field_dict)
    def find_conflicts(self, uppercase: str, checkers: List[TypeDeclaration]) -> bool:
        """Look for conflicting declarations that would make it unsafe to add new ones"""
        conflicting: List[FileMatch] = []
        # conflicts in the same file:
        conflicting.extend(chain(self.file.find_matches(DefineDirective, uppercase),
                                 self.file.find_matches(DeclareInterfaceChecker, uppercase, 'uppercase'),
                                 self.file.find_matches(DeclareClassType, uppercase, 'uppercase'),
                                 self.file.find_matches(DeclareInstanceType, uppercase, 'uppercase')))
        # conflicts in another file:
        conflicting.extend(o for o in chain(self.allfiles.find_matches(DeclareInstanceChecker, uppercase, 'uppercase'),
                                            self.allfiles.find_matches(DeclareClassCheckers, uppercase, 'uppercase'),
                                            self.allfiles.find_matches(DeclareInterfaceChecker, uppercase, 'uppercase'),
                                            self.allfiles.find_matches(DefineDirective, uppercase))
                           if o is not None and o.file != self.file
                           # if both are .c files, there's no conflict at all:
                           and not (o.file.filename.suffix == '.c' and
                                    self.file.filename.suffix == '.c'))
        if conflicting:
            for c in checkers:
                c.warn("skipping due to conflicting %s macro", uppercase)
            for o in conflicting:
                if o is None:
                    continue
                o.warn("conflicting %s macro is here", uppercase)
            return True
        return False
    def gen_patches_for_type(self, uppercase: str,
                             checkers: List[TypeDeclaration],
                             fields: Dict[str, Optional[str]]) -> Iterable[Patch]:
        """Should be reimplemented by subclasses"""
        # empty generator: `return` before `yield` makes this yield nothing
        return
        yield
class DeclareVoidTypes(TypeDeclarationFixup):
    """Add DECLARE_*_TYPE(..., void) when there's no declared type"""
    regexp = RE_FILE_BEGIN
    def gen_patches_for_type(self, uppercase: str,
                             checkers: List[TypeDeclaration],
                             fields: Dict[str, Optional[str]]) -> Iterable[Patch]:
        """Prepend void instance/class type declarations before the first
        checker of the group, when none of the checkers declares them."""
        if self.find_conflicts(uppercase, checkers):
            return
        #_,last_checker = max((m.start(), m) for m in checkers)
        # pick the checker that appears earliest in the file:
        _,first_checker = min((m.start(), m) for m in checkers)
        if not any(m.instancetype for m in checkers):
            yield first_checker.prepend(f'DECLARE_INSTANCE_TYPE({uppercase}, void)\n')
        if not any(m.classtype for m in checkers):
            yield first_checker.prepend(f'DECLARE_CLASS_TYPE({uppercase}, void)\n')
        #if not all(len(v) == 1 for v in fvalues.values()):
        #    return
        #
        #final_values = dict((field, values.pop())
        #                    for field,values in fvalues.items())
        #s = (f"DECLARE_OBJ_CHECKERS({final_values['instancetype']}, {final_values['classtype']},\n"+
        #     f"                     {final_values['uppercase']}, {final_values['typename']})\n")
        #for c in checkers:
        #    yield c.make_removal_patch()
        #yield last_checker.append(s)
class AddDeclareTypeName(TypeDeclarationFixup):
    """Add DECLARE_TYPE_NAME declarations if necessary"""
    def gen_patches_for_type(self, uppercase: str,
                             checkers: List[TypeDeclaration],
                             fields: Dict[str, Optional[str]]) -> Iterable[Patch]:
        """Prepend a DECLARE_TYPE_NAME(UPPERCASE, typename) before the first
        checker, unless the type name already follows the TYPE_<UPPERCASE>
        convention or is already declared."""
        typename = fields.get('typename')
        if typename is None:
            self.warn("typename unavailable")
            return
        if typename == f'TYPE_{uppercase}':
            self.info("already using TYPE_%s as type name", uppercase)
            return
        if self.file.find_match(DeclareTypeName, uppercase, 'uppercase'):
            self.info("type name for %s already declared", uppercase)
            return
        # insert before the earliest checker of the group:
        _,first_checker = min((m.start(), m) for m in checkers)
        s = f'DECLARE_TYPE_NAME({uppercase}, {typename})\n'
        yield first_checker.prepend(s)
class TrivialClassStruct(FileMatch):
    """Trivial class struct"""
    # Matches: struct Name { Parent parent; };  (or parent_class) —
    # i.e. a class struct containing only the parent field
    regexp = S(r'^[ \t]*struct\s*', NAMED('name', RE_IDENTIFIER),
               r'\s*{\s*', NAMED('parent_struct', RE_IDENTIFIER), r'\s*parent(_class)?\s*;\s*};\n')
class DeclareTypeName(FileMatch):
    """DECLARE_TYPE_NAME usage"""
    # Matches: DECLARE_TYPE_NAME(UPPERCASE, TYPE_NAME)
    regexp = S(r'^[ \t]*DECLARE_TYPE_NAME\s*\(',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'),
               r'\s*\);?[ \t]*\n')
class ObjectDeclareType(TypeCheckerDeclaration):
    """OBJECT_DECLARE_TYPE usage
    Will be replaced with OBJECT_DECLARE_SIMPLE_TYPE if possible
    """
    regexp = S(r'^[ \t]*OBJECT_DECLARE_TYPE\s*\(',
               NAMED('instancetype', RE_TYPE), r'\s*,\s*',
               NAMED('classtype', RE_TYPE), r'\s*,\s*',
               NAMED('uppercase', RE_IDENTIFIER), SP,
               r'\)[ \t]*;?[ \t]*\n')
    def gen_patches(self):
        """If the class struct only wraps the parent struct, replace the
        declaration with OBJECT_DECLARE_SIMPLE_TYPE (which declares the
        trivial class struct itself) and delete the struct definition."""
        DBG("groups: %r", self.match.groupdict())
        trivial_struct = self.file.find_match(TrivialClassStruct, self.group('classtype'))
        if trivial_struct:
            d = self.match.groupdict().copy()
            d['parent_struct'] = trivial_struct.group("parent_struct")
            # BUG FIX: 'lowercase' is not a capture group of this regexp
            # (OBJECT_DECLARE_TYPE takes no lowercase argument), so the
            # %-format below used to raise KeyError.  Derive the lowercase
            # module name from the uppercase one, per QOM naming convention.
            d.setdefault('lowercase', d['uppercase'].lower())
            yield trivial_struct.make_removal_patch()
            c = ("OBJECT_DECLARE_SIMPLE_TYPE(%(instancetype)s, %(lowercase)s,\n"
                 "                           %(uppercase)s, %(parent_struct)s)\n" % d)
            yield self.make_patch(c)
class ObjectDeclareSimpleType(TypeCheckerDeclaration):
    """OBJECT_DECLARE_SIMPLE_TYPE usage"""
    # Matches the new 2-argument form: OBJECT_DECLARE_SIMPLE_TYPE(InstanceType, UPPERCASE)
    regexp = S(r'^[ \t]*OBJECT_DECLARE_SIMPLE_TYPE\s*\(',
               NAMED('instancetype', RE_TYPE), r'\s*,\s*',
               NAMED('uppercase', RE_IDENTIFIER), SP,
               r'\)[ \t]*;?[ \t]*\n')
class OldStyleObjectDeclareSimpleType(TypeCheckerDeclaration):
    """OBJECT_DECLARE_SIMPLE_TYPE usage (old API)"""
    # Matches the old 4-argument form:
    # OBJECT_DECLARE_SIMPLE_TYPE(InstanceType, lowercase, UPPERCASE, ParentClassType)
    regexp = S(r'^[ \t]*OBJECT_DECLARE_SIMPLE_TYPE\s*\(',
               NAMED('instancetype', RE_TYPE), r'\s*,\s*',
               NAMED('lowercase', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
               NAMED('parent_classtype', RE_TYPE), SP,
               r'\)[ \t]*;?[ \t]*\n')
    @property
    def classtype(self) -> Optional[str]:
        # the old macro implicitly declares an <InstanceType>Class struct
        instancetype = self.instancetype
        assert instancetype
        return f"{instancetype}Class"
def find_typename_uppercase(files: FileList, typename: str) -> Optional[str]:
    """Try to find what's the right MODULE_OBJ_NAME for a given type name"""
    # Prefer an explicit DECLARE_TYPE_NAME declaration, if one exists:
    declaration = files.find_match(DeclareTypeName, name=typename, group='typename')
    if declaration is not None:
        return declaration.group('uppercase')
    # Otherwise rely on the TYPE_<MODULE_OBJ_NAME> naming convention:
    prefix = 'TYPE_'
    if typename.startswith(prefix):
        return typename[len(prefix):]
    return None
def find_type_checkers(files:FileList, name:str, group:str='uppercase') -> Iterable[TypeCheckerDeclaration]:
    """Find usage of DECLARE*CHECKER macro"""
    # every match class whose macro declares type checkers:
    checker_classes: Tuple[Type[TypeCheckerDeclaration], ...] = (
        DeclareInstanceChecker, DeclareClassCheckers, DeclareObjCheckers,
        ObjectDeclareType, ObjectDeclareSimpleType)
    for checker_class in checker_classes:
        yield from files.find_matches(checker_class, name=name, group=group)
class Include(FileMatch):
    """#include directive"""
    regexp = RE_INCLUDE
    def provided_identifiers(self) -> Iterable[RequiredIdentifier]:
        """An #include line provides the included path as an identifier"""
        yield RequiredIdentifier('include', self.group('includepath'))
class InitialIncludes(FileMatch):
    """Initial #include block"""
    # Matches, at the very beginning of the file: an optional #ifndef
    # header guard, then any number of #include / simple #define lines
    # (each optionally preceded by comments).
    regexp = S(RE_FILE_BEGIN,
               M(SP, RE_COMMENTS,
                 r'^[ \t]*#[ \t]*ifndef[ \t]+', RE_IDENTIFIER, r'[ \t]*\n',
                 n='?', name='ifndef_block'),
               M(SP, RE_COMMENTS,
                 OR(RE_INCLUDE, RE_SIMPLEDEFINE),
                 n='*', name='includes'))
class SymbolUserList(NamedTuple):
    """Where a symbol is defined and where it is used, within one file"""
    # matches that define the symbol
    definitions: List[FileMatch]
    # matches that require the symbol to be defined
    users: List[FileMatch]
class MoveSymbols(FileMatch):
    """Handle missing symbols
    - Move typedefs and defines when necessary
    - Add missing #include lines when necessary
    """
    regexp = RE_FILE_BEGIN
    def gen_patches(self) -> Iterator[Patch]:
        """Index symbol definitions/usages in the file and yield patches
        that add missing #includes or move definitions before first use."""
        if self.file.filename_matches('qom/object.h'):
            self.debug("skipping object.h")
            return
        # maps each identifier to its definitions and users in this file:
        index: Dict[RequiredIdentifier, SymbolUserList] = {}
        definition_classes = [SimpleTypedefMatch, FullStructTypedefMatch, ConstantDefine, Include]
        user_classes = [TypeCheckMacro, DeclareObjCheckers, DeclareInstanceChecker, DeclareClassCheckers, InterfaceCheckMacro]
        # first we scan for all symbol definitions and usage:
        for dc in definition_classes:
            for definition_match in self.file.matches_of_type(dc):
                DBG("scanning %r", definition_match)
                for i in definition_match.provided_identifiers():
                    index.setdefault(i, SymbolUserList([], [])).definitions.append(definition_match)
        DBG("index: %r", list(index.keys()))
        for uc in user_classes:
            for user_match in self.file.matches_of_type(uc):
                for i in user_match.required_identifiers():
                    index.setdefault(i, SymbolUserList([], [])).users.append(user_match)
        # validate all symbols:
        for i,ul in index.items():
            if not ul.users:
                # unused symbol
                continue
            # symbol not defined
            if len(ul.definitions) == 0:
                if i.type == 'include':
                    # assumes exactly one InitialIncludes match per file
                    includes, = self.file.matches_of_type(InitialIncludes)
                    #FIXME: don't do this if we're already inside qom/object.h
                    yield includes.append(f'#include {i.name}\n')
                else:
                    # BUG FIX: the old code warned through a stale loop
                    # variable (`u`) left over from the scanning phase;
                    # warn on the actual users of the missing symbol instead.
                    for user_match in ul.users:
                        user_match.warn("definition of %s %s not found in file", i.type, i.name)
                continue
            # symbol defined twice:
            if len(ul.definitions) > 1:
                ul.definitions[1].warn("%s defined twice", i.name)
                ul.definitions[0].warn("previously defined here")
                continue
            # symbol defined. check if all users are after its definition:
            assert len(ul.definitions) == 1
            definition = ul.definitions[0]
            DBG("handling repositioning of %r", definition)
            earliest = min(ul.users, key=lambda u: u.start())
            if earliest.start() > definition.start():
                DBG("%r is OK", definition)
                continue
            DBG("%r needs to be moved", definition)
            if isinstance(definition, (SimpleTypedefMatch, ConstantDefine)):
                # simple typedef or define can be moved directly:
                yield definition.make_removal_patch()
                yield earliest.prepend(definition.group(0))
            elif isinstance(definition, FullStructTypedefMatch) \
                 and definition.group('structname'):
                # full struct typedef is more complex: we need to remove
                # the typedef
                yield from definition.move_typedef(earliest.start())
            else:
                definition.warn("definition of %s %s needs to be moved earlier in the file", i.type, i.name)
                earliest.warn("definition of %s %s is used here", i.type, i.name)
class EmptyPreprocessorConditional(FileMatch):
    """Delete empty preprocessor conditionals"""
    # an #if/#ifdef directly followed (blank lines only) by its #endif
    regexp = r'^[ \t]*#(if|ifdef)[ \t].*\n+[ \t]*#endif[ \t]*\n'
    def gen_patches(self) -> Iterable[Patch]:
        yield self.make_removal_patch()
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/codeconverter/codeconverter/qom_macros.py
|
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <ehabkost@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from typing import IO, Match, NamedTuple, Optional, Literal, Iterable, Type, Dict, List, Any, TypeVar, NewType, Tuple, Union
from pathlib import Path
from itertools import chain
from tempfile import NamedTemporaryFile
import os
import re
import subprocess
from io import StringIO
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
ERROR = logger.error
from .utils import *
T = TypeVar('T')
class Patch(NamedTuple):
    """A single text replacement inside a file's original content"""
    # start inside file.original_content
    start: int
    # end position inside file.original_content
    end: int
    # replacement string for file.original_content[start:end]
    replacement: str
# Kinds of identifier a declaration may require or provide:
IdentifierType = Literal['type', 'symbol', 'include', 'constant']
class RequiredIdentifier(NamedTuple):
    """Reference to an identifier that a declaration requires or provides"""
    type: IdentifierType
    name: str
class FileMatch:
    """Base class for regex matches
    Subclasses just need to set the `regexp` class attribute
    """
    regexp: Optional[str] = None
    def __init__(self, f: 'FileInfo', m: Match) -> None:
        self.file: 'FileInfo' = f
        self.match: Match[str] = m
    @property
    def name(self) -> str:
        # convenience accessor for the 'name' group, when the regexp has one
        if 'name' not in self.match.groupdict():
            return '[no name]'
        return self.group('name')
    @classmethod
    def compiled_re(klass):
        """Compile the class regexp (multi-line mode)"""
        return re.compile(klass.regexp, re.MULTILINE)
    def start(self) -> int:
        """Start offset of the match inside file.original_content"""
        return self.match.start()
    def end(self) -> int:
        """End offset of the match inside file.original_content"""
        return self.match.end()
    def line_col(self) -> LineAndColumn:
        """Line/column of the match start inside the file"""
        return self.file.line_col(self.start())
    def group(self, group: Union[int, str]) -> str:
        return self.match.group(group)
    def getgroup(self, group: str) -> Optional[str]:
        """Like group(), but returns None if the group doesn't exist"""
        if group not in self.match.groupdict():
            return None
        return self.match.group(group)
    def log(self, level, fmt, *args) -> None:
        # prefix log messages with file:line:col of this match
        pos = self.line_col()
        logger.log(level, '%s:%d:%d: '+fmt, self.file.filename, pos.line, pos.col, *args)
    def debug(self, fmt, *args) -> None:
        self.log(logging.DEBUG, fmt, *args)
    def info(self, fmt, *args) -> None:
        self.log(logging.INFO, fmt, *args)
    def warn(self, fmt, *args) -> None:
        self.log(logging.WARNING, fmt, *args)
    def error(self, fmt, *args) -> None:
        self.log(logging.ERROR, fmt, *args)
    def sub(self, original: str, replacement: str) -> str:
        """Replace content
        XXX: this won't use the match position, but will just
        replace all strings that look like the original match.
        This should be enough for all the patterns used in this
        script.
        """
        return original.replace(self.group(0), replacement)
    def sanity_check(self) -> None:
        """Sanity check match, and print warnings if necessary"""
        pass
    def replacement(self) -> Optional[str]:
        """Return replacement text for pattern, to use new code conventions"""
        return None
    def make_patch(self, replacement: str) -> 'Patch':
        """Make patch replacing the content of this match"""
        return Patch(self.start(), self.end(), replacement)
    def make_subpatch(self, start: int, end: int, replacement: str) -> 'Patch':
        """Make patch for a sub-range, relative to this match's start"""
        return Patch(self.start() + start, self.start() + end, replacement)
    def make_removal_patch(self) -> 'Patch':
        """Make patch removing contents of match completely"""
        return self.make_patch('')
    def append(self, s: str) -> 'Patch':
        """Make patch appending string after this match"""
        return Patch(self.end(), self.end(), s)
    def prepend(self, s: str) -> 'Patch':
        """Make patch prepending string before this match"""
        return Patch(self.start(), self.start(), s)
    def gen_patches(self) -> Iterable['Patch']:
        """Patch source code contents to use new code patterns"""
        replacement = self.replacement()
        if replacement is not None:
            yield self.make_patch(replacement)
    @classmethod
    def has_replacement_rule(klass) -> bool:
        # True if the subclass overrides gen_patches() or replacement()
        return (klass.gen_patches is not FileMatch.gen_patches
                or klass.replacement is not FileMatch.replacement)
    def contains(self, other: 'FileMatch') -> bool:
        """True if `other` lies completely inside this match's span"""
        return other.start() >= self.start() and other.end() <= self.end()
    def __repr__(self) -> str:
        start = self.file.line_col(self.start())
        end = self.file.line_col(self.end() - 1)
        return '<%s %s at %d:%d-%d:%d: %r>' % (self.__class__.__name__,
                                               self.name,
                                               start.line, start.col,
                                               end.line, end.col, self.group(0)[:100])
    def required_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Can be implemented by subclasses to keep track of identifier references
        This method will be used by the code that moves declarations around the file,
        to make sure we find the right spot for them.
        """
        raise NotImplementedError()
    def provided_identifiers(self) -> Iterable[RequiredIdentifier]:
        """Can be implemented by subclasses to keep track of identifier references
        This method will be used by the code that moves declarations around the file,
        to make sure we find the right spot for them.
        """
        raise NotImplementedError()
    @classmethod
    def finditer(klass, content: str, pos=0, endpos=-1) -> Iterable[Match]:
        """Helper for re.finditer()"""
        if endpos >= 0:
            content = content[:endpos]
        return klass.compiled_re().finditer(content, pos)
    @classmethod
    def domatch(klass, content: str, pos=0, endpos=-1) -> Optional[Match]:
        """Helper for re.match()"""
        if endpos >= 0:
            content = content[:endpos]
        return klass.compiled_re().match(content, pos)
    def group_finditer(self, klass: Type['FileMatch'], group: Union[str, int]) -> Iterable['FileMatch']:
        """Iterate matches of `klass` inside one of this match's groups"""
        assert self.file.original_content
        return (klass(self.file, m)
                for m in klass.finditer(self.file.original_content,
                                        self.match.start(group),
                                        self.match.end(group)))
    def try_group_match(self, klass: Type['FileMatch'], group: Union[str, int]) -> Optional['FileMatch']:
        """Match `klass` at the beginning of one of this match's groups"""
        assert self.file.original_content
        m = klass.domatch(self.file.original_content,
                          self.match.start(group),
                          self.match.end(group))
        if not m:
            return None
        else:
            return klass(self.file, m)
    def group_match(self, group: Union[str, int]) -> 'FileMatch':
        """Wrap one of this match's groups as a FullMatch object"""
        m = self.try_group_match(FullMatch, group)
        assert m
        return m
    @property
    def allfiles(self) -> 'FileList':
        return self.file.allfiles
class FullMatch(FileMatch):
    """Regexp that will match all contents of string
    Useful when used with group_match()
    """
    regexp = r'(?s).*' # (?s) is re.DOTALL
def all_subclasses(c: Type[FileMatch]) -> Iterable[Type[FileMatch]]:
    """Recursively yield every direct and indirect subclass of `c`"""
    for subclass in c.__subclasses__():
        yield subclass
        yield from all_subclasses(subclass)
def match_class_dict() -> Dict[str, Type[FileMatch]]:
    """Map class name -> class for every FileMatch subclass"""
    return {klass.__name__: klass for klass in all_subclasses(FileMatch)}
def names(matches: Iterable[FileMatch]) -> Iterable[str]:
    """Return the .name of each match, as a list"""
    return [match.name for match in matches]
class PatchingError(Exception):
    """Base class for errors raised while applying patches"""
    pass
class OverLappingPatchesError(PatchingError):
    """Raised when two patches try to replace overlapping regions"""
    pass
def apply_patches(s: str, patches: Iterable[Patch]) -> str:
    """Apply a sequence of patches to string

    >>> apply_patches('abcdefg', [Patch(2,2,'xxx'), Patch(0, 1, 'yy')])
    'yybxxxcdefg'
    """
    r = StringIO()
    last = 0
    def patch_sort_key(item: Tuple[int, Patch]) -> Tuple[int, int, int]:
        """Patches are sorted by byte position,
        patches at the same byte position are applied in the order
        they were generated.
        """
        i,p = item
        return (p.start, p.end, i)
    # enumerate() records generation order so the sort is stable per position
    for i,p in sorted(enumerate(patches), key=patch_sort_key):
        DBG("Applying patch at position %d (%s) - %d (%s): %r",
            p.start, line_col(s, p.start),
            p.end, line_col(s, p.end),
            p.replacement)
        # a patch must not start before the end of the previous one
        if last > p.start:
            raise OverLappingPatchesError("Overlapping patch at position %d (%s), last patch at %d (%s)" % \
                (p.start, line_col(s, p.start), last, line_col(s, last)))
        r.write(s[last:p.start])
        r.write(p.replacement)
        last = p.end
    # copy the unpatched tail
    r.write(s[last:])
    return r.getvalue()
class RegexpScanner:
    """Caching scanner base class: indexes FileMatch objects by type and
    by group value.

    Subclasses implement _matches_of_type() to do the actual scanning;
    results are memoized until reset_index() is called.
    """
    def __init__(self) -> None:
        # cache: match class -> list of matches
        self.match_index: Dict[Type[Any], List[FileMatch]] = {}
        # cache: (match class, value, group name) -> matches with that value
        self.match_name_index: Dict[Tuple[Type[Any], str, str], Optional[FileMatch]] = {}
    def _matches_of_type(self, klass: Type[Any]) -> Iterable[FileMatch]:
        raise NotImplementedError()
    def matches_of_type(self, t: Type[T]) -> List[T]:
        """Return all matches of type `t`, scanning only on first use"""
        if t not in self.match_index:
            self.match_index[t] = list(self._matches_of_type(t))
        return self.match_index[t]  # type: ignore
    def find_matches(self, t: Type[T], name: str, group: str='name') -> List[T]:
        """Return all matches of type `t` whose `group` equals `name` (cached)"""
        indexkey = (t, name, group)
        if indexkey in self.match_name_index:
            return self.match_name_index[indexkey]  # type: ignore
        result: List[T] = []
        for m in self.matches_of_type(t):
            assert isinstance(m, FileMatch)
            if m.getgroup(group) == name:
                result.append(m)  # type: ignore
        self.match_name_index[indexkey] = result  # type: ignore
        return result
    def find_match(self, t: Type[T], name: str, group: str='name') -> Optional[T]:
        """Return the unique match of `t` with `group` == `name`, or None

        Also returns None (after logging a warning) if multiple matches
        exist, since the result would be ambiguous.
        """
        matches = self.find_matches(t, name, group)
        if not matches:
            return None
        if len(matches) > 1:
            # BUG FIX: logger.warn() is a deprecated alias of warning()
            logger.warning("multiple matches found for %r (%s=%r)", t, group, name)
            return None
        return matches[0]
    def reset_index(self) -> None:
        """Drop all cached scan results"""
        self.match_index.clear()
        self.match_name_index.clear()
class FileInfo(RegexpScanner):
    """Contents of a single scanned source file, plus its pending patches"""
    filename: Path
    original_content: Optional[str] = None
    def __init__(self, files: 'FileList', filename: os.PathLike, force:bool=False) -> None:
        super().__init__()
        self.allfiles = files
        self.filename = Path(filename)
        self.patches: List[Patch] = []
        self.force = force
    def __repr__(self) -> str:
        return f'<FileInfo {repr(self.filename)}>'
    def filename_matches(self, name: str) -> bool:
        """True if this file's path ends with the path components of `name`"""
        nameparts = Path(name).parts
        return self.filename.parts[-len(nameparts):] == nameparts
    def line_col(self, start: int) -> LineAndColumn:
        """Return line and column for a match object inside original_content"""
        return line_col(self.original_content, start)
    def _matches_of_type(self, klass: Type[Any]) -> List[FileMatch]:
        """Build FileMatch objects for each match of regexp"""
        if not hasattr(klass, 'regexp') or klass.regexp is None:
            return []
        assert hasattr(klass, 'regexp')
        DBG("%s: scanning for %s", self.filename, klass.__name__)
        DBG("regexp: %s", klass.regexp)
        matches = [klass(self, m) for m in klass.finditer(self.original_content)]
        DBG('%s: %d matches found for %s: %s', self.filename, len(matches),
            klass.__name__,' '.join(names(matches)))
        return matches
    def find_match(self, t: Type[T], name: str, group: str='name') -> Optional[T]:
        # overrides RegexpScanner.find_match: returns the *first* match,
        # with no ambiguity warning
        for m in self.matches_of_type(t):
            assert isinstance(m, FileMatch)
            if m.getgroup(group) == name:
                return m  # type: ignore
        return None
    def reset_content(self, s:str):
        """Replace file content and invalidate all cached match indexes"""
        self.original_content = s
        self.patches.clear()
        self.reset_index()
        self.allfiles.reset_index()
    def load(self) -> None:
        """Read the file from disk (no-op if already loaded)"""
        if self.original_content is not None:
            return
        with open(self.filename, 'rt') as f:
            self.reset_content(f.read())
    @property
    def all_matches(self) -> Iterable[FileMatch]:
        # flatten all cached match lists into one iterator
        lists = list(self.match_index.values())
        return (m for l in lists
                for m in l)
    def gen_patches(self, matches: List[FileMatch]) -> None:
        """Collect the patches generated by each match into self.patches"""
        for m in matches:
            DBG("Generating patches for %r", m)
            for i,p in enumerate(m.gen_patches()):
                DBG("patch %d generated by %r:", i, m)
                DBG("replace contents at %s-%s with %r",
                    self.line_col(p.start), self.line_col(p.end), p.replacement)
                self.patches.append(p)
    def scan_for_matches(self, class_names: Optional[List[str]]=None) -> Iterable[FileMatch]:
        """Yield matches for the given match-class names

        Defaults to every FileMatch subclass that has a replacement rule.
        """
        DBG("class names: %r", class_names)
        class_dict = match_class_dict()
        if class_names is None:
            DBG("default class names")
            class_names = list(name for name,klass in class_dict.items()
                               if klass.has_replacement_rule())
        DBG("class_names: %r", class_names)
        for cn in class_names:
            matches = self.matches_of_type(class_dict[cn])
            DBG('%d matches found for %s: %s',
                len(matches), cn, ' '.join(names(matches)))
            yield from matches
    def apply_patches(self) -> None:
        """Replace self.original_content after applying patches from self.patches"""
        self.reset_content(self.get_patched_content())
    def get_patched_content(self) -> str:
        """Return content with all pending patches applied (may raise PatchingError)"""
        assert self.original_content is not None
        return apply_patches(self.original_content, self.patches)
    def write_to_file(self, f: IO[str]) -> None:
        f.write(self.get_patched_content())
    def write_to_filename(self, filename: os.PathLike) -> None:
        with open(filename, 'wt') as of:
            self.write_to_file(of)
    def patch_inplace(self) -> None:
        # write to a sibling temp file, then rename over the original
        newfile = self.filename.with_suffix('.changed')
        self.write_to_filename(newfile)
        os.rename(newfile, self.filename)
    def show_diff(self) -> None:
        """Print a unified diff of pending changes using the external diff tool"""
        with NamedTemporaryFile('wt') as f:
            self.write_to_file(f)
            f.flush()
            subprocess.call(['diff', '-u', self.filename, f.name])
    def ref(self):
        # NOTE(review): TypeInfoReference is not defined in this module;
        # presumably provided by a sibling module — confirm before calling,
        # otherwise this raises NameError.
        return TypeInfoReference
class FileList(RegexpScanner):
    """Collection of FileInfo objects that can be scanned and patched as a unit"""
    def __init__(self):
        super().__init__()
        self.files: List[FileInfo] = []
    def extend(self, *args, **kwargs):
        self.files.extend(*args, **kwargs)
    def __iter__(self):
        return iter(self.files)
    def _matches_of_type(self, klass: Type[Any]) -> Iterable[FileMatch]:
        # aggregate matches from every file in the list
        return chain(*(f._matches_of_type(klass) for f in self.files))
    def find_file(self, name: str) -> Optional[FileInfo]:
        """Get file with path ending with @name"""
        for f in self.files:
            if f.filename_matches(name):
                return f
        return None
    def one_pass(self, class_names: List[str]) -> int:
        """Run a single scan+patch pass over all files

        Returns the total number of patches generated; patches are applied
        to the in-memory content of each file.
        """
        total_patches = 0
        for f in self.files:
            INFO("Scanning file %s", f.filename)
            matches = list(f.scan_for_matches(class_names))
            INFO("Generating patches for file %s", f.filename)
            f.gen_patches(matches)
            total_patches += len(f.patches)
        if total_patches:
            for f in self.files:
                try:
                    f.apply_patches()
                except PatchingError:
                    logger.exception("%s: failed to patch file", f.filename)
        return total_patches
    def patch_content(self, max_passes, class_names: List[str]) -> None:
        """Multi-pass content patching loop

        We run multiple passes because there are rules that will
        delete init functions once they become empty.
        """
        passes = 0
        total_patches = 0
        DBG("max_passes: %r", max_passes)
        while not max_passes or max_passes <= 0 or passes < max_passes:
            passes += 1
            INFO("Running pass: %d", passes)
            count = self.one_pass(class_names)
            DBG("patch content: pass %d: %d patches generated", passes, count)
            total_patches += count
            # BUG FIX: stop once a pass generates no patches; without this
            # break an unlimited max_passes (0/None) would loop forever.
            if count == 0:
                break
        DBG("%d patches applied total in %d passes", total_patches, passes)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/codeconverter/codeconverter/patching.py
|
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <ehabkost@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from .regexps import *
from .qom_macros import *
from .qom_type_info import *
def test_res() -> None:
    """Self-tests for the low-level regexps and for TypeInfoVar.regexp"""
    def fullmatch(regexp, s):
        # helper: anchored multi-line full match
        return re.fullmatch(regexp, s, re.MULTILINE)
    # basic token regexps:
    assert fullmatch(RE_IDENTIFIER, 'sizeof')
    assert fullmatch(RE_IDENTIFIER, 'X86CPU')
    assert fullmatch(RE_FUN_CALL, 'sizeof(X86CPU)')
    assert fullmatch(RE_IDENTIFIER, 'X86_CPU_TYPE_NAME')
    assert fullmatch(RE_SIMPLE_VALUE, '"base"')
    print(RE_FUN_CALL)
    assert fullmatch(RE_FUN_CALL, 'X86_CPU_TYPE_NAME("base")')
    print(RE_TI_FIELD_INIT)
    assert fullmatch(RE_TI_FIELD_INIT, '.name = X86_CPU_TYPE_NAME("base"),\n')
    assert fullmatch(RE_MACRO_CONCAT, 'TYPE_ASPEED_GPIO "-ast2600"')
    assert fullmatch(RE_EXPRESSION, 'TYPE_ASPEED_GPIO "-ast2600"')
    print(RE_MACRO_DEFINE)
    # multi-line #define with continuation backslashes:
    assert re.search(RE_MACRO_DEFINE, r'''
    #define OFFSET_CHECK(c) \
    do { \
    if (!(c)) { \
    goto bad_offset; \
    } \
    } while (0)
    ''', re.MULTILINE)
    print(RE_CHECK_MACRO)
    print(CPP_SPACE)
    # a generic macro must NOT be mistaken for a checker macro:
    assert not re.match(RE_CHECK_MACRO, r'''
    #define OFFSET_CHECK(c) \
    do { \
    if (!(c)) { \
    goto bad_offset; \
    } \
    } while (0)''', re.MULTILINE)
    print(RE_CHECK_MACRO)
    assert fullmatch(RE_CHECK_MACRO, r'''#define PCI_DEVICE(obj) \
    OBJECT_CHECK(PCIDevice, (obj), TYPE_PCI_DEVICE)
    ''')
    assert fullmatch(RE_CHECK_MACRO, r'''#define COLLIE_MACHINE(obj) \
    OBJECT_CHECK(CollieMachineState, obj, TYPE_COLLIE_MACHINE)
    ''')
    print(RE_TYPEINFO_START)
    # start of a TypeInfo variable declaration (static const and plain):
    assert re.search(RE_TYPEINFO_START, r'''
    cc->open = qmp_chardev_open_file;
    }
    static const TypeInfo char_file_type_info = {
    .name = TYPE_CHARDEV_FILE,
    #ifdef _WIN32
    .parent = TYPE_CHARDEV_WIN,
    ''', re.MULTILINE)
    assert re.search(RE_TYPEINFO_START, r'''
    TypeInfo ti = {
    .name = armsse_variants[i].name,
    .parent = TYPE_ARMSSE,
    .class_init = armsse_class_init,
    .class_data = (void *)&armsse_variants[i],
    };''', re.MULTILINE)
    print(RE_ARRAY_ITEM)
    # InterfaceInfo array items and casts:
    assert fullmatch(RE_ARRAY_ITEM, '{ TYPE_HOTPLUG_HANDLER },')
    assert fullmatch(RE_ARRAY_ITEM, '{ TYPE_ACPI_DEVICE_IF },')
    assert fullmatch(RE_ARRAY_ITEM, '{ }')
    assert fullmatch(RE_ARRAY_CAST, '(InterfaceInfo[])')
    assert fullmatch(RE_ARRAY, '''(InterfaceInfo[]) {
    { TYPE_HOTPLUG_HANDLER },
    { TYPE_ACPI_DEVICE_IF },
    { }
    }''')
    print(RE_COMMENT)
    assert fullmatch(RE_COMMENT, r'''/* multi-line
    * comment
    */''')
    print(RE_TI_FIELDS)
    # TypeInfo field-initializer lists, with and without comments:
    assert fullmatch(RE_TI_FIELDS,
    r'''/* could be TYPE_SYS_BUS_DEVICE (or LPC etc) */
    .parent = TYPE_DEVICE,
    ''')
    assert fullmatch(RE_TI_FIELDS, r'''.name = TYPE_TPM_CRB,
    /* could be TYPE_SYS_BUS_DEVICE (or LPC etc) */
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CRBState),
    .class_init = tpm_crb_class_init,
    .interfaces = (InterfaceInfo[]) {
    { TYPE_TPM_IF },
    { }
    }
    ''')
    assert fullmatch(RE_TI_FIELDS + SP + RE_COMMENTS,
    r'''.name = TYPE_PALM_MISC_GPIO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PalmMiscGPIOState),
    .instance_init = palm_misc_gpio_init,
    /*
    * No class init required: device has no internal state so does not
    * need to set up reset or vmstate, and has no realize method.
    */''')
    print(TypeInfoVar.regexp)
    # full TypeInfo variable declarations, from empty to fully-populated:
    test_empty = 'static const TypeInfo x86_base_cpu_type_info = {\n'+\
                 '};\n';
    assert fullmatch(TypeInfoVar.regexp, test_empty)
    test_simple = r'''
    static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
    };
    '''
    assert re.search(TypeInfoVar.regexp, test_simple, re.MULTILINE)
    test_interfaces = r'''
    static const TypeInfo acpi_ged_info = {
    .name = TYPE_ACPI_GED,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AcpiGedState),
    .instance_init = acpi_ged_initfn,
    .class_init = acpi_ged_class_init,
    .interfaces = (InterfaceInfo[]) {
    { TYPE_HOTPLUG_HANDLER },
    { TYPE_ACPI_DEVICE_IF },
    { }
    }
    };
    '''
    assert re.search(TypeInfoVar.regexp, test_interfaces, re.MULTILINE)
    test_comments = r'''
    static const TypeInfo palm_misc_gpio_info = {
    .name = TYPE_PALM_MISC_GPIO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PalmMiscGPIOState),
    .instance_init = palm_misc_gpio_init,
    /*
    * No class init required: device has no internal state so does not
    * need to set up reset or vmstate, and has no realize method.
    */
    };
    '''
    assert re.search(TypeInfoVar.regexp, test_comments, re.MULTILINE)
    test_comments = r'''
    static const TypeInfo tpm_crb_info = {
    .name = TYPE_TPM_CRB,
    /* could be TYPE_SYS_BUS_DEVICE (or LPC etc) */
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CRBState),
    .class_init = tpm_crb_class_init,
    .interfaces = (InterfaceInfo[]) {
    { TYPE_TPM_IF },
    { }
    }
    };
    '''
    assert re.search(TypeInfoVar.regexp, test_comments, re.MULTILINE)
def test_struct_re():
    """Self-tests for RE_STRUCT_TYPEDEF (struct typedef declarations)"""
    print('---')
    print(RE_STRUCT_TYPEDEF)
    # named struct typedef:
    assert re.search(RE_STRUCT_TYPEDEF, r'''
    typedef struct TCGState {
    AccelState parent_obj;
    bool mttcg_enabled;
    unsigned long tb_size;
    } TCGState;
    ''', re.MULTILINE)
    # anonymous struct typedef, with preprocessor conditionals inside:
    assert re.search(RE_STRUCT_TYPEDEF, r'''
    typedef struct {
    ISADevice parent_obj;
    QEMUSoundCard card;
    uint32_t freq;
    uint32_t port;
    int ticking[2];
    int enabled;
    int active;
    int bufpos;
    #ifdef DEBUG
    int64_t exp[2];
    #endif
    int16_t *mixbuf;
    uint64_t dexp[2];
    SWVoiceOut *voice;
    int left, pos, samples;
    QEMUAudioTimeStamp ats;
    FM_OPL *opl;
    PortioList port_list;
    } AdlibState;
    ''', re.MULTILINE)
    # multi-name typedefs and plain variable declarations must NOT match:
    false_positive = r'''
    typedef struct dma_pagetable_entry {
    int32_t frame;
    int32_t owner;
    } A B C D E;
    struct foo {
    int x;
    } some_variable;
    '''
    assert not re.search(RE_STRUCT_TYPEDEF, false_positive, re.MULTILINE)
def test_initial_includes():
    """Self-tests for the InitialIncludes match class"""
    print(InitialIncludes.regexp)
    # header guard followed by includes:
    c = '''
    #ifndef HW_FLASH_H
    #define HW_FLASH_H
    /* NOR flash devices */
    #include "qom/object.h"
    #include "exec/hwaddr.h"
    /* pflash_cfi01.c */
    '''
    print(repr(list(m.groupdict() for m in InitialIncludes.finditer(c))))
    m = InitialIncludes.domatch(c)
    assert m
    print(repr(m.group(0)))
    assert m.group(0).endswith('#include "exec/hwaddr.h"\n')
    # guard at the very first line:
    c = '''#ifndef QEMU_VIRTIO_9P_H
    #define QEMU_VIRTIO_9P_H
    #include "standard-headers/linux/virtio_9p.h"
    #include "hw/virtio/virtio.h"
    #include "9p.h"
    '''
    print(repr(list(m.groupdict() for m in InitialIncludes.finditer(c))))
    m = InitialIncludes.domatch(c)
    assert m
    print(repr(m.group(0)))
    assert m.group(0).endswith('#include "9p.h"\n')
    # includes interleaved with comments and simple #defines; the match
    # must stop before the trailing (non-include) comment:
    c = '''#include "qom/object.h"
    /*
    * QEMU ES1370 emulation
    ...
    */
    /* #define DEBUG_ES1370 */
    /* #define VERBOSE_ES1370 */
    #define SILENT_ES1370
    #include "qemu/osdep.h"
    #include "hw/audio/soundhw.h"
    #include "audio/audio.h"
    #include "hw/pci/pci.h"
    #include "migration/vmstate.h"
    #include "qemu/module.h"
    #include "sysemu/dma.h"
    /* Missing stuff:
    SCTRL_P[12](END|ST)INC
    '''
    print(repr(list(m.groupdict() for m in InitialIncludes.finditer(c))))
    m = InitialIncludes.domatch(c)
    assert m
    print(repr(m.group(0)))
    assert m.group(0).endswith('#include "sysemu/dma.h"\n')
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/codeconverter/codeconverter/test_regexps.py
|
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <ehabkost@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from typing import *
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
T = TypeVar('T')
def opt_compare(a: T, b: T) -> bool:
    """Compare two values; None on either side acts as a wildcard."""
    if a is None or b is None:
        return True
    return a == b
def merge(a: T, b: T) -> T:
    """Merge two values that matched under opt_compare().

    Prefers the non-None value; if both are non-None (and therefore
    equal), the first is returned.
    """
    assert opt_compare(a, b)
    return b if a is None else a
def test_comp_merge():
    """Exercise opt_compare()/merge() wildcard semantics."""
    # PEP 8: assert the truth value directly instead of comparing == True
    assert opt_compare(None, 1)
    assert opt_compare(2, None)
    assert opt_compare(1, 1)
    assert not opt_compare(1, 2)

    assert merge(None, None) is None
    assert merge(None, 10) == 10
    assert merge(10, None) == 10
    assert merge(10, 10) == 10
# Distinct int wrappers so line and column numbers cannot be swapped
# silently under a type checker.
LineNumber = NewType('LineNumber', int)
ColumnNumber = NewType('ColumnNumber', int)
class LineAndColumn(NamedTuple):
    """A 1-based (line, column) position inside a text buffer."""
    line: int
    col: int

    def __str__(self) -> str:
        # rendered as "line:col", the conventional editor notation
        return f'{self.line}:{self.col}'
def line_col(s, position: int) -> LineAndColumn:
    """Return line and column for a char position in string

    Character position starts in 0, but lines and columns start in 1.
    """
    prefix = s[:position]
    # one more line than the number of newlines seen so far
    line = prefix.count('\n') + 1
    # rfind returns -1 when there is no newline, which makes this
    # expression position + 1, i.e. the 1-based column on line 1
    col = position - prefix.rfind('\n')
    return LineAndColumn(line, col)
def test_line_col():
    """Spot-check line_col() at string start, line boundaries and interior."""
    assert line_col('abc\ndefg\nhijkl', 0) == (1, 1)
    assert line_col('abc\ndefg\nhijkl', 2) == (1, 3)
    # position 3 is the newline itself: still reported on line 1
    assert line_col('abc\ndefg\nhijkl', 3) == (1, 4)
    assert line_col('abc\ndefg\nhijkl', 4) == (2, 1)
    assert line_col('abc\ndefg\nhijkl', 10) == (3, 2)
def not_optional(arg: Optional[T]) -> T:
    """Narrow Optional[T] to T, asserting the value is present."""
    assert arg is not None
    return arg
__all__ = ['not_optional', 'opt_compare', 'merge', 'line_col', 'LineAndColumn']
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/codeconverter/codeconverter/utils.py
|
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <ehabkost@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
"""Helpers for creation of regular expressions"""
import re
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
def S(*regexps) -> str:
    """Just a shortcut to concatenate multiple regexps more easily"""
    return ''.join(regexps)
def P(*regexps, name=None, capture=False, repeat='') -> str:
    """Wrap regexp(s) in a group.

    The group is named when *name* is given, plain-capturing when
    *capture* is true, and non-capturing otherwise; *repeat* is
    appended verbatim as a quantifier suffix.
    """
    body = ''.join(regexps)
    if name:
        grouped = f'(?P<{name}>{body})'
    elif capture:
        grouped = f'({body})'
    else:
        grouped = f'(?:{body})'
    return grouped + repeat
def NAMED(name, *regexps) -> str:
    """Make named group using (?P<name>...) syntax

    >>> NAMED('mygroup', 'xyz', 'abc')
    '(?P<mygroup>xyzabc)'
    """
    return P(*regexps, name=name)
def OR(*regexps, **kwargs) -> str:
    """Build an alternation group, e.g. (a|b|c).

    Extra keyword arguments are forwarded to P() (name/capture/repeat).
    """
    alternatives = '|'.join(regexps)
    return P(alternatives, **kwargs)
def M(*regexps, n='*', name=None) -> str:
    """Add repetition qualifier to regexp(s)

    >>> M('a', 'b')
    '(?:ab)*'
    >>> M('a' , 'b', n='+')
    '(?:ab)+'
    >>> M('a' , 'b', n='{2,3}', name='name')
    '(?P<name>(?:ab){2,3})'
    """
    repeated = P(*regexps, repeat=n)
    # optionally wrap the whole repetition in a named group
    return NAMED(name, repeated) if name else repeated
def OPTIONAL_PARS(R: str) -> str:
    """Make parenthesis optional around a regexp.

    Matches either ``( R )`` (with optional inner whitespace) or bare
    ``R``.  Converted from a lambda assignment to a ``def`` (PEP 8 E731).
    """
    return OR(S(r'\(\s*', R, r'\s*\)'), R)
def test_optional_pars():
    """OPTIONAL_PARS must accept both bare and fully parenthesized forms."""
    r = OPTIONAL_PARS('abc')+'$'
    assert re.match(r, 'abc')
    assert re.match(r, '(abc)')
    # extended or unbalanced forms must not match
    assert not re.match(r, '(abcd)')
    assert not re.match(r, '(abc')
    assert not re.match(r, 'abc)')
# this disables the MULTILINE flag, so it will match at the
# beginning of the file:
RE_FILE_BEGIN = r'(?-m:^)'

# C primitives:
SP = r'\s*'
# line comment or block comment (no nesting)
RE_COMMENT = r'//[^\n]*$|/\*([^*]|\*[^/])*\*/'
RE_COMMENTS = M(RE_COMMENT + SP)
RE_IDENTIFIER = r'[a-zA-Z_][a-zA-Z0-9_]*(?![a-zA-Z0-9])'
# simple C string literal; only lowercase escapes and \" are recognized
RE_STRING = r'\"([^\"\\]|\\[a-z\"])*\"'
RE_NUMBER = r'[0-9]+|0x[0-9a-fA-F]+'
# space or escaped newlines:
CPP_SPACE = OR(r'\s', r'\\\n', repeat='+')
RE_PATH = '[a-zA-Z0-9/_.-]+'
# "path" or <path> forms of an #include argument
RE_INCLUDEPATH = OR(S(r'\"', RE_PATH, r'\"'),
                    S(r'<', RE_PATH, r'>'))
RE_INCLUDE = S(r'^[ \t]*#[ \t]*include[ \t]+', NAMED('includepath', RE_INCLUDEPATH), r'[ \t]*\n')
RE_SIMPLEDEFINE = S(r'^[ \t]*#[ \t]*define[ \t]+', RE_IDENTIFIER, r'[ \t]*\n')
RE_STRUCT_TYPE = S(r'struct\s+', RE_IDENTIFIER)
RE_TYPE = OR(RE_IDENTIFIER, RE_STRUCT_TYPE)
# two or more identifiers/strings in a row (macro-style concatenation)
RE_MACRO_CONCAT = M(S(OR(RE_IDENTIFIER, RE_STRING), SP), n='{2,}')
RE_SIMPLE_VALUE = OR(RE_IDENTIFIER, RE_STRING, RE_NUMBER)
# single-argument function call
RE_FUN_CALL = S(RE_IDENTIFIER, r'\s*\(\s*', RE_SIMPLE_VALUE, r'\s*\)')
RE_SIZEOF = S(r'sizeof\s*\(\s*', NAMED('sizeoftype', RE_TYPE), r'\s*\)')
RE_ADDRESS = S(r'&\s*', RE_IDENTIFIER)
# one {...} initializer element, optionally followed by a comma
RE_ARRAY_ITEM = S(r'{\s*', NAMED('arrayitem', M(RE_SIMPLE_VALUE, n='?')), r'\s*}\s*,?')
RE_ARRAY_CAST = S(r'\(\s*', RE_IDENTIFIER, r'\s*\[\s*\]\)')
RE_ARRAY_ITEMS = M(S(RE_ARRAY_ITEM, SP))
RE_ARRAY = S(M(RE_ARRAY_CAST, n='?'), r'\s*{\s*',
             NAMED('arrayitems', RE_ARRAY_ITEMS),
             r'}')
# NOTE: this covers a very small subset of valid expressions
RE_EXPRESSION = OR(RE_SIZEOF, RE_FUN_CALL, RE_MACRO_CONCAT, RE_SIMPLE_VALUE,
                   RE_ARRAY, RE_ADDRESS)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/codeconverter/codeconverter/regexps.py
|
#
# GDB debugging support
#
# Copyright (c) 2015 Linaro Ltd
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see
# <http://www.gnu.org/licenses/gpl-2.0.html>
#
# We don't need to do anything in our init file currently.
"""
Support routines for debugging QEMU under GDB
"""
__license__ = "GPL version 2 or (at your option) any later version"
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qemugdb/__init__.py
|
#
# GDB debugging support
#
# Copyright 2012 Red Hat, Inc. and/or its affiliates
#
# Authors:
# Avi Kivity <avi@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2
# or later. See the COPYING file in the top-level directory.
import gdb
VOID_PTR = gdb.lookup_type('void').pointer()
def get_fs_base():
    '''Fetch %fs base value using arch_prctl(ARCH_GET_FS). This is
    pthread_self().'''
    # %rsp - 120 is scratch space according to the SystemV ABI
    # (inside the red zone), so save its current contents first
    old = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
    # 0x1003 is ARCH_GET_FS; the result is written into the scratch slot
    gdb.execute('call (int)arch_prctl(0x1003, $rsp - 120)', False, True)
    fs_base = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
    # restore the clobbered scratch word
    gdb.execute('set *(uint64_t*)($rsp - 120) = %s' % old, False, True)
    return fs_base
def pthread_self():
    '''Fetch pthread_self() from the glibc start_thread function.'''
    # walk up the stack to glibc's start_thread; its "arg" variable is
    # the pthread_t of the current thread
    f = gdb.newest_frame()
    while f.name() != 'start_thread':
        f = f.older()
        if f is None:
            # not a glibc-created thread: read %fs base via the syscall
            return get_fs_base()
    try:
        return f.read_var("arg")
    except ValueError:
        # "arg" may be optimized out; fall back to the syscall path
        return get_fs_base()
def get_glibc_pointer_guard():
    '''Fetch glibc pointer guard value'''
    # the guard is stored at offset 0x30 from the TCB (%fs base)
    # NOTE(review): offset is glibc/x86-64 specific — confirm for the
    # glibc version being debugged
    fs_base = pthread_self()
    return gdb.parse_and_eval('*(uint64_t*)((uint64_t)%s + 0x30)' % fs_base)
def glibc_ptr_demangle(val, pointer_guard):
    '''Undo effect of glibc's PTR_MANGLE()'''
    # inverse of mangling: rotate right by 0x11, then XOR with the guard
    return gdb.parse_and_eval('(((uint64_t)%s >> 0x11) | ((uint64_t)%s << (64 - 0x11))) ^ (uint64_t)%s' % (val, val, pointer_guard))
def get_jmpbuf_regs(jmpbuf):
    '''Extract the callee-saved registers from a glibc jmp_buf.

    Slot indices follow glibc's x86-64 __jmp_buf layout; the rbp, rsp
    and PC slots are PTR_MANGLE'd and must be demangled before use.
    '''
    JB_RBX = 0
    JB_RBP = 1
    JB_R12 = 2
    JB_R13 = 3
    JB_R14 = 4
    JB_R15 = 5
    JB_RSP = 6
    JB_PC = 7
    pointer_guard = get_glibc_pointer_guard()
    return {'rbx': jmpbuf[JB_RBX],
            'rbp': glibc_ptr_demangle(jmpbuf[JB_RBP], pointer_guard),
            'rsp': glibc_ptr_demangle(jmpbuf[JB_RSP], pointer_guard),
            'r12': jmpbuf[JB_R12],
            'r13': jmpbuf[JB_R13],
            'r14': jmpbuf[JB_R14],
            'r15': jmpbuf[JB_R15],
            'rip': glibc_ptr_demangle(jmpbuf[JB_PC], pointer_guard) }
def bt_jmpbuf(jmpbuf):
    '''Backtrace a jmpbuf'''
    regs = get_jmpbuf_regs(jmpbuf)
    old = dict()
    # remember current stack frame and select the topmost
    # so that register modifications don't wreck it
    selected_frame = gdb.selected_frame()
    gdb.newest_frame().select()
    # save the live register values so they can be restored afterwards
    for i in regs:
        old[i] = gdb.parse_and_eval('(uint64_t)$%s' % i)
    # load the jmpbuf's saved registers, then backtrace from there
    for i in regs:
        gdb.execute('set $%s = %s' % (i, regs[i]))
    gdb.execute('bt')
    # restore the original register values and frame selection
    for i in regs:
        gdb.execute('set $%s = %s' % (i, old[i]))
    selected_frame.select()
def co_cast(co):
    # Reinterpret a Coroutine pointer as the ucontext implementation type.
    return co.cast(gdb.lookup_type('CoroutineUContext').pointer())
def coroutine_to_jmpbuf(co):
    # The jmp_buf that holds the coroutine's suspended state lives in
    # the CoroutineUContext's 'env' field.
    coroutine_pointer = co_cast(co)
    return coroutine_pointer['env']['__jmpbuf']
class CoroutineCommand(gdb.Command):
    '''Display coroutine backtrace'''
    def __init__(self):
        gdb.Command.__init__(self, 'qemu coroutine', gdb.COMMAND_DATA,
                             gdb.COMPLETE_NONE)

    def invoke(self, arg, from_tty):
        # expects exactly one argument: a pointer to a Coroutine
        argv = gdb.string_to_argv(arg)
        if len(argv) != 1:
            gdb.write('usage: qemu coroutine <coroutine-pointer>\n')
            return
        bt_jmpbuf(coroutine_to_jmpbuf(gdb.parse_and_eval(argv[0])))
class CoroutineBt(gdb.Command):
    '''Display backtrace including coroutine switches'''
    def __init__(self):
        gdb.Command.__init__(self, 'qemu bt', gdb.COMMAND_STACK,
                             gdb.COMPLETE_NONE)

    def invoke(self, arg, from_tty):
        # ordinary backtrace of the current stack first
        gdb.execute("bt")
        if gdb.parse_and_eval("qemu_in_coroutine()") == False:
            return
        # then walk the coroutine caller chain, backtracing each jmpbuf
        co_ptr = gdb.parse_and_eval("qemu_coroutine_self()")
        while True:
            co = co_cast(co_ptr)
            co_ptr = co["base"]["caller"]
            if co_ptr == 0:
                break
            gdb.write("Coroutine at " + str(co_ptr) + ":\n")
            bt_jmpbuf(coroutine_to_jmpbuf(co_ptr))
class CoroutineSPFunction(gdb.Function):
    # Convenience function $qemu_coroutine_sp(co): saved stack pointer.
    def __init__(self):
        gdb.Function.__init__(self, 'qemu_coroutine_sp')

    def invoke(self, addr):
        return get_jmpbuf_regs(coroutine_to_jmpbuf(addr))['rsp'].cast(VOID_PTR)
class CoroutinePCFunction(gdb.Function):
    # Convenience function $qemu_coroutine_pc(co): saved program counter.
    def __init__(self):
        gdb.Function.__init__(self, 'qemu_coroutine_pc')

    def invoke(self, addr):
        return get_jmpbuf_regs(coroutine_to_jmpbuf(addr))['rip'].cast(VOID_PTR)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qemugdb/coroutine.py
|
#
# GDB debugging support: aio/iohandler debug
#
# Copyright (c) 2015 Red Hat, Inc.
#
# Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
#
import gdb
from qemugdb import coroutine
def isnull(ptr):
    # Compare against a NULL of the same pointer type.
    return ptr == gdb.Value(0).cast(ptr.type)
def dump_aiocontext(context, verbose):
    '''Display a dump and backtrace for an aiocontext'''
    # walk the linked list of AioHandlers
    cur = context['aio_handlers']['lh_first']
    # Get pointers to functions we're going to process specially
    sym_fd_coroutine_enter = gdb.parse_and_eval('fd_coroutine_enter')
    while not isnull(cur):
        entry = cur.dereference()
        gdb.write('----\n%s\n' % entry)
        if verbose and cur['io_read'] == sym_fd_coroutine_enter:
            # handler wraps a yielded coroutine: backtrace it as well
            coptr = (cur['opaque'].cast(gdb.lookup_type('FDYieldUntilData').pointer()))['co']
            coptr = coptr.cast(gdb.lookup_type('CoroutineUContext').pointer())
            coroutine.bt_jmpbuf(coptr['env']['__jmpbuf'])
        cur = cur['node']['le_next'];
    gdb.write('----\n')
class HandlersCommand(gdb.Command):
    '''Display aio handlers'''
    def __init__(self):
        gdb.Command.__init__(self, 'qemu handlers', gdb.COMMAND_DATA,
                             gdb.COMPLETE_NONE)

    def invoke(self, arg, from_tty):
        # usage: qemu handlers [--verbose] [handler]
        verbose = False
        argv = gdb.string_to_argv(arg)
        if len(argv) > 0 and argv[0] == '--verbose':
            verbose = True
            argv.pop(0)
        if len(argv) > 1:
            gdb.write('usage: qemu handlers [--verbose] [handler]\n')
            return
        # default to the main qemu_aio_context when no handler is named
        if len(argv) == 1:
            handlers_name = argv[0]
        else:
            handlers_name = 'qemu_aio_context'
        dump_aiocontext(gdb.parse_and_eval(handlers_name), verbose)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qemugdb/aio.py
|
# -*- coding: utf-8 -*-
# GDB debugging support
#
# Copyright 2017 Linaro Ltd
#
# Author: Alex Bennée <alex.bennee@linaro.org>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# 'qemu timers' -- display the current timerlists
import gdb
class TimersCommand(gdb.Command):
    '''Display the current QEMU timers'''

    def __init__(self):
        'Register the class as a gdb command'
        gdb.Command.__init__(self, 'qemu timers', gdb.COMMAND_DATA,
                             gdb.COMPLETE_NONE)

    def dump_timers(self, timer):
        "Follow a timer and recursively dump each one in the list."
        # timer should be of type QemuTimer
        gdb.write(" timer %s/%s (cb:%s,opq:%s)\n" % (
            timer['expire_time'],
            timer['scale'],
            timer['cb'],
            timer['opaque']))
        # a non-NULL 'next' pointer means more timers follow in the list
        if int(timer['next']) > 0:
            self.dump_timers(timer['next'])

    def process_timerlist(self, tlist, ttype):
        'Dump the clock state and all active timers of one timer list.'
        gdb.write("Processing %s timers\n" % (ttype))
        gdb.write(" clock %s is enabled:%s, last:%s\n" % (
            tlist['clock']['type'],
            tlist['clock']['enabled'],
            tlist['clock']['last']))
        if int(tlist['active_timers']) > 0:
            self.dump_timers(tlist['active_timers'])

    def invoke(self, arg, from_tty):
        'Run the command'
        main_timers = gdb.parse_and_eval("main_loop_tlg")
        # This will break if QEMUClockType in timer.h is redfined
        self.process_timerlist(main_timers['tl'][0], "Realtime")
        self.process_timerlist(main_timers['tl'][1], "Virtual")
        self.process_timerlist(main_timers['tl'][2], "Host")
        self.process_timerlist(main_timers['tl'][3], "Virtual RT")
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qemugdb/timers.py
|
#
# GDB debugging support
#
# Copyright 2012 Red Hat, Inc. and/or its affiliates
#
# Authors:
# Avi Kivity <avi@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
# 'qemu mtree' -- display the memory hierarchy
import gdb
def isnull(ptr):
    # Compare against a NULL of the same pointer type.
    return ptr == gdb.Value(0).cast(ptr.type)
def int128(p):
    '''Read an Int128 type to a python integer.

    QEMU can be built with native Int128 support so we need to detect
    if the value is a structure or the native type.
    '''
    if p.type.code == gdb.TYPE_CODE_STRUCT:
        # struct form: combine the lo/hi 64-bit halves
        return int(p['lo']) + (int(p['hi']) << 64)
    else:
        # native __int128: parse gdb's textual rendering
        # NOTE(review): base 16 assumes gdb prints the value in hex — confirm
        return int(("%s" % p), 16)
class MtreeCommand(gdb.Command):
    '''Display the memory tree hierarchy'''
    def __init__(self):
        gdb.Command.__init__(self, 'qemu mtree', gdb.COMMAND_DATA,
                             gdb.COMPLETE_NONE)
        # MemoryRegion pointers still waiting to be printed
        self.queue = []

    def invoke(self, arg, from_tty):
        # track visited regions so alias loops cannot recurse forever
        self.seen = set()
        self.queue_root('address_space_memory')
        self.queue_root('address_space_io')
        self.process_queue()

    def queue_root(self, varname):
        # queue the root MemoryRegion of the named AddressSpace
        ptr = gdb.parse_and_eval(varname)['root']
        self.queue.append(ptr)

    def process_queue(self):
        while self.queue:
            ptr = self.queue.pop(0)
            if int(ptr) in self.seen:
                continue
            self.print_item(ptr)

    def print_item(self, ptr, offset = gdb.Value(0), level = 0):
        # NOTE(review): the gdb.Value(0) default is evaluated once at
        # class-creation time; harmless because it is never mutated.
        self.seen.add(int(ptr))
        addr = ptr['addr']
        addr += offset
        size = int128(ptr['size'])
        alias = ptr['alias']
        klass = ''
        if not isnull(alias):
            klass = ' (alias)'
        elif not isnull(ptr['ops']):
            klass = ' (I/O)'
        elif bool(ptr['ram']):
            klass = ' (RAM)'
        gdb.write('%s%016x-%016x %s%s (@ %s)\n'
                  % (' ' * level,
                     int(addr),
                     int(addr + (size - 1)),
                     ptr['name'].string(),
                     klass,
                     ptr,
                     ),
                  gdb.STDOUT)
        if not isnull(alias):
            gdb.write('%s alias: %s@%016x (@ %s)\n' %
                      (' ' * level,
                       alias['name'].string(),
                       int(ptr['alias_offset']),
                       alias,
                       ),
                      gdb.STDOUT)
            # the alias target is printed later at top level, not inline
            self.queue.append(alias)
        # recurse into subregions, offsetting them by this region's address
        subregion = ptr['subregions']['tqh_first']
        level += 1
        while not isnull(subregion):
            self.print_item(subregion, addr, level)
            subregion = subregion['subregions_link']['tqe_next']
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qemugdb/mtree.py
|
# -*- coding: utf-8 -*-
#
# GDB debugging support, TCG status
#
# Copyright 2016 Linaro Ltd
#
# Authors:
# Alex Bennée <alex.bennee@linaro.org>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
# 'qemu tcg-lock-status' -- display the TCG lock status across threads
import gdb
class TCGLockStatusCommand(gdb.Command):
    '''Display TCG Execution Status'''
    def __init__(self):
        gdb.Command.__init__(self, 'qemu tcg-lock-status', gdb.COMMAND_DATA,
                             gdb.COMPLETE_NONE)

    def invoke(self, arg, from_tty):
        gdb.write("Thread, BQL (iothread_mutex), Replay, Blocked?\n")
        for thread in gdb.inferiors()[0].threads():
            thread.switch()
            iothread = gdb.parse_and_eval("iothread_locked")
            replay = gdb.parse_and_eval("replay_locked")
            # when parked in __lll_lock_wait, report which mutex the
            # thread is waiting for and who owns it
            frame = gdb.selected_frame()
            if frame.name() == "__lll_lock_wait":
                frame.older().select()
                mutex = gdb.parse_and_eval("mutex")
                owner = gdb.parse_and_eval("mutex->__data.__owner")
                blocked = ("__lll_lock_wait waiting on %s from %d" %
                           (mutex, owner))
            else:
                blocked = "not blocked"
            gdb.write("%d/%d, %s, %s, %s\n" % (thread.num, thread.ptid[1],
                                               iothread, replay, blocked))
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/qemugdb/tcg.py
|
#!/usr/bin/env python3
# Print the percentage of instructions spent in each phase of QEMU
# execution.
#
# Syntax:
# dissect.py [-h] -- <qemu executable> [<qemu executable options>] \
# <target executable> [<target executable options>]
#
# [-h] - Print the script arguments help message.
#
# Example of usage:
# dissect.py -- qemu-arm coulomb_double-arm
#
# This file is a part of the project "TCG Continuous Benchmarking".
#
# Copyright (C) 2020 Ahmed Karaman <ahmedkhaledkaraman@gmail.com>
# Copyright (C) 2020 Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import subprocess
import sys
import tempfile
def get_JIT_line(callgrind_data):
    """
    Search for the first instance of the JIT call in
    the callgrind_annotate output when ran using --tree=caller
    This is equivalent to the self number of instructions of JIT.

    Parameters:
        callgrind_data (list): callgrind_annotate output

    Returns:
        (int): Line number
    """
    # enumerate() replaces the index-based range(len()) scan; the first
    # non-blank line whose last column is "[???]" marks the JIT entry
    for i, line in enumerate(callgrind_data):
        if line.strip('\n') and line.split()[-1] == "[???]":
            return i
    sys.exit("Couldn't locate the JIT call ... Exiting.")
def main():
    """Run the given command under callgrind and report how QEMU's
    instructions split between code generation, JIT execution and helpers."""
    # Parse the command line arguments
    parser = argparse.ArgumentParser(
        usage='dissect.py [-h] -- '
        '<qemu executable> [<qemu executable options>] '
        '<target executable> [<target executable options>]')
    parser.add_argument('command', type=str, nargs='+', help=argparse.SUPPRESS)
    args = parser.parse_args()
    # Extract the needed variables from the args
    command = args.command
    # Insure that valgrind is installed
    check_valgrind = subprocess.run(
        ["which", "valgrind"], stdout=subprocess.DEVNULL)
    if check_valgrind.returncode:
        sys.exit("Please install valgrind before running the script.")
    # Save all intermediate files in a temporary directory
    with tempfile.TemporaryDirectory() as tmpdirname:
        # callgrind output file path
        data_path = os.path.join(tmpdirname, "callgrind.data")
        # callgrind_annotate output file path
        annotate_out_path = os.path.join(tmpdirname, "callgrind_annotate.out")
        # Run callgrind
        callgrind = subprocess.run((["valgrind",
                                     "--tool=callgrind",
                                     "--callgrind-out-file=" + data_path]
                                    + command),
                                   stdout=subprocess.DEVNULL,
                                   stderr=subprocess.PIPE)
        if callgrind.returncode:
            sys.exit(callgrind.stderr.decode("utf-8"))
        # Save callgrind_annotate output
        with open(annotate_out_path, "w") as output:
            callgrind_annotate = subprocess.run(
                ["callgrind_annotate", data_path, "--tree=caller"],
                stdout=output,
                stderr=subprocess.PIPE)
            if callgrind_annotate.returncode:
                sys.exit(callgrind_annotate.stderr.decode("utf-8"))
        # Read the callgrind_annotate output to callgrind_data[]
        callgrind_data = []
        with open(annotate_out_path, 'r') as data:
            callgrind_data = data.readlines()
        # Line number with the total number of instructions
        # NOTE(review): hard-coded offset depends on callgrind_annotate's
        # output format — re-verify when updating valgrind
        total_instructions_line_number = 20
        # Get the total number of instructions
        total_instructions_line_data = \
            callgrind_data[total_instructions_line_number]
        total_instructions = total_instructions_line_data.split()[0]
        total_instructions = int(total_instructions.replace(',', ''))
        # Line number with the JIT self number of instructions
        JIT_self_instructions_line_number = get_JIT_line(callgrind_data)
        # Get the JIT self number of instructions
        JIT_self_instructions_line_data = \
            callgrind_data[JIT_self_instructions_line_number]
        JIT_self_instructions = JIT_self_instructions_line_data.split()[0]
        JIT_self_instructions = int(JIT_self_instructions.replace(',', ''))
        # Line number with the JIT self + inclusive number of instructions
        # It's the line above the first JIT call when running with --tree=caller
        JIT_total_instructions_line_number = JIT_self_instructions_line_number-1
        # Get the JIT self + inclusive number of instructions
        JIT_total_instructions_line_data = \
            callgrind_data[JIT_total_instructions_line_number]
        JIT_total_instructions = JIT_total_instructions_line_data.split()[0]
        JIT_total_instructions = int(JIT_total_instructions.replace(',', ''))
        # Calculate number of instructions in helpers and code generation
        helpers_instructions = JIT_total_instructions-JIT_self_instructions
        code_generation_instructions = total_instructions-JIT_total_instructions
        # Print results (Insert commas in large numbers)
        # Print total number of instructions
        print('{:<20}{:>20}\n'.
              format("Total Instructions:",
                     format(total_instructions, ',')))
        # Print code generation instructions and percentage
        print('{:<20}{:>20}\t{:>6.3f}%'.
              format("Code Generation:",
                     format(code_generation_instructions, ","),
                     (code_generation_instructions / total_instructions) * 100))
        # Print JIT instructions and percentage
        print('{:<20}{:>20}\t{:>6.3f}%'.
              format("JIT Execution:",
                     format(JIT_self_instructions, ","),
                     (JIT_self_instructions / total_instructions) * 100))
        # Print helpers instructions and percentage
        print('{:<20}{:>20}\t{:>6.3f}%'.
              format("Helpers:",
                     format(helpers_instructions, ","),
                     (helpers_instructions/total_instructions)*100))


if __name__ == "__main__":
    main()
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/performance/dissect.py
|
#!/usr/bin/env python3
# Print the top N most executed functions in QEMU using perf.
# Syntax:
# topN_perf.py [-h] [-n] <number of displayed top functions> -- \
# <qemu executable> [<qemu executable options>] \
# <target executable> [<target execurable options>]
#
# [-h] - Print the script arguments help message.
# [-n] - Specify the number of top functions to print.
# - If this flag is not specified, the tool defaults to 25.
#
# Example of usage:
# topN_perf.py -n 20 -- qemu-arm coulomb_double-arm
#
# This file is a part of the project "TCG Continuous Benchmarking".
#
# Copyright (C) 2020 Ahmed Karaman <ahmedkhaledkaraman@gmail.com>
# Copyright (C) 2020 Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import subprocess
import sys
# Parse the command line arguments
parser = argparse.ArgumentParser(
    usage='topN_perf.py [-h] [-n] <number of displayed top functions > -- '
    '<qemu executable> [<qemu executable options>] '
    '<target executable> [<target executable options>]')
parser.add_argument('-n', dest='top', type=int, default=25,
                    help='Specify the number of top functions to print.')
parser.add_argument('command', type=str, nargs='+', help=argparse.SUPPRESS)
args = parser.parse_args()

# Extract the needed variables from the args
command = args.command
top = args.top

# Insure that perf is installed
check_perf_presence = subprocess.run(["which", "perf"],
                                     stdout=subprocess.DEVNULL)
if check_perf_presence.returncode:
    sys.exit("Please install perf before running the script!")

# Insure user has previllage to run perf
check_perf_executability = subprocess.run(["perf", "stat", "ls", "/"],
                                          stdout=subprocess.DEVNULL,
                                          stderr=subprocess.DEVNULL)
if check_perf_executability.returncode:
    sys.exit(
"""
Error:
You may not have permission to collect stats.
Consider tweaking /proc/sys/kernel/perf_event_paranoid,
which controls use of the performance events system by
unprivileged users (without CAP_SYS_ADMIN).
-1: Allow use of (almost) all events by all users
Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK
0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN
Disallow raw tracepoint access by users without CAP_SYS_ADMIN
1: Disallow CPU event access by users without CAP_SYS_ADMIN
2: Disallow kernel profiling by users without CAP_SYS_ADMIN
To make this setting permanent, edit /etc/sysctl.conf too, e.g.:
kernel.perf_event_paranoid = -1
* Alternatively, you can run this script under sudo privileges.
"""
)

# Run perf record
# NOTE(review): fixed /tmp paths are racy if two instances run at once
perf_record = subprocess.run((["perf", "record", "--output=/tmp/perf.data"] +
                              command),
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.PIPE)
if perf_record.returncode:
    os.unlink('/tmp/perf.data')
    sys.exit(perf_record.stderr.decode("utf-8"))

# Save perf report output to /tmp/perf_report.out
with open("/tmp/perf_report.out", "w") as output:
    perf_report = subprocess.run(
        ["perf", "report", "--input=/tmp/perf.data", "--stdio"],
        stdout=output,
        stderr=subprocess.PIPE)
    if perf_report.returncode:
        os.unlink('/tmp/perf.data')
        output.close()
        os.unlink('/tmp/perf_report.out')
        sys.exit(perf_report.stderr.decode("utf-8"))

# Read the reported data to functions[]
functions = []
with open("/tmp/perf_report.out", "r") as data:
    # Only read lines that are not comments (comments start with #)
    # Only read lines that are not empty
    functions = [line for line in data.readlines() if line and line[0]
                 != '#' and line[0] != "\n"]

# Limit the number of top functions to "top"
number_of_top_functions = top if len(functions) > top else len(functions)

# Store the data of the top functions in top_functions[]
top_functions = functions[:number_of_top_functions]

# Print table header
print('{:>4} {:>10} {:<30} {}\n{} {} {} {}'.format('No.',
                                                   'Percentage',
                                                   'Name',
                                                   'Invoked by',
                                                   '-' * 4,
                                                   '-' * 10,
                                                   '-' * 30,
                                                   '-' * 25))

# Print top N functions
for (index, function) in enumerate(top_functions, start=1):
    function_data = function.split()
    function_percentage = function_data[0]
    function_name = function_data[-1]
    function_invoker = ' '.join(function_data[2:-2])
    print('{:>4} {:>10} {:<30} {}'.format(index,
                                          function_percentage,
                                          function_name,
                                          function_invoker))

# Remove intermediate files
os.unlink('/tmp/perf.data')
os.unlink('/tmp/perf_report.out')
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/performance/topN_perf.py
|
#!/usr/bin/env python3
# Print the top N most executed functions in QEMU using callgrind.
# Syntax:
# topN_callgrind.py [-h] [-n] <number of displayed top functions> -- \
# <qemu executable> [<qemu executable options>] \
# <target executable> [<target execurable options>]
#
# [-h] - Print the script arguments help message.
# [-n] - Specify the number of top functions to print.
# - If this flag is not specified, the tool defaults to 25.
#
# Example of usage:
# topN_callgrind.py -n 20 -- qemu-arm coulomb_double-arm
#
# This file is a part of the project "TCG Continuous Benchmarking".
#
# Copyright (C) 2020 Ahmed Karaman <ahmedkhaledkaraman@gmail.com>
# Copyright (C) 2020 Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import subprocess
import sys
# Parse the command line arguments
parser = argparse.ArgumentParser(
    usage='topN_callgrind.py [-h] [-n] <number of displayed top functions> -- '
    '<qemu executable> [<qemu executable options>] '
    '<target executable> [<target executable options>]')
parser.add_argument('-n', dest='top', type=int, default=25,
                    help='Specify the number of top functions to print.')
parser.add_argument('command', type=str, nargs='+', help=argparse.SUPPRESS)
args = parser.parse_args()

# Extract the needed variables from the args
command = args.command
top = args.top

# Insure that valgrind is installed
check_valgrind_presence = subprocess.run(["which", "valgrind"],
                                         stdout=subprocess.DEVNULL)
if check_valgrind_presence.returncode:
    sys.exit("Please install valgrind before running the script!")

# Run callgrind
# NOTE(review): fixed /tmp paths are racy if two instances run at once
callgrind = subprocess.run((
    ["valgrind", "--tool=callgrind", "--callgrind-out-file=/tmp/callgrind.data"]
    + command),
    stdout=subprocess.DEVNULL,
    stderr=subprocess.PIPE)
if callgrind.returncode:
    sys.exit(callgrind.stderr.decode("utf-8"))

# Save callgrind_annotate output to /tmp/callgrind_annotate.out
with open("/tmp/callgrind_annotate.out", "w") as output:
    callgrind_annotate = subprocess.run(["callgrind_annotate",
                                         "/tmp/callgrind.data"],
                                        stdout=output,
                                        stderr=subprocess.PIPE)
    if callgrind_annotate.returncode:
        os.unlink('/tmp/callgrind.data')
        output.close()
        os.unlink('/tmp/callgrind_annotate.out')
        sys.exit(callgrind_annotate.stderr.decode("utf-8"))

# Read the callgrind_annotate output to callgrind_data[]
callgrind_data = []
with open('/tmp/callgrind_annotate.out', 'r') as data:
    callgrind_data = data.readlines()

# Line number with the total number of instructions
# NOTE(review): hard-coded offsets depend on callgrind_annotate's output
# format — re-verify when updating valgrind
total_instructions_line_number = 20
# Get the total number of instructions
total_instructions_line_data = callgrind_data[total_instructions_line_number]
total_number_of_instructions = total_instructions_line_data.split(' ')[0]
total_number_of_instructions = int(
    total_number_of_instructions.replace(',', ''))

# Line number with the top function
first_func_line = 25
# Number of functions recorded by callgrind, last two lines are always empty
number_of_functions = len(callgrind_data) - first_func_line - 2

# Limit the number of top functions to "top"
number_of_top_functions = (top if number_of_functions >
                           top else number_of_functions)

# Store the data of the top functions in top_functions[]
top_functions = callgrind_data[first_func_line:
                               first_func_line + number_of_top_functions]

# Print table header
print('{:>4} {:>10} {:<30} {}\n{} {} {} {}'.format('No.',
                                                   'Percentage',
                                                   'Function Name',
                                                   'Source File',
                                                   '-' * 4,
                                                   '-' * 10,
                                                   '-' * 30,
                                                   '-' * 30,
                                                   ))

# Print top N functions
for (index, function) in enumerate(top_functions, start=1):
    function_data = function.split()
    # Calculate function percentage
    function_instructions = float(function_data[0].replace(',', ''))
    function_percentage = (function_instructions /
                           total_number_of_instructions)*100
    # Get function name and source files path
    function_source_file, function_name = function_data[1].split(':')
    # Print extracted data
    print('{:>4} {:>9.3f}% {:<30} {}'.format(index,
                                             round(function_percentage, 3),
                                             function_name,
                                             function_source_file))

# Remove intermediate files
os.unlink('/tmp/callgrind.data')
os.unlink('/tmp/callgrind_annotate.out')
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/performance/topN_callgrind.py
|
#!/usr/bin/env python3
#
# Module information generator
#
# Copyright Red Hat, Inc. 2015 - 2016
#
# Authors:
# Marc Mari <markmb@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import sys
import os
def get_string_struct(line):
    """Extract the value from a C designated-initializer line.

    Expects a line shaped like '.field_name = "value",' and returns the
    value with the quotes and the trailing separator character removed.
    """
    tokens = line.split()
    # tokens[0] is the field name, tokens[1] is '=', tokens[2] the value.
    return tokens[2].replace('"', '')[:-1]
def add_module(fheader, library, format_name, protocol_name):
    """Append one block_driver_modules[] entry to the generated header.

    fheader       -- writable file object for the generated header
    library       -- module (shared object) base name
    format_name   -- driver's .format_name value, or "" to omit the field
    protocol_name -- driver's .protocol_name value, or "" to omit the field
    """
    lines = []
    lines.append('.library_name = "' + library + '",')
    # Empty strings mean the driver did not define that field.
    if format_name != "":
        lines.append('.format_name = "' + format_name + '",')
    if protocol_name != "":
        lines.append('.protocol_name = "' + protocol_name + '",')
    text = '\n '.join(lines)
    fheader.write('\n {\n ' + text + '\n },')
def process_file(fheader, filename):
    """Scan one block-driver .c file and emit a table entry per driver.

    Looks for 'static BlockDriver' definitions and collects their
    .format_name / .protocol_name initializers; each complete definition
    (terminated by '};') is written out via add_module().
    """
    # This parser assumes the coding style rules are being followed
    with open(filename, "r") as cfile:
        found_start = False
        # The module's library name is the source file's base name.
        library, _ = os.path.splitext(os.path.basename(filename))
        for line in cfile:
            if found_start:
                line = line.replace('\n', '')
                if line.find(".format_name") != -1:
                    format_name = get_string_struct(line)
                elif line.find(".protocol_name") != -1:
                    protocol_name = get_string_struct(line)
                elif line == "};":
                    # End of the BlockDriver initializer: flush the entry.
                    add_module(fheader, library, format_name, protocol_name)
                    found_start = False
            elif line.find("static BlockDriver") != -1:
                # Start of a driver definition; reset the collected fields.
                found_start = True
                format_name = ""
                protocol_name = ""
def print_top(fheader):
    """Write the fixed header of the generated C file (banner, guard,
    and the opening of the block_driver_modules[] table)."""
    fheader.write('''/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
 * QEMU Block Module Infrastructure
 *
 * Authors:
 * Marc Mari <markmb@redhat.com>
 */
''')
    fheader.write('''#ifndef QEMU_MODULE_BLOCK_H
#define QEMU_MODULE_BLOCK_H
static const struct {
    const char *format_name;
    const char *protocol_name;
    const char *library_name;
} block_driver_modules[] = {''')
def print_bottom(fheader):
    """Close the block_driver_modules[] table and the include guard."""
    fheader.write('''
};
#endif
''')
if __name__ == '__main__':
    # First argument: output file
    # All other arguments: modules source files (.c)
    output_file = sys.argv[1]
    with open(output_file, 'w') as fheader:
        print_top(fheader)
        for filename in sys.argv[2:]:
            if os.path.isfile(filename):
                process_file(fheader, filename)
            else:
                # Abort on the first missing input file.
                print("File " + filename + " does not exist.", file=sys.stderr)
                sys.exit(1)
        print_bottom(fheader)
    sys.exit(0)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/modules/module_block.py
|
#!/usr/bin/env python3
#
# Simple benchmarking framework
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import math
import tabulate
# We want leading whitespace for difference row cells (see below)
tabulate.PRESERVE_WHITESPACE = True
def format_value(x, stdev):
    """Render a measurement as text, appending the relative standard
    deviation (as a rounded-up percentage) when it is notable."""
    rel_err = stdev / x * 100  # stdev as a percentage of the value
    if rel_err < 1.5:
        # Deviation is negligible; show the bare value only.
        return f'{x:.2g}'
    return f'{x:.2g} ± {math.ceil(rel_err)}%'
def result_to_text(result):
    """Return text representation of bench_one() returned dict."""
    if 'average' not in result:
        # No successful run at all.
        return 'FAILED'
    text = format_value(result['average'], result['stdev'])
    if 'n-failed' in result:
        # Annotate partially failed measurements.
        text += '\n({} failed)'.format(result['n-failed'])
    return text
def results_dimension(results):
    """Return the dimension shared by every result cell.

    Asserts that all cells agree on one dimension and that it is either
    'iops' or 'seconds'.
    """
    dims = {
        results['tab'][case['id']][env['id']]['dimension']
        for case in results['cases']
        for env in results['envs']
    }
    assert len(dims) == 1
    dim = dims.pop()
    assert dim in ('iops', 'seconds')
    return dim
def results_to_text(results):
    """Return text representation of bench() returned dict."""
    n_columns = len(results['envs'])
    # With more than two environments, name the columns A, B, ... so the
    # difference cells below can reference them.
    named_columns = n_columns > 2
    dim = results_dimension(results)
    tab = []
    if named_columns:
        # Environment columns are named A, B, ...
        tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)])
    tab.append([''] + [c['id'] for c in results['envs']])
    for case in results['cases']:
        row = [case['id']]
        case_results = results['tab'][case['id']]
        for env in results['envs']:
            res = case_results[env['id']]
            row.append(result_to_text(res))
        tab.append(row)
    # Add row of difference between columns. For each column starting from
    # B we calculate difference with all previous columns.
    # NOTE(review): this section is outside the per-case loop, so
    # `case_results` here refers to the LAST case only; confirm whether it
    # was meant to be emitted once per case.
    row = ['', '']  # case name and first column
    for i in range(1, n_columns):
        cell = ''
        env = results['envs'][i]
        res = case_results[env['id']]
        if 'average' not in res:
            # Failed result
            row.append(cell)
            continue
        for j in range(0, i):
            env_j = results['envs'][j]
            res_j = case_results[env_j['id']]
            cell += ' '
            if 'average' not in res_j:
                # Failed result
                cell += '--'
                continue
            col_j = tab[0][j + 1] if named_columns else ''
            # Relative difference of column i vs column j, in percent.
            diff_pr = round((res['average'] - res_j['average']) /
                            res_j['average'] * 100)
            cell += f' {col_j}{diff_pr:+}%'
        row.append(cell)
    tab.append(row)
    return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
if __name__ == '__main__':
    import sys
    import json
    # Standalone mode: render a results.json file produced by bench().
    if len(sys.argv) < 2:
        print(f'USAGE: {sys.argv[0]} results.json')
        exit(1)
    with open(sys.argv[1]) as f:
        print(results_to_text(json.load(f)))
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/simplebench/results_to_text.py
|
# Parser for test templates
#
# Copyright (c) 2021 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import itertools
from lark import Lark
# Lark grammar for test templates: plain text interleaved with
# {a|b|...} column switches and [a|b|...] row switches.
grammar = """
start: ( text | column_switch | row_switch )+
column_switch: "{" text ["|" text]+ "}"
row_switch: "[" text ["|" text]+ "]"
text: /[^|{}\[\]]+/
"""
# Shared parser instance used by Templater below.
parser = Lark(grammar)
class Templater:
    """Expand a test template into a grid of concrete strings.

    Column variants come from {a|b|...} switches, row variants from
    [a|b|...] switches; self.columns / self.rows hold the cartesian
    products of the respective choice sets.
    """

    def __init__(self, template):
        # Parse once; the tree is reused by every gen() call.
        self.tree = parser.parse(template)
        c_switches = []
        r_switches = []
        for x in self.tree.children:
            if x.data == 'column_switch':
                c_switches.append([el.children[0].value for el in x.children])
            elif x.data == 'row_switch':
                r_switches.append([el.children[0].value for el in x.children])
        self.columns = list(itertools.product(*c_switches))
        self.rows = list(itertools.product(*r_switches))

    def gen(self, column, row):
        """Render the template for one (column, row) combination.

        column -- one element of self.columns
        row    -- one element of self.rows
        """
        i = 0
        j = 0
        result = []
        # Walk the parse tree in order, substituting each switch with the
        # corresponding choice from the supplied column/row tuples.
        for x in self.tree.children:
            if x.data == 'text':
                result.append(x.children[0].value)
            elif x.data == 'column_switch':
                result.append(column[i])
                i += 1
            elif x.data == 'row_switch':
                result.append(row[j])
                j += 1
        return ''.join(result)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/simplebench/table_templater.py
|
#!/usr/bin/env python3
#
# Benchmark preallocate filter
#
# Copyright (c) 2020 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import subprocess
import re
import json
import simplebench
from results_to_text import results_to_text
def qemu_img_bench(args):
    """Run a qemu-img invocation and extract its reported runtime.

    args -- full command line (binary plus arguments)

    Returns {'seconds': float} on success and {'error': str} on failure;
    the return shape is compatible with the simplebench library.
    """
    proc = subprocess.run(args, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, universal_newlines=True)
    if proc.returncode != 0:
        return {'error': f'qemu-img failed: {proc.returncode}: {proc.stdout}'}
    try:
        # 'qemu-img bench' prints its total runtime on success.
        found = re.search(r'Run completed in (\d+.\d+) seconds.', proc.stdout)
        return {'seconds': float(found.group(1))}
    except Exception:
        return {'error': f'failed to parse qemu-img output: {proc.stdout}'}
def bench_func(env, case):
    """Benchmark one cell: create a fresh qcow2 image and run qemu-img bench.

    env  -- dict with 'qemu-img-binary' (path) and 'prealloc' (bool)
    case -- dict with 'dir' (path), 'count' (int) and 'block-size' (str)
    """
    fname = f"{case['dir']}/prealloc-test.qcow2"
    # Start from a fresh image every run; a missing old one is fine.
    try:
        os.remove(fname)
    except OSError:
        pass
    subprocess.run([env['qemu-img-binary'], 'create', '-f', 'qcow2', fname,
                    '16G'], stdout=subprocess.DEVNULL,
                   stderr=subprocess.DEVNULL, check=True)
    args = [env['qemu-img-binary'], 'bench', '-c', str(case['count']),
            '-d', '64', '-s', case['block-size'], '-t', 'none', '-n', '-w']
    if env['prealloc']:
        # Insert the preallocate filter between qcow2 and the file node.
        args += ['--image-opts',
                 'driver=qcow2,file.driver=preallocate,file.file.driver=file,'
                 f'file.file.filename={fname}']
    else:
        args += ['-f', 'qcow2', fname]
    return qemu_img_bench(args)
def auto_count_bench_func(env, case):
    """Run bench_func, auto-scaling case['count'] toward ~5s of work,
    and report the achieved IOPS in the result."""
    case['count'] = 100
    # Grow the request count by 10x until a run lasts at least one second.
    res = bench_func(env, case)
    while 'error' not in res and res['seconds'] < 1:
        case['count'] *= 10
        res = bench_func(env, case)
    if 'error' in res:
        return res
    # One more calibrated run, aiming at roughly five seconds of work.
    if res['seconds'] < 5:
        case['count'] = round(case['count'] * 5 / res['seconds'])
        res = bench_func(env, case)
        if 'error' in res:
            return res
    res['iops'] = case['count'] / res['seconds']
    return res
if __name__ == '__main__':
    # CLI: first argument is the qemu-img binary, the rest are
    # DISK_NAME:DIR_PATH pairs naming test disks and their directories.
    if len(sys.argv) < 2:
        print(f'USAGE: {sys.argv[0]} <qemu-img binary> '
              'DISK_NAME:DIR_PATH ...')
        exit(1)
    qemu_img = sys.argv[1]
    # Two columns: the same binary with and without the preallocate filter.
    envs = [
        {
            'id': 'no-prealloc',
            'qemu-img-binary': qemu_img,
            'prealloc': False
        },
        {
            'id': 'prealloc',
            'qemu-img-binary': qemu_img,
            'prealloc': True
        }
    ]
    aligned_cases = []
    unaligned_cases = []
    for disk in sys.argv[2:]:
        name, path = disk.split(':')
        aligned_cases.append({
            'id': f'{name}, aligned sequential 16k',
            'block-size': '16k',
            'dir': path
        })
        unaligned_cases.append({
            # NOTE(review): the id says 64k but block-size is '16k';
            # confirm whether the label or the size is the intended one.
            'id': f'{name}, unaligned sequential 64k',
            'block-size': '16k',
            'dir': path
        })
    result = simplebench.bench(auto_count_bench_func, envs,
                               aligned_cases + unaligned_cases, count=5)
    print(results_to_text(result))
    with open('results.json', 'w') as f:
        json.dump(result, f, indent=4)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/simplebench/bench_prealloc.py
|
#!/usr/bin/env python3
#
# Benchmark block jobs
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import subprocess
import socket
import json
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python'))
from qemu.machine import QEMUMachine
from qemu.qmp import QMPConnectError
def bench_block_job(cmd, cmd_args, qemu_args):
    """Benchmark a block job.

    cmd       -- qmp command to run block-job (like blockdev-backup)
    cmd_args  -- dict of qmp command arguments
    qemu_args -- list of QEMU command line arguments, including the path
                 to the QEMU binary as the first element

    Returns {'seconds': float} on success and {'error': str} on failure;
    the dict may contain an additional 'vm-log' field. The return value
    is compatible with the simplebench lib.
    """
    vm = QEMUMachine(qemu_args[0], args=qemu_args[1:])
    try:
        vm.launch()
    except OSError as e:
        return {'error': 'popen failed: ' + str(e)}
    except (QMPConnectError, socket.timeout):
        return {'error': 'qemu failed: ' + str(vm.get_log())}
    try:
        # Start the job; any non-empty reply is an error.
        res = vm.qmp(cmd, **cmd_args)
        if res != {'return': {}}:
            vm.shutdown()
            return {'error': '"{}" command failed: {}'.format(cmd, str(res))}
        # Timing starts at the job's 'created' state-change event.
        e = vm.event_wait('JOB_STATUS_CHANGE')
        assert e['data']['status'] == 'created'
        start_ms = e['timestamp']['seconds'] * 1000000 + \
            e['timestamp']['microseconds']
        # NOTE(review): 'timeout=True' looks like a boolean passed where a
        # number of seconds is expected — confirm against events_wait().
        e = vm.events_wait((('BLOCK_JOB_READY', None),
                            ('BLOCK_JOB_COMPLETED', None),
                            ('BLOCK_JOB_FAILED', None)), timeout=True)
        if e['event'] not in ('BLOCK_JOB_READY', 'BLOCK_JOB_COMPLETED'):
            vm.shutdown()
            return {'error': 'block-job failed: ' + str(e),
                    'vm-log': vm.get_log()}
        if 'error' in e['data']:
            vm.shutdown()
            return {'error': 'block-job failed: ' + e['data']['error'],
                    'vm-log': vm.get_log()}
        # Timing ends at the ready/completed event.
        end_ms = e['timestamp']['seconds'] * 1000000 + \
            e['timestamp']['microseconds']
    finally:
        vm.shutdown()
    return {'seconds': (end_ms - start_ms) / 1000000.0}
def get_image_size(path):
    """Return the virtual size (in bytes) of a disk image via qemu-img."""
    info = subprocess.run(['qemu-img', 'info', '--out=json', path],
                          stdout=subprocess.PIPE, check=True).stdout
    return json.loads(info)['virtual-size']
def get_blockdev_size(obj):
    """Return the virtual size of a blockdev definition dict.

    Accepts either a raw file node ('filename' at the top level) or a
    format node wrapping a file node (obj['file']['filename']).
    """
    if 'filename' in obj:
        img = obj['filename']
    else:
        img = obj['file']['filename']
    return get_image_size(img)
# Bench backup or mirror
def bench_block_copy(qemu_binary, cmd, cmd_options, source, target):
    """Helper to run bench_block_job() for mirror or backup.

    Note: mutates the caller's dicts — 'node-name' is injected into
    source/target, and job options are injected into cmd_options.
    """
    assert cmd in ('blockdev-backup', 'blockdev-mirror')
    if target['driver'] == 'qcow2':
        # Recreate the qcow2 target from scratch, sized like the source.
        try:
            os.remove(target['file']['filename'])
        except OSError:
            pass
        subprocess.run(['qemu-img', 'create', '-f', 'qcow2',
                        target['file']['filename'],
                        str(get_blockdev_size(source))],
                       stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL, check=True)
    source['node-name'] = 'source'
    target['node-name'] = 'target'
    cmd_options['job-id'] = 'job0'
    cmd_options['device'] = 'source'
    cmd_options['target'] = 'target'
    cmd_options['sync'] = 'full'
    return bench_block_job(cmd, cmd_options,
                           [qemu_binary,
                            '-blockdev', json.dumps(source),
                            '-blockdev', json.dumps(target)])
def drv_file(filename, o_direct=True):
    """Build a 'file' blockdev node dict.

    With o_direct (the default) the node requests O_DIRECT caching and
    native AIO; otherwise QEMU's defaults are used.
    """
    if not o_direct:
        return {'driver': 'file', 'filename': filename}
    return {'driver': 'file', 'filename': filename,
            'cache': {'direct': True}, 'aio': 'native'}
def drv_nbd(host, port):
    """Build an 'nbd' blockdev node dict for an inet server."""
    server = {'type': 'inet', 'host': host, 'port': port}
    return {'driver': 'nbd', 'server': server}
def drv_qcow2(file):
    """Wrap a file node dict in a 'qcow2' format node."""
    return dict(driver='qcow2', file=file)
if __name__ == '__main__':
    import sys
    # CLI: <qmp command> <json-encoded args> <qemu binary and arguments...>
    if len(sys.argv) < 4:
        print('USAGE: {} <qmp block-job command name> '
              '<json string of arguments for the command> '
              '<qemu binary path and arguments>'.format(sys.argv[0]))
        exit(1)
    res = bench_block_job(sys.argv[1], json.loads(sys.argv[2]), sys.argv[3:])
    if 'seconds' in res:
        # Success: print just the duration.
        print('{:.2f}'.format(res['seconds']))
    else:
        # Failure: dump the whole error dict.
        print(res)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/simplebench/bench_block_job.py
|
#!/usr/bin/env python3
#
# Process img-bench test templates
#
# Copyright (c) 2021 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import subprocess
import re
import json
import simplebench
from results_to_text import results_to_text
from table_templater import Templater
def bench_func(env, case):
    """Render the template for one (env, case) cell and execute it.

    The rendered script must end with a 'qemu-img bench' invocation whose
    reported runtime is parsed as the result.
    """
    script = templater.gen(env['data'], case['data'])
    proc = subprocess.run(script, shell=True, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, universal_newlines=True)
    if proc.returncode != 0:
        return {'error': f'qemu-img failed: {proc.returncode}: {proc.stdout}'}
    try:
        found = re.search(r'Run completed in (\d+.\d+) seconds.', proc.stdout)
        return {'seconds': float(found.group(1))}
    except Exception:
        return {'error': f'failed to parse qemu-img output: {proc.stdout}'}
if __name__ == '__main__':
    # Any argument means the user wants help; the template itself is
    # always read from stdin.
    if len(sys.argv) > 1:
        print("""
Usage: img_bench_templater.py < path/to/test-template.sh
This script generates performance tests from a test template (example below),
runs them, and displays the results in a table. The template is read from
stdin. It must be written in bash and end with a `qemu-img bench` invocation
(whose result is parsed to get the test instance’s result).
Use the following syntax in the template to create the various different test
instances:
column templating: {var1|var2|...} - test will use different values in
different columns. You may use several {} constructions in the test, in this
case product of all choice-sets will be used.
row templating: [var1|var2|...] - similar thing to define rows (test-cases)
Test template example:
Assume you want to compare two qemu-img binaries, called qemu-img-old and
qemu-img-new in your build directory in two test-cases with 4K writes and 64K
writes. The template may look like this:
qemu_img=/path/to/qemu/build/qemu-img-{old|new}
$qemu_img create -f qcow2 /ssd/x.qcow2 1G
$qemu_img bench -c 100 -d 8 [-s 4K|-s 64K] -w -t none -n /ssd/x.qcow2
When passing this to stdin of img_bench_templater.py, the resulting comparison
table will contain two columns (for two binaries) and two rows (for two
test-cases).
In addition to displaying the results, script also stores results in JSON
format into results.json file in current directory.
""")
        sys.exit()
    templater = Templater(sys.stdin.read())
    # Columns come from {...} switches, rows from [...] switches.
    envs = [{'id': ' / '.join(x), 'data': x} for x in templater.columns]
    cases = [{'id': ' / '.join(x), 'data': x} for x in templater.rows]
    result = simplebench.bench(bench_func, envs, cases, count=5,
                               initial_run=False)
    print(results_to_text(result))
    with open('results.json', 'w') as f:
        json.dump(result, f, indent=4)
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/simplebench/img_bench_templater.py
|
#!/usr/bin/env python
#
# Simple benchmarking framework
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import statistics
import subprocess
import time
def do_drop_caches():
    """Sync dirty pages and drop kernel caches (requires root).

    Writes 3 to /proc/sys/vm/drop_caches so each run starts cold.
    """
    subprocess.run('sync; echo 3 > /proc/sys/vm/drop_caches', shell=True,
                   check=True)
def bench_one(test_func, test_env, test_case, count=5, initial_run=True,
              slow_limit=100, drop_caches=False):
    """Benchmark one test-case.

    test_func   -- benchmarking function with prototype
                   test_func(env, case), which takes test_env and test_case
                   arguments and on success returns dict with 'seconds' or
                   'iops' (or both) fields, specifying the benchmark result.
                   If both 'iops' and 'seconds' are provided, 'iops' is
                   considered the main one and 'seconds' is just additional
                   info. On failure test_func should return {'error': str}.
                   The returned dict may contain any other additional fields.
    test_env    -- test environment - opaque first argument for test_func
    test_case   -- test case - opaque second argument for test_func
    count       -- how many times to call test_func, to calculate average
    initial_run -- do an initial run of test_func which doesn't get into
                   the result
    slow_limit  -- stop early when a run exceeds slow_limit seconds
                   (the initial run is not measured)
    drop_caches -- drop system caches before each run

    Returns dict with the following fields:
        'runs':      list of test_func results
        'dimension': dimension of results, may be 'seconds' or 'iops'
        'average':   average value (iops or seconds) per run (exists only
                     if at least one run succeeded)
        'stdev':     standard deviation of results (exists only if at
                     least one run succeeded)
        'n-failed':  number of failed runs (exists only if at least one
                     run failed)
    """
    if initial_run:
        print('  #initial run:')
        # Fix: honor the drop_caches flag. Calling do_drop_caches()
        # unconditionally requires root and made the flag a no-op.
        if drop_caches:
            do_drop_caches()
        print('   ', test_func(test_env, test_case))
    runs = []
    for i in range(count):
        t = time.time()
        print('  #run {}'.format(i+1))
        if drop_caches:
            do_drop_caches()
        res = test_func(test_env, test_case)
        print('   ', res)
        runs.append(res)
        # Bail out early if a single run already blew the time budget.
        if time.time() - t > slow_limit:
            print('    - run is too slow, stop here')
            break
    count = len(runs)
    result = {'runs': runs}
    succeeded = [r for r in runs if ('seconds' in r or 'iops' in r)]
    if succeeded:
        # 'iops' wins over 'seconds' when both are present; all successful
        # runs must agree on the dimension.
        if 'iops' in succeeded[0]:
            assert all('iops' in r for r in succeeded)
            dim = 'iops'
        else:
            assert all('seconds' in r for r in succeeded)
            assert all('iops' not in r for r in succeeded)
            dim = 'seconds'
        result['dimension'] = dim
        result['average'] = statistics.mean(r[dim] for r in succeeded)
        if len(succeeded) == 1:
            # statistics.stdev needs at least two data points.
            result['stdev'] = 0
        else:
            result['stdev'] = statistics.stdev(r[dim] for r in succeeded)
    if len(succeeded) < count:
        result['n-failed'] = count - len(succeeded)
    return result
def bench(test_func, test_envs, test_cases, *args, **vargs):
    """Run test_func for every (environment, case) pair and tabulate.

    test_func   -- benchmarking function, see bench_one for description
    test_envs   -- list of test environments, see bench_one
    test_cases  -- list of test cases, see bench_one
    args, vargs -- additional arguments for bench_one

    Returns dict with the following fields:
        'envs':  test_envs
        'cases': test_cases
        'tab':   nested dict where tab[case-id][env-id] holds the
                 bench_one result for that case and environment
    """
    tab = {}
    n_tests = len(test_envs) * len(test_cases)
    n = 1
    for env in test_envs:
        for case in test_cases:
            print('Testing {}/{}: {} :: {}'.format(n, n_tests,
                                                   env['id'], case['id']))
            row = tab.setdefault(case['id'], {})
            row[env['id']] = bench_one(test_func, env, case, *args, **vargs)
            n += 1
    print('Done')
    return {'envs': test_envs, 'cases': test_cases, 'tab': tab}
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/simplebench/simplebench.py
|
#!/usr/bin/env python3
#
# Test to compare performance of write requests for two qemu-img binary files.
#
# The idea of the test comes from intention to check the benefit of c8bb23cbdbe
# "qcow2: skip writing zero buffers to empty COW areas".
#
# Copyright (c) 2020 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import subprocess
import simplebench
from results_to_text import results_to_text
def bench_func(env, case):
    """ Handle one "cell" of benchmarking table. """
    qemu_img = env['qemu_img']
    image = env['image_name']
    return bench_write_req(qemu_img, image, case['block_size'],
                           case['block_offset'], case['cluster_size'])
def qemu_img_pipe(*args):
    '''Run qemu-img and return its combined stdout/stderr output.

    args -- the full command line (binary plus arguments)

    A warning is printed to stderr if the child was killed by a signal.
    '''
    subp = subprocess.Popen(list(args),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True)
    # Fix: read the output *before* waiting for exit. Calling wait() first
    # can deadlock when the child fills the stdout pipe buffer;
    # communicate() both drains the pipe and reaps the process.
    output = subp.communicate()[0]
    if subp.returncode < 0:
        sys.stderr.write('qemu-img received signal %i: %s\n'
                         % (-subp.returncode, ' '.join(list(args))))
    return output
def bench_write_req(qemu_img, image_name, block_size, block_offset,
                    cluster_size):
    """Benchmark write requests.

    The function creates a QCOW2 image with the given path/name. Then it
    runs the 'qemu-img bench' command and makes a series of write requests
    on the image clusters. Finally, it returns the total time of the write
    operations on the disk.

    qemu_img     -- path to the qemu-img executable file
    image_name   -- QCOW2 image name to create
    block_size   -- size of a block to write to clusters
    block_offset -- offset of the block in clusters
    cluster_size -- size of the image cluster

    Returns {'seconds': float} on success and {'error': str} on failure.
    The return value is compatible with the simplebench lib.

    NOTE(review): missing binary/path terminates the whole process via
    sys.exit() instead of returning an {'error': ...} dict — confirm this
    is intended.
    """
    if not os.path.isfile(qemu_img):
        print(f'File not found: {qemu_img}')
        sys.exit(1)
    image_dir = os.path.dirname(os.path.abspath(image_name))
    if not os.path.isdir(image_dir):
        print(f'Path not found: {image_name}')
        sys.exit(1)
    image_size = 1024 * 1024 * 1024
    args_create = [qemu_img, 'create', '-f', 'qcow2', '-o',
                   f'cluster_size={cluster_size}',
                   image_name, str(image_size)]
    # One write per cluster (minus one so the last request stays in range).
    count = int(image_size / cluster_size) - 1
    step = str(cluster_size)
    args_bench = [qemu_img, 'bench', '-w', '-n', '-t', 'none', '-c',
                  str(count), '-s', f'{block_size}', '-o', str(block_offset),
                  '-S', step, '-f', 'qcow2', image_name]
    try:
        qemu_img_pipe(*args_create)
    except OSError as e:
        os.remove(image_name)
        return {'error': 'qemu_img create failed: ' + str(e)}
    try:
        ret = qemu_img_pipe(*args_bench)
    except OSError as e:
        os.remove(image_name)
        return {'error': 'qemu_img bench failed: ' + str(e)}
    # The image is temporary; remove it regardless of the outcome.
    os.remove(image_name)
    if 'seconds' in ret:
        # Parse the duration out of "... N seconds." in the output.
        ret_list = ret.split()
        index = ret_list.index('seconds.')
        return {'seconds': float(ret_list[index-1])}
    else:
        return {'error': 'qemu_img bench failed: ' + ret}
if __name__ == '__main__':
    # CLI: two qemu-img binaries to compare plus an image path to create.
    if len(sys.argv) < 4:
        program = os.path.basename(sys.argv[0])
        print(f'USAGE: {program} <path to qemu-img binary file> '
              '<path to another qemu-img to compare performance with> '
              '<full or relative name for QCOW2 image to create>')
        exit(1)
    # Test-cases are "rows" in benchmark resulting table, 'id' is a caption
    # for the row, other fields are handled by bench_func.
    test_cases = [
        {
            'id': '<cluster front>',
            'block_size': 4096,
            'block_offset': 0,
            'cluster_size': 1048576
        },
        {
            'id': '<cluster middle>',
            'block_size': 4096,
            'block_offset': 524288,
            'cluster_size': 1048576
        },
        {
            # Request crossing a cluster boundary.
            'id': '<cross cluster>',
            'block_size': 1048576,
            'block_offset': 4096,
            'cluster_size': 1048576
        },
        {
            'id': '<cluster 64K>',
            'block_size': 4096,
            'block_offset': 0,
            'cluster_size': 65536
        },
    ]
    # Test-envs are "columns" in benchmark resulting table, 'id is a caption
    # for the column, other fields are handled by bench_func.
    # Set the paths below to desired values
    test_envs = [
        {
            'id': '<qemu-img binary 1>',
            'qemu_img': f'{sys.argv[1]}',
            'image_name': f'{sys.argv[3]}'
        },
        {
            'id': '<qemu-img binary 2>',
            'qemu_img': f'{sys.argv[2]}',
            'image_name': f'{sys.argv[3]}'
        },
    ]
    result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
                               initial_run=False)
    print(results_to_text(result))
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/simplebench/bench_write_req.py
|
#!/usr/bin/env python3
#
# Benchmark example
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import simplebench
from results_to_text import results_to_text
from bench_block_job import bench_block_copy, drv_file, drv_nbd
def bench_func(env, case):
    """ Handle one "cell" of benchmarking table. """
    # Fix: a comma was missing after the empty dict, which made this a
    # SyntaxError. The {} is the cmd_options argument of bench_block_copy.
    return bench_block_copy(env['qemu_binary'], env['cmd'], {},
                            case['source'], case['target'])
# You may set the following five variables to correct values, to turn this
# example to real benchmark.
ssd_source = '/path-to-raw-source-image-at-ssd'
ssd_target = '/path-to-raw-target-image-at-ssd'
hdd_target = '/path-to-raw-source-image-at-hdd'
nbd_ip = 'nbd-ip-addr'
nbd_port = 'nbd-port-number'
# Test-cases are "rows" in benchmark resulting table, 'id' is a caption for
# the row, other fields are handled by bench_func.
test_cases = [
    {
        'id': 'ssd -> ssd',
        'source': drv_file(ssd_source),
        'target': drv_file(ssd_target)
    },
    {
        'id': 'ssd -> hdd',
        'source': drv_file(ssd_source),
        'target': drv_file(hdd_target)
    },
    {
        'id': 'ssd -> nbd',
        'source': drv_file(ssd_source),
        'target': drv_nbd(nbd_ip, nbd_port)
    },
]
# Test-envs are "columns" in benchmark resulting table, 'id is a caption for
# the column, other fields are handled by bench_func.
test_envs = [
    {
        'id': 'backup-1',
        'cmd': 'blockdev-backup',
        'qemu_binary': '/path-to-qemu-binary-1'
    },
    {
        'id': 'backup-2',
        'cmd': 'blockdev-backup',
        'qemu_binary': '/path-to-qemu-binary-2'
    },
    {
        'id': 'mirror',
        'cmd': 'blockdev-mirror',
        'qemu_binary': '/path-to-qemu-binary-1'
    }
]
# NOTE: runs at import time; with the placeholder paths above this will
# fail until the variables are set to real images/binaries.
result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
print(results_to_text(result))
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/simplebench/bench-example.py
|
#!/usr/bin/env python3
#
# Bench backup block-job
#
# Copyright (c) 2020 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import argparse
import json
import simplebench
from results_to_text import results_to_text
from bench_block_job import bench_block_copy, drv_file, drv_nbd, drv_qcow2
def bench_func(env, case):
    """ Handle one "cell" of benchmarking table. """
    options = env.get('cmd-options', {})
    return bench_block_copy(env['qemu-binary'], env['cmd'], options,
                            case['source'], case['target'])
def bench(args):
    """Build test cases and environments from CLI args and run the benchmark.

    args -- argparse.Namespace; reads env, dir, nbd, test, compressed,
            qcow2_sources, target_cache, count, initial_run, drop_caches.
    Writes results.json and prints the result table.
    """
    test_cases = []
    # paths with colon not supported, so we just split by ':'
    dirs = dict(d.split(':') for d in args.dir)
    nbd_drv = None
    if args.nbd:
        nbd = args.nbd.split(':')
        host = nbd[0]
        port = '10809' if len(nbd) == 1 else nbd[1]
        nbd_drv = drv_nbd(host, port)
    # Build the row (test-case) list from src:dst directory-label pairs.
    for t in args.test:
        src, dst = t.split(':')
        if src == 'nbd' and dst == 'nbd':
            raise ValueError("Can't use 'nbd' label for both src and dst")
        if (src == 'nbd' or dst == 'nbd') and not nbd_drv:
            raise ValueError("'nbd' label used but --nbd is not given")
        if src == 'nbd':
            source = nbd_drv
        elif args.qcow2_sources:
            source = drv_qcow2(drv_file(dirs[src] + '/test-source.qcow2'))
        else:
            source = drv_file(dirs[src] + '/test-source')
        if dst == 'nbd':
            test_cases.append({'id': t, 'source': source, 'target': nbd_drv})
            continue
        if args.target_cache == 'both':
            target_caches = ['direct', 'cached']
        else:
            target_caches = [args.target_cache]
        for c in target_caches:
            o_direct = c == 'direct'
            fname = dirs[dst] + '/test-target'
            if args.compressed:
                fname += '.qcow2'
            target = drv_file(fname, o_direct=o_direct)
            if args.compressed:
                # Compressed backup needs a qcow2 format node on top.
                target = drv_qcow2(target)
            test_id = t
            if args.target_cache == 'both':
                test_id += f'({c})'
            test_cases.append({'id': test_id, 'source': source,
                               'target': target})
    # NOTE(review): 'binaries' is built here but never used below —
    # test_envs is reconstructed from args.env again. Looks like dead
    # leftover code; verify before removing.
    binaries = []  # list of (<label>, <path>, [<options>])
    for i, q in enumerate(args.env):
        name_path = q.split(':')
        if len(name_path) == 1:
            label = f'q{i}'
            path_opts = name_path[0].split(',')
        else:
            assert len(name_path) == 2  # paths with colon not supported
            label = name_path[0]
            path_opts = name_path[1].split(',')
        binaries.append((label, path_opts[0], path_opts[1:]))
    test_envs = []
    # Map of label -> binary path, so a later env may reuse an earlier label.
    bin_paths = {}
    for i, q in enumerate(args.env):
        opts = q.split(',')
        label_path = opts[0]
        opts = opts[1:]
        if ':' in label_path:
            # path with colon inside is not supported
            label, path = label_path.split(':')
            bin_paths[label] = path
        elif label_path in bin_paths:
            label = label_path
            path = bin_paths[label]
        else:
            path = label_path
            label = f'q{i}'
            bin_paths[label] = path
        # Collect per-env backup-job performance tuning options.
        x_perf = {}
        is_mirror = False
        for opt in opts:
            if opt == 'mirror':
                is_mirror = True
            elif opt == 'copy-range=on':
                x_perf['use-copy-range'] = True
            elif opt == 'copy-range=off':
                x_perf['use-copy-range'] = False
            elif opt.startswith('max-workers='):
                x_perf['max-workers'] = int(opt.split('=')[1])
        backup_options = {}
        if x_perf:
            backup_options['x-perf'] = x_perf
        if args.compressed:
            backup_options['compress'] = True
        if is_mirror:
            # Mirror jobs take no x-perf options.
            assert not x_perf
            test_envs.append({
                'id': f'mirror({label})',
                'cmd': 'blockdev-mirror',
                'qemu-binary': path
            })
        else:
            test_envs.append({
                'id': f'backup({label})\n' + '\n'.join(opts),
                'cmd': 'blockdev-backup',
                'cmd-options': backup_options,
                'qemu-binary': path
            })
    result = simplebench.bench(bench_func, test_envs, test_cases,
                               count=args.count, initial_run=args.initial_run,
                               drop_caches=args.drop_caches)
    with open('results.json', 'w') as f:
        json.dump(result, f, indent=4)
    print(results_to_text(result))
class ExtendAction(argparse.Action):
    """argparse action that accumulates values across repeated options.

    With nargs='+', each occurrence of the option appends its values to the
    same destination list instead of replacing it (like the stdlib 'extend'
    action).  The destination stays None when the option never appears.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        existing = getattr(namespace, self.dest) or []
        setattr(namespace, self.dest, existing + list(values))
# Command-line entry point: build the parser (all list-valued options use
# ExtendAction so they may be repeated) and hand the parsed namespace to
# bench().
if __name__ == '__main__':
    p = argparse.ArgumentParser('Backup benchmark', epilog='''
ENV format
(LABEL:PATH|LABEL|PATH)[,max-workers=N][,use-copy-range=(on|off)][,mirror]
LABEL short name for the binary
PATH path to the binary
max-workers set x-perf.max-workers of backup job
use-copy-range set x-perf.use-copy-range of backup job
mirror use mirror job instead of backup''',
                               formatter_class=argparse.RawTextHelpFormatter)

    p.add_argument('--env', nargs='+', help='''\
Qemu binaries with labels and options, see below
"ENV format" section''',
                   action=ExtendAction)
    p.add_argument('--dir', nargs='+', help='''\
Directories, each containing "test-source" and/or
"test-target" files, raw images to used in
benchmarking. File path with label, like
label:/path/to/directory''',
                   action=ExtendAction)
    p.add_argument('--nbd', help='''\
host:port for remote NBD image, (or just host, for
default port 10809). Use it in tests, label is "nbd"
(but you cannot create test nbd:nbd).''')
    p.add_argument('--test', nargs='+', help='''\
Tests, in form source-dir-label:target-dir-label''',
                   action=ExtendAction)
    p.add_argument('--compressed', help='''\
Use compressed backup. It automatically means
automatically creating qcow2 target with
lazy_refcounts for each test run''', action='store_true')
    p.add_argument('--qcow2-sources', help='''\
Use test-source.qcow2 images as sources instead of
test-source raw images''', action='store_true')
    p.add_argument('--target-cache', help='''\
Setup cache for target nodes. Options:
direct: default, use O_DIRECT and aio=native
cached: use system cache (Qemu default) and aio=threads (Qemu default)
both: generate two test cases for each src:dst pair''',
                   default='direct', choices=('direct', 'cached', 'both'))

    p.add_argument('--count', type=int, default=3, help='''\
Number of test runs per table cell''')

    # BooleanOptionalAction helps to support --no-initial-run option
    p.add_argument('--initial-run', action=argparse.BooleanOptionalAction,
                   help='''\
Do additional initial run per cell which doesn't count in result,
default true''')

    p.add_argument('--drop-caches', action='store_true', help='''\
Do "sync; echo 3 > /proc/sys/vm/drop_caches" before each test run''')

    bench(p.parse_args())
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/simplebench/bench-backup.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Convert plain qtest traces to C or Bash reproducers
Use this to help build bug-reports or create in-tree reproducers for bugs.
Note: This will not format C code for you. Pipe the output through
clang-format -style="{BasedOnStyle: llvm, IndentWidth: 4, ColumnLimit: 90}"
or similar
"""
import sys
import os
import argparse
import textwrap
from datetime import date
__author__ = "Alexander Bulekov <alxndr@bu.edu>"
__copyright__ = "Copyright (C) 2021, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Alexander Bulekov"
__email__ = "alxndr@bu.edu"
def c_header(owner):
    """Return the file header (license comment + includes) for a generated
    C test case, crediting *owner* with the current year."""
    year = date.today().year
    lines = [
        "/*",
        " * Autogenerated Fuzzer Test Case",
        " *",
        " * Copyright (c) {} {}".format(year, owner),
        " *",
        " * This work is licensed under the terms of the GNU GPL, version 2 or later.",
        " * See the COPYING file in the top-level directory.",
        " */",
        '#include "qemu/osdep.h"',
        '#include "libqos/libqtest.h"',
        "",  # keep the trailing newline the template produced
    ]
    return "\n".join(lines)
def c_comment(s):
    """ Return a multi-line C comment. Assume the text is already wrapped """
    body = "\n * ".join(s.splitlines())
    return "/*\n * " + body + "\n*/"
def print_c_function(s):
    """Print *s* line by line as the body of a C block comment.

    NOTE(review): only the opening "/* " and the " * " lines are printed;
    no closing " */" is emitted, and no caller is visible in this file.
    Presumably dead code or the close is printed elsewhere — confirm.
    """
    print("/* ")
    for l in s.splitlines():
        print(" * {}".format(l))
def bash_reproducer(path, args, trace):
    """Render a copy-pastable bash heredoc that pipes *trace* into the
    qemu binary at *path* invoked with *args*, wrapped to 72 columns."""
    header = "cat << EOF | {} {}".format(path, args)
    pieces = ['\\\n'.join(textwrap.wrap(header, 72,
                                        break_on_hyphens=False,
                                        drop_whitespace=False))]
    for line in trace.splitlines():
        pieces.append('\\\n'.join(textwrap.wrap(line, 72,
                                                drop_whitespace=False)))
    pieces.append("EOF")
    return "\n".join(pieces)
def c_reproducer(name, args, trace):
    """Translate a plain qtest *trace* into a static C test function.

    Returns the text of ``static void <name>(void)`` that replays the
    trace through libqtest calls, initialized with *args* (minus any
    qtest-specific options, which libqtest supplies itself).
    """
    # libqtest will add its own qtest args, so get rid of them
    for noise in ("-accel qtest", ",accel=qtest",
                  "-machine accel=qtest", "-qtest stdio"):
        args = args.replace(noise, "")

    out = ["static void {}(void)\n{{".format(name),
           'QTestState *s = qtest_init("{}");'.format(args)]
    for line in trace.splitlines():
        tokens = line.split()
        op = tokens[0]
        if op == "write":
            # Buffer write: turn the 0x... payload into a \xNN escape string.
            hexdata = tokens[3][2:]  # trim the leading "0x"
            assert len(hexdata) % 2 == 0
            pairs = [hexdata[k:k + 2] for k in range(0, len(hexdata), 2)]
            escaped = '\\x' + '\\x'.join(pairs)
            out.append('qtest_bufwrite(s, {}, "{}", {});'.format(
                tokens[1], escaped, tokens[2]))
        elif op.startswith(("in", "read")):
            # Single-operand accessors: inb/inw/inl, readb/readw/readl/readq.
            out.append("qtest_{}(s, {});".format(op, tokens[1]))
        elif op.startswith(("out", "write")):
            # Two-operand accessors: outb/outw/outl, writeb/writew/....
            out.append("qtest_{}(s, {}, {});".format(op, tokens[1], tokens[2]))
        elif op == "clock_step":
            if len(tokens) == 1:
                out.append("qtest_clock_step_next(s);")
            else:
                out.append("qtest_clock_step(s, {});".format(tokens[1]))
    out.append("qtest_quit(s);\n}")
    return "\n".join(out)
def c_main(name, arch):
    """Return the C main() that registers test *name* for target *arch*
    and runs the glib test harness."""
    template = ("int main(int argc, char **argv)\n"
                "{{\n"
                "const char *arch = qtest_get_arch();\n"
                "g_test_init(&argc, &argv, NULL);\n"
                'if (strcmp(arch, "{arch}") == 0) {{\n'
                'qtest_add_func("fuzz/{name}",{name});\n'
                "}}\n"
                "return g_test_run();\n"
                "}}")
    return template.format(name=name, arch=arch)
def main():
    """Build a crash reproducer from a qtest trace.

    Reads the trace from ``input_trace`` (stdin by default).  QEMU_PATH and
    QEMU_ARGS environment variables must describe the crashing qemu command
    line.  Depending on -bash/-c, prints a bash one-liner, a bare C
    function, or a complete C source file (header + function + main).
    """
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-bash", help="Only output a copy-pastable bash command",
                       action="store_true")
    group.add_argument("-c", help="Only output a c function",
                       action="store_true")
    parser.add_argument('-owner', help="If generating complete C source code, \
this specifies the Copyright owner",
                        nargs='?', default="<name of author>")
    parser.add_argument("-no_comment", help="Don't include a bash reproducer \
as a comment in the C reproducers",
                        action="store_true")
    parser.add_argument('-name', help="The name of the c function",
                        nargs='?', default="test_fuzz")
    parser.add_argument('input_trace', help="input QTest command sequence \
(stdin by default)",
                        nargs='?', type=argparse.FileType('r'),
                        default=sys.stdin)
    args = parser.parse_args()

    qemu_path = os.getenv("QEMU_PATH")
    qemu_args = os.getenv("QEMU_ARGS")
    if not qemu_args or not qemu_path:
        print("Please set QEMU_PATH and QEMU_ARGS environment variables")
        sys.exit(1)

    # The bash reproducer pipes the trace over stdio, so it needs -qtest stdio
    # even though libqtest (used by the C reproducer) adds that itself.
    bash_args = qemu_args
    if " -qtest stdio" not in qemu_args:
        bash_args += " -qtest stdio"

    # Guess the target architecture from the binary name
    # (e.g. "qemu-system-i386" -> "i386").
    arch = qemu_path.split("-")[-1]
    trace = args.input_trace.read().strip()

    if args.bash :
        print(bash_reproducer(qemu_path, bash_args, trace))
    else:
        output = ""
        if not args.c:
            output += c_header(args.owner) + "\n"
        if not args.no_comment:
            # Embed the equivalent bash command as a C comment for reference.
            output += c_comment(bash_reproducer(qemu_path, bash_args, trace))
        output += c_reproducer(args.name, qemu_args, trace)
        if not args.c:
            output += c_main(args.name, arch)
        print(output)


if __name__ == '__main__':
    main()
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/oss-fuzz/output_reproducer.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This takes a crashing qtest trace and tries to remove superfluous operations
"""
import sys
import os
import subprocess
import time
import struct
QEMU_ARGS = None
QEMU_PATH = None
TIMEOUT = 5
CRASH_TOKEN = None
# Minimization levels
M1 = False # try removing IO commands iteratively
M2 = False # try setting bits in operand of write/out to zero
write_suffix_lookup = {"b": (1, "B"),
"w": (2, "H"),
"l": (4, "L"),
"q": (8, "Q")}
def usage():
    """Print the usage text (CLI options plus the QEMU_PATH/QEMU_ARGS and
    optional CRASH_TOKEN environment-variable contract) and exit nonzero."""
    sys.exit("""\
Usage:
QEMU_PATH="/path/to/qemu" QEMU_ARGS="args" {} [Options] input_trace output_trace
By default, will try to use the second-to-last line in the output to identify
whether the crash occred. Optionally, manually set a string that idenitifes the
crash by setting CRASH_TOKEN=
Options:
-M1: enable a loop around the remove minimizer, which may help decrease some
timing dependant instructions. Off by default.
-M2: try setting bits in operand of write/out to zero. Off by default.
""".format((sys.argv[0])))
deduplication_note = """\n\
Note: While trimming the input, sometimes the mutated trace triggers a different
type crash but indicates the same bug. Under this situation, our minimizer is
incapable of recognizing and stopped from removing it. In the future, we may
use a more sophisticated crash case deduplication method.
\n"""
def check_if_trace_crashes(trace, path):
    """Run QEMU over *trace* (written to *path*) and report whether it crashes.

    The trace is fed to QEMU on stdin under a hard ``timeout -s 9`` of
    TIMEOUT seconds.  On the first call, CRASH_TOKEN is learned from the
    first three words of the second-to-last output line; every later call
    just scans the output for that token.  Returns True when the crash
    signature reproduces.
    """
    with open(path, "w") as tracefile:
        tracefile.write("".join(trace))

    # NOTE(review): shell=True with interpolated globals; QEMU_PATH/QEMU_ARGS
    # come from this tool's own environment, so it trusts its own user.
    rc = subprocess.Popen("timeout -s 9 {timeout}s {qemu_path} {qemu_args} 2>&1\
< {trace_path}".format(timeout=TIMEOUT,
                       qemu_path=QEMU_PATH,
                       qemu_args=QEMU_ARGS,
                       trace_path=path),
                          shell=True,
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          encoding="utf-8")
    global CRASH_TOKEN
    if CRASH_TOKEN is None:
        try:
            outs, _ = rc.communicate(timeout=5)
            # Learn the crash signature from the second-to-last output line.
            CRASH_TOKEN = " ".join(outs.splitlines()[-2].split()[0:3])
        except subprocess.TimeoutExpired:
            print("subprocess.TimeoutExpired")
            return False
        print("Identifying Crashes by this string: {}".format(CRASH_TOKEN))
        global deduplication_note
        print(deduplication_note)
        return True

    # Scan the live output: "CLOSED" means qtest finished normally (no
    # crash); seeing the learned token means the same crash reproduced.
    for line in iter(rc.stdout.readline, ""):
        if "CLOSED" in line:
            return False
        if CRASH_TOKEN in line:
            return True

    print("\nWarning:")
    print("    There is no 'CLOSED'or CRASH_TOKEN in the stdout of subprocess.")
    print("    Usually this indicates a different type of crash.\n")
    return False
# If previous write commands write the same length of data at the same
# interval, we view it as a hint.
def split_write_hint(newtrace, i):
    """Predict an (address, length) pair for the write at index *i*.

    Looks at the HINT_LEN write commands immediately preceding index *i*
    (blank lines are skipped; any other command aborts the search).  When
    they all share one data length and a constant address stride, returns
    (next_address, length) extrapolated from that pattern; otherwise None.
    """
    HINT_LEN = 3  # > 2
    if i <= (HINT_LEN - 1):
        return None

    # Collect the immediately preceding run of write commands, newest first.
    collected = []
    pos = i - 1
    while len(collected) < HINT_LEN and pos >= 0:
        line = newtrace[pos]
        if line.startswith("write "):
            collected.append(line)
        elif line != "":
            return None
        pos -= 1
    if len(collected) < HINT_LEN:
        return None

    # All writes must have the same length...
    sizes = [int(w.split()[2], 16) for w in collected]
    if len(set(sizes)) != 1:
        return None

    # ...and a constant stride between consecutive addresses.
    addrs = [int(w.split()[1], 16) for w in collected]
    strides = [a - b for a, b in zip(addrs, addrs[1:])]
    if len(set(strides)) != 1:
        return None

    # Extrapolate one stride past the most recent write.
    return (addrs[0] + strides[0], sizes[0])
def remove_lines(newtrace, outpath):
    """One minimization pass over *newtrace*, mutated in place.

    Removed commands become empty strings (so indices stay stable for
    split_write_hint).  Every mutation is validated with
    check_if_trace_crashes(); changes that stop the crash reproducing are
    rolled back.
    """
    remove_step = 1
    i = 0
    while i < len(newtrace):
        # 1.) Try to remove lines completely and reproduce the crash.
        # If it works, we're done.
        if (i+remove_step) >= len(newtrace):
            remove_step = 1
        prior = newtrace[i:i+remove_step]
        for j in range(i, i+remove_step):
            newtrace[j] = ""
        print("Removing {lines} ...\n".format(lines=prior))
        if check_if_trace_crashes(newtrace, outpath):
            i += remove_step
            # Double the number of lines to remove for next round
            remove_step *= 2
            continue
        # Failed to remove multiple IOs, fast recovery
        if remove_step > 1:
            for j in range(i, i+remove_step):
                newtrace[j] = prior[j-i]
            remove_step = 1
            continue
        newtrace[i] = prior[0]  # remove_step = 1

        # 2.) Try to replace write{bwlq} commands with a write addr, len
        # command. Since this can require swapping endianness, try both LE and
        # BE options. We do this, so we can "trim" the writes in (3)
        if (newtrace[i].startswith("write") and not
                newtrace[i].startswith("write ")):
            suffix = newtrace[i].split()[0][-1]
            assert(suffix in write_suffix_lookup)
            addr = int(newtrace[i].split()[1], 16)
            value = int(newtrace[i].split()[2], 16)
            for endianness in ['<', '>']:
                data = struct.pack("{end}{size}".format(
                    end=endianness,
                    size=write_suffix_lookup[suffix][1]),
                    value)
                newtrace[i] = "write {addr} {size} 0x{data}\n".format(
                    addr=hex(addr),
                    size=hex(write_suffix_lookup[suffix][0]),
                    data=data.hex())
                if(check_if_trace_crashes(newtrace, outpath)):
                    break
            else:
                # for/else: neither endianness reproduced — restore command.
                newtrace[i] = prior[0]

        # 3.) If it is a qtest write command: write addr len data, try to split
        # it into two separate write commands. If splitting the data operand
        # from length/2^n bytes to the left does not work, try to move the pivot
        # to the right side, then add one to n, until length/2^n == 0. The idea
        # is to prune unneccessary bytes from long writes, while accommodating
        # arbitrary MemoryRegion access sizes and alignments.
        # This algorithm will fail under some rare situations.
        # e.g., xxxxxxxxxuxxxxxx (u is the unnecessary byte)
        if newtrace[i].startswith("write "):
            addr = int(newtrace[i].split()[1], 16)
            length = int(newtrace[i].split()[2], 16)
            data = newtrace[i].split()[3][2:]
            if length > 1:
                # Can we get a hint from previous writes?
                hint = split_write_hint(newtrace, i)
                if hint is not None:
                    hint_addr = hint[0]
                    hint_len = hint[1]
                    # The hinted window must lie inside this write's range.
                    if hint_addr >= addr and hint_addr+hint_len <= addr+length:
                        newtrace[i] = "write {addr} {size} 0x{data}\n".format(
                            addr=hex(hint_addr),
                            size=hex(hint_len),
                            data=data[(hint_addr-addr)*2:
                                      (hint_addr-addr)*2+hint_len*2])
                        if check_if_trace_crashes(newtrace, outpath):
                            # next round
                            i += 1
                            continue
                        newtrace[i] = prior[0]

                # Try splitting it using a binary approach
                leftlength = int(length/2)
                rightlength = length - leftlength
                newtrace.insert(i+1, "")
                power = 1
                while leftlength > 0:
                    newtrace[i] = "write {addr} {size} 0x{data}\n".format(
                        addr=hex(addr),
                        size=hex(leftlength),
                        data=data[:leftlength*2])
                    newtrace[i+1] = "write {addr} {size} 0x{data}\n".format(
                        addr=hex(addr+leftlength),
                        size=hex(rightlength),
                        data=data[leftlength*2:])
                    if check_if_trace_crashes(newtrace, outpath):
                        break
                    # move the pivot to right side
                    if leftlength < rightlength:
                        rightlength, leftlength = leftlength, rightlength
                        continue
                    power += 1
                    leftlength = int(length/pow(2, power))
                    rightlength = length - leftlength
                if check_if_trace_crashes(newtrace, outpath):
                    # Split succeeded: revisit the (shorter) left half next.
                    i -= 1
                else:
                    newtrace[i] = prior[0]
                    del newtrace[i+1]
        i += 1
def clear_bits(newtrace, outpath):
    """Minimize data operands: clear individual bits in the payload of each
    write/out command, keeping a cleared bit only when the crash still
    reproduces.  Mutates *newtrace* in place.
    """
    # try setting bits in operands of out/write to zero
    i = 0
    while i < len(newtrace):
        if (not newtrace[i].startswith("write ") and not
                newtrace[i].startswith("out")):
            i += 1
            continue
        # write ADDR SIZE DATA
        # outx ADDR VALUE
        print("\nzero setting bits: {}".format(newtrace[i]))

        prefix = " ".join(newtrace[i].split()[:-1])
        data = newtrace[i].split()[-1]
        data_bin = bin(int(data, 16))
        data_bin_list = list(data_bin)

        # data_bin looks like "0b1010...": index 2 onward are the bit chars.
        for j in range(2, len(data_bin_list)):
            prior = newtrace[i]
            if (data_bin_list[j] == '1'):
                data_bin_list[j] = '0'
                data_try = hex(int("".join(data_bin_list), 2))
                # It seems qtest only accepts padded hex-values.
                if len(data_try) % 2 == 1:
                    data_try = data_try[:2] + "0" + data_try[2:]

                newtrace[i] = "{prefix} {data_try}\n".format(
                    prefix=prefix,
                    data_try=data_try)

                # Roll back the bit if the crash no longer reproduces.
                if not check_if_trace_crashes(newtrace, outpath):
                    data_bin_list[j] = '1'
                    newtrace[i] = prior
        i += 1
def minimize_trace(inpath, outpath):
    """Drive the minimization: load the trace from *inpath*, verify it
    crashes, then repeatedly remove lines (and, with -M2, clear payload
    bits) until the trace stops shrinking.  Candidates are written to
    *outpath* on every check.

    Side effects: recalibrates the global TIMEOUT to 5x the time the
    initial reproduction took, and honors the global M1/M2 levels.

    Fix: progress message typo "trace lenth" -> "trace length".
    """
    global TIMEOUT
    with open(inpath) as f:
        trace = f.readlines()
    start = time.time()
    if not check_if_trace_crashes(trace, outpath):
        sys.exit("The input qtest trace didn't cause a crash...")
    end = time.time()
    print("Crashed in {} seconds".format(end-start))
    # Give minimized candidates 5x the initial reproduction time before
    # declaring that they do not crash.
    TIMEOUT = (end-start)*5
    print("Setting the timeout for {} seconds".format(TIMEOUT))

    newtrace = trace[:]
    global M1, M2

    # Remove lines; with -M1/-M2 enabled, iterate to a fixed point since
    # earlier removals can unlock later ones.
    old_len = len(newtrace) + 1
    while old_len > len(newtrace):
        old_len = len(newtrace)
        print("trace length = ", old_len)
        remove_lines(newtrace, outpath)
        if not M1 and not M2:
            break
        # Drop the placeholder empty strings before the next pass.
        newtrace = list(filter(lambda s: s != "", newtrace))
    assert check_if_trace_crashes(newtrace, outpath)

    # set bits to zero
    if M2:
        clear_bits(newtrace, outpath)
    assert check_if_trace_crashes(newtrace, outpath)
if __name__ == '__main__':
    # CLI: [-M1] [-M2] input_trace output_trace; QEMU_PATH/QEMU_ARGS (and
    # optionally CRASH_TOKEN) are taken from the environment.
    if len(sys.argv) < 3:
        usage()

    if "-M1" in sys.argv:
        M1 = True
    if "-M2" in sys.argv:
        M2 = True
    QEMU_PATH = os.getenv("QEMU_PATH")
    QEMU_ARGS = os.getenv("QEMU_ARGS")
    if QEMU_PATH is None or QEMU_ARGS is None:
        usage()
    # if "accel" not in QEMU_ARGS:
    #     QEMU_ARGS += " -accel qtest"
    CRASH_TOKEN = os.getenv("CRASH_TOKEN")
    # Feed qtest over stdio and silence monitor/serial so stdout carries only
    # qtest traffic and crash reports.
    QEMU_ARGS += " -qtest stdio -monitor none -serial none "
    minimize_trace(sys.argv[-2], sys.argv[-1])
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/oss-fuzz/minimize_qtest_trace.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Use this to convert qtest log info from a generic fuzzer input into a qtest
trace that you can feed into a standard qemu-system process. Example usage:
QEMU_FUZZ_ARGS="-machine q35,accel=qtest" QEMU_FUZZ_OBJECTS="*" \
./i386-softmmu/qemu-fuzz-i386 --fuzz-target=generic-pci-fuzz
# .. Finds some crash
QTEST_LOG=1 FUZZ_SERIALIZE_QTEST=1 \
QEMU_FUZZ_ARGS="-machine q35,accel=qtest" QEMU_FUZZ_OBJECTS="*" \
./i386-softmmu/qemu-fuzz-i386 --fuzz-target=generic-pci-fuzz
/path/to/crash 2> qtest_log_output
scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py qtest_log_output > qtest_trace
./i386-softmmu/qemu-fuzz-i386 -machine q35,accel=qtest \
-qtest stdio < qtest_trace
### Details ###
Some fuzzer make use of hooks that allow us to populate some memory range, just
before a DMA read from that range. This means that the fuzzer can produce
activity that looks like:
[start] read from mmio addr
[end] read from mmio addr
[start] write to pio addr
[start] fill a DMA buffer just in time
[end] fill a DMA buffer just in time
[start] fill a DMA buffer just in time
[end] fill a DMA buffer just in time
[end] write to pio addr
[start] read from mmio addr
[end] read from mmio addr
We annotate these "nested" DMA writes, so with QTEST_LOG=1 the QTest trace
might look something like:
[R +0.028431] readw 0x10000
[R +0.028434] outl 0xc000 0xbeef # Triggers a DMA read from 0xbeef and 0xbf00
[DMA][R +0.034639] write 0xbeef 0x2 0xAAAA
[DMA][R +0.034639] write 0xbf00 0x2 0xBBBB
[R +0.028431] readw 0xfc000
This script would reorder the above trace so it becomes:
readw 0x10000
write 0xbeef 0x2 0xAAAA
write 0xbf00 0x2 0xBBBB
outl 0xc000 0xbeef
readw 0xfc000
I.e. by the time, 0xc000 tries to read from DMA, those DMA buffers have already
been set up, removing the need for the DMA hooks. We can simply provide this
reordered trace via -qtest stdio to reproduce the input
Note: this won't work for traces where the device tries to read from the same
DMA region twice in between MMIO/PIO commands. E.g:
[R +0.028434] outl 0xc000 0xbeef
[DMA][R +0.034639] write 0xbeef 0x2 0xAAAA
[DMA][R +0.034639] write 0xbeef 0x2 0xBBBB
The fuzzer will annotate suspected double-fetches with [DOUBLE-FETCH]. This
script looks for these tags and warns the users that the resulting trace might
not reproduce the bug.
"""
import sys
__author__ = "Alexander Bulekov <alxndr@bu.edu>"
__copyright__ = "Copyright (C) 2020, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Alexander Bulekov"
__email__ = "alxndr@bu.edu"
def usage():
    """Print a one-line usage hint and exit with a nonzero status."""
    prog = sys.argv[0]
    sys.exit("Usage: {} /path/to/qtest_log_output".format(prog))
def main(filename):
    """Reorder [DMA] writes in a fuzzer qtest log so the log replays directly.

    Reads *filename*, keeps only logged qtest command lines, bubbles each
    [DMA] write one line up (ahead of the command that triggered it), and
    prints the bare commands to stdout.
    """
    with open(filename, "r") as f:
        trace = f.readlines()

    # Leave only lines that look like logged qtest commands
    # NOTE(review): `and` binds tighter than `or`, so any "[R +" line is kept
    # even if it contains "CLOSED". The intent may have been
    # ("[R +" in x or "[S +" in x) and "CLOSED" not in x — confirm upstream.
    trace[:] = [x.strip() for x in trace if "[R +" in x
                or "[S +" in x and "CLOSED" not in x]

    for i in range(len(trace)):
        if i+1 < len(trace):
            if "[DMA]" in trace[i+1]:
                if "[DOUBLE-FETCH]" in trace[i+1]:
                    sys.stderr.write("Warning: Likely double fetch on line"
                                     "{}.\n There will likely be problems "
                                     "reproducing behavior with the "
                                     "resulting qtest trace\n\n".format(i+1))
                # Swap the DMA write ahead of the command that triggered it.
                trace[i], trace[i+1] = trace[i+1], trace[i]
    for line in trace:
        # Strip the "[R +...]"/"[DMA]" prefixes, leaving the raw command.
        print(line.split("]")[-1].strip())


if __name__ == '__main__':
    if len(sys.argv) == 1:
        usage()
    main(sys.argv[1])
|
nvtrust-main
|
infrastructure/kvm/qemu/qemu_source/scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py
|
# -*- coding: utf-8 -*-
#
# The Linux Kernel documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx'))
from load_config import loadConfig
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include',
'kfigure', 'sphinx.ext.ifconfig', 'automarkup',
'maintainers_include', 'sphinx.ext.autosectionlabel',
'kernel_abi', 'kernel_feat']
if major >= 3:
if (major > 3) or (minor > 0 or patch >= 2):
# Sphinx c function parser is more pedantic with regards to type
# checking. Due to that, having macros at c:function cause problems.
# Those needed to be scaped by using c_id_attributes[] array
c_id_attributes = [
# GCC Compiler types not parsed by Sphinx:
"__restrict__",
# include/linux/compiler_types.h:
"__iomem",
"__kernel",
"noinstr",
"notrace",
"__percpu",
"__rcu",
"__user",
# include/linux/compiler_attributes.h:
"__alias",
"__aligned",
"__aligned_largest",
"__always_inline",
"__assume_aligned",
"__cold",
"__attribute_const__",
"__copy",
"__pure",
"__designated_init",
"__visible",
"__printf",
"__scanf",
"__gnu_inline",
"__malloc",
"__mode",
"__no_caller_saved_registers",
"__noclone",
"__nonstring",
"__noreturn",
"__packed",
"__pure",
"__section",
"__always_unused",
"__maybe_unused",
"__used",
"__weak",
"noinline",
# include/linux/memblock.h:
"__init_memblock",
"__meminit",
# include/linux/init.h:
"__init",
"__ref",
# include/linux/linkage.h:
"asmlinkage",
]
else:
extensions.append('cdomain')
# Ensure that autosectionlabel will produce unique names
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
extensions.append("sphinx.ext.imgmath")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'The Linux Kernel'
copyright = 'The kernel development community'
author = 'The kernel development community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# In a normal build, version and release are are set to KERNELVERSION and
# KERNELRELEASE, respectively, from the Makefile via Sphinx command line
# arguments.
#
# The following code tries to extract the information by reading the Makefile,
# when Sphinx is run directly (e.g. by Read the Docs).
# Best-effort: read VERSION/PATCHLEVEL from the top-level Makefile to set
# the Sphinx version/release strings when the Makefile didn't pass them.
try:
    makefile_version = None
    makefile_patchlevel = None
    for line in open('../Makefile'):
        # NOTE(review): a line without '=' yields a single-element list and
        # the unpack raises ValueError, landing in the bare `except` below —
        # so parsing stops at the first such line. Presumably VERSION and
        # PATCHLEVEL appear before one in practice; confirm.
        key, val = [x.strip() for x in line.split('=', 2)]
        if key == 'VERSION':
            makefile_version = val
        elif key == 'PATCHLEVEL':
            makefile_patchlevel = val
        if makefile_version and makefile_patchlevel:
            break
except:
    pass
finally:
    if makefile_version and makefile_patchlevel:
        version = release = makefile_version + '.' + makefile_patchlevel
    else:
        version = release = "unknown version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['output']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
primary_domain = 'c'
highlight_language = 'none'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Default theme
html_theme = 'sphinx_rtd_theme'
html_css_files = []
# Theme selection: DOCS_THEME overrides the default; the RTD themes fall
# back gracefully when their packages are not installed.
if "DOCS_THEME" in os.environ:
    html_theme = os.environ["DOCS_THEME"]

if html_theme == 'sphinx_rtd_theme' or html_theme == 'sphinx_rtd_dark_mode':
    # Read the Docs theme
    try:
        import sphinx_rtd_theme
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

        # Add any paths that contain custom static files (such as style
        # sheets) here, relative to this directory. They are copied after the
        # builtin static files, so a file named "default.css" will overwrite
        # the builtin "default.css".
        html_css_files = [
            'theme_overrides.css',
        ]

        # Read the Docs dark mode override theme
        if html_theme == 'sphinx_rtd_dark_mode':
            try:
                import sphinx_rtd_dark_mode
                extensions.append('sphinx_rtd_dark_mode')
            except ImportError:
                # Fall back to the plain RTD theme when the dark-mode package
                # is unavailable.  (Fixed: was `html_theme == ...`, a no-op
                # comparison that left the unusable dark theme selected.)
                html_theme = 'sphinx_rtd_theme'

        if html_theme == 'sphinx_rtd_theme':
            # Add color-specific RTD normal mode
            html_css_files.append('theme_rtd_colors.css')
    except ImportError:
        # No RTD theme installed at all: use Sphinx's builtin classic theme.
        html_theme = 'classic'
if "DOCS_CSS" in os.environ:
css = os.environ["DOCS_CSS"].split(" ")
for l in css:
html_css_files.append(l)
if major <= 1 and minor < 8:
html_context = {
'css_files': [],
}
for l in html_css_files:
html_context['css_files'].append('_static/' + l)
if html_theme == 'classic':
html_theme_options = {
'rightsidebar': False,
'stickysidebar': True,
'collapsiblesidebar': True,
'externalrefs': False,
'footerbgcolor': "white",
'footertextcolor': "white",
'sidebarbgcolor': "white",
'sidebarbtncolor': "black",
'sidebartextcolor': "black",
'sidebarlinkcolor': "#686bff",
'relbarbgcolor': "#133f52",
'relbartextcolor': "white",
'relbarlinkcolor': "white",
'bgcolor': "white",
'textcolor': "black",
'headbgcolor': "#f2f2f2",
'headtextcolor': "#20435c",
'headlinkcolor': "#c60f0f",
'linkcolor': "#355f7c",
'visitedlinkcolor': "#355f7c",
'codebgcolor': "#3f3f3f",
'codetextcolor': "white",
'bodyfont': "serif",
'headfont': "sans-serif",
}
sys.stderr.write("Using %s theme\n" % html_theme)
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['sphinx-static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TheLinuxKerneldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Latex figure (float) alignment
#'figure_align': 'htbp',
# Don't mangle with UTF-8 chars
'inputenc': '',
'utf8extra': '',
# Set document margins
'sphinxsetup': '''
hmargin=0.5in, vmargin=1in,
parsedliteralwraps=true,
verbatimhintsturnover=false,
''',
# For CJK One-half spacing, need to be in front of hyperref
'extrapackages': r'\usepackage{setspace}',
# Additional stuff for the LaTeX preamble.
'preamble': '''
% Use some font with UTF-8 support with XeLaTeX
\\usepackage{fontspec}
\\setsansfont{DejaVu Sans}
\\setromanfont{DejaVu Serif}
\\setmonofont{DejaVu Sans Mono}
''',
}
# Fix reference escape troubles with Sphinx 1.4.x
if major == 1:
latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
# Load kerneldoc specific LaTeX settings
latex_elements['preamble'] += '''
% Load kerneldoc specific LaTeX settings
\\input{kerneldoc-preamble.sty}
'''
# With Sphinx 1.6, it is possible to change the Bg color directly
# by using:
# \definecolor{sphinxnoteBgColor}{RGB}{204,255,255}
# \definecolor{sphinxwarningBgColor}{RGB}{255,204,204}
# \definecolor{sphinxattentionBgColor}{RGB}{255,255,204}
# \definecolor{sphinximportantBgColor}{RGB}{192,255,204}
#
# However, it require to use sphinx heavy box with:
#
# \renewenvironment{sphinxlightbox} {%
# \\begin{sphinxheavybox}
# }
# \\end{sphinxheavybox}
# }
#
# Unfortunately, the implementation is buggy: if a note is inside a
# table, it isn't displayed well. So, for now, let's use boring
# black and white notes.
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# Sorted in alphabetical order
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
]

# Add all other index files from Documentation/ subdirectories, skipping
# directories without an index.rst and entries already listed above.
for fn in os.listdir('.'):
    doc = os.path.join(fn, "index")
    if not os.path.exists(doc + ".rst"):
        continue
    if any(entry[0] == doc for entry in latex_documents):
        continue
    latex_documents.append((doc, fn + '.tex',
                            'Linux %s Documentation' % fn.capitalize(),
                            'The kernel development community',
                            'manual'))
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# Additional LaTeX stuff to be copied to build directory
# Extra files copied verbatim into the LaTeX build directory so that the
# \input{kerneldoc-preamble.sty} in the preamble above can find them.
latex_additional_files = [
    'sphinx/kerneldoc-preamble.sty',
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# "man" builder output: a single manual page covering the whole
# documentation, installed in manual section 1.
man_pages = [
    (master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# One Texinfo document covering everything; the "One line description"
# string is the upstream placeholder that was never filled in.
texinfo_documents = [
    (master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
     author, 'TheLinuxKernel', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
# Dublin Core metadata for the epub builder; reuses the project-level
# strings (project/author/copyright) defined earlier in this file.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
# HTML-builder artifacts that must not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
#=======
# rst2pdf
#
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# See the Sphinx chapter of https://ralsina.me/static/manual.pdf
#
# FIXME: Do not add the index file here; the result will be too big. Adding
# multiple PDF files here actually tries to get the cross-referencing right
# *between* PDF files.
# rst2pdf document list: (source start file, target name, title, author).
pdf_documents = [
    ('kernel-documentation', u'Kernel', u'Kernel', u'J. Random Bozo'),
]
# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
# the Docs). In a normal build, these are supplied from the Makefile via command
# line arguments.
# Defaults for the kernel-doc extension when Sphinx is run standalone
# (e.g. by Read the Docs); a normal kernel build overrides both from the
# Makefile via command-line -D arguments.
kerneldoc_bin = '../scripts/kernel-doc'
kerneldoc_srctree = '..'
# ------------------------------------------------------------------------------
# Since loadConfig overwrites settings from the global namespace, it has to be
# the last statement in the conf.py file
# ------------------------------------------------------------------------------
loadConfig(globals())
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/conf.py
|
#!/usr/bin/env python
# add symbolic names to read_msr / write_msr in trace
# decode_msr msr-index.h < trace
import sys
import re

# Map MSR number -> symbolic name, parsed from msr-index.h (first CLI
# argument, defaulting to "msr-index.h" in the current directory).
msrs = dict()
with open(sys.argv[1] if len(sys.argv) > 1 else "msr-index.h", "r") as f:
    for j in f:
        m = re.match(r'#define (MSR_\w+)\s+(0x[0-9a-fA-F]+)', j)
        if m:
            msrs[int(m.group(2), 16)] = m.group(1)

# Numbered MSR ranges that msr-index.h defines only via a base address;
# (template, first MSR number, last MSR number). The %d placeholder is
# filled with the index within the range.
extra_ranges = (
    ( "MSR_LASTBRANCH_%d_FROM_IP", 0x680, 0x69F ),
    ( "MSR_LASTBRANCH_%d_TO_IP", 0x6C0, 0x6DF ),
    ( "LBR_INFO_%d", 0xdc0, 0xddf ),
)
# Filter stdin to stdout, annotating read_msr/write_msr trace events with
# the symbolic MSR name, e.g. " c0000080" -> " MSR_EFER(c0000080)".
# Lines without an MSR event pass through unchanged.
for j in sys.stdin:
    m = re.search(r'(read|write)_msr:\s+([0-9a-f]+)', j)
    if m:
        r = None
        num = int(m.group(2), 16)
        if num in msrs:
            r = msrs[num]
        else:
            # Fall back to the numbered register ranges defined above.
            for er in extra_ranges:
                if er[1] <= num <= er[2]:
                    r = er[0] % (num - er[1],)
                    break
        if r:
            # Replace only the " <hex>" occurrence so other fields survive.
            j = j.replace(" " + m.group(2), " " + r + "(" + m.group(2) + ")")
    # Python 2 print statement; trailing comma suppresses the extra newline
    # since j still carries its own.
    print j,
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/trace/postprocess/decode_msr.py
|
#!/usr/bin/env python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse

# Shared mutable state for the generator functions below.
# NOTE(review): main() assigns a *local* tcm_dir, so this global appears
# to stay "" — the value is threaded through parameters instead.
tcm_dir = ""
# Function-pointer lines scraped from target_core_fabric.h by
# tcm_mod_scan_fabric_ops(), consumed by tcm_mod_dump_fabric_ops().
fabric_ops = []
fabric_mod_dir = ""
# Port naming set by the protocol-specific *_include builders:
# "lport"/"nport" for FC, "tport"/"iport" for SAS and iSCSI.
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
    """Report a fatal generator error and terminate with exit status 1."""
    # Single parenthesized expression: prints identically on Python 2 and 3.
    print(msg)
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the new fabric module's directory.

    Returns 1 if the directory already exists (the caller treats that as
    a fatal condition), None after creating it. Exits via tcm_mod_err()
    if the directory cannot be created.
    """
    if os.path.isdir(fabric_mod_dir_var):
        return 1
    print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
    # os.mkdir() returns None and signals failure by raising OSError, so
    # the original "ret = os.mkdir(...); if ret:" check could never fire.
    # Catch the exception so a failure is actually reported.
    try:
        os.mkdir(fabric_mod_dir_var)
    except OSError as e:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var + ": " + str(e))
    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for a Fibre Channel fabric module.

    Emits the tpg/lport C struct definitions and records the FC port
    naming ("lport"/"nport") in the module-level globals consumed by the
    other generator functions.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    # NOTE(review): open() raises on failure rather than returning a falsy
    # object, so this check can never fire.
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* FC lport target portal group tag for TCM */\n"
    buf += "\tu16 lport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += "\tstruct " + fabric_mod_name + "_lport *lport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += "\t/* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += "\tu64 lport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for FC Target Lport */\n"
    buf += "\tchar lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += "\tstruct se_wwn lport_wwn;\n"
    buf += "};\n"
    ret = p.write(buf)
    # NOTE(review): on Python 2 file.write() returns None, so this error
    # branch is dead (and on Python 3 it would fire spuriously, since
    # write() returns the character count there).
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    # Tell the other generators which wwn/port naming FC uses.
    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for a SAS fabric module.

    Same shape as the FC variant, but with SAS "tport"/"iport" naming.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    # NOTE(review): dead check — open() raises on failure.
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* SAS port target portal group tag for TCM */\n"
    buf += "\tu16 tport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "\tstruct " + fabric_mod_name + "_tport *tport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "\t/* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += "\tu64 tport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for SAS Target port */\n"
    buf += "\tchar tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "\tstruct se_wwn tport_wwn;\n"
    buf += "};\n"
    ret = p.write(buf)
    # NOTE(review): same dead/wrong write-return check as the FC variant.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for an iSCSI fabric module.

    Like the SAS variant, but the tport carries only an IQN name string
    (no binary wwpn member).
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    # NOTE(review): dead check — open() raises on failure.
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* iSCSI target portal group tag for TCM */\n"
    buf += "\tu16 tport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "\tstruct " + fabric_mod_name + "_tport *tport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "\t/* ASCII formatted TargetName for IQN */\n"
    buf += "\tchar tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "\tstruct se_wwn tport_wwn;\n"
    buf += "};\n"
    ret = p.write(buf)
    # NOTE(review): same dead/wrong write-return check as the FC variant.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the protocol-specific <mod>_base.h generator.

    Exits with status 1 on an unknown protocol identifier.
    """
    builders = {
        "FC": tcm_mod_build_FC_include,
        "SAS": tcm_mod_build_SAS_include,
        "iSCSI": tcm_mod_build_iSCSI_include,
    }
    builder = builders.get(proto_ident)
    if builder is None:
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    builder(fabric_mod_dir_val, fabric_mod_name)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_configfs.c.

    Emits the configfs glue for the new fabric module: make/drop
    callbacks for tpg and wwn objects, the target_core_fabric_ops table,
    and module init/exit. Uses the fabric_mod_port global ("lport" or
    "tport") set earlier by the protocol-specific include builder.
    """
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f
    p = open(f, 'w');
    # NOTE(review): dead check — open() raises on failure.
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi_proto.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += "\tstruct se_wwn *wwn,\n"
    buf += "\tstruct config_group *group,\n"
    buf += "\tconst char *name)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += "\t\t\tstruct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += "\tstruct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += "\tunsigned long tpgt;\n"
    buf += "\tint ret;\n\n"
    buf += "\tif (strstr(name, \"tpgt_\") != name)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL);\n"
    buf += "\tif (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL);\n\n"
    buf += "\ttpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += "\tif (!tpg) {\n"
    buf += "\t\tprintk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += "\t\treturn ERR_PTR(-ENOMEM);\n"
    buf += "\t}\n"
    buf += "\ttpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += "\ttpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    # The SCSI protocol id passed to core_tpg_register() differs per fabric.
    if proto_ident == "FC":
        buf += "\tret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
    elif proto_ident == "SAS":
        buf += "\tret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
    elif proto_ident == "iSCSI":
        buf += "\tret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
    buf += "\tif (ret < 0) {\n"
    buf += "\t\tkfree(tpg);\n"
    buf += "\t\treturn NULL;\n"
    buf += "\t}\n"
    buf += "\treturn &tpg->se_tpg;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += "\tcore_tpg_deregister(se_tpg);\n"
    buf += "\tkfree(tpg);\n"
    buf += "}\n\n"
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += "\tstruct target_fabric_configfs *tf,\n"
    buf += "\tstruct config_group *group,\n"
    buf += "\tconst char *name)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    # Only FC and SAS carry a binary wwpn; iSCSI identifies by IQN string.
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\tu64 wwpn = 0;\n\n"
    buf += "\t/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL); */\n\n"
    buf += "\t" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += "\tif (!" + fabric_mod_port + ") {\n"
    buf += "\t\tprintk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += "\t\treturn ERR_PTR(-ENOMEM);\n"
    buf += "\t}\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\t" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
        buf += "\t/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += "\treturn &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += "\t\t\tstruct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += "\tkfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"
    # Fabric ops table: names reference the stub functions generated by
    # tcm_mod_dump_fabric_ops() into <mod>_fabric.c.
    buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += "\t.module = THIS_MODULE,\n"
    buf += "\t.name = \"" + fabric_mod_name + "\",\n"
    buf += "\t.get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += "\t.tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += "\t.tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += "\t.tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += "\t.tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += "\t.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += "\t.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += "\t.tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += "\t.release_cmd = " + fabric_mod_name + "_release_cmd,\n"
    buf += "\t.sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += "\t.sess_get_initiator_sid = NULL,\n"
    buf += "\t.write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += "\t.set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += "\t.get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += "\t.queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += "\t.queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += "\t.queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += "\t.aborted_task = " + fabric_mod_name + "_aborted_task,\n"
    buf += "\t/*\n"
    buf += "\t * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += "\t */\n"
    buf += "\t.fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += "\t.fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += "\t.fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += "\t.fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += "};\n\n"
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += "\treturn target_register_template(&" + fabric_mod_name + "_ops);\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += "\ttarget_unregister_template(&" + fabric_mod_name + "_ops);\n"
    buf += "};\n\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"
    ret = p.write(buf)
    # NOTE(review): dead on Python 2 (write() returns None); would fire
    # spuriously on Python 3.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """Populate the global fabric_ops list with the function-pointer
    declaration lines scraped from target_core_fabric.h.

    Lines are recognized purely by containing "(*" (a C function-pointer
    member); everything else is skipped.
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;
    p = open(fabric_ops_api, 'r')
    line = p.readline()
    while line:
        # Skip the struct opener itself.
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue
        if process_fo == 0:
            # NOTE(review): this flips to collection mode on the first
            # line that is NOT the struct opener, i.e. effectively from
            # the top of the file — it does not wait for the struct.
            # It still works because only "(*" lines are collected.
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue
            fabric_ops.append(line.rstrip())
            continue
        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue
        fabric_ops.append(line.rstrip())
    p.close()
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <mod>_fabric.c (stub implementations) and <mod>_fabric.h
    (matching prototypes) for every fabric op collected into the global
    fabric_ops list by tcm_mod_scan_fabric_ops().

    buf accumulates the .c file, bufi the .h file; each recognized op
    appends a no-op stub to both.
    """
    buf = ""
    bufi = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f
    p = open(f, 'w')
    # NOTE(review): dead check — open() raises on failure.
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi
    pi = open(fi, 'w')
    # NOTE(review): dead check, as above.
    if not pi:
        tcm_mod_err("Unable to open file: " + fi)
    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi_common.h>\n"
    buf += "#include <scsi/scsi_proto.h>\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    # check_true/check_false are always emitted; the fabric_ops table in
    # the configfs file references them unconditionally.
    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "\treturn 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "\treturn 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
    total_fabric_ops = len(fabric_ops)
    i = 0
    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo
        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += "\treturn \"" + fabric_mod_name + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue
        # The remaining checks deliberately fall through so one line can
        # match at most one of them in sequence.
        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += "\treturn &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "\treturn tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "\treturn 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
        if re.search('\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += "\treturn;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
        if re.search('queue_tm_rsp\)\(', fo):
            buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
        if re.search('aborted_task\)\(', fo):
            buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
    ret = p.write(buf)
    # NOTE(review): dead on Python 2, spurious on Python 3 (see other
    # generators).
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    ret = pi.write(bufi)
    if ret:
        tcm_mod_err("Unable to write fi: " + fi)
    pi.close()
    return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    """Write the Kbuild Makefile for the new fabric module.

    Exits via tcm_mod_err() if the Makefile cannot be written.
    """
    f = fabric_mod_dir_var + "/Makefile"
    print("Writing file: " + f)
    buf = fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += "\t\t\t   " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
    # open()/write() signal failure by raising, not via their return value
    # (write() even returns a character count on Python 3), so the original
    # "if not p:" / "if ret:" checks were dead or wrong. Report real I/O
    # failures and always close the file.
    try:
        p = open(f, 'w')
        try:
            p.write(buf)
        finally:
            p.close()
    except (IOError, OSError) as e:
        tcm_mod_err("Unable to write f: " + f + ": " + str(e))
    return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    """Write the Kconfig entry for the new fabric module.

    Exits via tcm_mod_err() if the Kconfig cannot be written.
    """
    f = fabric_mod_dir_var + "/Kconfig"
    print("Writing file: " + f)
    # Kconfig body uses tab indentation per kernel convention.
    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += "\ttristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += "\tdepends on TARGET_CORE && CONFIGFS_FS\n"
    buf += "\tdefault n\n"
    buf += "\thelp\n"
    buf += "\t  Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
    # Same fix as tcm_mod_build_kbuild(): the old "if not p:" / "if ret:"
    # error checks never worked; rely on exceptions instead.
    try:
        p = open(f, 'w')
        try:
            p.write(buf)
        finally:
            p.close()
    except (IOError, OSError) as e:
        tcm_mod_err("Unable to write f: " + f + ": " + str(e))
    return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new module's obj-$(CONFIG_...) rule to
    drivers/target/Makefile under the given kernel tree root."""
    kbuild = tcm_dir + "/drivers/target/Makefile"
    entry = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    with open(kbuild, 'a') as fh:
        fh.write(entry)
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a source line for the new module's Kconfig to
    drivers/target/Kconfig under the given kernel tree root."""
    kconfig = tcm_dir + "/drivers/target/Kconfig"
    entry = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    with open(kconfig, 'a') as fh:
        fh.write(entry)
    return
def main(modname, proto_ident):
    """Drive the whole generation: create the module directory, emit the
    base header, fabric stubs, configfs glue, Makefile and Kconfig, then
    interactively offer to wire the module into drivers/target.

    proto_ident must be one of "FC", "SAS" or "iSCSI".
    """
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"
    # Assumes the script is run from Documentation/target/, two levels
    # below the kernel tree root.
    tcm_dir = os.getcwd();
    tcm_dir += "/../../"
    print "tcm_dir: " + tcm_dir
    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print "Set fabric_mod_name: " + fabric_mod_name
    print "Set fabric_mod_dir: " + fabric_mod_dir
    print "Using proto_ident: " + proto_ident
    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)
    # Returns 1 (truthy) when the directory already exists.
    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print "tcm_mod_create_module_subdir() failed because module already exists!"
        sys.exit(1)
    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
    # Interactive (Python 2 raw_input) opt-in for editing the shared
    # kernel build files.
    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
    return
# Command-line interface: both options are mandatory.
# NOTE(review): parse_args() and the mandatory-option check run at import
# time, not under the __main__ guard — importing this file as a module
# would parse the importer's argv.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

# optparse has no "required" flag, so enforce the mandatory options by hand.
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/target/tcm_mod_builder.py
|
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct

# Read the packed little-endian u32 list from stdin, four bytes at a
# time, and print "index=value" pairs (index in hex) separated by spaces.
# NOTE(review): Python 2 script — sys.stdin.read() must yield byte
# strings for struct.unpack; under Python 3 stdin would need to be
# opened in binary mode.
i = 0
while True:
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a value boundary: done.
        break
    elif len(buf) != 4:
        # Trailing partial value — the input is malformed/mis-aligned.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/networking/device_drivers/atm/cxacru-cf.py
|
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
u"""
kernel-feat
~~~~~~~~~~~
Implementation of the ``kernel-feat`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:copyright: Copyright (C) 2016-2019 Mauro Carvalho Chehab
:maintained-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The ``kernel-feat`` (:py:class:`KernelFeat`) directive calls the
scripts/get_feat.pl script to parse the Kernel ABI files.
Overview of directive's argument and options.
.. code-block:: rst
.. kernel-feat:: <ABI directory location>
:debug:
The argument ``<ABI directory location>`` is required. It contains the
location of the ABI files to be parsed.
``debug``
Inserts a code-block with the *raw* reST. Sometimes it is helpful to see
what reST is generated.
"""
import codecs
import os
import re
import subprocess
import sys
from os import path
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from docutils.utils.error_reporting import ErrorString
from sphinx.util.docutils import switch_source_input
__version__ = '1.0'
def setup(app):
    """Sphinx extension entry point: register the kernel-feat directive
    and declare the extension safe for parallel builds."""
    app.add_directive("kernel-feat", KernelFeat)
    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
class KernelFeat(Directive):

    u"""KernelFeat (``kernel-feat``) directive.

    Runs scripts/get_feat.pl over the given feature-file directory and
    nested-parses its reST output into the current document.
    """

    # .. kernel-feat:: <dir> [<arch>]  with an optional :debug: flag.
    required_arguments = 1
    optional_arguments = 2
    has_content = False
    final_argument_whitespace = True

    option_spec = {
        "debug": directives.flag
    }

    def warn(self, message, **replace):
        """Emit a location-prefixed kernel-feat warning through Sphinx.

        NOTE(review): app.warn() is deprecated in newer Sphinx releases;
        verify against the Sphinx version in use.
        """
        replace["fname"] = self.state.document.current_source
        replace["line_no"] = replace.get("line_no", self.lineno)
        message = ("%(fname)s:%(line_no)s: [kernel-feat WARN] : " + message) % replace
        self.state.document.settings.env.app.warn(message, prefix="")

    def run(self):
        """Execute get_feat.pl and return the parsed docutils nodes."""
        doc = self.state.document
        if not doc.settings.file_insertion_enabled:
            raise self.warning("docutils: file insertion disabled")
        env = doc.settings.env
        cwd = path.dirname(doc.current_source)
        cmd = "get_feat.pl rest --enable-fname --dir "
        cmd += self.arguments[0]
        # Optional second argument selects the architecture.
        if len(self.arguments) > 1:
            cmd += " --arch " + self.arguments[1]
        srctree = path.abspath(os.environ["srctree"])
        # Fallback "filename" for error reporting until a ".. FILE" marker
        # names the real source file.
        fname = cmd
        # extend PATH with $(srctree)/scripts
        path_env = os.pathsep.join([
            srctree + os.sep + "scripts",
            os.environ["PATH"]
        ])
        shell_env = os.environ.copy()
        shell_env["PATH"] = path_env
        shell_env["srctree"] = srctree
        lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
        # get_feat.pl emits ".. FILE <path>" markers naming the feature
        # file each following chunk came from; strip them from the output
        # and register the files as build dependencies.
        line_regex = re.compile("^\.\. FILE (\S+)$")
        out_lines = ""
        for line in lines.split("\n"):
            match = line_regex.search(line)
            if match:
                fname = match.group(1)
                # Add the file to Sphinx build dependencies
                env.note_dependency(os.path.abspath(fname))
            else:
                out_lines += line + "\n"
        nodeList = self.nestedParse(out_lines, fname)
        return nodeList

    def runCmd(self, cmd, **kwargs):
        u"""Run command ``cmd`` and return its stdout as unicode."""
        try:
            proc = subprocess.Popen(
                cmd
                , stdout = subprocess.PIPE
                , stderr = subprocess.PIPE
                , **kwargs
            )
            out, err = proc.communicate()
            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
            if proc.returncode != 0:
                raise self.severe(
                    u"command '%s' failed with return code %d"
                    % (cmd, proc.returncode)
                )
        except OSError as exc:
            raise self.severe(u"problems with '%s' directive: %s."
                              % (self.name, ErrorString(exc)))
        return out

    def nestedParse(self, lines, fname):
        """Nested-parse the generated reST (optionally wrapped in a
        literal code-block when :debug: is set) and return the resulting
        child nodes."""
        content = ViewList()
        node = nodes.section()
        if "debug" in self.options:
            # :debug: — show the raw generated reST instead of rendering it.
            code_block = "\n\n.. code-block:: rst\n    :linenos:\n"
            for l in lines.split("\n"):
                code_block += "\n    " + l
            lines = code_block + "\n\n"
        for c, l in enumerate(lines.split("\n")):
            content.append(l, fname, c)
        # NOTE(review): this saved state tuple is never restored — appears
        # to be a leftover from an older save/restore pattern.
        buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
        with switch_source_input(self.state, content):
            self.state.nested_parse(content, 0, node, match_titles=1)
        return node.children
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/kernel_feat.py
|
# -*- coding: utf-8; mode: python -*-
# pylint: disable=W0141,C0113,C0103,C0325
u"""
cdomain
~~~~~~~
Replacement for the sphinx c-domain.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
List of customizations:
* Moved the *duplicate C object description* warnings for function
declarations in the nitpicky mode. See Sphinx documentation for
the config values for ``nitpick`` and ``nitpick_ignore``.
* Add option 'name' to the "c:function:" directive. With option 'name' the
ref-name of a function can be modified. E.g.::
.. c:function:: int ioctl( int fd, int request )
:name: VIDIOC_LOG_STATUS
The func-name (e.g. ioctl) remains in the output but the ref-name changed
from 'ioctl' to 'VIDIOC_LOG_STATUS'. The function is referenced by::
* :c:func:`VIDIOC_LOG_STATUS` or
* :any:`VIDIOC_LOG_STATUS` (``:any:`` needs sphinx 1.3)
* Handle signatures of function-like macros well. Don't try to deduce
arguments types of function-like macros.
"""
from docutils import nodes
from docutils.parsers.rst import directives
import sphinx
from sphinx import addnodes
from sphinx.domains.c import c_funcptr_sig_re, c_sig_re
from sphinx.domains.c import CObject as Base_CObject
from sphinx.domains.c import CDomain as Base_CDomain
from itertools import chain
import re
__version__ = '1.1'
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
# Namespace to be prepended to the full name
namespace = None
#
# Handle trivial newer c domain tags that are part of Sphinx 3.1 c domain tags
# - Store the namespace if ".. c:namespace::" tag is found
#
RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')

def markup_namespace(match):
    """Record the namespace from a ``.. c:namespace::`` tag.

    The tag line itself is dropped from the output (empty replacement).
    """
    global namespace

    namespace = match.group(1)

    return ""
#
# Handle c:macro for function-style declaration
#
RE_macro = re.compile(r'^\s*..\s*c:macro::\s*(\S+)\s+(\S.*)\s*$')

def markup_macro(match):
    """Rewrite a function-style ``.. c:macro::`` as a ``.. c:function::``."""
    macro_name = match.group(1)
    macro_args = match.group(2)
    return ".. c:function:: " + macro_name + ' ' + macro_args
#
# Handle newer c domain tags that are evaluated as .. c:type: for
# backward-compatibility with Sphinx < 3.0
#
RE_ctype = re.compile(r'^\s*..\s*c:(struct|union|enum|enumerator|alias)::\s*(.*)$')

def markup_ctype(match):
    """Map a newer C-domain type tag to the pre-3.0 ``.. c:type::`` form."""
    declaration = match.group(2)
    return ".. c:type:: " + declaration
#
# Handle newer c domain tags that are evaluated as :c:type: for
# backward-compatibility with Sphinx < 3.0
#
RE_ctype_refs = re.compile(r':c:(var|struct|union|enum|enumerator)::`([^\`]+)`')

def markup_ctype_refs(match):
    """Map newer C-domain cross-references to the ``:c:type:`` role."""
    target = match.group(2)
    return ":c:type:`" + target + '`'
#
# Simply convert :c:expr: and :c:texpr: into a literal block.
#
RE_expr = re.compile(r':c:(expr|texpr):`([^\`]+)`')

def markup_c_expr(match):
    """Convert a ``:c:expr:`` / ``:c:texpr:`` reference to inline literal text."""
    # Fix: use raw strings.  '\ ' is not a recognized escape sequence, so
    # the original non-raw literals produced SyntaxWarning on Python >= 3.12.
    # The byte content (backslash + space, i.e. reST escaped whitespace)
    # is unchanged.
    return r'\ ``' + match.group(2) + r'``\ '
#
# Parse Sphinx 3.x C markups, replacing them by backward-compatible ones
#
def c_markups(app, docname, source):
    """'source-read' hook: rewrite Sphinx 3.x C-domain markup in place.

    ``source`` is a one-element list holding the whole document text;
    the translated text is stored back into ``source[0]``.  Each regex in
    ``markup_func`` maps to the handler that produces its replacement.
    """
    result = ""
    markup_func = {
        RE_namespace: markup_namespace,
        RE_expr: markup_c_expr,
        RE_macro: markup_macro,
        RE_ctype: markup_ctype,
        RE_ctype_refs: markup_ctype_refs,
    }

    lines = iter(source[0].splitlines(True))
    for n in lines:
        # Collect every markup match on this line, ordered by start offset.
        match_iterators = [regex.finditer(n) for regex in markup_func]
        matches = sorted(chain(*match_iterators), key=lambda m: m.start())
        for m in matches:
            # NOTE(review): the match spans were computed against the
            # original line; once a substitution changes the line length,
            # later spans are stale.  Looks like at most one markup per
            # line is expected -- confirm before relying on multiple
            # matches per line.
            n = n[:m.start()] + markup_func[m.re](m) + n[m.end():]
        result = result + n
    source[0] = result
#
# Now implements support for the cdomain namespacing logic
#
def setup(app):
    """Register the markup translator and the kernel C domain with Sphinx."""
    # Handle easy Sphinx 3.1+ simple new tags: :c:expr and .. c:namespace::
    app.connect('source-read', c_markups)

    # Sphinx < 1.8 has no ``override`` keyword on add_domain().
    if major == 1 and minor < 8:
        app.override_domain(CDomain)
    else:
        app.add_domain(CDomain, override=True)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
class CObject(Base_CObject):

    """
    Description of a C language object.
    """
    # ``:name:`` lets a function declaration override its reference name
    # (e.g. referencing an ioctl by its request name).
    option_spec = {
        "name" : directives.unchanged
    }

    def handle_func_like_macro(self, sig, signode):
        u"""Handles signatures of function-like macros.

        If the objtype is 'function' and the signature ``sig`` is a
        function-like macro, the name of the macro is returned. Otherwise
        ``False`` is returned.  """
        global namespace

        # Only function declarations can be function-like macros.
        if not self.objtype == 'function':
            return False

        m = c_funcptr_sig_re.match(sig)
        if m is None:
            m = c_sig_re.match(sig)
            if m is None:
                raise ValueError('no match')

        rettype, fullname, arglist, _const = m.groups()
        arglist = arglist.strip()
        # A macro has no return type; an empty argument list also rules
        # out the function-like-macro form.
        if rettype or not arglist:
            return False

        arglist = arglist.replace('`', '').replace('\\ ', '')  # remove markup
        arglist = [a.strip() for a in arglist.split(",")]

        # has the first argument a type?
        if len(arglist[0].split(" ")) > 1:
            return False

        # This is a function-like macro; its arguments are typeless!
        signode += addnodes.desc_name(fullname, fullname)
        paramlist = addnodes.desc_parameterlist()
        signode += paramlist

        for argname in arglist:
            param = addnodes.desc_parameter('', '', noemph=True)
            # separate by non-breaking space in the output
            param += nodes.emphasis(argname, argname)
            paramlist += param

        if namespace:
            fullname = namespace + "." + fullname

        return fullname

    def handle_signature(self, sig, signode):
        """Transform a C signature into RST nodes."""
        global namespace

        fullname = self.handle_func_like_macro(sig, signode)
        if not fullname:
            # Not a function-like macro: fall back to the stock c-domain
            # signature handling.
            fullname = super(CObject, self).handle_signature(sig, signode)

        if "name" in self.options:
            if self.objtype == 'function':
                # ``:name:`` overrides the reference name of the function.
                fullname = self.options["name"]
            else:
                # FIXME: handle :name: value of other declaration types?
                pass
        else:
            if namespace:
                fullname = namespace + "." + fullname

        return fullname

    def add_target_and_index(self, name, sig, signode):
        # for C API items we add a prefix since names are usually not qualified
        # by a module name and so easily clash with e.g. section titles
        targetname = 'c.' + name
        if targetname not in self.state.document.ids:
            signode['names'].append(targetname)
            signode['ids'].append(targetname)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)

            inv = self.env.domaindata['c']['objects']
            # Duplicate C object descriptions are only reported in nitpicky
            # mode, and only for functions not listed in nitpick_ignore.
            if (name in inv and self.env.config.nitpicky):
                if self.objtype == 'function':
                    if ('c:func', name) not in self.env.config.nitpick_ignore:
                        self.state_machine.reporter.warning(
                            'duplicate C object description of %s, ' % name +
                            'other instance in ' + self.env.doc2path(inv[name][0]),
                            line=self.lineno)
            inv[name] = (self.env.docname, self.objtype)

        indextext = self.get_index_text(name)
        if indextext:
            self.indexnode['entries'].append(
                ('single', indextext, targetname, '', None))
class CDomain(Base_CDomain):

    """C language domain."""

    name = 'c'
    label = 'C'

    # Every kernel C-domain directive is rendered by the same CObject class.
    directives = {
        key: CObject
        for key in ('function', 'member', 'macro', 'type', 'var')
    }
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/cdomain.py
|
#!/usr/bin/env python3
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
u"""
kernel-include
~~~~~~~~~~~~~~
Implementation of the ``kernel-include`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see linux/COPYING for details.
The ``kernel-include`` reST-directive is a replacement for the ``include``
directive. The ``kernel-include`` directive expand environment variables in
the path name and allows to include files from arbitrary locations.
.. hint::
Including files from arbitrary locations (e.g. from ``/etc``) is a
security risk for builders. This is why the ``include`` directive from
docutils *prohibit* pathnames pointing to locations *above* the filesystem
tree where the reST document with the include directive is placed.
Substrings of the form $name or ${name} are replaced by the value of
environment variable name. Malformed variable names and references to
non-existing variables are left unchanged.
"""
# ==============================================================================
# imports
# ==============================================================================
import os.path
from docutils import io, nodes, statemachine
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
from docutils.parsers.rst.directives.misc import Include
__version__ = '1.0'
# ==============================================================================
def setup(app):
    """Sphinx extension entry point: register the ``kernel-include`` directive."""
    app.add_directive("kernel-include", KernelInclude)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
# ==============================================================================
class KernelInclude(Include):
    u"""KernelInclude (``kernel-include``) directive.

    Like docutils' ``include``, but expands environment variables in the
    path and allows absolute locations outside the reST source tree.
    """

    def run(self):
        """Resolve the environment-expanded path and include the file."""
        env = self.state.document.settings.env
        # Expand $VAR / ${VAR} references, then canonicalize the path.
        path = os.path.realpath(
            os.path.expandvars(self.arguments[0]))

        # to get a bit security back, prohibit /etc:
        if path.startswith(os.sep + "etc"):
            raise self.severe(
                'Problems with "%s" directive, prohibited path: %s'
                % (self.name, path))

        self.arguments[0] = path

        # Rebuild the document whenever the included file changes.
        env.note_dependency(os.path.abspath(path))
        #return super(KernelInclude, self).run() # won't work, see HINTs in _run()
        return self._run()

    def _run(self):
        """Include a file as part of the content of this reST file."""

        # HINT: I had to copy&paste the whole Include.run method. I'am not
        # happy with this, but due to security reasons, the Include.run
        # method does not allow absolute or relative pathnames pointing to
        # locations *above* the filesystem tree where the reST document is
        # placed.

        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)
        source_dir = os.path.dirname(os.path.abspath(source))
        path = directives.path(self.arguments[0])
        if path.startswith('<') and path.endswith('>'):
            # <name> refers to a file in docutils' standard include dir.
            path = os.path.join(self.standard_include_path, path[1:-1])
        path = os.path.normpath(os.path.join(source_dir, path))

        # HINT: this is the only line I had to change / commented out:
        #path = utils.relative_path(None, path)

        path = nodes.reprunicode(path)
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler = self.state.document.settings.input_encoding_error_handler
        tab_width = self.options.get(
            'tab-width', self.state.document.settings.tab_width)
        try:
            self.state.document.settings.record_dependencies.add(path)
            include_file = io.FileInput(source_path=path,
                                        encoding=encoding,
                                        error_handler=e_handler)
        except UnicodeEncodeError as error:
            raise self.severe('Problems with "%s" directive path:\n'
                              'Cannot encode input file path "%s" '
                              '(wrong locale?).' %
                              (self.name, SafeString(path)))
        except IOError as error:
            raise self.severe('Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))

        # Optional line-range selection (``:start-line:`` / ``:end-line:``).
        startline = self.options.get('start-line', None)
        endline = self.options.get('end-line', None)
        try:
            if startline or (endline is not None):
                lines = include_file.readlines()
                rawtext = ''.join(lines[startline:endline])
            else:
                rawtext = include_file.read()
        except UnicodeError as error:
            raise self.severe('Problem with "%s" directive:\n%s' %
                              (self.name, ErrorString(error)))
        # start-after/end-before: no restrictions on newlines in match-text,
        # and no restrictions on matching inside lines vs. line boundaries
        after_text = self.options.get('start-after', None)
        if after_text:
            # skip content in rawtext before *and incl.* a matching text
            after_index = rawtext.find(after_text)
            if after_index < 0:
                raise self.severe('Problem with "start-after" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[after_index + len(after_text):]
        before_text = self.options.get('end-before', None)
        if before_text:
            # skip content in rawtext after *and incl.* a matching text
            before_index = rawtext.find(before_text)
            if before_index < 0:
                raise self.severe('Problem with "end-before" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[:before_index]

        include_lines = statemachine.string2lines(rawtext, tab_width,
                                                  convert_whitespace=True)
        if 'literal' in self.options:
            # Convert tabs to spaces, if `tab_width` is positive.
            if tab_width >= 0:
                text = rawtext.expandtabs(tab_width)
            else:
                text = rawtext
            literal_block = nodes.literal_block(rawtext, source=path,
                                                classes=self.options.get('class', []))
            literal_block.line = 1
            self.add_name(literal_block)
            if 'number-lines' in self.options:
                try:
                    startline = int(self.options['number-lines'] or 1)
                except ValueError:
                    raise self.error(':number-lines: with non-integer '
                                     'start value')
                endline = startline + len(include_lines)
                if text.endswith('\n'):
                    text = text[:-1]
                tokens = NumberLines([([], text)], startline, endline)
                for classes, value in tokens:
                    if classes:
                        literal_block += nodes.inline(value, value,
                                                      classes=classes)
                    else:
                        literal_block += nodes.Text(value, value)
            else:
                literal_block += nodes.Text(text, text)
            return [literal_block]
        if 'code' in self.options:
            self.options['source'] = path
            codeblock = CodeBlock(self.name,
                                  [self.options.pop('code')], # arguments
                                  self.options,
                                  include_lines, # content
                                  self.lineno,
                                  self.content_offset,
                                  self.block_text,
                                  self.state,
                                  self.state_machine)
            return codeblock.run()

        # Default: splice the included lines into the reST input stream.
        self.state_machine.insert_input(include_lines, path)
        return []
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/kernel_include.py
|
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
u"""
maintainers-include
~~~~~~~~~~~~~~~~~~~
Implementation of the ``maintainers-include`` reST-directive.
:copyright: Copyright (C) 2019 Kees Cook <keescook@chromium.org>
:license: GPL Version 2, June 1991 see linux/COPYING for details.
The ``maintainers-include`` reST-directive performs extensive parsing
specific to the Linux kernel's standard "MAINTAINERS" file, in an
effort to avoid needing to heavily mark up the original plain text.
"""
import sys
import re
import os.path
from docutils import statemachine
from docutils.utils.error_reporting import ErrorString
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives.misc import Include
__version__ = '1.0'
def setup(app):
    """Sphinx extension entry point: register ``maintainers-include``."""
    app.add_directive("maintainers-include", MaintainersInclude)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
class MaintainersInclude(Include):
    u"""MaintainersInclude (``maintainers-include``) directive.

    Parses the kernel's plain-text MAINTAINERS file into reST and splices
    the result into the current document.
    """
    required_arguments = 0

    def parse_maintainers(self, path):
        """Parse all the MAINTAINERS lines into ReST for human-readability"""

        result = list()
        result.append(".. _maintainers:")
        result.append("")

        # Poor man's state machine.
        descriptions = False   # inside the preformatted Descriptions text
        maintainers = False    # past the "Maintainers" heading
        subsystems = False     # inside per-subsystem entries

        # Field letter to field name mapping.
        field_letter = None
        fields = dict()

        prev = None            # previous input line (for heading detection)
        field_prev = ""        # last field letter seen in current subsystem
        field_content = ""     # pending, batched field text

        # NOTE(review): the regex literals below are non-raw strings with
        # sequences like "\s" -- SyntaxWarning on Python >= 3.12; consider
        # raw strings upstream.
        for line in open(path):
            # Have we reached the end of the preformatted Descriptions text?
            if descriptions and line.startswith('Maintainers'):
                descriptions = False
                # Ensure a blank line following the last "|"-prefixed line.
                result.append("")

            # Start subsystem processing? This is to skip processing the text
            # between the Maintainers heading and the first subsystem name.
            if maintainers and not subsystems:
                if re.search('^[A-Z0-9]', line):
                    subsystems = True

            # Drop needless input whitespace.
            line = line.rstrip()

            # Linkify all non-wildcard refs to ReST files in Documentation/.
            pat = '(Documentation/([^\s\?\*]*)\.rst)'
            m = re.search(pat, line)
            if m:
                # maintainers.rst is in a subdirectory, so include "../".
                line = re.sub(pat, ':doc:`%s <../%s>`' % (m.group(2), m.group(2)), line)

            # Check state machine for output rendering behavior.
            output = None
            if descriptions:
                # Escape the escapes in preformatted text.
                output = "| %s" % (line.replace("\\", "\\\\"))
                # Look for and record field letter to field name mappings:
                #   R: Designated *reviewer*: FullName <address@domain>
                m = re.search("\s(\S):\s", line)
                if m:
                    field_letter = m.group(1)
                if field_letter and not field_letter in fields:
                    m = re.search("\*([^\*]+)\*", line)
                    if m:
                        fields[field_letter] = m.group(1)
            elif subsystems:
                # Skip empty lines: subsystem parser adds them as needed.
                if len(line) == 0:
                    continue
                # Subsystem fields are batched into "field_content"
                if line[1] != ':':
                    # Render a subsystem entry as:
                    #   SUBSYSTEM NAME
                    #   ~~~~~~~~~~~~~~

                    # Flush pending field content.
                    output = field_content + "\n\n"
                    field_content = ""

                    # Collapse whitespace in subsystem name.
                    heading = re.sub("\s+", " ", line)
                    output = output + "%s\n%s" % (heading, "~" * len(heading))
                    field_prev = ""
                else:
                    # Render a subsystem field as:
                    #   :Field: entry
                    #           entry...
                    field, details = line.split(':', 1)
                    details = details.strip()

                    # Mark paths (and regexes) as literal text for improved
                    # readability and to escape any escapes.
                    if field in ['F', 'N', 'X', 'K']:
                        # But only if not already marked :)
                        if not ':doc:' in details:
                            details = '``%s``' % (details)

                    # Comma separate email field continuations.
                    if field == field_prev and field_prev in ['M', 'R', 'L']:
                        field_content = field_content + ","

                    # Do not repeat field names, so that field entries
                    # will be collapsed together.
                    if field != field_prev:
                        output = field_content + "\n"
                        field_content = ":%s:" % (fields.get(field, field))
                    field_content = field_content + "\n\t%s" % (details)
                    field_prev = field
            else:
                output = line

            # Re-split on any added newlines in any above parsing.
            if output != None:
                for separated in output.split('\n'):
                    result.append(separated)

            # Update the state machine when we find heading separators.
            if line.startswith('----------'):
                if prev.startswith('Descriptions'):
                    descriptions = True
                if prev.startswith('Maintainers'):
                    maintainers = True

            # Retain previous line for state machine transitions.
            prev = line

        # Flush pending field contents.
        if field_content != "":
            for separated in field_content.split('\n'):
                result.append(separated)

        output = "\n".join(result)
        # For debugging the pre-rendered results...
        #print(output, file=open("/tmp/MAINTAINERS.rst", "w"))

        # Feed the rendered reST back into the parser's input stream.
        self.state_machine.insert_input(
            statemachine.string2lines(output), path)

    def run(self):
        """Include the MAINTAINERS file as part of this reST file."""

        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)

        # Walk up source path directories to find Documentation/../
        path = self.state_machine.document.attributes['source']
        path = os.path.realpath(path)
        tail = path
        while tail != "Documentation" and tail != "":
            (path, tail) = os.path.split(path)

        # Append "MAINTAINERS"
        path = os.path.join(path, "MAINTAINERS")

        try:
            self.state.document.settings.record_dependencies.add(path)
            # parse_maintainers() inserts input as a side effect and
            # returns None; ``lines`` is unused.
            lines = self.parse_maintainers(path)
        except IOError as error:
            raise self.severe('Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))

        return []
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/maintainers_include.py
|
# -*- coding: utf-8; mode: python -*-
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
u"""
kernel-abi
~~~~~~~~~~
Implementation of the ``kernel-abi`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:copyright: Copyright (C) 2016-2020 Mauro Carvalho Chehab
:maintained-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the
scripts/get_abi.pl script to parse the Kernel ABI files.
Overview of directive's argument and options.
.. code-block:: rst
.. kernel-abi:: <ABI directory location>
:debug:
The argument ``<ABI directory location>`` is required. It contains the
location of the ABI files to be parsed.
``debug``
Inserts a code-block with the *raw* reST. Sometimes it is helpful to see
what reST is generated.
"""
import codecs
import os
import subprocess
import sys
import re
import kernellog
from os import path
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from docutils.utils.error_reporting import ErrorString
from sphinx.util.docutils import switch_source_input
__version__ = '1.0'
def setup(app):
    """Sphinx extension entry point: register the ``kernel-abi`` directive."""
    app.add_directive("kernel-abi", KernelCmd)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
class KernelCmd(Directive):

    u"""KernelABI (``kernel-abi``) directive.

    Runs ``get_abi.pl rest --enable-lineno --dir <dir>`` and nested-parses
    the produced reST, keeping per-file source/line attribution.
    """

    required_arguments = 1          # ABI directory (mandatory)
    optional_arguments = 2
    has_content = False
    final_argument_whitespace = True

    option_spec = {
        "debug" : directives.flag,      # insert the raw reST as a code-block
        "rst" : directives.unchanged    # pass --rst-source to get_abi.pl
    }

    def run(self):
        """Run get_abi.pl and return the parsed document nodes."""
        doc = self.state.document
        if not doc.settings.file_insertion_enabled:
            raise self.warning("docutils: file insertion disabled")

        env = doc.settings.env
        cwd = path.dirname(doc.current_source)
        cmd = "get_abi.pl rest --enable-lineno --dir "
        cmd += self.arguments[0]

        if 'rst' in self.options:
            cmd += " --rst-source"

        srctree = path.abspath(os.environ["srctree"])

        # Fallback "source file" label until a LINENO marker names one.
        fname = cmd

        # extend PATH with $(srctree)/scripts
        path_env = os.pathsep.join([
            srctree + os.sep + "scripts",
            os.environ["PATH"]
        ])
        shell_env = os.environ.copy()
        shell_env["PATH"] = path_env
        shell_env["srctree"] = srctree

        lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
        nodeList = self.nestedParse(lines, self.arguments[0])
        return nodeList

    def runCmd(self, cmd, **kwargs):
        u"""Run command ``cmd`` and return its stdout as unicode."""

        try:
            proc = subprocess.Popen(
                cmd
                , stdout = subprocess.PIPE
                , stderr = subprocess.PIPE
                , **kwargs
            )
            out, err = proc.communicate()

            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')

            if proc.returncode != 0:
                raise self.severe(
                    u"command '%s' failed with return code %d"
                    % (cmd, proc.returncode)
                )
        except OSError as exc:
            raise self.severe(u"problems with '%s' directive: %s."
                              % (self.name, ErrorString(exc)))
        return out

    def nestedParse(self, lines, fname):
        """Parse get_abi.pl output, honoring ``.. LINENO`` source markers."""
        env = self.state.document.settings.env
        content = ViewList()
        node = nodes.section()

        if "debug" in self.options:
            # Wrap the raw reST in a literal code-block for inspection.
            code_block = "\n\n.. code-block:: rst\n :linenos:\n"
            for l in lines.split("\n"):
                code_block += "\n " + l
            lines = code_block + "\n\n"

        line_regex = re.compile("^\.\. LINENO (\S+)\#([0-9]+)$")
        ln = 0        # current source line (0-based, per LINENO markers)
        n = 0         # total lines consumed (for the log message)
        f = fname     # current source file name

        for line in lines.split("\n"):
            n = n + 1
            match = line_regex.search(line)
            if match:
                new_f = match.group(1)

                # Sphinx parser is lazy: it stops parsing contents in the
                # middle, if it is too big. So, handle it per input file
                if new_f != f and content:
                    self.do_parse(content, node)
                    content = ViewList()

                    # Add the file to Sphinx build dependencies
                    env.note_dependency(os.path.abspath(f))

                f = new_f

                # sphinx counts lines from 0
                ln = int(match.group(2)) - 1
            else:
                content.append(line, f, ln)

        kernellog.info(self.state.document.settings.env.app, "%s: parsed %i lines" % (fname, n))

        # Parse whatever is left after the last LINENO marker.
        if content:
            self.do_parse(content, node)

        return node.children

    def do_parse(self, content, node):
        # switch_source_input() keeps reporter/source bookkeeping correct
        # while nested-parsing generated content.
        with switch_source_input(self.state, content):
            self.state.nested_parse(content, 0, node, match_titles=1)
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/kernel_abi.py
|
#!/usr/bin/env python3
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0330, R0903, R0912
u"""
flat-table
~~~~~~~~~~
Implementation of the ``flat-table`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see linux/COPYING for details.
The ``flat-table`` (:py:class:`FlatTable`) is a double-stage list similar to
the ``list-table`` with some additional features:
* *column-span*: with the role ``cspan`` a cell can be extended through
additional columns
* *row-span*: with the role ``rspan`` a cell can be extended through
additional rows
* *auto span* rightmost cell of a table row over the missing cells on the
right side of that table-row. With Option ``:fill-cells:`` this behavior
can be changed from *auto span* to *auto fill*, which automatically inserts
(empty) cells instead of spanning the last cell.
Options:
* header-rows: [int] count of header rows
* stub-columns: [int] count of stub columns
* widths: [[int] [int] ... ] widths of columns
* fill-cells: instead of autospann missing cells, insert missing cells
roles:
* cspan: [int] additionale columns (*morecols*)
* rspan: [int] additionale rows (*morerows*)
"""
# ==============================================================================
# imports
# ==============================================================================
from docutils import nodes
from docutils.parsers.rst import directives, roles
from docutils.parsers.rst.directives.tables import Table
from docutils.utils import SystemMessagePropagation
# ==============================================================================
# common globals
# ==============================================================================
__version__ = '1.0'
# ==============================================================================
def setup(app):
    """Register the flat-table directive and the cspan/rspan roles."""
    app.add_directive("flat-table", FlatTable)
    roles.register_local_role('cspan', c_span)
    roles.register_local_role('rspan', r_span)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
# ==============================================================================
def c_span(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Role handler for ``:cspan:``: emit a colSpan marker node.

    ``text`` is the span count; no system messages are produced.
    """
    # pylint: disable=W0613
    if options is None:
        options = {}
    if content is None:
        content = []
    return [colSpan(span=int(text))], []
# ==============================================================================
def r_span(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Role handler for ``:rspan:``: emit a rowSpan marker node.

    ``text`` is the span count; no system messages are produced.
    """
    # pylint: disable=W0613
    if options is None:
        options = {}
    if content is None:
        content = []
    return [rowSpan(span=int(text))], []
# ==============================================================================
# Marker node types emitted by the rspan/cspan roles above; they are
# consumed (and replaced) when the table builder computes cell spans.
class rowSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
# ==============================================================================
# ==============================================================================
class FlatTable(Table):

    u"""FlatTable (``flat-table``) directive"""

    option_spec = {
        'name': directives.unchanged,
        'class': directives.class_option,
        'header-rows': directives.nonnegative_int,
        'stub-columns': directives.nonnegative_int,
        'widths': directives.positive_int_list,
        'fill-cells': directives.flag,
    }

    def run(self):
        """Parse the directive's bullet-list body and build the table node."""
        # Guard: an empty body is reported as an error node, not raised.
        if not self.content:
            error = self.state_machine.reporter.error(
                'The "%s" directive is empty; content required.' % self.name,
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return [error]

        title, messages = self.make_title()

        # Nested-parse the body into an anonymous container node.
        container = nodes.Element()
        self.state.nested_parse(self.content, self.content_offset, container)

        builder = ListTableBuilder(self)
        builder.parseFlatTableNode(container)
        table_node = builder.buildTableNode()
        if title:
            table_node.insert(0, title)
        return [table_node] + messages
# ==============================================================================
class ListTableBuilder(object):
# ==============================================================================
u"""Builds a table from a double-stage list"""
def __init__(self, directive):
self.directive = directive
self.rows = []
self.max_cols = 0
def buildTableNode(self):
colwidths = self.directive.get_column_widths(self.max_cols)
if isinstance(colwidths, tuple):
# Since docutils 0.13, get_column_widths returns a (widths,
# colwidths) tuple, where widths is a string (i.e. 'auto').
# See https://sourceforge.net/p/docutils/patches/120/.
colwidths = colwidths[1]
stub_columns = self.directive.options.get('stub-columns', 0)
header_rows = self.directive.options.get('header-rows', 0)
table = nodes.table()
tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
for colwidth in colwidths:
colspec = nodes.colspec(colwidth=colwidth)
# FIXME: It seems, that the stub method only works well in the
# absence of rowspan (observed by the html builder, the docutils-xml
# build seems OK). This is not extraordinary, because there exists
# no table directive (except *this* flat-table) which allows to
# define coexistent of rowspan and stubs (there was no use-case
# before flat-table). This should be reviewed (later).
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
stub_columns = self.directive.options.get('stub-columns', 0)
if header_rows:
thead = nodes.thead()
tgroup += thead
for row in self.rows[:header_rows]:
thead += self.buildTableRowNode(row)
tbody = nodes.tbody()
tgroup += tbody
for row in self.rows[header_rows:]:
tbody += self.buildTableRowNode(row)
return table
def buildTableRowNode(self, row_data, classes=None):
classes = [] if classes is None else classes
row = nodes.row()
for cell in row_data:
if cell is None:
continue
cspan, rspan, cellElements = cell
attributes = {"classes" : classes}
if rspan:
attributes['morerows'] = rspan
if cspan:
attributes['morecols'] = cspan
entry = nodes.entry(**attributes)
entry.extend(cellElements)
row += entry
return row
def raiseError(self, msg):
error = self.directive.state_machine.reporter.error(
msg
, nodes.literal_block(self.directive.block_text
, self.directive.block_text)
, line = self.directive.lineno )
raise SystemMessagePropagation(error)
def parseFlatTableNode(self, node):
u"""parses the node from a :py:class:`FlatTable` directive's body"""
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
self.raiseError(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.directive.name )
for rowNum, rowItem in enumerate(node[0]):
row = self.parseRowItem(rowItem, rowNum)
self.rows.append(row)
self.roundOffTableDefinition()
def roundOffTableDefinition(self):
    u"""Round off the table definition.

    This method rounds off the table definition in :py:member:`rows`.

    * This method inserts the needed ``None`` values for the missing cells
      arising from spanning cells over rows and/or columns.

    * recount the :py:member:`max_cols`

    * Autospan or fill (option ``fill-cells``) missing cells on the right
      side of the table-row
    """
    y = 0
    while y < len(self.rows):
        x = 0

        while x < len(self.rows[y]):
            cell = self.rows[y][x]
            if cell is None:
                x += 1
                continue
            cspan, rspan = cell[:2]
            # handle colspan in current row
            for c in range(cspan):
                try:
                    self.rows[y].insert(x+c+1, None)
                except IndexError:
                    # the user sets ambiguous rowspans
                    pass
            # handle colspan in spanned rows
            for r in range(rspan):
                for c in range(cspan + 1):
                    try:
                        # rows spanned past the end of the table raise
                        # IndexError on self.rows[y+r+1]
                        self.rows[y+r+1].insert(x+c, None)
                    except IndexError:
                        # the user sets ambiguous rowspans
                        pass
            x += 1
        y += 1

    # Insert the missing cells on the right side. For this, first
    # re-calculate the max columns.
    for row in self.rows:
        if self.max_cols < len(row):
            self.max_cols = len(row)

    # fill with empty cells or cellspan?
    fill_cells = 'fill-cells' in self.directive.options

    for row in self.rows:
        x = self.max_cols - len(row)
        if x and not fill_cells:
            if row[-1] is None:
                row.append( ( x - 1, 0, []) )
            else:
                cspan, rspan, content = row[-1]
                row[-1] = (cspan + x, rspan, content)
        elif x and fill_cells:
            for i in range(x):
                row.append( (0, 0, nodes.comment()) )
def pprint(self):
    """Return a human-readable dump of :py:member:`rows` (debug aid).

    Cell content is truncated to 30 characters; spanned-over cells show
    up as ``None``.
    """
    # for debugging
    retVal = "[ "
    for row in self.rows:
        retVal += "[ "
        for col in row:
            if col is None:
                # placeholder for a spanned cell
                retVal += ('%r' % col)
                retVal += "\n , "
            else:
                content = col[2][0].astext()
                if len (content) > 30:
                    content = content[:30] + "..."
                retVal += ('(cspan=%s, rspan=%s, %r)'
                           % (col[0], col[1], content))
                retVal += "]\n , "
        # drop the trailing separator of the last cell
        retVal = retVal[:-2]
        retVal += "]\n , "
    # drop the trailing separator of the last row
    retVal = retVal[:-2]
    return retVal + "]"
def parseRowItem(self, rowItem, rowNum):
    """Parse one first-level bullet item into a row.

    Returns a list of ``(cspan, rspan, elements)`` cell tuples.  The
    item must contain exactly one second-level bullet list; comments,
    system messages and one optional target node are tolerated.
    """
    row = []
    bullet_lists = 0
    bad_child = False
    cell_list = None
    target = None

    for child in rowItem:
        if (isinstance(child, nodes.comment)
                or isinstance(child, nodes.system_message)):
            pass
        elif isinstance(child, nodes.target):
            target = child
        elif isinstance(child, nodes.bullet_list):
            bullet_lists += 1
            cell_list = child
        else:
            bad_child = True
            break

    if bullet_lists != 1 or bad_child:
        self.raiseError(
            'Error parsing content block for the "%s" directive: '
            'two-level bullet list expected, but row %s does not '
            'contain a second-level bullet list.'
            % (self.directive.name, rowNum + 1))

    for cellItem in cell_list:
        cspan, rspan, cellElements = self.parseCellItem(cellItem)
        if target is not None:
            # propagate the row's target into its first cell
            cellElements.insert(0, target)
        row.append( (cspan, rspan, cellElements) )
    return row
def parseCellItem(self, cellItem):
    """Extract span markup from one second-level bullet item.

    Returns a ``(cspan, rspan, elements)`` tuple; ``colSpan`` /
    ``rowSpan`` marker nodes are removed from the tree.
    """
    # search and remove cspan, rspan colspec from the first element in
    # this listItem (field).
    cspan = rspan = 0
    if not len(cellItem):
        # empty list item -> empty cell, no spans
        return cspan, rspan, []
    # NOTE(review): elements are removed from cellItem[0] while it is
    # being iterated; if a colSpan node is immediately followed by a
    # rowSpan node (or vice versa) the second may be skipped -- confirm
    # whether adjacent markers can occur here.
    for elem in cellItem[0]:
        if isinstance(elem, colSpan):
            cspan = elem.get("span")
            elem.parent.remove(elem)
            continue
        if isinstance(elem, rowSpan):
            rspan = elem.get("span")
            elem.parent.remove(elem)
            continue
    return cspan, rspan, cellItem[:]
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/rstFlatTable.py
|
# SPDX-License-Identifier: GPL-2.0
#
# Sphinx has deprecated its older logging interface, but the replacement
# only goes back to 1.6. So here's a wrapper layer to keep around for
# as long as we support 1.4.
#
# We don't support 1.4 anymore, but we'll keep the wrappers around until
# we change all the code to not use them anymore :)
#
import sphinx
from sphinx.util import logging
logger = logging.getLogger('kerneldoc')
def warn(app, message):
    """Log *message* at warning level (*app* kept for API compatibility)."""
    logger.warning(message)

def verbose(app, message):
    """Log *message* at verbose level (*app* kept for API compatibility)."""
    logger.verbose(message)

def info(app, message):
    """Log *message* at info level (*app* kept for API compatibility)."""
    logger.info(message)
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/kernellog.py
|
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
import os
import sys
from sphinx.util.pycompat import execfile_
# ------------------------------------------------------------------------------
def loadConfig(namespace):
    u"""Load an additional configuration file into *namespace*.

    The name of the configuration file is taken from the environment
    ``SPHINX_CONF``. The external configuration file extends (or overwrites) the
    configuration values from the origin ``conf.py``.  With this you are able to
    maintain *build themes*.  """

    config_file = os.environ.get("SPHINX_CONF", None)
    if (config_file is not None
        and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ):
        config_file = os.path.abspath(config_file)

        # Let's avoid one conf.py file just due to latex_documents
        start = config_file.find('Documentation/')
        if start >= 0:
            start = config_file.find('/', start + 1)
        end = config_file.rfind('/')
        if start >= 0 and end > 0:
            # renamed from 'dir' to avoid shadowing the builtin
            doc_dir = config_file[start + 1:end]

            print("source directory: %s" % doc_dir)
            new_latex_docs = []
            latex_documents = namespace['latex_documents']

            # keep only the first latex_documents entry belonging to
            # this source directory, with the directory prefix stripped
            for entry in latex_documents:
                if entry[0].find(doc_dir + '/') == 0:
                    fn = entry[0][len(doc_dir) + 1:]
                    new_latex_docs.append((fn, entry[1], entry[2], entry[3], entry[4]))
                    break

            namespace['latex_documents'] = new_latex_docs

        # If there is an extra conf.py file, load it
        if os.path.isfile(config_file):
            sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
            config = namespace.copy()
            config['__file__'] = config_file
            execfile_(config_file, config)
            del config['__file__']
            namespace.update(config)
        else:
            config = namespace.copy()
            config['tags'].add("subproject")
            namespace.update(config)
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/load_config.py
|
# SPDX-License-Identifier: GPL-2.0
# Copyright 2019 Jonathan Corbet <corbet@lwn.net>
#
# Apply kernel-specific tweaks after the initial document processing
# has been done.
#
from docutils import nodes
import sphinx
from sphinx import addnodes
if sphinx.version_info[0] < 2 or \
sphinx.version_info[0] == 2 and sphinx.version_info[1] < 1:
from sphinx.environment import NoUri
else:
from sphinx.errors import NoUri
import re
from itertools import chain
#
# Python 2 lacks re.ASCII...
#
try:
ascii_p3 = re.ASCII
except AttributeError:
ascii_p3 = 0
#
# Regex nastiness. Of course.
# Try to identify "function()" that's not already marked up some
# other way. Sphinx doesn't like a lot of stuff right after a
# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
# bit tries to restrict matches to things that won't create trouble.
#
RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
#
# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
#
RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
flags=ascii_p3)
#
# Sphinx 3 uses a different C role for each one of struct, union, enum and
# typedef
#
RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
#
# Detects a reference to a documentation page of the form Documentation/... with
# an optional extension
#
RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)')
RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
#
# Reserved C words that we should skip when cross-referencing
#
Skipnames = [ 'for', 'if', 'register', 'sizeof', 'struct', 'unsigned' ]
#
# Many places in the docs refer to common system calls. It is
# pointless to try to cross-reference them and, as has been known
# to happen, somebody defining a function by these names can lead
# to the creation of incorrect and confusing cross references. So
# just don't even try with these names.
#
Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap',
'select', 'poll', 'fork', 'execve', 'clone', 'ioctl',
'socket' ]
c_namespace = ''
def markup_refs(docname, app, node):
    """Split the text of *node* into a list of nodes, replacing every
    recognized reference (function, C type, Documentation/ page) with a
    cross-reference node; unmatched text is kept as plain Text nodes.
    """
    t = node.astext()
    done = 0
    repl = [ ]
    #
    # Associate each regex with the function that will markup its matches
    #
    markup_func_sphinx2 = {RE_doc: markup_doc_ref,
                           RE_function: markup_c_ref,
                           RE_generic_type: markup_c_ref}

    markup_func_sphinx3 = {RE_doc: markup_doc_ref,
                           RE_function: markup_func_ref_sphinx3,
                           RE_struct: markup_c_ref,
                           RE_union: markup_c_ref,
                           RE_enum: markup_c_ref,
                           RE_typedef: markup_c_ref}

    if sphinx.version_info[0] >= 3:
        markup_func = markup_func_sphinx3
    else:
        markup_func = markup_func_sphinx2

    match_iterators = [regex.finditer(t) for regex in markup_func]
    #
    # Sort all references by the starting position in text
    #
    sorted_matches = sorted(chain(*match_iterators), key=lambda m: m.start())
    for m in sorted_matches:
        #
        # Include any text prior to match as a normal text node.
        #
        if m.start() > done:
            repl.append(nodes.Text(t[done:m.start()]))
        #
        # Call the function associated with the regex that matched this text
        # (keyed on m.re) and append its return to the text
        #
        repl.append(markup_func[m.re](docname, app, m))
        done = m.end()
    if done < len(t):
        repl.append(nodes.Text(t[done:]))
    return repl
#
# In sphinx3 we can cross-reference to C macro and function, each one with its
# own C role, but both match the same regex, so we try both.
#
def markup_func_ref_sphinx3(docname, app, match):
    """Convert a ``function()`` match into a C-domain cross reference.

    Both the 'function' and the 'macro' role are tried (they match the
    same regex); the plain text is returned when nothing resolves.
    """
    class_str = ['c-func', 'c-macro']
    reftype_str = ['function', 'macro']

    cdom = app.env.domains['c']
    #
    # Go through the dance of getting an xref out of the C domain
    #
    base_target = match.group(2)
    target_text = nodes.Text(match.group(0))
    xref = None
    possible_targets = [base_target]
    # Check if this document has a namespace, and if so, try
    # cross-referencing inside it first.
    if c_namespace:
        possible_targets.insert(0, c_namespace + "." + base_target)

    if base_target not in Skipnames:
        for target in possible_targets:
            if target not in Skipfuncs:
                for class_s, reftype_s in zip(class_str, reftype_str):
                    lit_text = nodes.literal(classes=['xref', 'c', class_s])
                    lit_text += target_text
                    pxref = addnodes.pending_xref('', refdomain = 'c',
                                                  reftype = reftype_s,
                                                  reftarget = target, modname = None,
                                                  classname = None)
                    #
                    # XXX The Latex builder will throw NoUri exceptions here,
                    # work around that by ignoring them.
                    #
                    try:
                        xref = cdom.resolve_xref(app.env, docname, app.builder,
                                                 reftype_s, target, pxref,
                                                 lit_text)
                    except NoUri:
                        xref = None

                    # first successful resolution wins
                    if xref:
                        return xref

    return target_text
def markup_c_ref(docname, app, match):
    """Convert a C construct match (struct/union/enum/typedef, or a
    function on Sphinx 2) into a C-domain cross reference.

    The role and CSS class are selected by the regex that produced the
    match (``match.re``); plain text is returned when nothing resolves.
    """
    class_str = {# Sphinx 2 only
                 RE_function: 'c-func',
                 RE_generic_type: 'c-type',
                 # Sphinx 3+ only
                 RE_struct: 'c-struct',
                 RE_union: 'c-union',
                 RE_enum: 'c-enum',
                 RE_typedef: 'c-type',
                 }
    reftype_str = {# Sphinx 2 only
                   RE_function: 'function',
                   RE_generic_type: 'type',
                   # Sphinx 3+ only
                   RE_struct: 'struct',
                   RE_union: 'union',
                   RE_enum: 'enum',
                   RE_typedef: 'type',
                   }

    cdom = app.env.domains['c']
    #
    # Go through the dance of getting an xref out of the C domain
    #
    base_target = match.group(2)
    target_text = nodes.Text(match.group(0))
    xref = None
    possible_targets = [base_target]
    # Check if this document has a namespace, and if so, try
    # cross-referencing inside it first.
    if c_namespace:
        possible_targets.insert(0, c_namespace + "." + base_target)

    if base_target not in Skipnames:
        for target in possible_targets:
            # functions in Skipfuncs (common syscall names) are not
            # cross-referenced; other constructs always are
            if not (match.re == RE_function and target in Skipfuncs):
                lit_text = nodes.literal(classes=['xref', 'c', class_str[match.re]])
                lit_text += target_text
                pxref = addnodes.pending_xref('', refdomain = 'c',
                                              reftype = reftype_str[match.re],
                                              reftarget = target, modname = None,
                                              classname = None)
                #
                # XXX The Latex builder will throw NoUri exceptions here,
                # work around that by ignoring them.
                #
                try:
                    xref = cdom.resolve_xref(app.env, docname, app.builder,
                                             reftype_str[match.re], target, pxref,
                                             lit_text)
                except NoUri:
                    xref = None

                if xref:
                    return xref

    return target_text
#
# Try to replace a documentation reference of the form Documentation/... with a
# cross reference to that page
#
def markup_doc_ref(docname, app, match):
    """Replace a ``Documentation/...`` reference with a cross reference
    to that page, falling back to the plain matched text."""
    stddom = app.env.domains['std']
    #
    # Go through the dance of getting an xref out of the std domain
    #
    absolute = match.group(1)
    target = match.group(2)
    if absolute:
        target = "/" + target
    pxref = addnodes.pending_xref('', refdomain='std', reftype='doc',
                                  reftarget=target, modname=None,
                                  classname=None, refexplicit=False)
    #
    # XXX The Latex builder will throw NoUri exceptions here,
    # work around that by ignoring them.
    #
    try:
        xref = stddom.resolve_xref(app.env, docname, app.builder, 'doc',
                                   target, pxref, None)
    except NoUri:
        xref = None
    #
    # Return the xref if we got it; otherwise just return the plain text.
    #
    return xref if xref else nodes.Text(match.group(0))
def get_c_namespace(app, docname):
    """Return the C namespace declared in *docname* via a
    ``.. c:namespace::`` directive, or '' if none is declared."""
    source = app.env.doc2path(docname)
    with open(source) as src:
        for line in src:
            found = RE_namespace.search(line)
            if found:
                return found.group(1)
    return ''
def auto_markup(app, doctree, name):
    """'doctree-resolved' handler: add automatic cross references.

    *name* is the docname; the document's C namespace (if any) is read
    into the module-level ``c_namespace`` first so function references
    can be resolved inside it.
    """
    global c_namespace
    c_namespace = get_c_namespace(app, name)
    def text_but_not_a_reference(node):
        # The nodes.literal test catches ``literal text``, its purpose is to
        # avoid adding cross-references to functions that have been explicitly
        # marked with cc:func:.
        if not isinstance(node, nodes.Text) or isinstance(node.parent, nodes.literal):
            return False

        # skip text that already lives inside any referential node
        child_of_reference = False
        parent = node.parent
        while parent:
            if isinstance(parent, nodes.Referential):
                child_of_reference = True
                break
            parent = parent.parent
        return not child_of_reference

    #
    # This loop could eventually be improved on. Someday maybe we
    # want a proper tree traversal with a lot of awareness of which
    # kinds of nodes to prune. But this works well for now.
    #
    for para in doctree.traverse(nodes.paragraph):
        for node in para.traverse(condition=text_but_not_a_reference):
            node.parent.replace(node, markup_refs(name, app, node))
def setup(app):
    """Sphinx extension entry point: hook automatic markup into builds."""
    app.connect('doctree-resolved', auto_markup)
    return {
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/automarkup.py
|
# coding=utf-8
#
# Copyright © 2016 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Jani Nikula <jani.nikula@intel.com>
#
# Please make sure this works on both python2 and python3.
#
import codecs
import os
import subprocess
import sys
import re
import glob
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
import sphinx
from sphinx.util.docutils import switch_source_input
import kernellog
__version__ = '1.0'
class KernelDocDirective(Directive):
    """Extract kernel-doc comments from the specified file"""
    # NOTE(review): 'required_argument' looks like a typo for Docutils'
    # 'required_arguments'; as written it is an unused attribute and the
    # filename argument is effectively optional -- confirm before fixing.
    required_argument = 1
    optional_arguments = 4
    option_spec = {
        'doc': directives.unchanged_required,
        'export': directives.unchanged,
        'internal': directives.unchanged,
        'identifiers': directives.unchanged,
        'no-identifiers': directives.unchanged,
        'functions': directives.unchanged,
    }
    has_content = False

    def run(self):
        """Run the kernel-doc script on the configured source file and
        parse its reST output into this document.

        On any failure a placeholder error node is returned instead of
        aborting the build.
        """
        env = self.state.document.settings.env
        cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']

        # Pass the version string to kernel-doc, as it needs to use a different
        # dialect, depending what the C domain supports for each specific
        # Sphinx versions
        cmd += ['-sphinx-version', sphinx.__version__]

        filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
        export_file_patterns = []

        # Tell sphinx of the dependency
        env.note_dependency(os.path.abspath(filename))

        tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)

        # 'function' is an alias of 'identifiers'
        if 'functions' in self.options:
            self.options['identifiers'] = self.options.get('functions')

        # FIXME: make this nicer and more robust against errors
        if 'export' in self.options:
            cmd += ['-export']
            export_file_patterns = str(self.options.get('export')).split()
        elif 'internal' in self.options:
            cmd += ['-internal']
            export_file_patterns = str(self.options.get('internal')).split()
        elif 'doc' in self.options:
            cmd += ['-function', str(self.options.get('doc'))]
        elif 'identifiers' in self.options:
            identifiers = self.options.get('identifiers').split()
            if identifiers:
                for i in identifiers:
                    cmd += ['-function', i]
            else:
                cmd += ['-no-doc-sections']

        if 'no-identifiers' in self.options:
            no_identifiers = self.options.get('no-identifiers').split()
            if no_identifiers:
                for i in no_identifiers:
                    cmd += ['-nosymbol', i]

        for pattern in export_file_patterns:
            for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
                env.note_dependency(os.path.abspath(f))
                cmd += ['-export-file', f]

        cmd += [filename]

        try:
            kernellog.verbose(env.app,
                              'calling kernel-doc \'%s\'' % (" ".join(cmd)))

            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()

            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')

            if p.returncode != 0:
                sys.stderr.write(err)

                kernellog.warn(env.app,
                               'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
                return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
            elif env.config.kerneldoc_verbosity > 0:
                sys.stderr.write(err)

            lines = statemachine.string2lines(out, tab_width, convert_whitespace=True)
            result = ViewList()

            lineoffset = 0;
            # NOTE(review): pattern is a non-raw string; it works, but an
            # r"..." literal would avoid escape-sequence warnings.
            line_regex = re.compile("^\.\. LINENO ([0-9]+)$")
            for line in lines:
                match = line_regex.search(line)
                if match:
                    # sphinx counts lines from 0
                    lineoffset = int(match.group(1)) - 1
                    # we must eat our comments since the upset the markup
                else:
                    doc = env.srcdir + "/" + env.docname + ":" + str(self.lineno)
                    result.append(line, doc + ": " + filename, lineoffset)
                    lineoffset += 1

            node = nodes.section()
            self.do_parse(result, node)

            return node.children

        except Exception as e:  # pylint: disable=W0703
            kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' %
                           (" ".join(cmd), str(e)))
            return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]

    def do_parse(self, result, node):
        """Parse the generated ViewList into *node* with correct source
        attribution for any reported errors."""
        with switch_source_input(self.state, result):
            self.state.nested_parse(result, 0, node, match_titles=1)
def setup(app):
    """Register the kernel-doc directive and its configuration values."""
    app.add_config_value('kerneldoc_bin', None, 'env')
    app.add_config_value('kerneldoc_srctree', None, 'env')
    app.add_config_value('kerneldoc_verbosity', 1, 'env')

    app.add_directive('kernel-doc', KernelDocDirective)

    return {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/kerneldoc.py
|
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0103, R0903, R0912, R0915
u"""
scalable figure and image handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sphinx extension which implements scalable image handling.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The build for image formats depend on image's source format and output's
destination format. This extension implement methods to simplify image
handling from the author's POV. Directives like ``kernel-figure`` implement
methods *to* always get the best output-format even if some tools are not
installed. For more details take a look at ``convert_image(...)`` which is
the core of all conversions.
* ``.. kernel-image``: for image handling / a ``.. image::`` replacement
* ``.. kernel-figure``: for figure handling / a ``.. figure::`` replacement
* ``.. kernel-render``: for render markup / a concept to embed *render*
markups (or languages). Supported markups (see ``RENDER_MARKUP_EXT``)
- ``DOT``: render embedded Graphviz's **DOC**
- ``SVG``: render embedded Scalable Vector Graphics (**SVG**)
- ... *developable*
Used tools:
* ``dot(1)``: Graphviz (https://www.graphviz.org). If Graphviz is not
available, the DOT language is inserted as literal-block.
For conversion to PDF, ``rsvg-convert(1)`` of librsvg
(https://gitlab.gnome.org/GNOME/librsvg) is used when available.
* SVG to PDF: To generate PDF, you need at least one of this tools:
- ``convert(1)``: ImageMagick (https://www.imagemagick.org)
- ``inkscape(1)``: Inkscape (https://inkscape.org/)
List of customizations:
* generate PDF from SVG / used by PDF (LaTeX) builder
* generate SVG (html-builder) and PDF (latex-builder) from DOT files.
DOT: see https://www.graphviz.org/content/dot-language
"""
import os
from os import path
import subprocess
from hashlib import sha1
import re
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
import sphinx
from sphinx.util.nodes import clean_astext
import kernellog
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
if major == 1 and minor > 3:
# patches.Figure only landed in Sphinx 1.4
from sphinx.directives.patches import Figure # pylint: disable=C0413
else:
Figure = images.Figure
__version__ = '1.0.0'
# simple helper
# -------------
def which(cmd):
    """Search the ``PATH`` environment for ``cmd``.

    The first match is returned; ``None`` if nothing is found.
    """
    search_path = os.environ.get('PATH', None) or os.defpath
    for folder in search_path.split(os.pathsep):
        candidate = folder + os.sep + cmd
        if path.isfile(candidate):
            return candidate
def mkdir(folder, mode=0o775):
    """Create *folder* (and missing parents) with *mode*.

    Existing directories are left untouched; using ``exist_ok`` avoids
    the check-then-create race of testing ``path.isdir`` first.
    """
    os.makedirs(folder, mode, exist_ok=True)
def file2literal(fname):
    """Read *fname* and wrap its content into a ``literal_block`` node."""
    with open(fname, "r") as src:
        content = src.read()
    return nodes.literal_block(content, content)
def isNewer(path1, path2):
    """Return True if ``path1`` exists and is newer than ``path2``.

    NOTE(review): "newer" is decided on ``st_ctime`` (inode change
    time), not ``st_mtime`` -- presumably intentional; confirm before
    changing.
    """
    if not path.exists(path1):
        return False
    return os.stat(path1).st_ctime > os.stat(path2).st_ctime
def pass_handle(self, node):  # pylint: disable=W0613
    """No-op visitor for builder events that need no action."""
# setup conversion tools and sphinx extension
# -------------------------------------------
# Graphviz's dot(1) support
dot_cmd = None
# dot(1) -Tpdf should be used
dot_Tpdf = False
# ImageMagick' convert(1) support
convert_cmd = None
# librsvg's rsvg-convert(1) support
rsvg_convert_cmd = None
# Inkscape's inkscape(1) support
inkscape_cmd = None
# Inkscape prior to 1.0 uses different command options
inkscape_ver_one = False
def setup(app):
    """Sphinx extension entry: register directives, nodes and visitors."""
    # check toolchain first
    app.connect('builder-inited', setupTools)

    # image handling
    app.add_directive("kernel-image", KernelImage)
    app.add_node(kernel_image,
                 html    = (visit_kernel_image, pass_handle),
                 latex   = (visit_kernel_image, pass_handle),
                 texinfo = (visit_kernel_image, pass_handle),
                 text    = (visit_kernel_image, pass_handle),
                 man     = (visit_kernel_image, pass_handle), )

    # figure handling
    app.add_directive("kernel-figure", KernelFigure)
    app.add_node(kernel_figure,
                 html    = (visit_kernel_figure, pass_handle),
                 latex   = (visit_kernel_figure, pass_handle),
                 texinfo = (visit_kernel_figure, pass_handle),
                 text    = (visit_kernel_figure, pass_handle),
                 man     = (visit_kernel_figure, pass_handle), )

    # render handling
    app.add_directive('kernel-render', KernelRender)
    app.add_node(kernel_render,
                 html    = (visit_kernel_render, pass_handle),
                 latex   = (visit_kernel_render, pass_handle),
                 texinfo = (visit_kernel_render, pass_handle),
                 text    = (visit_kernel_render, pass_handle),
                 man     = (visit_kernel_render, pass_handle), )

    app.connect('doctree-read', add_kernel_figure_to_std_domain)

    return dict(
        version = __version__,
        parallel_read_safe = True,
        parallel_write_safe = True
    )
def setupTools(app):
    u"""
    Check available build tools and log some *verbose* messages.

    This function is called once, when the builder is initiated.
    """
    global dot_cmd, dot_Tpdf, convert_cmd, rsvg_convert_cmd  # pylint: disable=W0603
    global inkscape_cmd, inkscape_ver_one  # pylint: disable=W0603
    kernellog.verbose(app, "kfigure: check installed tools ...")

    dot_cmd = which('dot')
    convert_cmd = which('convert')
    rsvg_convert_cmd = which('rsvg-convert')
    inkscape_cmd = which('inkscape')

    if dot_cmd:
        kernellog.verbose(app, "use dot(1) from: " + dot_cmd)

        # probe dot's output formats to see whether -Tpdf is supported
        try:
            dot_Thelp_list = subprocess.check_output([dot_cmd, '-Thelp'],
                                                     stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # dot -Thelp exits non-zero but still lists the formats
            dot_Thelp_list = err.output
            pass

        dot_Tpdf_ptn = b'pdf'
        dot_Tpdf = re.search(dot_Tpdf_ptn, dot_Thelp_list)
    else:
        kernellog.warn(app, "dot(1) not found, for better output quality install "
                       "graphviz from https://www.graphviz.org")
    if inkscape_cmd:
        kernellog.verbose(app, "use inkscape(1) from: " + inkscape_cmd)
        inkscape_ver = subprocess.check_output([inkscape_cmd, '--version'],
                                               stderr=subprocess.DEVNULL)
        ver_one_ptn = b'Inkscape 1'
        inkscape_ver_one = re.search(ver_one_ptn, inkscape_ver)
        # Inkscape takes over SVG -> PDF conversion; disable the other
        # converters
        convert_cmd = None
        rsvg_convert_cmd = None
        dot_Tpdf = False

    else:
        if convert_cmd:
            kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
        else:
            kernellog.verbose(app,
                "Neither inkscape(1) nor convert(1) found.\n"
                "For SVG to PDF conversion, "
                "install either Inkscape (https://inkscape.org/) (preferred) or\n"
                "ImageMagick (https://www.imagemagick.org)")

        if rsvg_convert_cmd:
            kernellog.verbose(app, "use rsvg-convert(1) from: " + rsvg_convert_cmd)
            kernellog.verbose(app, "use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion")
            dot_Tpdf = False
        else:
            kernellog.verbose(app,
                              "rsvg-convert(1) not found.\n"
                              " SVG rendering of convert(1) is done by ImageMagick-native renderer.")

        if dot_Tpdf:
            kernellog.verbose(app, "use 'dot -Tpdf' for DOT -> PDF conversion")
        else:
            kernellog.verbose(app, "use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion")
# integrate conversion tools
# --------------------------
RENDER_MARKUP_EXT = {
# The '.ext' must be handled by convert_image(..) function's *in_ext* input.
# <name> : <.ext>
'DOT' : '.dot',
'SVG' : '.svg'
}
def convert_image(img_node, translator, src_fname=None):
    """Convert a image node for the builder.

    Different builder prefer different image formats, e.g. *latex* builder
    prefer PDF while *html* builder prefer SVG format for images.

    This function handles output image formats in dependence of source the
    format (of the image) and the translator's output format.
    """
    app = translator.builder.app

    fname, in_ext = path.splitext(path.basename(img_node['uri']))
    if src_fname is None:
        src_fname = path.join(translator.builder.srcdir, img_node['uri'])
        if not path.exists(src_fname):
            # fall back to the build output tree (e.g. generated files)
            src_fname = path.join(translator.builder.outdir, img_node['uri'])

    # destination file; stays None when no conversion is needed/possible
    dst_fname = None

    # in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages
    kernellog.verbose(app, 'assert best format for: ' + img_node['uri'])

    if in_ext == '.dot':

        if not dot_cmd:
            kernellog.verbose(app,
                              "dot from graphviz not available / include DOT raw.")
            img_node.replace_self(file2literal(src_fname))

        elif translator.builder.format == 'latex':
            dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
            img_node['uri'] = fname + '.pdf'
            img_node['candidates'] = {'*': fname + '.pdf'}

        elif translator.builder.format == 'html':
            dst_fname = path.join(
                translator.builder.outdir,
                translator.builder.imagedir,
                fname + '.svg')
            img_node['uri'] = path.join(
                translator.builder.imgpath, fname + '.svg')
            img_node['candidates'] = {
                '*': path.join(translator.builder.imgpath, fname + '.svg')}

        else:
            # all other builder formats will include DOT as raw
            img_node.replace_self(file2literal(src_fname))

    elif in_ext == '.svg':

        if translator.builder.format == 'latex':
            if not inkscape_cmd and convert_cmd is None:
                kernellog.warn(app,
                               "no SVG to PDF conversion available / include SVG raw."
                               "\nIncluding large raw SVGs can cause xelatex error."
                               "\nInstall Inkscape (preferred) or ImageMagick.")
                img_node.replace_self(file2literal(src_fname))
            else:
                dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
                img_node['uri'] = fname + '.pdf'
                img_node['candidates'] = {'*': fname + '.pdf'}

    if dst_fname:
        # the builder needs not to copy one more time, so pop it if exists.
        translator.builder.images.pop(img_node['uri'], None)
        _name = dst_fname[len(translator.builder.outdir) + 1:]

        if isNewer(dst_fname, src_fname):
            kernellog.verbose(app,
                              "convert: {out}/%s already exists and is newer" % _name)

        else:
            ok = False
            mkdir(path.dirname(dst_fname))

            if in_ext == '.dot':
                kernellog.verbose(app, 'convert DOT to: {out}/' + _name)
                if translator.builder.format == 'latex' and not dot_Tpdf:
                    # dot cannot emit PDF directly: go DOT -> SVG -> PDF
                    svg_fname = path.join(translator.builder.outdir, fname + '.svg')
                    ok1 = dot2format(app, src_fname, svg_fname)
                    ok2 = svg2pdf_by_rsvg(app, svg_fname, dst_fname)
                    ok = ok1 and ok2

                else:
                    ok = dot2format(app, src_fname, dst_fname)

            elif in_ext == '.svg':
                kernellog.verbose(app, 'convert SVG to: {out}/' + _name)
                ok = svg2pdf(app, src_fname, dst_fname)

            if not ok:
                # conversion failed: fall back to a literal include
                img_node.replace_self(file2literal(src_fname))
def dot2format(app, dot_fname, out_fname):
    """Converts DOT file to ``out_fname`` using ``dot(1)``.

    * ``dot_fname`` pathname of the input DOT file, including extension ``.dot``
    * ``out_fname`` pathname of the output file, including format extension

    The *format extension* depends on the ``dot`` command (see ``man dot``
    option ``-Txxx``). Normally you will use one of the following extensions:

    - ``.ps`` for PostScript,
    - ``.svg`` or ``svgz`` for Structured Vector Graphics,
    - ``.fig`` for XFIG graphics and
    - ``.png`` or ``gif`` for common bitmap graphics.

    Returns ``True`` on success, ``False`` otherwise.
    """
    # the output format is derived from the destination file's extension
    out_format = path.splitext(out_fname)[1][1:]
    cmd = [dot_cmd, '-T%s' % out_format, dot_fname]
    # the dead 'exit_code = 42' pre-initialization was removed: the call
    # below always assigns it (an open() failure propagates anyway)
    with open(out_fname, "w") as out:
        exit_code = subprocess.call(cmd, stdout = out)
        if exit_code != 0:
            kernellog.warn(app,
                           "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
    return bool(exit_code == 0)
def svg2pdf(app, svg_fname, pdf_fname):
    """Converts SVG to PDF with ``inkscape(1)`` or ``convert(1)`` command.

    Uses ``inkscape(1)`` from Inkscape (https://inkscape.org/) or ``convert(1)``
    from ImageMagick (https://www.imagemagick.org) for conversion.
    Returns ``True`` on success and ``False`` if an error occurred.

    * ``svg_fname`` pathname of the input SVG file with extension (``.svg``)
    * ``pdf_name`` pathname of the output PDF file with extension (``.pdf``)
    """
    # default: convert(1); replaced by inkscape(1) when available
    cmd = [convert_cmd, svg_fname, pdf_fname]
    cmd_name = 'convert(1)'

    if inkscape_cmd:
        cmd_name = 'inkscape(1)'
        if inkscape_ver_one:
            cmd = [inkscape_cmd, '-o', pdf_fname, svg_fname]
        else:
            # Inkscape prior to 1.0 uses different command options
            cmd = [inkscape_cmd, '-z', '--export-pdf=%s' % pdf_fname, svg_fname]

    try:
        warning_msg = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        exit_code = 0
    except subprocess.CalledProcessError as err:
        warning_msg = err.output
        exit_code = err.returncode
        pass

    if exit_code != 0:
        kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
        if warning_msg:
            kernellog.warn(app, "Warning msg from %s: %s"
                           % (cmd_name, str(warning_msg, 'utf-8')))
    elif warning_msg:
        # tool succeeded but printed something -- log it as informational
        kernellog.verbose(app, "Warning msg from %s (likely harmless):\n%s"
                          % (cmd_name, str(warning_msg, 'utf-8')))

    return bool(exit_code == 0)
def svg2pdf_by_rsvg(app, svg_fname, pdf_fname):
    """Convert SVG to PDF with ``rsvg-convert(1)`` command.

    * ``svg_fname`` pathname of input SVG file, including extension ``.svg``
    * ``pdf_fname`` pathname of output PDF file, including extension ``.pdf``

    Input SVG file should be the one generated by ``dot2format()``.
    SVG -> PDF conversion is done by ``rsvg-convert(1)``.

    If ``rsvg-convert(1)`` is unavailable, fall back to ``svg2pdf()``.
    """
    if rsvg_convert_cmd is None:
        # rsvg-convert(1) not found: delegate to the inkscape/convert path
        return svg2pdf(app, svg_fname, pdf_fname)

    cmd = [rsvg_convert_cmd, '--format=pdf', '-o', pdf_fname, svg_fname]
    # use stdout and stderr from parent
    exit_code = subprocess.call(cmd)
    if exit_code != 0:
        kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
    return exit_code == 0
# image handling
# ---------------------
def visit_kernel_image(self, node):    # pylint: disable=W0613
    """Visitor of the ``kernel_image`` Node.

    Handles the ``image`` child-node with the ``convert_image(...)``.
    """
    convert_image(node[0], self)
# Marker subclass: behaves exactly like a docutils image node, but lets the
# builder dispatch to visit_kernel_image() so the image can be converted.
class kernel_image(nodes.image):
    """Node for ``kernel-image`` directive."""
    pass
class KernelImage(images.Image):
    u"""KernelImage directive

    Earns everything from ``.. image::`` directive, except *remote URI* and
    *glob* pattern. The KernelImage wraps a image node into a
    kernel_image node. See ``visit_kernel_image``.
    """

    def run(self):
        uri = self.arguments[0]
        # remote images and glob patterns can't be converted locally
        if uri.endswith('.*') or '://' in uri:
            raise self.severe(
                'Error in "%s: %s": glob pattern and remote images are not allowed'
                % (self.name, uri))
        parsed = images.Image.run(self)
        if len(parsed) == 2 or isinstance(parsed[0], nodes.system_message):
            # pass system messages / two-element results through unchanged
            return parsed
        (img,) = parsed
        # wrap image node into a kernel_image node / see visitors
        return [kernel_image('', img)]
# figure handling
# ---------------------
def visit_kernel_figure(self, node):   # pylint: disable=W0613
    """Visitor of the ``kernel_figure`` Node.

    Handles the ``image`` child-node with the ``convert_image(...)``.
    """
    convert_image(node[0][0], self)
# Marker subclass: identical to a docutils figure node, but dispatches to
# visit_kernel_figure() so the contained image can be converted.
class kernel_figure(nodes.figure):
    """Node for ``kernel-figure`` directive."""
class KernelFigure(Figure):
    u"""KernelFigure directive

    Earns everything from ``.. figure::`` directive, except *remote URI* and
    *glob* pattern.  The KernelFigure wraps a figure node into a kernel_figure
    node. See ``visit_kernel_figure``.
    """

    def run(self):
        uri = self.arguments[0]
        # remote URIs and glob patterns can't be converted by the local tools
        if uri.endswith('.*') or uri.find('://') != -1:
            raise self.severe(
                'Error in "%s: %s":'
                ' glob pattern and remote images are not allowed'
                % (self.name, uri))
        result = Figure.run(self)
        if len(result) == 2 or isinstance(result[0], nodes.system_message):
            # pass system messages / two-element results through unchanged
            return result
        (figure_node,) = result
        # wrap figure node into a kernel_figure node / see visitors
        node = kernel_figure('', figure_node)
        return [node]
# render handling
# ---------------------
def visit_kernel_render(self, node):
    """Visitor of the ``kernel_render`` Node.

    If rendering tools available, save the markup of the ``literal_block`` child
    node into a file and replace the ``literal_block`` node with a new created
    ``image`` node, pointing to the saved markup file. Afterwards, handle the
    image child-node with the ``convert_image(...)``.
    """
    app = self.builder.app
    srclang = node.get('srclang')
    kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang))
    # unknown markup language: leave the literal block in place (raw markup)
    tmp_ext = RENDER_MARKUP_EXT.get(srclang, None)
    if tmp_ext is None:
        kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang))
        return
    if not dot_cmd and tmp_ext == '.dot':
        kernellog.verbose(app, "dot from graphviz not available / include raw.")
        return
    literal_block = node[0]
    code = literal_block.astext()
    hashobj = code.encode('utf-8') #  str(node.attributes)
    # content-addressed file name: identical markup maps to the same file
    fname = path.join('%s-%s' % (srclang, sha1(hashobj).hexdigest()))
    tmp_fname = path.join(
        self.builder.outdir, self.builder.imagedir, fname + tmp_ext)
    # only write the markup file if it isn't cached from a previous build
    if not path.isfile(tmp_fname):
        mkdir(path.dirname(tmp_fname))
        with open(tmp_fname, "w") as out:
            out.write(code)
    # build an image node pointing at the saved markup and swap it in;
    # convert_image() then renders it to a displayable format
    img_node = nodes.image(node.rawsource, **node.attributes)
    img_node['uri'] = path.join(self.builder.imgpath, fname + tmp_ext)
    img_node['candidates'] = {
        '*': path.join(self.builder.imgpath, fname + tmp_ext)}
    literal_block.replace_self(img_node)
    convert_image(img_node, self, tmp_fname)
# Marker node carrying the raw markup (as a literal_block child) until
# visit_kernel_render() replaces it with a rendered image.
class kernel_render(nodes.General, nodes.Inline, nodes.Element):
    """Node for ``kernel-render`` directive."""
    pass
class KernelRender(Figure):
    u"""KernelRender directive

    Render content by external tool.  Has all the options known from the
    *figure* directive, plus option ``caption``.  If ``caption`` has a
    value, a figure node with the *caption* is inserted. If not, a image node is
    inserted.

    The KernelRender directive wraps the text of the directive into a
    literal_block node and wraps it into a kernel_render node. See
    ``visit_kernel_render``.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False

    # earn options from 'figure'
    option_spec = Figure.option_spec.copy()
    option_spec['caption'] = directives.unchanged

    def run(self):
        return [self.build_node()]

    def build_node(self):
        """Build and return the node this directive inserts.

        Returns a kernel_render node (wrapped in a figure node when the
        ``caption`` option is set), or a system_message node on error.
        """
        srclang = self.arguments[0].strip()
        if srclang not in RENDER_MARKUP_EXT.keys():
            # FIX: return the warning node itself, not a list -- run()
            # already wraps the return value in a list, so returning a list
            # here produced an invalid nested node list.
            return self.state_machine.reporter.warning(
                'Unknown source language "%s", use one of: %s.' % (
                    srclang, ",".join(RENDER_MARKUP_EXT.keys())),
                line=self.lineno)

        code = '\n'.join(self.content)
        if not code.strip():
            return self.state_machine.reporter.warning(
                'Ignoring "%s" directive without content.' % (
                    self.name),
                line=self.lineno)

        node = kernel_render()
        node['alt'] = self.options.get('alt', '')
        node['srclang'] = srclang
        literal_node = nodes.literal_block(code, code)
        node += literal_node

        caption = self.options.get('caption')
        if caption:
            # parse caption's content
            parsed = nodes.Element()
            self.state.nested_parse(
                ViewList([caption], source=''), self.content_offset, parsed)
            caption_node = nodes.caption(
                parsed[0].rawsource, '', *parsed[0].children)
            caption_node.source = parsed[0].source
            caption_node.line = parsed[0].line
            # with a caption, wrap everything in a figure node
            figure_node = nodes.figure('', node)
            for k, v in self.options.items():
                figure_node[k] = v
            figure_node += caption_node
            node = figure_node

        return node
def add_kernel_figure_to_std_domain(app, doctree):
    """Add kernel-figure anchors to 'std' domain.

    The ``StandardDomain.process_doc(..)`` method does not know how to resolve
    the caption (label) of ``kernel-figure`` directive (it only knows about
    standard nodes, e.g. table, figure etc.). Without any additional handling
    this will result in a 'undefined label' for kernel-figures.

    This handle adds labels of kernel-figure to the 'std' domain labels.
    """
    std = app.env.domains["std"]
    docname = app.env.docname
    labels = std.data["labels"]
    # walk every explicitly named target in the doctree
    for name, explicit in doctree.nametypes.items():
        if not explicit:
            continue
        labelid = doctree.nameids[name]
        if labelid is None:
            continue
        node = doctree.ids[labelid]
        if node.tagname == 'kernel_figure':
            # use the caption text as the label's section name
            # NOTE(review): next_node() returns a single node which is then
            # iterated (over its children); presumably this reaches the
            # caption node -- confirm against docutils traversal semantics
            for n in node.next_node():
                if n.tagname == 'caption':
                    sectname = clean_astext(n)
                    # add label to std domain
                    labels[name] = docname, labelid, sectname
                    break
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/sphinx/kfigure.py
|
# -*- coding: utf-8; mode: python -*-
# SPDX-License-Identifier: GPL-2.0

project = 'Linux Media Subsystem Documentation'

# It is possible to run Sphinx in nickpick mode with:
nitpicky = True

# within nit-picking build, do not refer to any intersphinx object
intersphinx_mapping = {}

# In nickpick mode, it will complain about lots of missing references that
#
# 1) are just typedefs like: bool, __u32, etc;
# 2) It will complain for things like: enum, NULL;
# 3) It will complain for symbols that should be on different
#    books (but currently aren't ported to ReST)
#
# The list below has a list of such symbols to be ignored in nitpick mode
#
# FIX: removed the duplicated ("c:type", "pollfd") and ("c:type", "timeval")
# entries -- each reference needs to be listed only once.
nitpick_ignore = [
    ("c:func", "clock_gettime"),
    ("c:func", "close"),
    ("c:func", "container_of"),
    ("c:func", "copy_from_user"),
    ("c:func", "copy_to_user"),
    ("c:func", "determine_valid_ioctls"),
    ("c:func", "ERR_PTR"),
    ("c:func", "i2c_new_client_device"),
    ("c:func", "ioctl"),
    ("c:func", "IS_ERR"),
    ("c:func", "KERNEL_VERSION"),
    ("c:func", "mmap"),
    ("c:func", "open"),
    ("c:func", "pci_name"),
    ("c:func", "poll"),
    ("c:func", "PTR_ERR"),
    ("c:func", "read"),
    ("c:func", "release"),
    ("c:func", "set"),
    ("c:func", "struct fd_set"),
    ("c:func", "struct pollfd"),
    ("c:func", "usb_make_path"),
    ("c:func", "wait_finish"),
    ("c:func", "wait_prepare"),
    ("c:func", "write"),

    ("c:type", "atomic_t"),
    ("c:type", "bool"),
    ("c:type", "boolean"),
    ("c:type", "buf_queue"),
    ("c:type", "device"),
    ("c:type", "device_driver"),
    ("c:type", "device_node"),
    ("c:type", "enum"),
    ("c:type", "fd"),
    ("c:type", "fd_set"),
    ("c:type", "file"),
    ("c:type", "i2c_adapter"),
    ("c:type", "i2c_board_info"),
    ("c:type", "i2c_client"),
    ("c:type", "int16_t"),
    ("c:type", "ktime_t"),
    ("c:type", "led_classdev_flash"),
    ("c:type", "list_head"),
    ("c:type", "lock_class_key"),
    ("c:type", "module"),
    ("c:type", "mutex"),
    ("c:type", "NULL"),
    ("c:type", "off_t"),
    ("c:type", "pci_dev"),
    ("c:type", "pdvbdev"),
    ("c:type", "poll_table"),
    ("c:type", "platform_device"),
    ("c:type", "pollfd"),
    ("c:type", "poll_table_struct"),
    ("c:type", "s32"),
    ("c:type", "s64"),
    ("c:type", "sd"),
    ("c:type", "size_t"),
    ("c:type", "spi_board_info"),
    ("c:type", "spi_device"),
    ("c:type", "spi_master"),
    ("c:type", "ssize_t"),
    ("c:type", "fb_fix_screeninfo"),
    ("c:type", "timeval"),
    ("c:type", "video_capability"),
    ("c:type", "__u16"),
    ("c:type", "u16"),
    ("c:type", "__u32"),
    ("c:type", "u32"),
    ("c:type", "__u64"),
    ("c:type", "u64"),
    ("c:type", "u8"),
    ("c:type", "uint16_t"),
    ("c:type", "uint32_t"),
    ("c:type", "union"),
    ("c:type", "__user"),
    ("c:type", "usb_device"),
    ("c:type", "usb_interface"),
    ("c:type", "v4l2_std_id"),
    ("c:type", "video_system_t"),
    ("c:type", "vm_area_struct"),

    # Opaque structures
    ("c:type", "v4l2_m2m_dev"),
]
|
nvtrust-main
|
infrastructure/linux/linux_source/Documentation/userspace-api/media/conf_nitpick.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Tool for analyzing boot timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's boot time. It creates an html representation of
# the kernel boot timeline up to the start of the init process.
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
import shutil
from datetime import datetime, timedelta
from subprocess import call, Popen, PIPE
import sleepgraph as aslib
def pprint(msg):
    """Print *msg* and flush stdout immediately (useful when output is piped)."""
    print(msg, flush=True)
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues(aslib.SystemValues):
    """Global, single-instance container for system values and test
    parameters (extends the sleepgraph SystemValues with boot specifics)."""
    title = 'BootGraph'
    version = '2.2'
    hostname = 'localhost'
    testtime = ''
    kernel = ''
    dmesgfile = ''
    ftracefile = ''
    htmlfile = 'bootgraph.html'
    testdir = ''
    kparams = ''
    result = ''
    useftrace = False
    usecallgraph = False
    suspendmode = 'boot'
    max_graph_depth = 2
    graph_filter = 'do_one_initcall'
    reboot = False
    manual = False
    iscronjob = False
    timeformat = '%.6f'
    bootloader = 'grub'
    blexec = []
    def __init__(self):
        self.hostname = platform.node()
        self.testtime = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
        if os.path.exists('/proc/version'):
            # FIX: context manager so the file handle is always closed
            with open('/proc/version', 'r') as fp:
                val = fp.read().strip()
            self.kernel = self.kernelVersion(val)
        else:
            self.kernel = 'unknown'
        self.testdir = datetime.now().strftime('boot-%y%m%d-%H%M%S')
    def kernelVersion(self, msg):
        """Extract the kernel release from a 'Linux version ...' string."""
        return msg.split()[2]
    def checkFtraceKernelVersion(self):
        """True if the kernel is new enough (>= 4.10) for boot ftrace."""
        val = tuple(map(int, self.kernel.split('-')[0].split('.')))
        if val >= (4, 10, 0):
            return True
        return False
    def kernelParams(self):
        """Return the kernel command line additions for a timed boot."""
        cmdline = 'initcall_debug log_buf_len=32M'
        if self.useftrace:
            # size the per-cpu trace buffer from available memory
            if self.cpucount > 0:
                bs = min(self.memtotal // 2, 2*1024*1024) // self.cpucount
            else:
                bs = 131072
            cmdline += ' trace_buf_size=%dK trace_clock=global '\
            'trace_options=nooverwrite,funcgraph-abstime,funcgraph-cpu,'\
            'funcgraph-duration,funcgraph-proc,funcgraph-tail,'\
            'nofuncgraph-overhead,context-info,graph-time '\
            'ftrace=function_graph '\
            'ftrace_graph_max_depth=%d '\
            'ftrace_graph_filter=%s' % \
                (bs, self.max_graph_depth, self.graph_filter)
        return cmdline
    def setGraphFilter(self, val):
        """Validate a comma separated function list and set graph_filter.

        Each function must appear in available_filter_functions; any
        malformed or unknown entry aborts via doError().
        """
        master = self.getBootFtraceFilterFunctions()
        fs = ''
        for i in val.split(','):
            func = i.strip()
            if func == '':
                doError('badly formatted filter function string')
            if '[' in func or ']' in func:
                doError('loadable module functions not allowed - "%s"' % func)
            if ' ' in func:
                doError('spaces found in filter functions - "%s"' % func)
            if func not in master:
                doError('function "%s" not available for ftrace' % func)
            if not fs:
                fs = func
            else:
                fs += ','+func
        if not fs:
            doError('badly formatted filter function string')
        self.graph_filter = fs
    def getBootFtraceFilterFunctions(self):
        """Return the functions ftrace can filter on (core kernel only)."""
        self.rootCheck(True)
        # FIX: context manager + don't shadow the builtin 'list'
        with open(self.tpath+'available_filter_functions') as fp:
            fulllist = fp.read().split('\n')
        flist = []
        for i in fulllist:
            # skip blanks, module functions ('[mod]') and entries with spaces
            if not i or ' ' in i or '[' in i or ']' in i:
                continue
            flist.append(i)
        return flist
    def myCronJob(self, line):
        """True if a crontab line is a reboot job this tool installed."""
        if '@reboot' not in line:
            return False
        if 'bootgraph' in line or 'analyze_boot.py' in line or '-cronjob' in line:
            return True
        return False
    def cronjobCmdString(self):
        """Rebuild this tool's command line for the post-reboot cron job."""
        cmdline = '%s -cronjob' % os.path.abspath(sys.argv[0])
        args = iter(sys.argv[1:])
        for arg in args:
            # strip one-shot flags; keep (and absolutize) everything else
            if arg in ['-h', '-v', '-cronjob', '-reboot', '-verbose']:
                continue
            elif arg in ['-o', '-dmesg', '-ftrace', '-func']:
                next(args)
                continue
            elif arg == '-result':
                cmdline += ' %s "%s"' % (arg, os.path.abspath(next(args)))
                continue
            elif arg == '-cgskip':
                file = self.configFile(next(args))
                cmdline += ' %s "%s"' % (arg, os.path.abspath(file))
                continue
            cmdline += ' '+arg
        if self.graph_filter != 'do_one_initcall':
            cmdline += ' -func "%s"' % self.graph_filter
        cmdline += ' -o "%s"' % os.path.abspath(self.testdir)
        return cmdline
    def manualRebootRequired(self):
        """Print manual reboot instructions and exit."""
        cmdline = self.kernelParams()
        pprint('To generate a new timeline manually, follow these steps:\n\n'\
        '1. Add the CMDLINE string to your kernel command line.\n'\
        '2. Reboot the system.\n'\
        '3. After reboot, re-run this tool with the same arguments but no command (w/o -reboot or -manual).\n\n'\
        'CMDLINE="%s"' % cmdline)
        sys.exit()
    def blGrub(self):
        """Locate the grub update command and config file; set self.blexec."""
        blcmd = ''
        for cmd in ['update-grub', 'grub-mkconfig', 'grub2-mkconfig']:
            if blcmd:
                break
            blcmd = self.getExec(cmd)
        if not blcmd:
            doError('[GRUB] missing update command')
        if not os.path.exists('/etc/default/grub'):
            doError('[GRUB] missing /etc/default/grub')
        if 'grub2' in blcmd:
            cfg = '/boot/grub2/grub.cfg'
        else:
            cfg = '/boot/grub/grub.cfg'
        if not os.path.exists(cfg):
            doError('[GRUB] missing %s' % cfg)
        if 'update-grub' in blcmd:
            self.blexec = [blcmd]
        else:
            self.blexec = [blcmd, '-o', cfg]
    def getBootLoader(self):
        """Dispatch boot loader setup (only grub is supported)."""
        if self.bootloader == 'grub':
            self.blGrub()
        else:
            doError('unknown boot loader: %s' % self.bootloader)
    def writeDatafileHeader(self, filename):
        """Write the standard stamp/sysinfo/cmdline header to a data file."""
        # FIX: both files opened via context managers (the original leaked
        # the /proc/cmdline handle)
        with open('/proc/cmdline', 'r') as fp:
            self.kparams = fp.read().strip()
        with open(filename, 'w') as fp:
            fp.write(self.teststamp+'\n')
            fp.write(self.sysstamp+'\n')
            fp.write('# command | %s\n' % self.cmdline)
            fp.write('# kparams | %s\n' % self.kparams)
sysvals = SystemValues()  # single shared instance used throughout this tool
# Class: Data
# Description:
# The primary container for test data.
class Data(aslib.Data):
    """Primary container for the test data of one boot timeline."""
    dmesg = {}          # root data structure
    start = 0.0         # test start
    end = 0.0           # test end
    dmesgtext = []      # dmesg text file in memory
    testnumber = 0
    idstr = ''
    html_device_id = 0
    valid = False
    tUserMode = 0.0
    boottime = ''
    phases = ['kernel', 'user']
    do_one_initcall = False
    def __init__(self, num):
        self.testnumber = num
        self.idstr = 'a'
        self.dmesgtext = []
        self.dmesg = {
            'kernel': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0,
                'order': 0, 'color': 'linear-gradient(to bottom, #fff, #bcf)'},
            'user': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0,
                'order': 1, 'color': '#fff'}
        }
    def deviceTopology(self):
        """No device topology for boot data; return an empty string."""
        return ''
    def newAction(self, phase, name, pid, start, end, ret, ulen):
        """Add a new initcall ("device") entry to *phase*.

        Returns the (possibly de-duplicated) name used as the dict key.
        """
        # new device callback for a specific phase
        self.html_device_id += 1
        devid = '%s%d' % (self.idstr, self.html_device_id)
        devlist = self.dmesg[phase]['list']
        length = -1.0
        if(start >= 0 and end >= 0):
            length = end - start
        # de-duplicate: repeated initcall names get a [2], [3], ... suffix
        i = 2
        origname = name
        while(name in devlist):
            name = '%s[%d]' % (origname, i)
            i += 1
        devlist[name] = {'name': name, 'start': start, 'end': end,
            'pid': pid, 'length': length, 'row': 0, 'id': devid,
            'ret': ret, 'ulen': ulen }
        return name
    def deviceMatch(self, pid, cg):
        """Attach callgraph *cg* to the initcall entry it belongs to.

        Returns the matched device name, or '' if nothing matches.
        """
        if cg.end - cg.start == 0:
            return ''
        # FIX: iterate self.phases -- the original referenced an undefined
        # module-level name 'data' here, which raises NameError at runtime
        for p in self.phases:
            devlist = self.dmesg[p]['list']
            for devname in devlist:
                dev = devlist[devname]
                if pid != dev['pid']:
                    continue
                if cg.name == 'do_one_initcall':
                    # the callgraph covers the whole initcall
                    if(cg.start <= dev['start'] and cg.end >= dev['end'] and dev['length'] > 0):
                        dev['ftrace'] = cg
                        self.do_one_initcall = True
                        return devname
                else:
                    # the callgraph is nested inside the initcall window
                    if(cg.start > dev['start'] and cg.end < dev['end']):
                        if 'ftraces' not in dev:
                            dev['ftraces'] = []
                        dev['ftraces'].append(cg)
                        return devname
        return ''
    def printDetails(self):
        """Print a verbose summary of the timeline via sysvals.vprint."""
        sysvals.vprint('Timeline Details:')
        sysvals.vprint(' Host: %s' % sysvals.hostname)
        sysvals.vprint(' Kernel: %s' % sysvals.kernel)
        sysvals.vprint(' Test time: %s' % sysvals.testtime)
        sysvals.vprint(' Boot time: %s' % self.boottime)
        for phase in self.phases:
            dc = len(self.dmesg[phase]['list'])
            sysvals.vprint('%9s mode: %.3f - %.3f (%d initcalls)' % (phase,
                self.dmesg[phase]['start']*1000,
                self.dmesg[phase]['end']*1000, dc))
# ----------------- FUNCTIONS --------------------
# Function: parseKernelLog
# Description:
# parse a kernel log for boot data
def parseKernelLog():
    """Parse the dmesg log (file or live ``dmesg``) for boot timing data.

    Returns a Data object with one entry per initcall; Data.valid is True
    only if at least one completed initcall was found.
    """
    sysvals.vprint('Analyzing the dmesg data (%s)...' % \
        os.path.basename(sysvals.dmesgfile))
    phase = 'kernel'
    data = Data(0)
    data.dmesg['kernel']['start'] = data.start = ktime = 0.0
    sysvals.stamp = {
        'time': datetime.now().strftime('%B %d %Y, %I:%M:%S %p'),
        'host': sysvals.hostname,
        'mode': 'boot', 'kernel': ''}
    tp = aslib.TestProps()
    devtemp = dict()
    if(sysvals.dmesgfile):
        lf = open(sysvals.dmesgfile, 'rb')
    else:
        lf = Popen('dmesg', stdout=PIPE).stdout
    for line in lf:
        line = aslib.ascii(line).replace('\r\n', '')
        # grab the stamp and sysinfo
        if re.match(tp.stampfmt, line):
            tp.stamp = line
            continue
        elif re.match(tp.sysinfofmt, line):
            tp.sysinfo = line
            continue
        elif re.match(tp.cmdlinefmt, line):
            tp.cmdline = line
            continue
        elif re.match(tp.kparamsfmt, line):
            tp.kparams = line
            continue
        idx = line.find('[')
        if idx > 1:
            line = line[idx:]
        # FIX: raw strings for all regexes below (the originals contained
        # invalid escape sequences like '\[' and '\+' which warn on modern
        # Python); the patterns themselves are unchanged
        m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
        if(not m):
            continue
        ktime = float(m.group('ktime'))
        # ignore anything after two minutes, boot is long over
        if(ktime > 120):
            break
        msg = m.group('msg')
        data.dmesgtext.append(line)
        if(ktime == 0.0 and re.match(r'^Linux version .*', msg)):
            if(not sysvals.stamp['kernel']):
                sysvals.stamp['kernel'] = sysvals.kernelVersion(msg)
            continue
        m = re.match(r'.* setting system clock to (?P<d>[0-9\-]*)[ A-Z](?P<t>[0-9:]*) UTC.*', msg)
        if(m):
            # derive wall-clock boot time from the clock-set message
            bt = datetime.strptime(m.group('d')+' '+m.group('t'), '%Y-%m-%d %H:%M:%S')
            bt = bt - timedelta(seconds=int(ktime))
            data.boottime = bt.strftime('%Y-%m-%d_%H:%M:%S')
            sysvals.stamp['time'] = bt.strftime('%B %d %Y, %I:%M:%S %p')
            continue
        m = re.match(r'^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
        if(m):
            # initcall entry: remember its start time and pid
            func = m.group('f')
            pid = int(m.group('p'))
            devtemp[func] = (ktime, pid)
            continue
        m = re.match(r'^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
        if(m):
            # initcall exit: pair it with the remembered entry
            data.valid = True
            data.end = ktime
            f, r, t = m.group('f', 'r', 't')
            if(f in devtemp):
                start, pid = devtemp[f]
                data.newAction(phase, f, pid, start, ktime, int(r), int(t))
                del devtemp[f]
            continue
        if(re.match(r'^Freeing unused kernel .*', msg)):
            # kernel initcalls done, user (init) phase starts here
            data.tUserMode = ktime
            data.dmesg['kernel']['end'] = ktime
            data.dmesg['user']['start'] = ktime
            phase = 'user'
    if tp.stamp:
        sysvals.stamp = 0
        tp.parseStamp(data, sysvals)
    data.dmesg['user']['end'] = data.end
    lf.close()
    return data
# Function: parseTraceLog
# Description:
# Check if trace is available and copy to a temp file
def parseTraceLog(data):
    """Parse the ftrace function_graph log and attach callgraphs to *data*.

    *data* is the Data object produced by parseKernelLog(); each completed
    callgraph is matched to its initcall entry via data.deviceMatch().
    """
    sysvals.vprint('Analyzing the ftrace data (%s)...' % \
        os.path.basename(sysvals.ftracefile))
    # if available, calculate cgfilter allowable ranges
    cgfilter = []
    if len(sysvals.cgfilter) > 0:
        for p in data.phases:
            list = data.dmesg[p]['list']
            for i in sysvals.cgfilter:
                if i in list:
                    cgfilter.append([list[i]['start']-0.0001,
                        list[i]['end']+0.0001])
    # parse the trace log
    ftemp = dict()
    tp = aslib.TestProps()
    tp.setTracerType('function_graph')
    tf = open(sysvals.ftracefile, 'r')
    for line in tf:
        if line[0] == '#':
            continue
        m = re.match(tp.ftrace_line_fmt, line.strip())
        if(not m):
            continue
        m_time, m_proc, m_pid, m_msg, m_dur = \
            m.group('time', 'proc', 'pid', 'msg', 'dur')
        t = float(m_time)
        # drop lines outside every allowed callgraph time range
        if len(cgfilter) > 0:
            allow = False
            for r in cgfilter:
                if t >= r[0] and t < r[1]:
                    allow = True
                    break
            if not allow:
                continue
        # stop once past the end of the boot timeline
        if t > data.end:
            break
        if(m_time and m_pid and m_msg):
            t = aslib.FTraceLine(m_time, m_msg, m_dur)
            pid = int(m_pid)
        else:
            continue
        # only function call/return lines contribute to callgraphs
        if t.fevent or t.fkprobe:
            continue
        # one callgraph list per (process name, pid) pair
        key = (m_proc, pid)
        if(key not in ftemp):
            ftemp[key] = []
            ftemp[key].append(aslib.FTraceCallGraph(pid, sysvals))
        cg = ftemp[key][-1]
        res = cg.addLine(t)
        if(res != 0):
            # non-zero: current graph is complete; start a fresh one
            ftemp[key].append(aslib.FTraceCallGraph(pid, sysvals))
        if(res == -1):
            # -1: the line belongs to the newly started graph
            ftemp[key][-1].addLine(t)
    tf.close()
    # add the callgraph data to the device hierarchy
    for key in ftemp:
        proc, pid = key
        for cg in ftemp[key]:
            if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
                continue
            if(not cg.postProcess()):
                pprint('Sanity check failed for %s-%d' % (proc, pid))
                continue
            # match cg data to devices
            devname = data.deviceMatch(pid, cg)
            if not devname:
                kind = 'Orphan'
                if cg.partial:
                    kind = 'Partial'
                sysvals.vprint('%s callgraph found for %s %s-%d [%f - %f]' %\
                    (kind, cg.name, proc, pid, cg.start, cg.end))
            elif len(cg.list) > 1000000:
                pprint('WARNING: the callgraph found for %s is massive! (%d lines)' %\
                    (devname, len(cg.list)))
# Function: retrieveLogs
# Description:
# Create copies of dmesg and/or ftrace for later processing
def retrieveLogs():
    """Create copies of dmesg and/or ftrace for later processing."""
    # check ftrace is configured first
    if sysvals.useftrace:
        tracer = sysvals.fgetVal('current_tracer').strip()
        if tracer != 'function_graph':
            doError('ftrace not configured for a boot callgraph')
    # create the folder and get dmesg
    sysvals.systemInfo(aslib.dmidecode(sysvals.mempath))
    sysvals.initTestOutput('boot')
    sysvals.writeDatafileHeader(sysvals.dmesgfile)
    # append to the header written above (hence '>>')
    call('dmesg >> '+sysvals.dmesgfile, shell=True)
    if not sysvals.useftrace:
        return
    # get ftrace
    sysvals.writeDatafileHeader(sysvals.ftracefile)
    call('cat '+sysvals.tpath+'trace >> '+sysvals.ftracefile, shell=True)
# Function: colorForName
# Description:
# Generate a repeatable color from a list for a given name
def colorForName(name):
    """Pick a repeatable (css-class, color) pair for *name*.

    The same name always maps to the same entry of a fixed 10-color palette.
    """
    palette = [
        ('c1', '#ec9999'),
        ('c2', '#ffc1a6'),
        ('c3', '#fff0a6'),
        ('c4', '#adf199'),
        ('c5', '#9fadea'),
        ('c6', '#a699c1'),
        ('c7', '#ad99b4'),
        ('c8', '#eaffea'),
        ('c9', '#dcecfb'),
        ('c10', '#ffffea')
    ]
    # sum of character codes, reduced modulo the palette size
    total = sum(ord(ch) for ch in name)
    return palette[total % len(palette)]
def cgOverview(cg, minlen):
    """Summarize a callgraph's top-level (depth 1) calls.

    Returns (large, stats): *large* is the list of depth-1 calls at least
    *minlen* long; *stats* maps call name to [total time in ms, call count]
    over all depth-1 calls.
    """
    stats = dict()
    large = []
    for entry in cg.list:
        if not (entry.fcall and entry.depth == 1):
            continue
        if entry.length >= minlen:
            large.append(entry)
        acc = stats.setdefault(entry.name, [0, 0.0])
        acc[0] += (entry.length * 1000.0)
        acc[1] += 1
    return (large, stats)
# Function: createBootGraph
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createBootGraph(data):
    """Create the output html file from the resident test data.

    Arguments:
        data: Data object from parseKernelLog or parseTraceLog
    Returns:
        True if the html file was created, False if it failed
    """
    # html function templates
    html_srccall = '<div id={6} title="{5}" class="srccall" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;">{0}</div>\n'
    html_timetotal = '<table class="time1">\n<tr>'\
        '<td class="blue">Init process starts @ <b>{0} ms</b></td>'\
        '<td class="blue">Last initcall ends @ <b>{1} ms</b></td>'\
        '</tr>\n</table>\n'

    # device timeline
    devtl = aslib.Timeline(100, 20)

    # write the test title and general info header
    devtl.createHeader(sysvals, sysvals.stamp)

    # Generate the header for this timeline
    t0 = data.start
    tMax = data.end
    tTotal = tMax - t0
    if(tTotal == 0):
        pprint('ERROR: No timeline data')
        return False
    user_mode = '%.0f'%(data.tUserMode*1000)
    last_init = '%.0f'%(tTotal*1000)
    devtl.html += html_timetotal.format(user_mode, last_init)

    # determine the maximum number of rows we need to draw
    devlist = []
    for p in data.phases:
        phaselist = data.dmesg[p]['list']
        for devname in phaselist:
            d = aslib.DevItem(0, p, phaselist[devname])
            devlist.append(d)
        devtl.getPhaseRows(devlist, 0, 'start')
    devtl.calcTotalRows()

    # draw the timeline background
    devtl.createZoomBox()
    devtl.html += devtl.html_tblock.format('boot', '0', '100', devtl.scaleH)
    for p in data.phases:
        phase = data.dmesg[p]
        length = phase['end']-phase['start']
        left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
        width = '%.3f' % ((length*100.0)/tTotal)
        devtl.html += devtl.html_phase.format(left, width, \
            '%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
            phase['color'], '')

    # draw the device timeline
    num = 0
    devstats = dict()
    for phase in data.phases:
        phaselist = data.dmesg[phase]['list']
        for devname in sorted(phaselist):
            cls, color = colorForName(devname)
            dev = phaselist[devname]
            info = '@|%.3f|%.3f|%.3f|%d' % (dev['start']*1000.0, dev['end']*1000.0,
                dev['ulen']/1000.0, dev['ret'])
            devstats[dev['id']] = {'info':info}
            dev['color'] = color
            height = devtl.phaseRowHeight(0, phase, dev['row'])
            top = '%.6f' % ((dev['row']*height) + devtl.scaleH)
            left = '%.6f' % (((dev['start']-t0)*100)/tTotal)
            width = '%.6f' % (((dev['end']-dev['start'])*100)/tTotal)
            length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
            devtl.html += devtl.html_device.format(dev['id'],
                devname+length+phase+'_mode', left, top, '%.3f'%height,
                width, devname, ' '+cls, '')
            rowtop = devtl.phaseRowTop(0, phase, dev['row'])
            height = '%.6f' % (devtl.rowH / 2)
            top = '%.6f' % (rowtop + devtl.scaleH + (devtl.rowH / 2))
            if data.do_one_initcall:
                # single full-coverage callgraph: draw its large children
                if('ftrace' not in dev):
                    continue
                cg = dev['ftrace']
                large, stats = cgOverview(cg, 0.001)
                devstats[dev['id']]['fstat'] = stats
                for l in large:
                    left = '%f' % (((l.time-t0)*100)/tTotal)
                    width = '%f' % (l.length*100/tTotal)
                    title = '%s (%0.3fms)' % (l.name, l.length * 1000.0)
                    devtl.html += html_srccall.format(l.name, left,
                        top, height, width, title, 'x%d'%num)
                    num += 1
                continue
            # otherwise draw each nested callgraph as a srccall div
            if('ftraces' not in dev):
                continue
            for cg in dev['ftraces']:
                left = '%f' % (((cg.start-t0)*100)/tTotal)
                width = '%f' % ((cg.end-cg.start)*100/tTotal)
                cglen = (cg.end - cg.start) * 1000.0
                title = '%s (%0.3fms)' % (cg.name, cglen)
                cg.id = 'x%d' % num
                devtl.html += html_srccall.format(cg.name, left,
                    top, height, width, title, dev['id']+cg.id)
                num += 1

    # draw the time scale, try to make the number of labels readable
    devtl.createTimeScale(t0, tMax, tTotal, 'boot')
    devtl.html += '</div>\n'

    # timeline is finished
    devtl.html += '</div>\n</div>\n'

    # draw a legend which describes the phases by color
    devtl.html += '<div class="legend">\n'
    pdelta = 20.0
    pmargin = 36.0
    for phase in data.phases:
        order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
        devtl.html += devtl.html_legend.format(order, \
            data.dmesg[phase]['color'], phase+'_mode', phase[0])
    devtl.html += '</div>\n'

    hf = open(sysvals.htmlfile, 'w')

    # add the css
    extra = '\
    .c1 {background:rgba(209,0,0,0.4);}\n\
    .c2 {background:rgba(255,102,34,0.4);}\n\
    .c3 {background:rgba(255,218,33,0.4);}\n\
    .c4 {background:rgba(51,221,0,0.4);}\n\
    .c5 {background:rgba(17,51,204,0.4);}\n\
    .c6 {background:rgba(34,0,102,0.4);}\n\
    .c7 {background:rgba(51,0,68,0.4);}\n\
    .c8 {background:rgba(204,255,204,0.4);}\n\
    .c9 {background:rgba(169,208,245,0.4);}\n\
    .c10 {background:rgba(255,255,204,0.4);}\n\
    .vt {transform:rotate(-60deg);transform-origin:0 0;}\n\
    table.fstat {table-layout:fixed;padding:150px 15px 0 0;font-size:10px;column-width:30px;}\n\
    .fstat th {width:55px;}\n\
    .fstat td {text-align:left;width:35px;}\n\
    .srccall {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
    .srccall:hover {color:white;font-weight:bold;border:1px solid white;}\n'
    aslib.addCSS(hf, sysvals, 1, False, extra)

    # write the device timeline
    hf.write(devtl.html)

    # add boot specific html
    statinfo = 'var devstats = {\n'
    for n in sorted(devstats):
        statinfo += '\t"%s": [\n\t\t"%s",\n' % (n, devstats[n]['info'])
        if 'fstat' in devstats[n]:
            funcs = devstats[n]['fstat']
            for f in sorted(funcs, key=lambda k:(funcs[k], k), reverse=True):
                if funcs[f][0] < 0.01 and len(funcs) > 10:
                    break
                statinfo += '\t\t"%f|%s|%d",\n' % (funcs[f][0], f, funcs[f][1])
        statinfo += '\t],\n'
    statinfo += '};\n'
    html = \
        '<div id="devicedetailtitle"></div>\n'\
        '<div id="devicedetail" style="display:none;">\n'\
        '<div id="devicedetail0">\n'
    for p in data.phases:
        phase = data.dmesg[p]
        html += devtl.html_phaselet.format(p+'_mode', '0', '100', phase['color'])
    html += '</div>\n</div>\n'\
        '<script type="text/javascript">\n'+statinfo+\
        '</script>\n'
    hf.write(html)

    # add the callgraph html
    if(sysvals.usecallgraph):
        aslib.addCallgraphs(sysvals, hf, data)

    # add the test log as a hidden div
    if sysvals.testlog and sysvals.logmsg:
        hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
    # add the dmesg log as a hidden div
    if sysvals.dmesglog:
        hf.write('<div id="dmesglog" style="display:none;">\n')
        for line in data.dmesgtext:
            # FIX: escape html special characters -- the original had the
            # no-op replace('<', '<') / replace('>', '>'), which left raw
            # markup characters in the embedded log
            line = line.replace('<', '&lt;').replace('>', '&gt;')
            hf.write(line)
        hf.write('</div>\n')

    # write the footer and close
    aslib.addScriptCode(hf, [data])
    hf.write('</body>\n</html>\n')
    hf.close()
    return True
# Function: updateCron
# Description:
# (restore=False) Set the tool to run automatically on reboot
# (restore=True) Restore the original crontab
def updateCron(restore=False):
    """Install or remove this tool's @reboot crontab entry.

    (restore=False) Set the tool to run automatically on reboot
    (restore=True) Restore the original crontab
    """
    if not restore:
        sysvals.rootUser(True)
    crondir = '/var/spool/cron/crontabs/'
    if not os.path.exists(crondir):
        crondir = '/var/spool/cron/'
    if not os.path.exists(crondir):
        doError('%s not found' % crondir)
    cronfile = crondir+'root'
    backfile = crondir+'root-analyze_boot-backup'
    cmd = sysvals.getExec('crontab')
    if not cmd:
        doError('crontab not found')
    # on restore: move the backup cron back into place
    if restore:
        if os.path.exists(backfile):
            shutil.move(backfile, cronfile)
            call([cmd, cronfile])
        return
    # backup current cron and install new one with reboot
    if os.path.exists(cronfile):
        shutil.move(cronfile, backfile)
    else:
        # no crontab yet: create an empty backup so restore works
        fp = open(backfile, 'w')
        fp.close()
    res = -1
    try:
        fp = open(backfile, 'r')
        op = open(cronfile, 'w')
        # copy every line except any reboot job we previously installed
        for line in fp:
            if not sysvals.myCronJob(line):
                op.write(line)
                continue
        fp.close()
        op.write('@reboot python %s\n' % sysvals.cronjobCmdString())
        op.close()
        res = call([cmd, cronfile])
    except Exception as e:
        # on any failure put the original crontab back
        pprint('Exception: %s' % str(e))
        shutil.move(backfile, cronfile)
        res = -1
    if res != 0:
        doError('crontab failed')
# Function: updateGrub
# Description:
# update grub.cfg for all kernels with our parameters
def updateGrub(restore=False):
	"""Add (or restore) our kernel parameters in /etc/default/grub.

	restore=True simply re-runs the boot loader exec to regenerate the
	config from the (already restored) grub file. Otherwise the target
	GRUB_CMDLINE_LINUX_DEFAULT option is extracted (including backslash
	line continuations), our parameters are appended, and grub is rebuilt.
	"""
	# call update-grub on restore
	if restore:
		try:
			call(sysvals.blexec, stderr=PIPE, stdout=PIPE,
				env={'PATH': '.:/sbin:/usr/sbin:/usr/bin:/sbin:/bin'})
		except Exception as e:
			pprint('Exception: %s\n' % str(e))
		return
	# extract the option and create a grub config without it
	sysvals.rootUser(True)
	tgtopt = 'GRUB_CMDLINE_LINUX_DEFAULT'
	cmdline = ''
	grubfile = '/etc/default/grub'
	tempfile = '/etc/default/grub.analyze_boot'
	# work on a moved copy so the original can be restored on any failure
	shutil.move(grubfile, tempfile)
	res = -1
	try:
		fp = open(tempfile, 'r')
		op = open(grubfile, 'w')
		cont = False
		for line in fp:
			line = line.strip()
			if len(line) == 0 or line[0] == '#':
				continue
			opt = line.split('=')[0].strip()
			if opt == tgtopt:
				# capture the option value; a trailing backslash means
				# the value continues on the next line
				cmdline = line.split('=', 1)[1].strip('\\')
				if line[-1] == '\\':
					cont = True
			elif cont:
				cmdline += line.strip('\\')
				if line[-1] != '\\':
					cont = False
			else:
				op.write('%s\n' % line)
		fp.close()
		# if the target option value is in quotes, strip them
		sp = '"'
		val = cmdline.strip()
		if val and (val[0] == '\'' or val[0] == '"'):
			sp = val[0]
			val = val.strip(sp)
		cmdline = val
		# append our cmd line options
		if len(cmdline) > 0:
			cmdline += ' '
		cmdline += sysvals.kernelParams()
		# write out the updated target option
		op.write('\n%s=%s%s%s\n' % (tgtopt, sp, cmdline, sp))
		op.close()
		res = call(sysvals.blexec)
		# the rewritten grub file is no longer needed once grub is rebuilt
		os.remove(grubfile)
	except Exception as e:
		pprint('Exception: %s' % str(e))
		res = -1
	# cleanup: put the original grub file back in place
	shutil.move(tempfile, grubfile)
	if res != 0:
		doError('update grub failed')
# Function: updateKernelParams
# Description:
# update boot conf for all kernels with our parameters
def updateKernelParams(restore=False):
	"""Apply (or restore) our kernel boot parameters via the boot loader."""
	# detect which boot loader is in use, then dispatch to its updater
	sysvals.getBootLoader()
	if sysvals.bootloader == 'grub':
		updateGrub(restore)
# Function: doError
# Description:
#	 generic error function for catastrophic failures
# Arguments:
#	 msg: the error message to print
#	 help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
	"""Print an error (optionally preceded by the help text), record it
	in the result file, and exit.

	Args:
		msg: the error message to print
		help: True if printHelp should be called first
	"""
	if help:
		printHelp()
	pprint('ERROR: %s\n' % msg)
	sysvals.outputResult({'error':msg})
	# NOTE(review): sys.exit() with no argument exits with status 0 even
	# on error; kept as-is since callers/scripts may depend on it
	sys.exit()
# Function: printHelp
# Description:
# print out the help text
def printHelp():
	"""Print the command-line usage text for bootgraph."""
	pprint('\n%s v%s\n'\
	'Usage: bootgraph <options> <command>\n'\
	'\n'\
	'Description:\n'\
	' This tool reads in a dmesg log of linux kernel boot and\n'\
	' creates an html representation of the boot timeline up to\n'\
	' the start of the init process.\n'\
	'\n'\
	' If no specific command is given the tool reads the current dmesg\n'\
	' and/or ftrace log and creates a timeline\n'\
	'\n'\
	' Generates output files in subdirectory: boot-yymmdd-HHMMSS\n'\
	' HTML output: <hostname>_boot.html\n'\
	' raw dmesg output: <hostname>_boot_dmesg.txt\n'\
	' raw ftrace output: <hostname>_boot_ftrace.txt\n'\
	'\n'\
	'Options:\n'\
	' -h Print this help text\n'\
	' -v Print the current tool version\n'\
	' -verbose Print extra information during execution and analysis\n'\
	' -addlogs Add the dmesg log to the html output\n'\
	' -result fn Export a results table to a text file for parsing.\n'\
	' -o name Overrides the output subdirectory name when running a new test\n'\
	' default: boot-{date}-{time}\n'\
	' [advanced]\n'\
	' -fstat Use ftrace to add function detail and statistics (default: disabled)\n'\
	' -f/-callgraph Add callgraph detail, can be very large (default: disabled)\n'\
	' -maxdepth N limit the callgraph data to N call levels (default: 2)\n'\
	' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)\n'\
	' -timeprec N Number of significant digits in timestamps (0:S, 3:ms, [6:us])\n'\
	' -expandcg pre-expand the callgraph data in the html output (default: disabled)\n'\
	' -func list Limit ftrace to comma-delimited list of functions (default: do_one_initcall)\n'\
	' -cgfilter S Filter the callgraph output in the timeline\n'\
	' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\
	' -bl name Use the following boot loader for kernel params (default: grub)\n'\
	' -reboot Reboot the machine automatically and generate a new timeline\n'\
	' -manual Show the steps to generate a new timeline manually (used with -reboot)\n'\
	'\n'\
	'Other commands:\n'\
	' -flistall Print all functions capable of being captured in ftrace\n'\
	' -sysinfo Print out system info extracted from BIOS\n'\
	' -which exec Print an executable path, should function even without PATH\n'\
	' [redo]\n'\
	' -dmesg file Create HTML output using dmesg input (used with -ftrace)\n'\
	' -ftrace file Create HTML output using ftrace input (used with -dmesg)\n'\
	'' % (sysvals.title, sysvals.version))
	return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
	# loop through the command line arguments
	cmd = ''
	testrun = True
	switchoff = ['disable', 'off', 'false', '0']
	simplecmds = ['-sysinfo', '-kpupdate', '-flistall', '-checkbl']
	cgskip = ''
	# callgraph mode defaults to the stock cgskip file unless overridden
	if '-f' in sys.argv:
		cgskip = sysvals.configFile('cgskip.txt')
	args = iter(sys.argv[1:])
	mdset = False
	for arg in args:
		if(arg == '-h'):
			printHelp()
			sys.exit()
		elif(arg == '-v'):
			pprint("Version %s" % sysvals.version)
			sys.exit()
		elif(arg == '-verbose'):
			sysvals.verbose = True
		elif(arg in simplecmds):
			cmd = arg[1:]
		elif(arg == '-fstat'):
			sysvals.useftrace = True
		elif(arg == '-callgraph' or arg == '-f'):
			sysvals.useftrace = True
			sysvals.usecallgraph = True
		elif(arg == '-cgdump'):
			sysvals.cgdump = True
		elif(arg == '-mincg'):
			sysvals.mincglen = aslib.getArgFloat('-mincg', args, 0.0, 10000.0)
		elif(arg == '-cgfilter'):
			try:
				val = next(args)
			except:
				doError('No callgraph functions supplied', True)
			sysvals.setCallgraphFilter(val)
		elif(arg == '-cgskip'):
			try:
				val = next(args)
			except:
				doError('No file supplied', True)
			if val.lower() in switchoff:
				cgskip = ''
			else:
				cgskip = sysvals.configFile(val)
				if(not cgskip):
					doError('%s does not exist' % cgskip)
		elif(arg == '-bl'):
			try:
				val = next(args)
			except:
				doError('No boot loader name supplied', True)
			if val.lower() not in ['grub']:
				doError('Unknown boot loader: %s' % val, True)
			sysvals.bootloader = val.lower()
		elif(arg == '-timeprec'):
			sysvals.setPrecision(aslib.getArgInt('-timeprec', args, 0, 6))
		elif(arg == '-maxdepth'):
			mdset = True
			sysvals.max_graph_depth = aslib.getArgInt('-maxdepth', args, 0, 1000)
		elif(arg == '-func'):
			try:
				val = next(args)
			except:
				doError('No filter functions supplied', True)
			sysvals.useftrace = True
			sysvals.usecallgraph = True
			sysvals.rootCheck(True)
			sysvals.setGraphFilter(val)
		elif(arg == '-ftrace'):
			try:
				val = next(args)
			except:
				doError('No ftrace file supplied', True)
			if(os.path.exists(val) == False):
				doError('%s does not exist' % val)
			# input files are provided: this is a re-analysis, not a new run
			testrun = False
			sysvals.ftracefile = val
		elif(arg == '-addlogs'):
			sysvals.dmesglog = True
		elif(arg == '-expandcg'):
			sysvals.cgexp = True
		elif(arg == '-dmesg'):
			try:
				val = next(args)
			except:
				doError('No dmesg file supplied', True)
			if(os.path.exists(val) == False):
				doError('%s does not exist' % val)
			testrun = False
			sysvals.dmesgfile = val
		elif(arg == '-o'):
			try:
				val = next(args)
			except:
				doError('No subdirectory name supplied', True)
			sysvals.testdir = sysvals.setOutputFolder(val)
		elif(arg == '-result'):
			try:
				val = next(args)
			except:
				doError('No result file supplied', True)
			sysvals.result = val
		elif(arg == '-reboot'):
			sysvals.reboot = True
		elif(arg == '-manual'):
			sysvals.reboot = True
			sysvals.manual = True
		# remaining options are only for cron job use
		elif(arg == '-cronjob'):
			sysvals.iscronjob = True
		elif(arg == '-which'):
			try:
				val = next(args)
			except:
				doError('No executable supplied', True)
			out = sysvals.getExec(val)
			if not out:
				print('%s not found' % val)
				sys.exit(1)
			print(out)
			sys.exit(0)
		else:
			doError('Invalid argument: '+arg, True)
	# compatibility errors and access checks
	if(sysvals.iscronjob and (sysvals.reboot or \
		sysvals.dmesgfile or sysvals.ftracefile or cmd)):
		doError('-cronjob is meant for batch purposes only')
	if(sysvals.reboot and (sysvals.dmesgfile or sysvals.ftracefile)):
		doError('-reboot and -dmesg/-ftrace are incompatible')
	if cmd or sysvals.reboot or sysvals.iscronjob or testrun:
		sysvals.rootCheck(True)
	if (testrun and sysvals.useftrace) or cmd == 'flistall':
		if not sysvals.verifyFtrace():
			doError('Ftrace is not properly enabled')
	# run utility commands
	sysvals.cpuInfo()
	if cmd != '':
		if cmd == 'kpupdate':
			updateKernelParams()
		elif cmd == 'flistall':
			for f in sysvals.getBootFtraceFilterFunctions():
				print(f)
		elif cmd == 'checkbl':
			sysvals.getBootLoader()
			pprint('Boot Loader: %s\n%s' % (sysvals.bootloader, sysvals.blexec))
		elif(cmd == 'sysinfo'):
			sysvals.printSystemInfo(True)
		sys.exit()
	# reboot: update grub, setup a cronjob, and reboot
	if sysvals.reboot:
		if (sysvals.useftrace or sysvals.usecallgraph) and \
			not sysvals.checkFtraceKernelVersion():
			doError('Ftrace functionality requires kernel v4.10 or newer')
		if not sysvals.manual:
			updateKernelParams()
			updateCron()
			call('reboot')
		else:
			sysvals.manualRebootRequired()
		sys.exit()
	if sysvals.usecallgraph and cgskip:
		sysvals.vprint('Using cgskip file: %s' % cgskip)
		sysvals.setCallgraphBlacklist(cgskip)
	# cronjob: remove the cronjob, grub changes, and disable ftrace
	if sysvals.iscronjob:
		updateCron(True)
		updateKernelParams(True)
		try:
			sysvals.fsetVal('0', 'tracing_on')
		except:
			pass
	# testrun: generate copies of the logs
	if testrun:
		retrieveLogs()
	else:
		sysvals.setOutputFile()
	# process the log data
	if sysvals.dmesgfile:
		if not mdset:
			sysvals.max_graph_depth = 0
		data = parseKernelLog()
		if(not data.valid):
			doError('No initcall data found in %s' % sysvals.dmesgfile)
		if sysvals.useftrace and sysvals.ftracefile:
			parseTraceLog(data)
		if sysvals.cgdump:
			data.debugPrint()
			sys.exit()
	else:
		doError('dmesg file required')
	sysvals.vprint('Creating the html timeline (%s)...' % sysvals.htmlfile)
	sysvals.vprint('Command:\n %s' % sysvals.cmdline)
	sysvals.vprint('Kernel parameters:\n %s' % sysvals.kparams)
	data.printDetails()
	createBootGraph(data)
	# if running as root, change output dir owner to sudo_user
	if testrun and os.path.isdir(sysvals.testdir) and \
		os.getuid() == 0 and 'SUDO_USER' in os.environ:
		cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
		call(cmd.format(os.environ['SUDO_USER'], sysvals.testdir), shell=True)
	sysvals.stamp['boot'] = (data.tUserMode - data.start) * 1000
	sysvals.stamp['lastinit'] = data.end * 1000
	sysvals.outputResult(sysvals.stamp)
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/power/pm-graph/bootgraph.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Links:
# Home Page
# https://01.org/pm-graph
# Source repo
# git@github.com:intel/pm-graph
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_DEVMEM=y
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
# CONFIG_KPROBES=y
# CONFIG_KPROBES_ON_FTRACE=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
import signal
import codecs
from datetime import datetime, timedelta
import struct
import configparser
import gzip
from threading import Thread
from subprocess import call, Popen, PIPE
import base64
def pprint(msg):
	"""Print a message and flush stdout so it appears immediately."""
	sys.stdout.write('%s\n' % msg)
	sys.stdout.flush()
def ascii(text):
	"""Decode bytes to str, silently dropping any non-ASCII bytes."""
	return text.decode(encoding='ascii', errors='ignore')
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
version = '5.8'
ansi = False
rs = 0
display = ''
gzip = False
sync = False
wifi = False
verbose = False
testlog = True
dmesglog = True
ftracelog = False
acpidebug = True
tstat = True
mindevlen = 0.0001
mincglen = 0.0
cgphase = ''
cgtest = -1
cgskip = ''
maxfail = 0
multitest = {'run': False, 'count': 1000000, 'delay': 0}
max_graph_depth = 0
callloopmaxgap = 0.0001
callloopmaxlen = 0.005
bufsize = 0
cpucount = 0
memtotal = 204800
memfree = 204800
srgap = 0
cgexp = False
testdir = ''
outdir = ''
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
pmdpath = '/sys/power/pm_debug_messages'
acpipath='/sys/module/acpi/parameters/debug_level'
traceevents = [
'suspend_resume',
'wakeup_source_activate',
'wakeup_source_deactivate',
'device_pm_callback_end',
'device_pm_callback_start'
]
logmsg = ''
testcommand = ''
mempath = '/dev/mem'
powerfile = '/sys/power/state'
mempowerfile = '/sys/power/mem_sleep'
diskpowerfile = '/sys/power/disk'
suspendmode = 'mem'
memmode = ''
diskmode = ''
hostname = 'localhost'
prefix = 'test'
teststamp = ''
sysstamp = ''
dmesgstart = 0.0
dmesgfile = ''
ftracefile = ''
htmlfile = 'output.html'
result = ''
rtcwake = True
rtcwaketime = 15
rtcpath = ''
devicefilter = []
cgfilter = []
stamp = 0
execcount = 1
x2delay = 0
skiphtml = False
usecallgraph = False
ftopfunc = 'pm_suspend'
ftop = False
usetraceevents = False
usetracemarkers = True
usekprobes = True
usedevsrc = False
useprocmon = False
notestrun = False
cgdump = False
devdump = False
mixedphaseheight = True
devprops = dict()
cfgdef = dict()
platinfo = []
predelay = 0
postdelay = 0
tmstart = 'SUSPEND START %Y%m%d-%H:%M:%S.%f'
tmend = 'RESUME COMPLETE %Y%m%d-%H:%M:%S.%f'
tracefuncs = {
'sys_sync': {},
'ksys_sync': {},
'__pm_notifier_call_chain': {},
'pm_prepare_console': {},
'pm_notifier_call_chain': {},
'freeze_processes': {},
'freeze_kernel_threads': {},
'pm_restrict_gfp_mask': {},
'acpi_suspend_begin': {},
'acpi_hibernation_begin': {},
'acpi_hibernation_enter': {},
'acpi_hibernation_leave': {},
'acpi_pm_freeze': {},
'acpi_pm_thaw': {},
'acpi_s2idle_end': {},
'acpi_s2idle_sync': {},
'acpi_s2idle_begin': {},
'acpi_s2idle_prepare': {},
'acpi_s2idle_prepare_late': {},
'acpi_s2idle_wake': {},
'acpi_s2idle_wakeup': {},
'acpi_s2idle_restore': {},
'acpi_s2idle_restore_early': {},
'hibernate_preallocate_memory': {},
'create_basic_memory_bitmaps': {},
'swsusp_write': {},
'suspend_console': {},
'acpi_pm_prepare': {},
'syscore_suspend': {},
'arch_enable_nonboot_cpus_end': {},
'syscore_resume': {},
'acpi_pm_finish': {},
'resume_console': {},
'acpi_pm_end': {},
'pm_restore_gfp_mask': {},
'thaw_processes': {},
'pm_restore_console': {},
'CPU_OFF': {
'func':'_cpu_down',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_OFF[{cpu}]'
},
'CPU_ON': {
'func':'_cpu_up',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_ON[{cpu}]'
},
}
dev_tracefuncs = {
# general wait/delay/sleep
'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 },
'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 },
'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 },
'mutex_lock_slowpath': { 'func':'__mutex_lock_slowpath', 'ub': 1 },
'acpi_os_stall': {'ub': 1},
'rt_mutex_slowlock': {'ub': 1},
# ACPI
'acpi_resume_power_resources': {},
'acpi_ps_execute_method': { 'args_x86_64': {
'fullpath':'+0(+40(%di)):string',
}},
# mei_me
'mei_reset': {},
# filesystem
'ext4_sync_fs': {},
# 80211
'ath10k_bmi_read_memory': { 'args_x86_64': {'length':'%cx:s32'} },
'ath10k_bmi_write_memory': { 'args_x86_64': {'length':'%cx:s32'} },
'ath10k_bmi_fast_download': { 'args_x86_64': {'length':'%cx:s32'} },
'iwlagn_mac_start': {},
'iwlagn_alloc_bcast_station': {},
'iwl_trans_pcie_start_hw': {},
'iwl_trans_pcie_start_fw': {},
'iwl_run_init_ucode': {},
'iwl_load_ucode_wait_alive': {},
'iwl_alive_start': {},
'iwlagn_mac_stop': {},
'iwlagn_mac_suspend': {},
'iwlagn_mac_resume': {},
'iwlagn_mac_add_interface': {},
'iwlagn_mac_remove_interface': {},
'iwlagn_mac_change_interface': {},
'iwlagn_mac_config': {},
'iwlagn_configure_filter': {},
'iwlagn_mac_hw_scan': {},
'iwlagn_bss_info_changed': {},
'iwlagn_mac_channel_switch': {},
'iwlagn_mac_flush': {},
# ATA
'ata_eh_recover': { 'args_x86_64': {'port':'+36(%di):s32'} },
# i915
'i915_gem_resume': {},
'i915_restore_state': {},
'intel_opregion_setup': {},
'g4x_pre_enable_dp': {},
'vlv_pre_enable_dp': {},
'chv_pre_enable_dp': {},
'g4x_enable_dp': {},
'vlv_enable_dp': {},
'intel_hpd_init': {},
'intel_opregion_register': {},
'intel_dp_detect': {},
'intel_hdmi_detect': {},
'intel_opregion_init': {},
'intel_fbdev_set_suspend': {},
}
infocmds = [
[0, 'kparams', 'cat', '/proc/cmdline'],
[0, 'mcelog', 'mcelog'],
[0, 'pcidevices', 'lspci', '-tv'],
[0, 'usbdevices', 'lsusb', '-t'],
[1, 'interrupts', 'cat', '/proc/interrupts'],
[1, 'wakeups', 'cat', '/sys/kernel/debug/wakeup_sources'],
[2, 'gpecounts', 'sh', '-c', 'grep -v invalid /sys/firmware/acpi/interrupts/*'],
[2, 'suspendstats', 'sh', '-c', 'grep -v invalid /sys/power/suspend_stats/*'],
[2, 'cpuidle', 'sh', '-c', 'grep -v invalid /sys/devices/system/cpu/cpu*/cpuidle/state*/s2idle/*'],
[2, 'battery', 'sh', '-c', 'grep -v invalid /sys/class/power_supply/*/*'],
]
cgblacklist = []
kprobes = dict()
timeformat = '%.3f'
cmdline = '%s %s' % \
(os.path.basename(sys.argv[0]), ' '.join(sys.argv[1:]))
sudouser = ''
	def __init__(self):
		"""Probe the host: arch, hostname, rtc path, tty, test dir, sudo user."""
		self.archargs = 'args_'+platform.machine()
		self.hostname = platform.node()
		if(self.hostname == ''):
			self.hostname = 'localhost'
		# resolve the rtc device; only use it if it has the wakealarm interface
		rtc = "rtc0"
		if os.path.exists('/dev/rtc'):
			rtc = os.readlink('/dev/rtc')
		rtc = '/sys/class/rtc/'+rtc
		if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
			os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
			self.rtcpath = rtc
		# enable ANSI color output only on an interactive terminal
		if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
			self.ansi = True
		self.testdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S')
		# remember the invoking user when run under sudo (for chown later)
		if os.getuid() == 0 and 'SUDO_USER' in os.environ and \
			os.environ['SUDO_USER']:
			self.sudouser = os.environ['SUDO_USER']
def resetlog(self):
self.logmsg = ''
self.platinfo = []
def vprint(self, msg):
self.logmsg += msg+'\n'
if self.verbose or msg.startswith('WARNING:'):
pprint(msg)
	def signalHandler(self, signum, frame):
		"""On a fatal signal, record the failure in the result file and exit."""
		# nothing to record unless a result file was requested
		if not self.result:
			return
		signame = self.signames[signum] if signum in self.signames else 'UNKNOWN'
		msg = 'Signal %s caused a tool exit, line %d' % (signame, frame.f_lineno)
		self.outputResult({'error':msg})
		sys.exit(3)
def signalHandlerInit(self):
capture = ['BUS', 'SYS', 'XCPU', 'XFSZ', 'PWR', 'HUP', 'INT', 'QUIT',
'ILL', 'ABRT', 'FPE', 'SEGV', 'TERM']
self.signames = dict()
for i in capture:
s = 'SIG'+i
try:
signum = getattr(signal, s)
signal.signal(signum, self.signalHandler)
except:
continue
self.signames[signum] = s
def rootCheck(self, fatal=True):
if(os.access(self.powerfile, os.W_OK)):
return True
if fatal:
msg = 'This command requires sysfs mount and root access'
pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
sys.exit(1)
return False
def rootUser(self, fatal=False):
if 'USER' in os.environ and os.environ['USER'] == 'root':
return True
if fatal:
msg = 'This command must be run as root'
pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
sys.exit(1)
return False
def usable(self, file):
return (os.path.exists(file) and os.path.getsize(file) > 0)
def getExec(self, cmd):
try:
fp = Popen(['which', cmd], stdout=PIPE, stderr=PIPE).stdout
out = ascii(fp.read()).strip()
fp.close()
except:
out = ''
if out:
return out
for path in ['/sbin', '/bin', '/usr/sbin', '/usr/bin',
'/usr/local/sbin', '/usr/local/bin']:
cmdfull = os.path.join(path, cmd)
if os.path.exists(cmdfull):
return cmdfull
return out
def setPrecision(self, num):
if num < 0 or num > 6:
return
self.timeformat = '%.{0}f'.format(num)
def setOutputFolder(self, value):
args = dict()
n = datetime.now()
args['date'] = n.strftime('%y%m%d')
args['time'] = n.strftime('%H%M%S')
args['hostname'] = args['host'] = self.hostname
args['mode'] = self.suspendmode
return value.format(**args)
def setOutputFile(self):
if self.dmesgfile != '':
m = re.match('(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if self.ftracefile != '':
m = re.match('(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
def systemInfo(self, info):
p = m = ''
if 'baseboard-manufacturer' in info:
m = info['baseboard-manufacturer']
elif 'system-manufacturer' in info:
m = info['system-manufacturer']
if 'system-product-name' in info:
p = info['system-product-name']
elif 'baseboard-product-name' in info:
p = info['baseboard-product-name']
if m[:5].lower() == 'intel' and 'baseboard-product-name' in info:
p = info['baseboard-product-name']
c = info['processor-version'] if 'processor-version' in info else ''
b = info['bios-version'] if 'bios-version' in info else ''
r = info['bios-release-date'] if 'bios-release-date' in info else ''
self.sysstamp = '# sysinfo | man:%s | plat:%s | cpu:%s | bios:%s | biosdate:%s | numcpu:%d | memsz:%d | memfr:%d' % \
(m, p, c, b, r, self.cpucount, self.memtotal, self.memfree)
	def printSystemInfo(self, fatal=False):
		"""Print the dmidecode BIOS fields plus cpu count and memory sizes."""
		self.rootCheck(True)
		out = dmidecode(self.mempath, fatal)
		if len(out) < 1:
			return
		fmt = '%-24s: %s'
		for name in sorted(out):
			print(fmt % (name, out[name]))
		print(fmt % ('cpucount', ('%d' % self.cpucount)))
		print(fmt % ('memtotal', ('%d kB' % self.memtotal)))
		print(fmt % ('memfree', ('%d kB' % self.memfree)))
def cpuInfo(self):
self.cpucount = 0
fp = open('/proc/cpuinfo', 'r')
for line in fp:
if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
self.cpucount += 1
fp.close()
fp = open('/proc/meminfo', 'r')
for line in fp:
m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memtotal = int(m.group('sz'))
m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memfree = int(m.group('sz'))
fp.close()
	def initTestOutput(self, name):
		"""Create the output directory and derive all test file names."""
		self.prefix = self.hostname
		v = open('/proc/version', 'r').read().strip()
		kver = v.split()[2]
		fmt = name+'-%m%d%y-%H%M%S'
		testtime = datetime.now().strftime(fmt)
		self.teststamp = \
			'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
		# logs are gzipped when requested
		ext = ''
		if self.gzip:
			ext = '.gz'
		self.dmesgfile = \
			self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'+ext
		self.ftracefile = \
			self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'+ext
		self.htmlfile = \
			self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
		if not os.path.isdir(self.testdir):
			os.makedirs(self.testdir)
		# output should belong to the invoking sudo user, not root
		self.sudoUserchown(self.testdir)
def getValueList(self, value):
out = []
for i in value.split(','):
if i.strip():
out.append(i.strip())
return out
	def setDeviceFilter(self, value):
		"""Limit the timeline to the comma-separated device names in value."""
		self.devicefilter = self.getValueList(value)
	def setCallgraphFilter(self, value):
		"""Limit callgraph output to the comma-separated functions in value."""
		self.cgfilter = self.getValueList(value)
def skipKprobes(self, value):
for k in self.getValueList(value):
if k in self.tracefuncs:
del self.tracefuncs[k]
if k in self.dev_tracefuncs:
del self.dev_tracefuncs[k]
	def setCallgraphBlacklist(self, file):
		"""Load the callgraph skip list (one function per line) from file."""
		self.cgblacklist = self.listFromFile(file)
	def rtcWakeAlarmOn(self):
		"""Arm the RTC wakealarm to fire rtcwaketime seconds from now."""
		# clear any stale alarm first
		call('echo 0 > '+self.rtcpath+'/wakealarm', shell=True)
		nowtime = open(self.rtcpath+'/since_epoch', 'r').read().strip()
		if nowtime:
			nowtime = int(nowtime)
		else:
			# if hardware time fails, use the software time
			nowtime = int(datetime.now().strftime('%s'))
		alarm = nowtime + self.rtcwaketime
		call('echo %d > %s/wakealarm' % (alarm, self.rtcpath), shell=True)
	def rtcWakeAlarmOff(self):
		"""Disarm the RTC wakealarm."""
		call('echo 0 > %s/wakealarm' % self.rtcpath, shell=True)
	def initdmesg(self):
		"""Record the newest dmesg timestamp so getdmesg can diff later."""
		# get the latest time stamp from the dmesg log
		lines = Popen('dmesg', stdout=PIPE).stdout.readlines()
		ktime = '0'
		for line in reversed(lines):
			line = ascii(line).replace('\r\n', '')
			# strip any prefix before the leading [timestamp]
			idx = line.find('[')
			if idx > 1:
				line = line[idx:]
			m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
			if(m):
				ktime = m.group('ktime')
				break
		self.dmesgstart = float(ktime)
	def getdmesg(self, testdata):
		"""Write all dmesg lines newer than dmesgstart to the dmesg log file."""
		op = self.writeDatafileHeader(self.dmesgfile, testdata)
		# store all new dmesg lines since initdmesg was called
		fp = Popen('dmesg', stdout=PIPE).stdout
		for line in fp:
			line = ascii(line).replace('\r\n', '')
			idx = line.find('[')
			if idx > 1:
				line = line[idx:]
			m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
			if(not m):
				continue
			ktime = float(m.group('ktime'))
			# only keep lines that appeared after the test started
			if ktime > self.dmesgstart:
				op.write(line)
		fp.close()
		op.close()
def listFromFile(self, file):
list = []
fp = open(file)
for i in fp.read().split('\n'):
i = i.strip()
if i and i[0] != '#':
list.append(i)
fp.close()
return list
def addFtraceFilterFunctions(self, file):
for i in self.listFromFile(file):
if len(i) < 2:
continue
self.tracefuncs[i] = dict()
	def getFtraceFilterFunctions(self, current):
		"""Print traceable functions; with current, mark our own picks.

		current=False: dump the kernel's full available_filter_functions.
		current=True: print our tracefuncs, colorized when not traceable.
		"""
		self.rootCheck(True)
		if not current:
			call('cat '+self.tpath+'available_filter_functions', shell=True)
			return
		master = self.listFromFile(self.tpath+'available_filter_functions')
		for i in sorted(self.tracefuncs):
			if 'func' in self.tracefuncs[i]:
				i = self.tracefuncs[i]['func']
			if i in master:
				print(i)
			else:
				# not available in this kernel: highlight it
				print(self.colorText(i))
	def setFtraceFilterFunctions(self, list):
		"""Write the subset of list that the kernel can trace to set_graph_function."""
		master = self.listFromFile(self.tpath+'available_filter_functions')
		flist = ''
		for i in list:
			if i not in master:
				continue
			# module functions appear as 'name [module]'; keep only the name
			if ' [' in i:
				flist += i.split(' ')[0]+'\n'
			else:
				flist += i+'\n'
		fp = open(self.tpath+'set_graph_function', 'w')
		fp.write(flist)
		fp.close()
def basicKprobe(self, name):
self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name}
	def defaultKprobe(self, name, kdata):
		"""Register a kprobe from kdata, filling missing fields with name."""
		k = kdata
		for field in ['name', 'format', 'func']:
			if field not in k:
				k[field] = name
		if self.archargs in k:
			k['args'] = k[self.archargs]
		else:
			# no args for this arch: format placeholders can't be filled,
			# so fall back to the plain probe name
			k['args'] = dict()
			k['format'] = name
		self.kprobes[name] = k
def kprobeColor(self, name):
if name not in self.kprobes or 'color' not in self.kprobes[name]:
return ''
return self.kprobes[name]['color']
	def kprobeDisplayName(self, name, dataraw):
		"""Format a kprobe event's raw arg data into its display string."""
		if name not in self.kprobes:
			self.basicKprobe(name)
		data = ''
		quote=0
		# first remove any spaces inside quotes, and the quotes
		for c in dataraw:
			if c == '"':
				quote = (quote + 1) % 2
			if quote and c == ' ':
				data += '_'
			elif c != '"':
				data += c
		fmt, args = self.kprobes[name]['format'], self.kprobes[name]['args']
		arglist = dict()
		# now process the args
		for arg in sorted(args):
			arglist[arg] = ''
			m = re.match('.* '+arg+'=(?P<arg>.*) ', data);
			if m:
				arglist[arg] = m.group('arg')
			else:
				# arg may be the last token with no trailing space
				m = re.match('.* '+arg+'=(?P<arg>.*)', data);
				if m:
					arglist[arg] = m.group('arg')
		out = fmt.format(**arglist)
		out = out.replace(' ', '_').replace('"', '')
		return out
	def kprobeText(self, kname, kprobe):
		"""Build the kprobe_events text (entry + return probes) for one kprobe."""
		name = fmt = func = kname
		args = dict()
		if 'name' in kprobe:
			name = kprobe['name']
		if 'format' in kprobe:
			fmt = kprobe['format']
		if 'func' in kprobe:
			func = kprobe['func']
		if self.archargs in kprobe:
			args = kprobe[self.archargs]
		if 'args' in kprobe:
			args = kprobe['args']
		# format placeholders are only allowed in fmt, never in the function
		if re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', func):
			doError('Kprobe "%s" has format info in the function name "%s"' % (name, func))
		# every placeholder in fmt must have a matching register arg
		for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', fmt):
			if arg not in args:
				doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
		val = 'p:%s_cal %s' % (name, func)
		for i in sorted(args):
			val += ' %s=%s' % (i, args[i])
		val += '\nr:%s_ret %s $retval\n' % (name, func)
		return val
	def addKprobes(self, output=False):
		"""Test each registered kprobe, then enable all working ones at once."""
		if len(self.kprobes) < 1:
			return
		if output:
			pprint(' kprobe functions in this kernel:')
		# first test each kprobe
		rejects = []
		# sort kprobes: trace, ub-dev, custom, dev
		kpl = [[], [], [], []]
		linesout = len(self.kprobes)
		for name in sorted(self.kprobes):
			res = self.colorText('YES', 32)
			if not self.testKprobe(name, self.kprobes[name]):
				res = self.colorText('NO')
				rejects.append(name)
			else:
				if name in self.tracefuncs:
					kpl[0].append(name)
				elif name in self.dev_tracefuncs:
					if 'ub' in self.dev_tracefuncs[name]:
						kpl[1].append(name)
					else:
						kpl[3].append(name)
				else:
					kpl[2].append(name)
			if output:
				pprint(' %s: %s' % (name, res))
		kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3]
		# remove all failed ones from the list
		for name in rejects:
			self.kprobes.pop(name)
		# set the kprobes all at once
		self.fsetVal('', 'kprobe_events')
		kprobeevents = ''
		for kp in kplist:
			kprobeevents += self.kprobeText(kp, self.kprobes[kp])
		self.fsetVal(kprobeevents, 'kprobe_events')
		if output:
			# each kprobe produces two events (entry + return)
			check = self.fgetVal('kprobe_events')
			linesack = (len(check.split('\n')) - 1) // 2
			pprint(' kprobe functions enabled: %d/%d' % (linesack, linesout))
		self.fsetVal('1', 'events/kprobes/enable')
	def testKprobe(self, kname, kprobe):
		"""Return True if the kernel accepts this kprobe definition."""
		self.fsetVal('0', 'events/kprobes/enable')
		kprobeevents = self.kprobeText(kname, kprobe)
		if not kprobeevents:
			return False
		try:
			self.fsetVal(kprobeevents, 'kprobe_events')
			check = self.fgetVal('kprobe_events')
		except:
			return False
		# the kernel drops lines it rejects; fewer lines back means failure
		linesout = len(kprobeevents.split('\n'))
		linesack = len(check.split('\n'))
		if linesack < linesout:
			return False
		return True
def setVal(self, val, file):
if not os.path.exists(file):
return False
try:
fp = open(file, 'wb', 0)
fp.write(val.encode())
fp.flush()
fp.close()
except:
return False
return True
	def fsetVal(self, val, path):
		"""Write val to a file under the tracefs root; True on success."""
		return self.setVal(val, self.tpath+path)
def getVal(self, file):
res = ''
if not os.path.exists(file):
return res
try:
fp = open(file, 'r')
res = fp.read()
fp.close()
except:
pass
return res
	def fgetVal(self, path):
		"""Read a file under the tracefs root; '' if missing/unreadable."""
		return self.getVal(self.tpath+path)
def cleanupFtrace(self):
if(self.usecallgraph or self.usetraceevents or self.usedevsrc):
self.fsetVal('0', 'events/kprobes/enable')
self.fsetVal('', 'kprobe_events')
self.fsetVal('1024', 'buffer_size_kb')
def setupAllKprobes(self):
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
def isCallgraphFunc(self, name):
if len(self.tracefuncs) < 1 and self.suspendmode == 'command':
return True
for i in self.tracefuncs:
if 'func' in self.tracefuncs[i]:
f = self.tracefuncs[i]['func']
else:
f = i
if name == f:
return True
return False
	def initFtrace(self, quiet=False):
		"""Configure ftrace for the test: clock, buffers, callgraph or kprobes."""
		if not quiet:
			sysvals.printSystemInfo(False)
			pprint('INITIALIZING FTRACE...')
		# turn trace off
		self.fsetVal('0', 'tracing_on')
		self.cleanupFtrace()
		self.testVal(self.pmdpath, 'basic', '1')
		# set the trace clock to global
		self.fsetVal('global', 'trace_clock')
		self.fsetVal('nop', 'current_tracer')
		# set trace buffer to an appropriate value
		cpus = max(1, self.cpucount)
		if self.bufsize > 0:
			tgtsize = self.bufsize
		elif self.usecallgraph or self.usedevsrc:
			# callgraph data is huge; cap by free memory and mode
			bmax = (1*1024*1024) if self.suspendmode in ['disk', 'command'] \
				else (3*1024*1024)
			tgtsize = min(self.memfree, bmax)
		else:
			tgtsize = 65536
		while not self.fsetVal('%d' % (tgtsize // cpus), 'buffer_size_kb'):
			# if the size failed to set, lower it and keep trying
			tgtsize -= 65536
			if tgtsize < 65536:
				tgtsize = int(self.fgetVal('buffer_size_kb')) * cpus
				break
		self.vprint('Setting trace buffers to %d kB (%d kB per cpu)' % (tgtsize, tgtsize/cpus))
		# initialize the callgraph trace
		if(self.usecallgraph):
			# set trace type
			self.fsetVal('function_graph', 'current_tracer')
			self.fsetVal('', 'set_ftrace_filter')
			# set trace format options
			self.fsetVal('print-parent', 'trace_options')
			self.fsetVal('funcgraph-abstime', 'trace_options')
			self.fsetVal('funcgraph-cpu', 'trace_options')
			self.fsetVal('funcgraph-duration', 'trace_options')
			self.fsetVal('funcgraph-proc', 'trace_options')
			self.fsetVal('funcgraph-tail', 'trace_options')
			self.fsetVal('nofuncgraph-overhead', 'trace_options')
			self.fsetVal('context-info', 'trace_options')
			self.fsetVal('graph-time', 'trace_options')
			self.fsetVal('%d' % self.max_graph_depth, 'max_graph_depth')
			cf = ['dpm_run_callback']
			if(self.usetraceevents):
				cf += ['dpm_prepare', 'dpm_complete']
			for fn in self.tracefuncs:
				if 'func' in self.tracefuncs[fn]:
					cf.append(self.tracefuncs[fn]['func'])
				else:
					cf.append(fn)
			if self.ftop:
				self.setFtraceFilterFunctions([self.ftopfunc])
			else:
				self.setFtraceFilterFunctions(cf)
		# initialize the kprobe trace
		elif self.usekprobes:
			for name in self.tracefuncs:
				self.defaultKprobe(name, self.tracefuncs[name])
			if self.usedevsrc:
				for name in self.dev_tracefuncs:
					self.defaultKprobe(name, self.dev_tracefuncs[name])
			if not quiet:
				pprint('INITIALIZING KPROBES...')
			self.addKprobes(self.verbose)
		if(self.usetraceevents):
			# turn trace events on
			events = iter(self.traceevents)
			for e in events:
				self.fsetVal('1', 'events/power/'+e+'/enable')
		# clear the trace buffer
		self.fsetVal('', 'trace')
def verifyFtrace(self):
# files needed for any trace data
files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
'trace_marker', 'trace_options', 'tracing_on']
# files needed for callgraph trace data
tp = self.tpath
if(self.usecallgraph):
files += [
'available_filter_functions',
'set_ftrace_filter',
'set_graph_function'
]
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def verifyKprobes(self):
# files needed for kprobes to work
files = ['kprobe_events', 'events']
tp = self.tpath
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def colorText(self, str, color=31):
if not self.ansi:
return str
return '\x1B[%d;40m%s\x1B[m' % (color, str)
	def writeDatafileHeader(self, filename, testdata):
		# open the output log and write the standard header: timestamp,
		# system stamp, command line, then optional per-test metadata
		# lines; returns the still-open file object
		fp = self.openlog(filename, 'w')
		fp.write('%s\n%s\n# command | %s\n' % (self.teststamp, self.sysstamp, self.cmdline))
		for test in testdata:
			if 'fw' in test:
				fw = test['fw']
				if(fw):
					# firmware suspend/resume values as a 2-tuple
					fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
			if 'turbo' in test:
				fp.write('# turbostat %s\n' % test['turbo'])
			if 'wifi' in test:
				fp.write('# wifi %s\n' % test['wifi'])
			# NOTE(review): assumes every test dict has an 'error' key - confirm
			if test['error'] or len(testdata) > 1:
				fp.write('# enter_sleep_error %s\n' % test['error'])
		return fp
	def sudoUserchown(self, dir):
		# when run via sudo, hand ownership of dir back to the invoking user
		if os.path.exists(dir) and self.sudouser:
			cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
			call(cmd.format(self.sudouser, dir), shell=True)
	def outputResult(self, testdata, num=0):
		# append a machine-readable pass/fail summary for one test to the
		# result file; num > 0 suffixes each key with the test number
		if not self.result:
			return
		n = ''
		if num > 0:
			n = '%d' % num
		fp = open(self.result, 'a')
		if 'error' in testdata:
			fp.write('result%s: fail\n' % n)
			fp.write('error%s: %s\n' % (n, testdata['error']))
		else:
			fp.write('result%s: pass\n' % n)
		for v in ['suspend', 'resume', 'boot', 'lastinit']:
			if v in testdata:
				fp.write('%s%s: %.3f\n' % (v, n, testdata[v]))
		# firmware values are scaled down by 1e6 before printing
		for v in ['fwsuspend', 'fwresume']:
			if v in testdata:
				fp.write('%s%s: %.3f\n' % (v, n, testdata[v] / 1000000.0))
		if 'bugurl' in testdata:
			fp.write('url%s: %s\n' % (n, testdata['bugurl']))
		fp.close()
		self.sudoUserchown(self.result)
def configFile(self, file):
dir = os.path.dirname(os.path.realpath(__file__))
if os.path.exists(file):
return file
elif os.path.exists(dir+'/'+file):
return dir+'/'+file
elif os.path.exists(dir+'/config/'+file):
return dir+'/config/'+file
return ''
	def openlog(self, filename, mode):
		# open a log file in text mode, transparently handling gzip; on
		# read, probe the file with gzip first to detect compression
		isgz = self.gzip
		if mode == 'r':
			try:
				# read a little to force the gzip header check
				with gzip.open(filename, mode+'t') as fp:
					test = fp.read(64)
				isgz = True
			except:
				isgz = False
		if isgz:
			return gzip.open(filename, mode+'t')
		return open(filename, mode)
	def putlog(self, filename, text):
		# append text to a (possibly gzipped) log file
		with self.openlog(filename, 'a') as fp:
			fp.write(text)
			fp.close()
	def dlog(self, text):
		# append a comment line to the dmesg log file
		self.putlog(self.dmesgfile, '# %s\n' % text)
	def flog(self, text):
		# append raw text to the ftrace log file
		self.putlog(self.ftracefile, text)
def b64unzip(self, data):
try:
out = codecs.decode(base64.b64decode(data), 'zlib').decode()
except:
out = data
return out
def b64zip(self, data):
out = base64.b64encode(codecs.encode(data.encode(), 'zlib')).decode()
return out
	def platforminfo(self, cmdafter):
		# add platform info on to a completed ftrace file: device names
		# gathered from /sys, plus the output of the post-test commands;
		# returns False when there is no ftrace file to append to
		if not os.path.exists(self.ftracefile):
			return False
		footer = '#\n'
		# add test command string line if need be
		if self.suspendmode == 'command' and self.testcommand:
			footer += '# platform-testcmd: %s\n' % (self.testcommand)
		# get a list of target devices from the ftrace file
		props = dict()
		tp = TestProps()
		tf = self.openlog(self.ftracefile, 'r')
		for line in tf:
			if tp.stampInfo(line, self):
				continue
			# parse only valid lines, if this is not one move on
			m = re.match(tp.ftrace_line_fmt, line)
			if(not m or 'device_pm_callback_start' not in line):
				continue
			m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
			if(not m):
				continue
			dev = m.group('d')
			if dev not in props:
				props[dev] = DevProps()
		tf.close()
		# now get the syspath for each target device
		for dirname, dirnames, filenames in os.walk('/sys/devices'):
			if(re.match('.*/power', dirname) and 'async' in filenames):
				dev = dirname.split('/')[-2]
				# keep the shortest matching syspath found
				if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
					# strip the trailing '/power' component
					props[dev].syspath = dirname[:-6]
		# now fill in the properties for our target devices
		for dev in sorted(props):
			dirname = props[dev].syspath
			if not dirname or not os.path.exists(dirname):
				continue
			with open(dirname+'/power/async') as fp:
				text = fp.read()
			props[dev].isasync = False
			if 'enabled' in text:
				props[dev].isasync = True
			fields = os.listdir(dirname)
			# pick the most descriptive name file available
			if 'product' in fields:
				with open(dirname+'/product', 'rb') as fp:
					props[dev].altname = ascii(fp.read())
			elif 'name' in fields:
				with open(dirname+'/name', 'rb') as fp:
					props[dev].altname = ascii(fp.read())
			elif 'model' in fields:
				with open(dirname+'/model', 'rb') as fp:
					props[dev].altname = ascii(fp.read())
			elif 'description' in fields:
				with open(dirname+'/description', 'rb') as fp:
					props[dev].altname = ascii(fp.read())
			elif 'id' in fields:
				with open(dirname+'/id', 'rb') as fp:
					props[dev].altname = ascii(fp.read())
			elif 'idVendor' in fields and 'idProduct' in fields:
				idv, idp = '', ''
				with open(dirname+'/idVendor', 'rb') as fp:
					idv = ascii(fp.read()).strip()
				with open(dirname+'/idProduct', 'rb') as fp:
					idp = ascii(fp.read()).strip()
				props[dev].altname = '%s:%s' % (idv, idp)
			if props[dev].altname:
				# sanitize separators used by the devinfo format
				out = props[dev].altname.strip().replace('\n', ' ')\
					.replace(',', ' ').replace(';', ' ')
				props[dev].altname = out
		# add a devinfo line to the bottom of ftrace
		out = ''
		for dev in sorted(props):
			out += props[dev].out(dev)
		footer += '# platform-devinfo: %s\n' % self.b64zip(out)
		# add a line for each of these commands with their outputs
		for name, cmdline, info in cmdafter:
			footer += '# platform-%s: %s | %s\n' % (name, cmdline, self.b64zip(info))
		self.flog(footer)
		return True
def commonPrefix(self, list):
if len(list) < 2:
return ''
prefix = list[0]
for s in list[1:]:
while s[:len(prefix)] != prefix and prefix:
prefix = prefix[:len(prefix)-1]
if not prefix:
break
if '/' in prefix and prefix[-1] != '/':
prefix = prefix[0:prefix.rfind('/')+1]
return prefix
def dictify(self, text, format):
out = dict()
header = True if format == 1 else False
delim = ' ' if format == 1 else ':'
for line in text.split('\n'):
if header:
header, out['@'] = False, line
continue
line = line.strip()
if delim in line:
data = line.split(delim, 1)
num = re.search(r'[\d]+', data[1])
if format == 2 and num:
out[data[0].strip()] = num.group()
else:
out[data[0].strip()] = data[1]
return out
	def cmdinfo(self, begin, debug=False):
		# run the configured info commands before (begin=True) or after
		# the test; after the test, delta-enabled commands are diffed
		# against their before-test output. Returns (name, cmdline, text)
		# tuples for the commands whose output should be logged.
		out = []
		if begin:
			self.cmd1 = dict()
		for cargs in self.infocmds:
			delta, name = cargs[0], cargs[1]
			cmdline, cmdpath = ' '.join(cargs[2:]), self.getExec(cargs[2])
			# skip missing tools, and non-delta commands at begin time
			if not cmdpath or (begin and not delta):
				continue
			self.dlog('[%s]' % cmdline)
			try:
				fp = Popen([cmdpath]+cargs[3:], stdout=PIPE, stderr=PIPE).stdout
				info = ascii(fp.read()).strip()
				fp.close()
			except:
				continue
			if not debug and begin:
				# save the before-test output for the later diff
				self.cmd1[name] = self.dictify(info, delta)
			elif not debug and delta and name in self.cmd1:
				# diff the before/after values key by key
				before, after = self.cmd1[name], self.dictify(info, delta)
				dinfo = ('\t%s\n' % before['@']) if '@' in before else ''
				prefix = self.commonPrefix(list(before.keys()))
				for key in sorted(before):
					if key in after and before[key] != after[key]:
						title = key.replace(prefix, '')
						if delta == 2:
							dinfo += '\t%s : %s -> %s\n' % \
								(title, before[key].strip(), after[key].strip())
						else:
							dinfo += '%10s (start) : %s\n%10s (after) : %s\n' % \
								(title, before[key], title, after[key])
				dinfo = '\tnothing changed' if not dinfo else dinfo.rstrip()
				out.append((name, cmdline, dinfo))
			else:
				out.append((name, cmdline, '\tnothing' if not info else info))
		return out
	def testVal(self, file, fmt='basic', value=''):
		# write a test value to a sysfs file, saving the original value
		# in self.cfgdef first; testVal('restoreall') writes every saved
		# value back and clears the saved set
		if file == 'restoreall':
			for f in self.cfgdef:
				if os.path.exists(f):
					fp = open(f, 'w')
					fp.write(self.cfgdef[f])
					fp.close()
			self.cfgdef = dict()
		elif value and os.path.exists(file):
			fp = open(file, 'r+')
			if fmt == 'radio':
				# current value is the bracketed entry, e.g. 'a [b] c'
				m = re.match('.*\[(?P<v>.*)\].*', fp.read())
				if m:
					self.cfgdef[file] = m.group('v')
			elif fmt == 'acpi':
				# current value is the hex token on the last line
				line = fp.read().strip().split('\n')[-1]
				m = re.match('.* (?P<v>[0-9A-Fx]*) .*', line)
				if m:
					self.cfgdef[file] = m.group('v')
			else:
				self.cfgdef[file] = fp.read().strip()
			fp.write(value)
			fp.close()
	def haveTurbostat(self):
		# check that the turbostat tool is enabled, present, and reports
		# a version string; returns True when it is usable
		if not self.tstat:
			return False
		cmd = self.getExec('turbostat')
		if not cmd:
			return False
		# turbostat prints its version banner on stderr
		fp = Popen([cmd, '-v'], stdout=PIPE, stderr=PIPE).stderr
		out = ascii(fp.read()).strip()
		fp.close()
		if re.match('turbostat version .*', out):
			self.vprint(out)
			return True
		return False
def turbostat(self):
cmd = self.getExec('turbostat')
rawout = keyline = valline = ''
fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile)
fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE).stderr
for line in fp:
line = ascii(line)
rawout += line
if keyline and valline:
continue
if re.match('(?i)Avg_MHz.*', line):
keyline = line.strip().split()
elif keyline:
valline = line.strip().split()
fp.close()
if not keyline or not valline or len(keyline) != len(valline):
errmsg = 'unrecognized turbostat output:\n'+rawout.strip()
self.vprint(errmsg)
if not self.verbose:
pprint(errmsg)
return ''
if self.verbose:
pprint(rawout.strip())
out = []
for key in keyline:
idx = keyline.index(key)
val = valline[idx]
out.append('%s=%s' % (key, val))
return '|'.join(out)
	def wifiDetails(self, dev):
		# describe a wifi device as 'name:driver:pci_id' from its sysfs
		# uevent data; falls back to the bare name if it can't be read
		try:
			info = open('/sys/class/net/%s/device/uevent' % dev, 'r').read().strip()
		except:
			return dev
		vals = [dev]
		for prop in info.split('\n'):
			if prop.startswith('DRIVER=') or prop.startswith('PCI_ID='):
				vals.append(prop.split('=')[-1])
		return ':'.join(vals)
def checkWifi(self, dev=''):
try:
w = open('/proc/net/wireless', 'r').read().strip()
except:
return ''
for line in reversed(w.split('\n')):
m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', w.split('\n')[-1])
if not m or (dev and dev != m.group('dev')):
continue
return m.group('dev')
return ''
	def pollWifi(self, dev, timeout=60):
		# wait up to timeout seconds for the wifi device to reconnect;
		# returns a 'details reconnected <sec>' or 'details timeout <sec>' string
		start = time.time()
		while (time.time() - start) < timeout:
			w = self.checkWifi(dev)
			if w:
				return '%s reconnected %.2f' % \
					(self.wifiDetails(dev), max(0, time.time() - start))
			time.sleep(0.01)
		return '%s timeout %d' % (self.wifiDetails(dev), timeout)
	def errorSummary(self, errinfo, msg):
		# fold msg into the cumulative error summary list: bump the count
		# of an entry whose regex matches, or append a new entry whose
		# regex is generalized from the message text
		found = False
		for entry in errinfo:
			if re.match(entry['match'], msg):
				entry['count'] += 1
				if self.hostname not in entry['urls']:
					entry['urls'][self.hostname] = [self.htmlfile]
				elif self.htmlfile not in entry['urls'][self.hostname]:
					entry['urls'][self.hostname].append(self.htmlfile)
				found = True
				break
		if found:
			return
		# build a match regex from the message: numeric tokens become
		# wildcards, all other tokens have regex metachars escaped
		arr = msg.split()
		for j in range(len(arr)):
			if re.match('^[0-9,\-\.]*$', arr[j]):
				arr[j] = '[0-9,\-\.]*'
			else:
				arr[j] = arr[j]\
					.replace('\\', '\\\\').replace(']', '\]').replace('[', '\[')\
					.replace('.', '\.').replace('+', '\+').replace('*', '\*')\
					.replace('(', '\(').replace(')', '\)').replace('}', '\}')\
					.replace('{', '\{')
		mstr = ' *'.join(arr)
		entry = {
			'line': msg,
			'match': mstr,
			'count': 1,
			'urls': {self.hostname: [self.htmlfile]}
		}
		errinfo.append(entry)
	def multistat(self, start, idx, finish):
		# print progress for multi-test runs and track cumulative timing;
		# start=True marks a test beginning, False a test completion
		if 'time' in self.multitest:
			id = '%d Duration=%dmin' % (idx+1, self.multitest['time'])
		else:
			id = '%d/%d' % (idx+1, self.multitest['count'])
		t = time.time()
		if 'start' not in self.multitest:
			# very first call: record the run start time
			self.multitest['start'] = self.multitest['last'] = t
			self.multitest['total'] = 0.0
			pprint('TEST (%s) START' % id)
			return
		dt = t - self.multitest['last']
		if not start:
			# test completion: account for the configured inter-test delay
			if idx == 0 and self.multitest['delay'] > 0:
				self.multitest['total'] += self.multitest['delay']
			pprint('TEST (%s) COMPLETE -- Duration %.1fs' % (id, dt))
			return
		# test start: update the running average and estimate time left
		self.multitest['total'] += dt
		self.multitest['last'] = t
		avg = self.multitest['total'] / idx
		if 'time' in self.multitest:
			left = finish - datetime.now()
			left -= timedelta(microseconds=left.microseconds)
		else:
			left = timedelta(seconds=((self.multitest['count'] - idx) * int(avg)))
		pprint('TEST (%s) START - Avg Duration %.1fs, Time left %s' % \
			(id, avg, str(left)))
	def multiinit(self, c, d):
		# parse the multi-test arguments: c is a test count, or a duration
		# with a d/h/m suffix; d is the delay between tests in seconds
		sz, unit = 'count', 'm'
		if c.endswith('d') or c.endswith('h') or c.endswith('m'):
			sz, unit, c = 'time', c[-1], c[:-1]
		self.multitest['run'] = True
		self.multitest[sz] = getArgInt('multi: n d (exec count)', c, 1, 1000000, False)
		self.multitest['delay'] = getArgInt('multi: n d (delay between tests)', d, 0, 3600, False)
		# normalize durations to minutes
		if unit == 'd':
			self.multitest[sz] *= 1440
		elif unit == 'h':
			self.multitest[sz] *= 60
	def displayControl(self, cmd):
		# control the display power state via xset; cmd is one of init,
		# reset, on, off, standby, suspend, or stat (which returns the
		# current monitor state string instead of an exit code)
		xset, ret = 'timeout 10 xset -d :0.0 {0}', 0
		if self.sudouser:
			# run xset as the original user so it can reach the X display
			xset = 'sudo -u %s %s' % (self.sudouser, xset)
		if cmd == 'init':
			ret = call(xset.format('dpms 0 0 0'), shell=True)
			if not ret:
				ret = call(xset.format('s off'), shell=True)
		elif cmd == 'reset':
			ret = call(xset.format('s reset'), shell=True)
		elif cmd in ['on', 'off', 'standby', 'suspend']:
			# verify the state actually changed after forcing it
			b4 = self.displayControl('stat')
			ret = call(xset.format('dpms force %s' % cmd), shell=True)
			if not ret:
				curr = self.displayControl('stat')
				self.vprint('Display Switched: %s -> %s' % (b4, curr))
				if curr != cmd:
					self.vprint('WARNING: Display failed to change to %s' % cmd)
			if ret:
				self.vprint('WARNING: Display failed to change to %s with xset' % cmd)
				return ret
		elif cmd == 'stat':
			fp = Popen(xset.format('q').split(' '), stdout=PIPE).stdout
			ret = 'unknown'
			for line in fp:
				m = re.match('[\s]*Monitor is (?P<m>.*)', ascii(line))
				if(m and len(m.group('m')) >= 2):
					out = m.group('m').lower()
					# strip the 'in ' prefix, e.g. 'in suspend' -> 'suspend'
					ret = out[3:] if out[0:2] == 'in' else out
					break
			fp.close()
		return ret
	def setRuntimeSuspend(self, before=True):
		# before the test, switch runtime suspend on or off (per self.rs)
		# for all devices; after the test, restore the original settings
		if before:
			# runtime suspend disable or enable
			if self.rs > 0:
				self.rstgt, self.rsval, self.rsdir = 'on', 'auto', 'enabled'
			else:
				self.rstgt, self.rsval, self.rsdir = 'auto', 'on', 'disabled'
			pprint('CONFIGURING RUNTIME SUSPEND...')
			self.rslist = deviceInfo(self.rstgt)
			for i in self.rslist:
				self.setVal(self.rsval, i)
			pprint('runtime suspend %s on all devices (%d changed)' % (self.rsdir, len(self.rslist)))
			pprint('waiting 5 seconds...')
			time.sleep(5)
		else:
			# runtime suspend re-enable or re-disable
			for i in self.rslist:
				self.setVal(self.rstgt, i)
			pprint('runtime suspend settings restored on %d devices' % len(self.rslist))
# the one global instance holding all configuration and system state
sysvals = SystemValues()
# accepted spellings for boolean command-line/config values
switchvalues = ['enable', 'disable', 'on', 'off', 'true', 'false', '1', '0']
switchoff = ['disable', 'off', 'false', '0']
# human-readable names for the kernel suspend modes
suspendmodename = {
	'freeze': 'Freeze (S0)',
	'standby': 'Standby (S1)',
	'mem': 'Suspend (S3)',
	'disk': 'Hibernate (S4)'
}
# Class: DevProps
# Description:
# Simple class which holds property values collected
# for all the devices used in the timeline.
class DevProps:
	# Simple holder for the per-device properties (syspath, readable
	# name, async flag, html extras) shown in the timeline.
	def __init__(self):
		self.syspath = ''
		self.altname = ''
		self.isasync = True
		self.xtraclass = ''
		self.xtrainfo = ''
	def out(self, dev):
		# serialized form used in the platform-devinfo ftrace footer
		return '%s,%s,%d;' % (dev, self.altname, self.isasync)
	def debug(self, dev):
		pprint('%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.isasync))
	def altName(self, dev):
		# prefer the human-readable name when a distinct one was found
		if self.altname and self.altname != dev:
			return '%s [%s]' % (self.altname, dev)
		return dev
	def xtraClass(self):
		if self.xtraclass:
			return ' '+self.xtraclass
		return '' if self.isasync else ' sync'
	def xtraInfo(self):
		if self.xtraclass:
			return ' '+self.xtraclass
		return ' (async)' if self.isasync else ' (sync)'
# Class: DeviceNode
# Description:
# A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
	# One node of the device topology tree built by Data.deviceTopology():
	# a device name, its depth in the tree, and its child nodes.
	def __init__(self, nodename, nodedepth):
		self.name = nodename
		self.depth = nodedepth
		self.children = []
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
# each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes dev/ps data
# }
# }
# }
# }
#
class Data:
	"""Primary container for the data of a single suspend/resume test.

	Holds the phase/device hierarchy (self.dmesg), timing markers, and
	error info parsed from the ftrace and dmesg logs.
	"""
	# the 10 sequential, non-overlapping phases of suspend/resume, with
	# their display order and timeline color
	phasedef = {
		'suspend_prepare': {'order': 0, 'color': '#CCFFCC'},
		'suspend': {'order': 1, 'color': '#88FF88'},
		'suspend_late': {'order': 2, 'color': '#00AA00'},
		'suspend_noirq': {'order': 3, 'color': '#008888'},
		'suspend_machine': {'order': 4, 'color': '#0000FF'},
		'resume_machine': {'order': 5, 'color': '#FF0000'},
		'resume_noirq': {'order': 6, 'color': '#FF9900'},
		'resume_early': {'order': 7, 'color': '#FFCC00'},
		'resume': {'order': 8, 'color': '#FFFF88'},
		'resume_complete': {'order': 9, 'color': '#FFFFCC'},
	}
	# regexes used to pick notable error/warning lines out of dmesg,
	# keyed by the error type reported in the timeline
	errlist = {
		'HWERROR' : r'.*\[ *Hardware Error *\].*',
		'FWBUG' : r'.*\[ *Firmware Bug *\].*',
		'BUG' : r'(?i).*\bBUG\b.*',
		'ERROR' : r'(?i).*\bERROR\b.*',
		'WARNING' : r'(?i).*\bWARNING\b.*',
		'FAULT' : r'(?i).*\bFAULT\b.*',
		'FAIL' : r'(?i).*\bFAILED\b.*',
		'INVALID' : r'(?i).*\bINVALID\b.*',
		'CRASH' : r'(?i).*\bCRASHED\b.*',
		'TIMEOUT' : r'(?i).*\bTIMEOUT\b.*',
		'IRQ' : r'.*\bgenirq: .*',
		'TASKFAIL': r'.*Freezing of tasks *.*',
		'ACPI' : r'.*\bACPI *(?P<b>[A-Za-z]*) *Error[: ].*',
		'DISKFULL': r'.*\bNo space left on device.*',
		'USBERR' : r'.*usb .*device .*, error [0-9-]*',
		'ATAERR' : r' *ata[0-9\.]*: .*failed.*',
		'MEIERR' : r' *mei.*: .*failed.*',
		'TPMERR' : r'(?i) *tpm *tpm[0-9]*: .*error.*',
	}
	def __init__(self, num):
		"""Initialize an empty data container for test number num."""
		idchar = 'abcdefghij'
		self.start = 0.0 # test start
		self.end = 0.0 # test end
		self.hwstart = 0 # rtc test start
		self.hwend = 0 # rtc test end
		self.tSuspended = 0.0 # low-level suspend start
		self.tResumed = 0.0 # low-level resume start
		self.tKernSus = 0.0 # kernel level suspend start
		self.tKernRes = 0.0 # kernel level resume end
		self.fwValid = False # is firmware data available
		self.fwSuspend = 0 # time spent in firmware suspend
		self.fwResume = 0 # time spent in firmware resume
		self.html_device_id = 0 # counter used to mint unique device ids
		self.stamp = 0
		self.outfile = ''
		self.kerror = False # a kernel error was seen in dmesg
		self.wifi = dict()
		self.turbostat = 0
		self.enterfail = ''
		self.currphase = '' # phase currently open (see setPhase)
		self.pstl = dict() # process timeline
		self.testnumber = num
		self.idstr = idchar[num] # per-test prefix for html device ids
		self.dmesgtext = [] # dmesg text file in memory
		self.dmesg = dict() # root data structure
		self.errorinfo = {'suspend':[],'resume':[]}
		self.tLow = [] # time spent in low-level suspends (standby/freeze)
		self.devpids = []
		self.devicegroups = 0
def sortedPhases(self):
return sorted(self.dmesg, key=lambda k:self.dmesg[k]['order'])
def initDevicegroups(self):
# called when phases are all finished being added
for phase in sorted(self.dmesg.keys()):
if '*' in phase:
p = phase.split('*')
pnew = '%s%d' % (p[0], len(p))
self.dmesg[pnew] = self.dmesg.pop(phase)
self.devicegroups = []
for phase in self.sortedPhases():
self.devicegroups.append([phase])
def nextPhase(self, phase, offset):
order = self.dmesg[phase]['order'] + offset
for p in self.dmesg:
if self.dmesg[p]['order'] == order:
return p
return ''
def lastPhase(self, depth=1):
plist = self.sortedPhases()
if len(plist) < depth:
return ''
return plist[-1*depth]
	def turbostatInfo(self):
		# extract the SYS%LPI and package C10 residency values from the
		# turbostat line embedded in the in-memory dmesg text
		tp = TestProps()
		out = {'syslpi':'N/A','pkgpc10':'N/A'}
		for line in self.dmesgtext:
			m = re.match(tp.tstatfmt, line)
			if not m:
				continue
			# values are 'key=val' fields joined by '|'
			for i in m.group('t').split('|'):
				if 'SYS%LPI' in i:
					out['syslpi'] = i.split('=')[-1]+'%'
				elif 'pc10' in i:
					out['pkgpc10'] = i.split('=')[-1]+'%'
			break
		return out
	def extractErrorInfo(self):
		# scan the dmesg log for error/warning lines that fall inside the
		# test window and record them in self.errorinfo; returns the
		# TestProps object (with msglist filled in)
		lf = self.dmesgtext
		if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
			# no in-memory text: read straight from the dmesg file
			lf = sysvals.openlog(sysvals.dmesgfile, 'r')
		i = 0
		tp = TestProps()
		list = []
		for line in lf:
			i += 1
			if tp.stampInfo(line, sysvals):
				continue
			m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
			if not m:
				continue
			t = float(m.group('ktime'))
			# only messages within the test window count
			if t < self.start or t > self.end:
				continue
			dir = 'suspend' if t < self.tSuspended else 'resume'
			msg = m.group('msg')
			if re.match('capability: warning: .*', msg):
				continue
			for err in self.errlist:
				if re.match(self.errlist[err], msg):
					list.append((msg, err, dir, t, i, i))
					self.kerror = True
					break
		tp.msglist = []
		for msg, type, dir, t, idx1, idx2 in list:
			tp.msglist.append(msg)
			self.errorinfo[dir].append((type, t, idx1, idx2))
		if self.kerror:
			sysvals.dmesglog = True
		if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
			lf.close()
		return tp
	def setStart(self, time, msg=''):
		# record the test start time; msg optionally carries an RTC
		# timestamp string which is parsed into hwstart (0 on failure)
		self.start = time
		if msg:
			try:
				self.hwstart = datetime.strptime(msg, sysvals.tmstart)
			except:
				self.hwstart = 0
	def setEnd(self, time, msg=''):
		# record the test end time; msg optionally carries an RTC
		# timestamp string which is parsed into hwend (0 on failure)
		self.end = time
		if msg:
			try:
				self.hwend = datetime.strptime(msg, sysvals.tmend)
			except:
				self.hwend = 0
def isTraceEventOutsideDeviceCalls(self, pid, time):
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time < d['end']):
return False
return True
def sourcePhase(self, start):
for phase in self.sortedPhases():
if 'machine' in phase:
continue
pend = self.dmesg[phase]['end']
if start <= pend:
return phase
return 'resume_complete'
	def sourceDevice(self, phaselist, start, end, pid, type):
		# find an entry in the given phases that can host a call spanning
		# [start, end] for this pid; type 'device' requires full
		# containment, type 'thread' widens the entry to fit. Returns the
		# entry dict or '' (a later phase can override an earlier match).
		tgtdev = ''
		for phase in phaselist:
			list = self.dmesg[phase]['list']
			for devname in list:
				dev = list[devname]
				# pid must match
				if dev['pid'] != pid:
					continue
				devS = dev['start']
				devE = dev['end']
				if type == 'device':
					# device target event is entirely inside the source boundary
					if(start < devS or start >= devE or end <= devS or end > devE):
						continue
				elif type == 'thread':
					# thread target event will expand the source boundary
					if start < devS:
						dev['start'] = start
					if end > devE:
						dev['end'] = end
				tgtdev = dev
				break
		return tgtdev
	def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata):
		# add a kprobe function call to the timeline: place it inside a
		# device callback, an existing thread block, or a newly created
		# thread block; returns True when the call was recorded
		# try to place the call in a device
		phases = self.sortedPhases()
		tgtdev = self.sourceDevice(phases, start, end, pid, 'device')
		# calls with device pids that occur outside device bounds are dropped
		# TODO: include these somehow
		if not tgtdev and pid in self.devpids:
			return False
		# try to place the call in a thread
		if not tgtdev:
			tgtdev = self.sourceDevice(phases, start, end, pid, 'thread')
		# create new thread blocks, expand as new calls are found
		if not tgtdev:
			if proc == '<...>':
				threadname = 'kthread-%d' % (pid)
			else:
				threadname = '%s-%d' % (proc, pid)
			tgtphase = self.sourcePhase(start)
			self.newAction(tgtphase, threadname, pid, '', start, end, '', ' kth', '')
			# retry now that a thread block exists
			return self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata)
		# this should not happen
		if not tgtdev:
			sysvals.vprint('[%f - %f] %s-%d %s %s %s' % \
				(start, end, proc, pid, kprobename, cdata, rdata))
			return False
		# place the call data inside the src element of the tgtdev
		if('src' not in tgtdev):
			tgtdev['src'] = []
		dtf = sysvals.dev_tracefuncs
		# 'ub' marks ubiquitous functions that are filtered when nested
		ubiquitous = False
		if kprobename in dtf and 'ub' in dtf[kprobename]:
			ubiquitous = True
		title = cdata+' '+rdata
		mstr = '\(.*\) *(?P<args>.*) *\((?P<caller>.*)\+.* arg1=(?P<ret>.*)'
		m = re.match(mstr, title)
		if m:
			c = m.group('caller')
			a = m.group('args').strip()
			r = m.group('ret')
			# only short return values are worth displaying
			if len(r) > 6:
				r = ''
			else:
				r = 'ret=%s ' % r
			if ubiquitous and c in dtf and 'ub' in dtf[c]:
				return False
			color = sysvals.kprobeColor(kprobename)
			e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
			tgtdev['src'].append(e)
		return True
def overflowDevices(self):
# get a list of devices that extend beyond the end of this test run
devlist = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if dev['end'] > self.end:
devlist.append(dev)
return devlist
	def mergeOverlapDevices(self, devlist):
		# merge any devices that overlap devlist (entries from another
		# test); the overlapped entry's end/src are folded into dev
		for dev in devlist:
			devname = dev['name']
			for phase in self.sortedPhases():
				list = self.dmesg[phase]['list']
				if devname not in list:
					continue
				tdev = list[devname]
				# o is the length of the overlap between the two entries
				o = min(dev['end'], tdev['end']) - max(dev['start'], tdev['start'])
				if o <= 0:
					continue
				dev['end'] = tdev['end']
				# the local entry is only removed when src data is merged
				if 'src' not in dev or 'src' not in tdev:
					continue
				dev['src'] += tdev['src']
				del list[devname]
	def usurpTouchingThread(self, name, dev):
		# the caller test has priority of this thread, give it to him:
		# if this test's thread block starts within 100ms of dev's end,
		# merge it into dev and delete it here
		for phase in self.sortedPhases():
			list = self.dmesg[phase]['list']
			if name in list:
				tdev = list[name]
				if tdev['start'] - dev['end'] < 0.1:
					dev['end'] = tdev['end']
					if 'src' not in dev:
						dev['src'] = []
					if 'src' in tdev:
						dev['src'] += tdev['src']
					del list[name]
				break
	def stitchTouchingThreads(self, testlist):
		# merge any threads between tests that touch; only kernel-thread
		# entries (htmlclass contains 'kth') are candidates
		for phase in self.sortedPhases():
			list = self.dmesg[phase]['list']
			for devname in list:
				dev = list[devname]
				if 'htmlclass' not in dev or 'kth' not in dev['htmlclass']:
					continue
				for data in testlist:
					data.usurpTouchingThread(devname, dev)
	def optimizeDevSrc(self):
		# merge any src call loops to reduce timeline size: consecutive
		# repeats of the same call are collapsed into one entry with a
		# count and an extended length
		for phase in self.sortedPhases():
			list = self.dmesg[phase]['list']
			for dev in list:
				if 'src' not in list[dev]:
					continue
				src = list[dev]['src']
				p = 0
				for e in sorted(src, key=lambda event: event.time):
					if not p or not e.repeat(p):
						p = e
						continue
					# e is another iteration of p, move it into p
					p.end = e.end
					p.length = p.end - p.time
					p.count += 1
					src.remove(e)
def trimTimeVal(self, t, t0, dT, left):
if left:
if(t > t0):
if(t - dT < t0):
return t0
return t - dT
else:
return t
else:
if(t < t0 + dT):
if(t > t0):
return t0 + dT
return t + dT
else:
return t
	def trimTime(self, t0, dT, left):
		# remove the time gap [t0, t0+dT] from every timestamp in the
		# test: markers, phases, devices, callgraphs, src calls, errors
		self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
		self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
		self.start = self.trimTimeVal(self.start, t0, dT, left)
		self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left)
		self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left)
		self.end = self.trimTimeVal(self.end, t0, dT, left)
		for phase in self.sortedPhases():
			p = self.dmesg[phase]
			p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
			p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
			list = p['list']
			for name in list:
				d = list[name]
				d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
				d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
				d['length'] = d['end'] - d['start']
				if('ftrace' in d):
					# shift the attached callgraph lines too
					cg = d['ftrace']
					cg.start = self.trimTimeVal(cg.start, t0, dT, left)
					cg.end = self.trimTimeVal(cg.end, t0, dT, left)
					for line in cg.list:
						line.time = self.trimTimeVal(line.time, t0, dT, left)
				if('src' in d):
					for e in d['src']:
						e.time = self.trimTimeVal(e.time, t0, dT, left)
						e.end = self.trimTimeVal(e.end, t0, dT, left)
						e.length = e.end - e.time
		# rebuild the error lists with shifted timestamps
		for dir in ['suspend', 'resume']:
			list = []
			for e in self.errorinfo[dir]:
				type, tm, idx1, idx2 = e
				tm = self.trimTimeVal(tm, t0, dT, left)
				list.append((type, tm, idx1, idx2))
			self.errorinfo[dir] = list
	def trimFreezeTime(self, tZero):
		# trim out any standby or freeze clock time: the gap between each
		# suspend_machine end and the following resume_machine start is
		# removed from the timeline and noted in self.tLow
		lp = ''
		for phase in self.sortedPhases():
			if 'resume_machine' in phase and 'suspend_machine' in lp:
				tS, tR = self.dmesg[lp]['end'], self.dmesg[phase]['start']
				tL = tR - tS
				if tL <= 0:
					continue
				left = True if tR > tZero else False
				self.trimTime(tS, tL, left)
				if 'waking' in self.dmesg[lp]:
					# include the wake count and time spent waking
					tCnt = self.dmesg[lp]['waking'][0]
					if self.dmesg[lp]['waking'][1] >= 0.001:
						tTry = '-%.0f' % (round(self.dmesg[lp]['waking'][1] * 1000))
					else:
						tTry = '-%.3f' % (self.dmesg[lp]['waking'][1] * 1000)
					text = '%.0f (%s ms waking %d times)' % (tL * 1000, tTry, tCnt)
				else:
					text = '%.0f' % (tL * 1000)
				self.tLow.append(text)
			lp = phase
	def getMemTime(self):
		# compute the real time spent suspended using the RTC timestamps
		# taken before and after the test, and append it (ms) to tLow
		if not self.hwstart or not self.hwend:
			return
		stime = (self.tSuspended - self.start) * 1000000
		rtime = (self.end - self.tResumed) * 1000000
		hws = self.hwstart + timedelta(microseconds=stime)
		hwr = self.hwend - timedelta(microseconds=rtime)
		self.tLow.append('%.0f'%((hwr - hws).total_seconds() * 1000))
def getTimeValues(self):
sktime = (self.tSuspended - self.tKernSus) * 1000
rktime = (self.tKernRes - self.tResumed) * 1000
return (sktime, rktime)
	def setPhase(self, phase, ktime, isbegin, order=-1):
		# mark the begin or end of a phase at time ktime; returns the
		# (possibly star-suffixed) phase name that was affected
		if(isbegin):
			# phase start over current phase
			if self.currphase:
				if 'resume_machine' not in self.currphase:
					sysvals.vprint('WARNING: phase %s failed to end' % self.currphase)
				self.dmesg[self.currphase]['end'] = ktime
			phases = self.dmesg.keys()
			color = self.phasedef[phase]['color']
			count = len(phases) if order < 0 else order
			# create unique name for every new phase
			while phase in phases:
				phase += '*'
			self.dmesg[phase] = {'list': dict(), 'start': -1.0, 'end': -1.0,
				'row': 0, 'color': color, 'order': count}
			self.dmesg[phase]['start'] = ktime
			self.currphase = phase
		else:
			# phase end without a start
			if phase not in self.currphase:
				if self.currphase:
					sysvals.vprint('WARNING: %s ended instead of %s, ftrace corruption?' % (phase, self.currphase))
				else:
					sysvals.vprint('WARNING: %s ended without a start, ftrace corruption?' % phase)
				return phase
			phase = self.currphase
			self.dmesg[phase]['end'] = ktime
			self.currphase = ''
		return phase
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
return sorted(list, key=lambda k:list[k]['start'])
	def fixupInitcalls(self, phase):
		# if any calls never returned, clip them at system resume end
		phaselist = self.dmesg[phase]['list']
		for devname in phaselist:
			dev = phaselist[devname]
			# end < 0 marks a callback that never returned
			if(dev['end'] < 0):
				for p in self.sortedPhases():
					if self.dmesg[p]['end'] > dev['start']:
						dev['end'] = self.dmesg[p]['end']
						break
				sysvals.vprint('%s (%s): callback didnt return' % (devname, phase))
def deviceFilter(self, devicefilter):
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
keep = False
for filter in devicefilter:
if filter in name or \
('drv' in list[name] and filter in list[name]['drv']):
keep = True
if not keep:
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.sortedPhases():
self.fixupInitcalls(phase)
	def phaseOverlap(self, phases):
		# merge every devicegroup containing any of these phases into a
		# single new group
		rmgroups = []
		newgroup = []
		for group in self.devicegroups:
			for phase in phases:
				if phase not in group:
					continue
				for p in group:
					if p not in newgroup:
						newgroup.append(p)
				if group not in rmgroups:
					rmgroups.append(group)
		for group in rmgroups:
			self.devicegroups.remove(group)
		self.devicegroups.append(newgroup)
	def newActionGlobal(self, name, start, end, pid=-1, color=''):
		# create a device/action entry whose phase is chosen by greatest
		# time overlap; returns (phase, name) or False
		# which phase is this device callback or action in
		phases = self.sortedPhases()
		targetphase = 'none'
		htmlclass = ''
		overlap = 0.0
		myphases = []
		for phase in phases:
			pstart = self.dmesg[phase]['start']
			pend = self.dmesg[phase]['end']
			# see if the action overlaps this phase
			o = max(0, min(end, pend) - max(start, pstart))
			if o > 0:
				myphases.append(phase)
			# set the target phase to the one that overlaps most
			if o > overlap:
				if overlap > 0 and phase == 'post_resume':
					continue
				targetphase = phase
				overlap = o
		# if no target phase was found, pin it to the edge
		if targetphase == 'none':
			p0start = self.dmesg[phases[0]]['start']
			if start <= p0start:
				targetphase = phases[0]
			else:
				targetphase = phases[-1]
		# special pids select a background/process-timeline style
		if pid == -2:
			htmlclass = ' bg'
		elif pid == -3:
			htmlclass = ' ps'
		if len(myphases) > 1:
			# action spans phases: mark it and merge the device groups
			htmlclass = ' bg'
			self.phaseOverlap(myphases)
		if targetphase in phases:
			newname = self.newAction(targetphase, name, pid, '', start, end, '', htmlclass, color)
			return (targetphase, newname)
		return False
	def newAction(self, phase, name, pid, parent, start, end, drv, htmlclass='', color=''):
		# new device callback for a specific phase; returns the final
		# (possibly '[N]'-suffixed) name used as the list key
		self.html_device_id += 1
		devid = '%s%d' % (self.idstr, self.html_device_id)
		list = self.dmesg[phase]['list']
		length = -1.0
		if(start >= 0 and end >= 0):
			length = end - start
		# entries outside the tracefunc set get unique names on collision
		if pid == -2 or name not in sysvals.tracefuncs.keys():
			i = 2
			origname = name
			while(name in list):
				name = '%s[%d]' % (origname, i)
				i += 1
		list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid,
			'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv }
		if htmlclass:
			list[name]['htmlclass'] = htmlclass
		if color:
			list[name]['color'] = color
		return name
def findDevice(self, phase, name):
list = self.dmesg[phase]['list']
mydev = ''
for devname in sorted(list):
if name == devname or re.match('^%s\[(?P<num>[0-9]*)\]$' % name, devname):
mydev = devname
if mydev:
return list[mydev]
return False
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def maxDeviceNameSize(self, phase):
size = 0
for name in self.dmesg[phase]['list']:
if len(name) > size:
size = len(name)
return size
	def printDetails(self):
		# Verbose-print the timeline details: test boundaries, machine
		# suspend/resume transitions, and per-phase boundaries with device
		# counts (plus full per-phase device listings when devdump is set).
		sysvals.vprint('Timeline Details:')
		sysvals.vprint(' test start: %f' % self.start)
		sysvals.vprint('kernel suspend start: %f' % self.tKernSus)
		# tS/tR flag whether the suspended/resumed markers were printed yet,
		# so each appears once, in timeline order between the phases
		tS = tR = False
		for phase in self.sortedPhases():
			devlist = self.dmesg[phase]['list']
			dc, ps, pe = len(devlist), self.dmesg[phase]['start'], self.dmesg[phase]['end']
			if not tS and ps >= self.tSuspended:
				sysvals.vprint(' machine suspended: %f' % self.tSuspended)
				tS = True
			if not tR and ps >= self.tResumed:
				sysvals.vprint(' machine resumed: %f' % self.tResumed)
				tR = True
			sysvals.vprint('%20s: %f - %f (%d devices)' % (phase, ps, pe, dc))
			if sysvals.devdump:
				sysvals.vprint(''.join('-' for i in range(80)))
				# right-align device names to the longest name in the phase
				maxname = '%d' % self.maxDeviceNameSize(phase)
				fmt = '%3d) %'+maxname+'s - %f - %f'
				c = 1
				for name in sorted(devlist):
					s = devlist[name]['start']
					e = devlist[name]['end']
					sysvals.vprint(fmt % (c, name, s, e))
					c += 1
				sysvals.vprint(''.join('-' for i in range(80)))
		sysvals.vprint(' kernel resume end: %f' % self.tKernRes)
		sysvals.vprint(' test end: %f' % self.end)
def deviceChildrenAllPhases(self, devname):
devlist = []
for phase in self.sortedPhases():
list = self.deviceChildren(devname, phase)
for dev in sorted(list):
if dev not in devlist:
devlist.append(dev)
return devlist
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
# avoid recursions
if name == cname:
continue
clist = self.deviceChildrenAllPhases(cname)
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in sorted(list):
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in sorted(list):
pdev = list[dev]['par']
pid = list[dev]['pid']
if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
def selectTimelineDevices(self, widfmt, tTotal, mindevlen):
# only select devices that will actually show up in html
self.tdevlist = dict()
for phase in self.dmesg:
devlist = []
list = self.dmesg[phase]['list']
for dev in list:
length = (list[dev]['end'] - list[dev]['start']) * 1000
width = widfmt % (((list[dev]['end']-list[dev]['start'])*100)/tTotal)
if length >= mindevlen:
devlist.append(dev)
self.tdevlist[phase] = devlist
def addHorizontalDivider(self, devname, devend):
phase = 'suspend_prepare'
self.newAction(phase, devname, -2, '', \
self.start, devend, '', ' sec', '')
if phase not in self.tdevlist:
self.tdevlist[phase] = []
self.tdevlist[phase].append(devname)
d = DevItem(0, phase, self.dmesg[phase]['list'][devname])
return d
	def addProcessUsageEvent(self, name, times):
		# Create a timeline event for one process over the given sample
		# times (keys into self.pstl), attaching per-interval cpu usage
		# data. Returns the max jiffies seen in a single sample interval,
		# or 0 if the process never ran in this window.
		# get the start and end times for this process
		maxC = 0
		tlast = 0
		start = -1
		end = -1
		for t in sorted(times):
			# the first sample only establishes the previous-time baseline
			if tlast == 0:
				tlast = t
				continue
			if name in self.pstl[t]:
				if start == -1 or tlast < start:
					start = tlast
				if end == -1 or t > end:
					end = t
			tlast = t
		if start == -1 or end == -1:
			return 0
		# add a new action for this process and get the object
		out = self.newActionGlobal(name, start, end, -3)
		if not out:
			return 0
		phase, devname = out
		dev = self.dmesg[phase]['list'][devname]
		# get the cpu exec data
		tlast = 0
		clast = 0
		cpuexec = dict()
		for t in sorted(times):
			# only consider samples inside the (start, end] window
			if tlast == 0 or t <= start or t > end:
				tlast = t
				continue
			list = self.pstl[t]
			c = 0
			if name in list:
				c = list[name]
			if c > maxC:
				maxC = c
			# record an interval only when the usage value changes
			if c != clast:
				key = (tlast, t)
				cpuexec[key] = c
			tlast = t
			clast = c
		# cpuexec maps (interval start, interval end) -> jiffies used
		dev['cpuexec'] = cpuexec
		return maxC
def createProcessUsageEvents(self):
# get an array of process names
proclist = []
for t in sorted(self.pstl):
pslist = self.pstl[t]
for ps in sorted(pslist):
if ps not in proclist:
proclist.append(ps)
# get a list of data points for suspend and resume
tsus = []
tres = []
for t in sorted(self.pstl):
if t < self.tSuspended:
tsus.append(t)
else:
tres.append(t)
# process the events for suspend and resume
if len(proclist) > 0:
sysvals.vprint('Process Execution:')
for ps in proclist:
c = self.addProcessUsageEvent(ps, tsus)
if c > 0:
sysvals.vprint('%25s (sus): %d' % (ps, c))
c = self.addProcessUsageEvent(ps, tres)
if c > 0:
sysvals.vprint('%25s (res): %d' % (ps, c))
	def handleEndMarker(self, time, msg=''):
		# Finalize the timeline at the RESUME COMPLETE marker: close any
		# phases that never received an explicit end, and pin missing
		# kernel suspend/resume boundary times to the marker time.
		dm = self.dmesg
		self.setEnd(time, msg)
		self.initDevicegroups()
		# give suspend_prepare an end if needed
		if 'suspend_prepare' in dm and dm['suspend_prepare']['end'] < 0:
			dm['suspend_prepare']['end'] = time
		# assume resume machine ends at next phase start
		if 'resume_machine' in dm and dm['resume_machine']['end'] < 0:
			np = self.nextPhase('resume_machine', 1)
			if np:
				dm['resume_machine']['end'] = dm[np]['start']
		# if kernel resume end not found, assume its the end marker
		if self.tKernRes == 0.0:
			self.tKernRes = time
		# if kernel suspend start not found, assume its the end marker
		if self.tKernSus == 0.0:
			self.tKernSus = time
		# set resume complete to end at end marker
		if 'resume_complete' in dm:
			dm['resume_complete']['end'] = time
def debugPrint(self):
for p in self.sortedPhases():
list = self.dmesg[p]['list']
for devname in sorted(list):
dev = list[devname]
if 'ftrace' in dev:
dev['ftrace'].debugPrint(' [%s]' % devname)
# Class: DevFunction
# Description:
# A container for kprobe function data we want in the dev timeline
class DevFunction:
	# One kprobe function call to be drawn in the dev timeline; repeated
	# identical calls may be merged by bumping self.count.
	def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color):
		self.row = 0
		self.count = 1
		self.name = name
		self.args = args
		self.caller = caller
		self.ret = ret
		self.time = start
		self.length = end - start
		self.end = end
		self.ubiquitous = u
		self.proc = proc
		self.pid = pid
		self.color = color
	def title(self):
		# hover text: the call with args, return value and duration
		cnt = '(x%d)' % self.count if self.count > 1 else ''
		dur = '%0.3fms' % (self.length * 1000)
		if self.ubiquitous:
			out = '%s(%s)%s <- %s, %s(%s)' % \
				(self.name, self.args, cnt, self.caller, self.ret, dur)
		else:
			out = '%s(%s) %s%s(%s)' % (self.name, self.args, self.ret, cnt, dur)
		return out.replace('"', '')
	def text(self):
		# short label shown inside the timeline block
		if self.count > 1:
			return '%s(x%d)' % (self.name, self.count)
		return self.name
	def repeat(self, tgt):
		# is the tgt call just a repeat of this call (e.g. are we in a loop)
		dt = self.time - tgt.end
		# only combine calls if -all- attributes are identical
		if dt < 0 or dt > sysvals.callloopmaxgap:
			return False
		if self.length >= sysvals.callloopmaxlen:
			return False
		return (tgt.caller == self.caller and tgt.name == self.name and
			tgt.args == self.args and tgt.proc == self.proc and
			tgt.pid == self.pid and tgt.ret == self.ret)
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
	def __init__(self, t, m='', d=''):
		# Parse one line of ftrace output.
		#   t: timestamp (seconds, string or number)
		#   m: the message portion of the line
		#   d: duration in us, or 'traceevent' for nop-format trace events
		self.length = 0.0
		self.fcall = False
		self.freturn = False
		self.fevent = False
		self.fkprobe = False
		self.depth = 0
		self.name = ''
		self.type = ''
		self.time = float(t)
		if not m and not d:
			return
		# is this a trace event
		# (raw strings avoid invalid-escape warnings on python >= 3.12)
		if(d == 'traceevent' or re.match(r'^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
			if(d == 'traceevent'):
				# nop format trace event
				msg = m
			else:
				# function_graph format trace event
				em = re.match(r'^ *\/\* *(?P<msg>.*) \*\/ *$', m)
				msg = em.group('msg')
			emm = re.match(r'^(?P<call>.*?): (?P<msg>.*)', msg)
			if(emm):
				self.name = emm.group('msg')
				self.type = emm.group('call')
			else:
				self.name = msg
			# kprobe events are typed <probe>_cal (entry) / <probe>_ret (exit)
			km = re.match(r'^(?P<n>.*)_cal$', self.type)
			if km:
				self.fcall = True
				self.fkprobe = True
				self.type = km.group('n')
				return
			km = re.match(r'^(?P<n>.*)_ret$', self.type)
			if km:
				self.freturn = True
				self.fkprobe = True
				self.type = km.group('n')
				return
			self.fevent = True
			return
		# convert the duration to seconds
		if(d):
			self.length = float(d)/1000000
		# the indentation determines the depth
		match = re.match(r'^(?P<d> *)(?P<o>.*)$', m)
		if(not match):
			return
		self.depth = self.getDepth(match.group('d'))
		m = match.group('o')
		# function return
		if(m[0] == '}'):
			self.freturn = True
			if(len(m) > 1):
				# includes comment with function name
				match = re.match(r'^} *\/\* *(?P<n>.*) *\*\/$', m)
				if(match):
					self.name = match.group('n').strip()
		# function call
		else:
			self.fcall = True
			# function call with children
			if(m[-1] == '{'):
				match = re.match(r'^(?P<n>.*) *\(.*', m)
				if(match):
					self.name = match.group('n').strip()
			# function call with no children (leaf)
			elif(m[-1] == ';'):
				self.freturn = True
				match = re.match(r'^(?P<n>.*) *\(.*', m)
				if(match):
					self.name = match.group('n').strip()
			# something else (possibly a trace marker)
			else:
				self.name = m
	def isCall(self):
		# pure function entry (has children)
		return self.fcall and not self.freturn
	def isReturn(self):
		# pure function exit
		return self.freturn and not self.fcall
	def isLeaf(self):
		# combined entry and exit on one line (no children)
		return self.fcall and self.freturn
	def getDepth(self, str):
		# two spaces of indentation per call depth; use integer division
		# so depth is an int (avoids float depths/dict keys downstream)
		return len(str)//2
	def debugPrint(self, info=''):
		if self.isLeaf():
			pprint(' -- %12.6f (depth=%02d): %s(); (%.3f us) %s' % (self.time, \
				self.depth, self.name, self.length*1000000, info))
		elif self.freturn:
			pprint(' -- %12.6f (depth=%02d): %s} (%.3f us) %s' % (self.time, \
				self.depth, self.name, self.length*1000000, info))
		else:
			pprint(' -- %12.6f (depth=%02d): %s() { (%.3f us) %s' % (self.time, \
				self.depth, self.name, self.length*1000000, info))
	def startMarker(self):
		# Is this the starting line of a suspend?
		if not self.fevent:
			return False
		if sysvals.usetracemarkers:
			if(self.name.startswith('SUSPEND START')):
				return True
			return False
		else:
			if(self.type == 'suspend_resume' and
				re.match(r'suspend_enter\[.*\] begin', self.name)):
				return True
			return False
	def endMarker(self):
		# Is this the ending line of a resume?
		if not self.fevent:
			return False
		if sysvals.usetracemarkers:
			if(self.name.startswith('RESUME COMPLETE')):
				return True
			return False
		else:
			if(self.type == 'suspend_resume' and
				re.match(r'thaw_processes\[.*\] end', self.name)):
				return True
			return False
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
	# placeholder name given to synthetic call/return lines inserted while
	# repairing misaligned trace data
	vfname = 'missing_function_name'
	def __init__(self, pid, sv):
		self.id = ''
		self.invalid = False
		self.name = ''
		self.partial = False
		self.ignore = False
		self.start = -1.0
		self.end = -1.0
		self.list = []
		self.depth = 0
		self.pid = pid
		self.sv = sv
	def addLine(self, line):
		# Add one FTraceLine to this callgraph, repairing depth
		# misalignments by synthesizing call/return lines as needed.
		# Returns 1 when the callgraph is complete (back at depth 0),
		# -1 when it was completed only via a repair (marked partial),
		# and 0 when more lines are expected.
		# if this is already invalid, just leave
		if(self.invalid):
			if(line.depth == 0 and line.freturn):
				return 1
			return 0
		# invalidate on bad depth
		if(self.depth < 0):
			self.invalidate(line)
			return 0
		# ignore data til we return to the current depth
		if self.ignore:
			if line.depth > self.depth:
				return 0
			else:
				# back at depth: close the ignored call as a leaf
				self.list[-1].freturn = True
				self.list[-1].length = line.time - self.list[-1].time
				self.ignore = False
		# if this is a return at self.depth, no more work is needed
		if line.depth == self.depth and line.isReturn():
			if line.depth == 0:
				self.end = line.time
				return 1
			return 0
		# compare current depth with this lines pre-call depth
		prelinedep = line.depth
		if line.isReturn():
			prelinedep += 1
		last = 0
		lasttime = line.time
		if len(self.list) > 0:
			last = self.list[-1]
			lasttime = last.time
			if last.isLeaf():
				lasttime += last.length
		# handle low misalignments by inserting returns
		mismatch = prelinedep - self.depth
		warning = self.sv.verbose and abs(mismatch) > 1
		info = []
		if mismatch < 0:
			idx = 0
			# add return calls to get the depth down
			while prelinedep < self.depth:
				self.depth -= 1
				if idx == 0 and last and last.isCall():
					# special case, turn last call into a leaf
					last.depth = self.depth
					last.freturn = True
					last.length = line.time - last.time
					if warning:
						info.append(('[make leaf]', last))
				else:
					vline = FTraceLine(lasttime)
					vline.depth = self.depth
					vline.name = self.vfname
					vline.freturn = True
					self.list.append(vline)
					if warning:
						if idx == 0:
							info.append(('', last))
						info.append(('[add return]', vline))
				idx += 1
			if warning:
				info.append(('', line))
		# handle high misalignments by inserting calls
		elif mismatch > 0:
			idx = 0
			if warning:
				info.append(('', last))
			# add calls to get the depth up
			while prelinedep > self.depth:
				if idx == 0 and line.isReturn():
					# special case, turn this return into a leaf
					line.fcall = True
					prelinedep -= 1
					if warning:
						info.append(('[make leaf]', line))
				else:
					vline = FTraceLine(lasttime)
					vline.depth = self.depth
					vline.name = self.vfname
					vline.fcall = True
					self.list.append(vline)
					self.depth += 1
					if not last:
						self.start = vline.time
					if warning:
						info.append(('[add call]', vline))
				idx += 1
			if warning and ('[make leaf]', line) not in info:
				info.append(('', line))
		if warning:
			pprint('WARNING: ftrace data missing, corrections made:')
			for i in info:
				t, obj = i
				if obj:
					obj.debugPrint(t)
		# process the call and set the new depth
		skipadd = False
		md = self.sv.max_graph_depth
		if line.isCall():
			# ignore blacklisted/overdepth funcs
			if (md and self.depth >= md - 1) or (line.name in self.sv.cgblacklist):
				self.ignore = True
			else:
				self.depth += 1
		elif line.isReturn():
			self.depth -= 1
			# remove blacklisted/overdepth/empty funcs that slipped through
			if (last and last.isCall() and last.depth == line.depth) or \
				(md and last and last.depth >= md) or \
				(line.name in self.sv.cgblacklist):
				while len(self.list) > 0 and self.list[-1].depth > line.depth:
					self.list.pop(-1)
				if len(self.list) == 0:
					self.invalid = True
					return 1
				# collapse the removed call into a leaf on the previous line
				self.list[-1].freturn = True
				self.list[-1].length = line.time - self.list[-1].time
				self.list[-1].name = line.name
				skipadd = True
		if len(self.list) < 1:
			self.start = line.time
		# check for a mismatch that returned all the way to callgraph end
		res = 1
		if mismatch < 0 and self.list[-1].depth == 0 and self.list[-1].freturn:
			line = self.list[-1]
			skipadd = True
			res = -1
		if not skipadd:
			self.list.append(line)
		if(line.depth == 0 and line.freturn):
			if(self.start < 0):
				self.start = line.time
			self.end = line.time
			if line.fcall:
				self.end += line.length
			if self.list[0].name == self.vfname:
				self.invalid = True
			if res == -1:
				self.partial = True
			return res
		return 0
	def invalidate(self, line):
		# mark this callgraph unusable, keeping only its first line
		if(len(self.list) > 0):
			first = self.list[0]
			self.list = []
			self.list.append(first)
		self.invalid = True
		id = 'task %s' % (self.pid)
		window = '(%f - %f)' % (self.start, line.time)
		if(self.depth < 0):
			pprint('Data misalignment for '+id+\
				' (buffer overflow), ignoring this callback')
		else:
			pprint('Too much data for '+id+\
				' '+window+', ignoring this callback')
	def slice(self, dev):
		# Carve out the sub-callgraph between the device's mutex_lock
		# return and its matching mutex_unlock call, rebased to depth 0.
		# Returns the new FTraceCallGraph, or 0 if no clean slice exists.
		minicg = FTraceCallGraph(dev['pid'], self.sv)
		minicg.name = self.name
		mydepth = -1
		good = False
		for l in self.list:
			if(l.time < dev['start'] or l.time > dev['end']):
				continue
			if mydepth < 0:
				if l.name == 'mutex_lock' and l.freturn:
					mydepth = l.depth
				continue
			elif l.depth == mydepth and l.name == 'mutex_unlock' and l.fcall:
				good = True
				break
			l.depth -= mydepth
			minicg.addLine(l)
		if not good or len(minicg.list) < 1:
			return 0
		return minicg
	def repair(self, enddepth):
		# bring the depth back to 0 with additional returns
		fixed = False
		last = self.list[-1]
		for i in reversed(range(enddepth)):
			t = FTraceLine(last.time)
			t.depth = i
			t.freturn = True
			fixed = self.addLine(t)
			if fixed != 0:
				self.end = last.time
				return True
		return False
	def postProcess(self):
		# Validate the callgraph: clip leaf durations, compute call
		# lengths from call/return pairs, and repair a truncated tail.
		# Returns True if the callgraph is (or was made) consistent.
		if len(self.list) > 0:
			self.name = self.list[0].name
		# stack maps depth -> the open call line at that depth
		stack = dict()
		cnt = 0
		last = 0
		for l in self.list:
			# ftrace bug: reported duration is not reliable
			# check each leaf and clip it at max possible length
			if last and last.isLeaf():
				if last.length > l.time - last.time:
					last.length = l.time - last.time
			if l.isCall():
				stack[l.depth] = l
				cnt += 1
			elif l.isReturn():
				if(l.depth not in stack):
					if self.sv.verbose:
						pprint('Post Process Error: Depth missing')
						l.debugPrint()
					return False
				# calculate call length from call/return lines
				cl = stack[l.depth]
				cl.length = l.time - cl.time
				if cl.name == self.vfname:
					cl.name = l.name
				stack.pop(l.depth)
				l.length = 0
				cnt -= 1
			last = l
		if(cnt == 0):
			# trace caught the whole call tree
			return True
		elif(cnt < 0):
			if self.sv.verbose:
				pprint('Post Process Error: Depth is less than 0')
			return False
		# trace ended before call tree finished
		return self.repair(cnt)
	def deviceMatch(self, pid, data):
		# Attach this callgraph to the device entry in data whose pid and
		# time window it covers; returns the device name ('' if none).
		found = ''
		# add the callgraph data to the device hierarchy
		borderphase = {
			'dpm_prepare': 'suspend_prepare',
			'dpm_complete': 'resume_complete'
		}
		if(self.name in borderphase):
			p = borderphase[self.name]
			list = data.dmesg[p]['list']
			for devname in list:
				dev = list[devname]
				if(pid == dev['pid'] and
					self.start <= dev['start'] and
					self.end >= dev['end']):
					cg = self.slice(dev)
					if cg:
						dev['ftrace'] = cg
					found = devname
			return found
		for p in data.sortedPhases():
			if(data.dmesg[p]['start'] <= self.start and
				self.start <= data.dmesg[p]['end']):
				list = data.dmesg[p]['list']
				for devname in sorted(list, key=lambda k:list[k]['start']):
					dev = list[devname]
					if(pid == dev['pid'] and
						self.start <= dev['start'] and
						self.end >= dev['end']):
						dev['ftrace'] = self
						found = devname
						break
				break
		return found
	def newActionFromFunction(self, data):
		# create a new device entry (pid -2) for this function's callgraph
		# in the phase containing its start time
		name = self.name
		if name in ['dpm_run_callback', 'dpm_prepare', 'dpm_complete']:
			return
		fs = self.start
		fe = self.end
		if fs < data.start or fe > data.end:
			return
		phase = ''
		for p in data.sortedPhases():
			if(data.dmesg[p]['start'] <= self.start and
				self.start < data.dmesg[p]['end']):
				phase = p
				break
		if not phase:
			return
		out = data.newActionGlobal(name, fs, fe, -2)
		if out:
			phase, myname = out
			data.dmesg[phase]['list'][myname]['ftrace'] = self
	def debugPrint(self, info=''):
		# print the full callgraph line by line for debugging
		pprint('%s pid=%d [%f - %f] %.3f us' % \
			(self.name, self.pid, self.start, self.end,
			(self.end - self.start)*1000000))
		for l in self.list:
			if l.isLeaf():
				pprint('%f (%02d): %s(); (%.3f us)%s' % (l.time, \
					l.depth, l.name, l.length*1000000, info))
			elif l.freturn:
				pprint('%f (%02d): %s} (%.3f us)%s' % (l.time, \
					l.depth, l.name, l.length*1000000, info))
			else:
				pprint('%f (%02d): %s() { (%.3f us)%s' % (l.time, \
					l.depth, l.name, l.length*1000000, info))
		pprint(' ')
class DevItem:
	# lightweight binding of a device dict to its test number and phase
	def __init__(self, test, phase, dev):
		self.test = test
		self.phase = phase
		self.dev = dev
	def isa(self, cls):
		# True if this device carries the given html class tag
		return 'htmlclass' in self.dev and cls in self.dev['htmlclass']
# Class: Timeline
# Description:
# A container for a device timeline which calculates
# all the html properties to display it correctly
class Timeline:
	# html templates for the timeline building blocks
	html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div>\n'
	html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
	html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background:{4}">{5}</div>\n'
	html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
	html_legend = '<div id="p{3}" class="square" style="left:{0}%;background:{1}"> {2}</div>\n'
	def __init__(self, rowheight, scaleheight):
		self.html = ''
		self.height = 0 # total timeline height
		self.scaleH = scaleheight # timescale (top) row height
		self.rowH = rowheight # device row height
		self.bodyH = 0 # body height
		self.rows = 0 # total timeline rows
		# rowlines/rowheight: per test, per phase, per row line counts and
		# pixel heights, filled in by getPhaseRows
		self.rowlines = dict()
		self.rowheight = dict()
	def createHeader(self, sv, stamp):
		# emit the page header: version link, log buttons, and the
		# host/kernel/mode/time stamp (plus sysinfo when available)
		if(not stamp['time']):
			return
		self.html += '<div class="version"><a href="https://01.org/pm-graph">%s v%s</a></div>' \
			% (sv.title, sv.version)
		if sv.logmsg and sv.testlog:
			self.html += '<button id="showtest" class="logbtn btnfmt">log</button>'
		if sv.dmesglog:
			self.html += '<button id="showdmesg" class="logbtn btnfmt">dmesg</button>'
		if sv.ftracelog:
			self.html += '<button id="showftrace" class="logbtn btnfmt">ftrace</button>'
		headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
		self.html += headline_stamp.format(stamp['host'], stamp['kernel'],
			stamp['mode'], stamp['time'])
		if 'man' in stamp and 'plat' in stamp and 'cpu' in stamp and \
			stamp['man'] and stamp['plat'] and stamp['cpu']:
			headline_sysinfo = '<div class="stamp sysinfo">{0} {1} <i>with</i> {2}</div>\n'
			self.html += headline_sysinfo.format(stamp['man'], stamp['plat'], stamp['cpu'])
	# Function: getDeviceRows
	# Description:
	#    determine how many rows the device funcs will take
	# Arguments:
	#    rawlist: the list of devices/actions for a single phase
	# Output:
	#    The total number of rows needed to display this phase of the timeline
	def getDeviceRows(self, rawlist):
		# clear all rows and set them to undefined
		sortdict = dict()
		for item in rawlist:
			item.row = -1
			sortdict[item] = item.length
		# longest items first so large spans graph on top
		sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
		remaining = len(sortlist)
		rowdata = dict()
		row = 1
		# try to pack each row with as many ranges as possible
		while(remaining > 0):
			if(row not in rowdata):
				rowdata[row] = []
			for i in sortlist:
				if(i.row >= 0):
					continue
				s = i.time
				e = i.time + i.length
				valid = True
				# an item fits the row only if it overlaps nothing in it
				for ritem in rowdata[row]:
					rs = ritem.time
					re = ritem.time + ritem.length
					if(not (((s <= rs) and (e <= rs)) or
						((s >= re) and (e >= re)))):
						valid = False
						break
				if(valid):
					rowdata[row].append(i)
					i.row = row
					remaining -= 1
			row += 1
		return row
	# Function: getPhaseRows
	# Description:
	#    Organize the timeline entries into the smallest
	#    number of rows possible, with no entry overlapping
	# Arguments:
	#    devlist: the list of devices/actions in a group of contiguous phases
	# Output:
	#    The total number of rows needed to display this phase of the timeline
	def getPhaseRows(self, devlist, row=0, sortby='length'):
		# clear all rows and set them to undefined
		remaining = len(devlist)
		rowdata = dict()
		sortdict = dict()
		myphases = []
		# initialize all device rows to -1 and calculate devrows
		for item in devlist:
			dev = item.dev
			tp = (item.test, item.phase)
			if tp not in myphases:
				myphases.append(tp)
			dev['row'] = -1
			if sortby == 'start':
				# sort by start 1st, then length 2nd
				sortdict[item] = (-1*float(dev['start']), float(dev['end']) - float(dev['start']))
			else:
				# sort by length 1st, then name 2nd
				sortdict[item] = (float(dev['end']) - float(dev['start']), item.dev['name'])
			if 'src' in dev:
				dev['devrows'] = self.getDeviceRows(dev['src'])
		# sort the devlist by length so that large items graph on top
		sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
		# section dividers (pid -2) are placed before everything else
		orderedlist = []
		for item in sortlist:
			if item.dev['pid'] == -2:
				orderedlist.append(item)
		for item in sortlist:
			if item not in orderedlist:
				orderedlist.append(item)
		# try to pack each row with as many devices as possible
		while(remaining > 0):
			rowheight = 1
			if(row not in rowdata):
				rowdata[row] = []
			for item in orderedlist:
				dev = item.dev
				if(dev['row'] < 0):
					s = dev['start']
					e = dev['end']
					valid = True
					# a device fits only if it overlaps nothing in the row
					for ritem in rowdata[row]:
						rs = ritem.dev['start']
						re = ritem.dev['end']
						if(not (((s <= rs) and (e <= rs)) or
							((s >= re) and (e >= re)))):
							valid = False
							break
					if(valid):
						rowdata[row].append(item)
						dev['row'] = row
						remaining -= 1
						if 'devrows' in dev and dev['devrows'] > rowheight:
							rowheight = dev['devrows']
			# record this row's line count and pixel height per (test, phase)
			for t, p in myphases:
				if t not in self.rowlines or t not in self.rowheight:
					self.rowlines[t] = dict()
					self.rowheight[t] = dict()
				if p not in self.rowlines[t] or p not in self.rowheight[t]:
					self.rowlines[t][p] = dict()
					self.rowheight[t][p] = dict()
				rh = self.rowH
				# section headers should use a different row height
				if len(rowdata[row]) == 1 and \
					'htmlclass' in rowdata[row][0].dev and \
					'sec' in rowdata[row][0].dev['htmlclass']:
					rh = 15
				self.rowlines[t][p][row] = rowheight
				self.rowheight[t][p][row] = rowheight * rh
			row += 1
		if(row > self.rows):
			self.rows = int(row)
		return row
	def phaseRowHeight(self, test, phase, row):
		# pixel height of one row in a phase
		return self.rowheight[test][phase][row]
	def phaseRowTop(self, test, phase, row):
		# pixel offset of a row from the top of its phase
		top = 0
		for i in sorted(self.rowheight[test][phase]):
			if i >= row:
				break
			top += self.rowheight[test][phase][i]
		return top
	def calcTotalRows(self):
		# Calculate the heights and offsets for the header and rows
		maxrows = 0
		standardphases = []
		for t in self.rowlines:
			for p in self.rowlines[t]:
				total = 0
				for i in sorted(self.rowlines[t][p]):
					total += self.rowlines[t][p][i]
				if total > maxrows:
					maxrows = total
				if total == len(self.rowlines[t][p]):
					standardphases.append((t, p))
		self.height = self.scaleH + (maxrows*self.rowH)
		self.bodyH = self.height - self.scaleH
		# if there is 1 line per row, draw them the standard way
		for t, p in standardphases:
			for i in sorted(self.rowheight[t][p]):
				self.rowheight[t][p][i] = float(self.bodyH)/len(self.rowlines[t][p])
	def createZoomBox(self, mode='command', testcount=1):
		# Create bounding box, add buttons
		html_zoombox = '<center><button id="zoomin">ZOOM IN +</button><button id="zoomout">ZOOM OUT -</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
		html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
		html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail{0}</button>'
		html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
		if mode != 'command':
			if testcount > 1:
				self.html += html_devlist2
				self.html += html_devlist1.format('1')
			else:
				self.html += html_devlist1.format('')
		self.html += html_zoombox
		self.html += html_timeline.format('dmesg', self.height)
	# Function: createTimeScale
	# Description:
	#    Create the timescale for a timeline block
	# Arguments:
	#    m0: start time (mode begin)
	#    mMax: end time (mode end)
	#    tTotal: total timeline time
	#    mode: suspend or resume
	# Output:
	#    The html code needed to display the time scale
	def createTimeScale(self, m0, mMax, tTotal, mode):
		timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
		rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">{0}</div>\n'
		output = '<div class="timescale">\n'
		# set scale for timeline
		mTotal = mMax - m0
		# tS: tick spacing in seconds (0.1s, or 1s for long timelines)
		tS = 0.1
		if(tTotal <= 0):
			return output+'</div>\n'
		if(tTotal > 4):
			tS = 1
		divTotal = int(mTotal/tS) + 1
		divEdge = (mTotal - tS*(divTotal-1))*100/mTotal
		for i in range(divTotal):
			htmlline = ''
			if(mode == 'suspend'):
				# suspend counts down to 0 at the right edge
				pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal) - divEdge)
				val = '%0.fms' % (float(i-divTotal+1)*tS*1000)
				if(i == divTotal - 1):
					val = mode
				htmlline = timescale.format(pos, val)
			else:
				pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal))
				val = '%0.fms' % (float(i)*tS*1000)
				htmlline = timescale.format(pos, val)
				if(i == 0):
					htmlline = rline.format(mode)
			output += htmlline
		self.html += output+'</div>\n'
# Class: TestProps
# Description:
# A list of values describing the properties of these test runs
class TestProps:
	# regex formats for the header/comment lines found in trace/dmesg logs
	stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
		'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
		' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
	wififmt    = '^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
	tstatfmt   = '^# turbostat (?P<t>\S*)'
	testerrfmt = '^# enter_sleep_error (?P<e>.*)'
	sysinfofmt = '^# sysinfo .*'
	cmdlinefmt = '^# command \| (?P<cmd>.*)'
	kparamsfmt = '^# kparams \| (?P<kp>.*)'
	devpropfmt = '# Device Properties: .*'
	pinfofmt   = '# platform-(?P<val>[a-z,A-Z,0-9]*): (?P<info>.*)'
	tracertypefmt = '# tracer: (?P<t>.*)'
	firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
	procexecfmt = 'ps - (?P<ps>.*)$'
	# the two ftrace line formats: function_graph (fg) and nop tracer
	ftrace_line_fmt_fg = \
		'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
		' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
		'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
	ftrace_line_fmt_nop = \
		' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
		'(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
		'(?P<msg>.*)'
	machinesuspend = 'machine_suspend\[.*'
	def __init__(self):
		self.stamp = ''
		self.sysinfo = ''
		self.cmdline = ''
		self.testerror = []
		self.turbostat = []
		self.wifi = []
		self.fwdata = []
		self.ftrace_line_fmt = self.ftrace_line_fmt_nop
		self.cgformat = False
		self.data = 0
		self.ktemp = dict()
	def setTracerType(self, tracer):
		# select the ftrace line format based on the tracer in use;
		# function_graph implies callgraph (cgformat) data
		if(tracer == 'function_graph'):
			self.cgformat = True
			self.ftrace_line_fmt = self.ftrace_line_fmt_fg
		elif(tracer == 'nop'):
			self.ftrace_line_fmt = self.ftrace_line_fmt_nop
		else:
			doError('Invalid tracer format: [%s]' % tracer)
	def stampInfo(self, line, sv):
		# check a log line against all known header formats, storing any
		# match; returns True if the line was a recognized header
		if re.match(self.stampfmt, line):
			self.stamp = line
			return True
		elif re.match(self.sysinfofmt, line):
			self.sysinfo = line
			return True
		elif re.match(self.tstatfmt, line):
			self.turbostat.append(line)
			return True
		elif re.match(self.wififmt, line):
			self.wifi.append(line)
			return True
		elif re.match(self.testerrfmt, line):
			self.testerror.append(line)
			return True
		elif re.match(self.firmwarefmt, line):
			self.fwdata.append(line)
			return True
		elif(re.match(self.devpropfmt, line)):
			self.parseDevprops(line, sv)
			return True
		elif(re.match(self.pinfofmt, line)):
			self.parsePlatformInfo(line, sv)
			return True
		m = re.match(self.cmdlinefmt, line)
		if m:
			self.cmdline = m.group('cmd')
			return True
		m = re.match(self.tracertypefmt, line)
		if(m):
			self.setTracerType(m.group('t'))
			return True
		return False
	def parseStamp(self, data, sv):
		# populate data.stamp (and related sysvals fields) from the headers
		# gathered by stampInfo; errors out if no stamp was found
		# global test data
		m = re.match(self.stampfmt, self.stamp)
		if not self.stamp or not m:
			doError('data does not include the expected stamp')
		data.stamp = {'time': '', 'host': '', 'mode': ''}
		dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
			int(m.group('d')), int(m.group('H')), int(m.group('M')),
			int(m.group('S')))
		data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
		data.stamp['host'] = m.group('host')
		data.stamp['mode'] = m.group('mode')
		data.stamp['kernel'] = m.group('kernel')
		if re.match(self.sysinfofmt, self.sysinfo):
			# sysinfo line is a '|'-separated list of key:value pairs
			for f in self.sysinfo.split('|'):
				if '#' in f:
					continue
				tmp = f.strip().split(':', 1)
				key = tmp[0]
				val = tmp[1]
				data.stamp[key] = val
		sv.hostname = data.stamp['host']
		sv.suspendmode = data.stamp['mode']
		# freeze mode uses a different machine-suspend trace event
		if sv.suspendmode == 'freeze':
			self.machinesuspend = 'timekeeping_freeze\[.*'
		else:
			self.machinesuspend = 'machine_suspend\[.*'
		if sv.suspendmode == 'command' and sv.ftracefile != '':
			# in command mode, recover the real mode from the trace itself
			modes = ['on', 'freeze', 'standby', 'mem', 'disk']
			fp = sv.openlog(sv.ftracefile, 'r')
			for line in fp:
				m = re.match('.* machine_suspend\[(?P<mode>.*)\]', line)
				if m and m.group('mode') in ['1', '2', '3', '4']:
					sv.suspendmode = modes[int(m.group('mode'))]
					data.stamp['mode'] = sv.suspendmode
					break
			fp.close()
		sv.cmdline = self.cmdline
		if not sv.stamp:
			sv.stamp = data.stamp
		# firmware data
		if sv.suspendmode == 'mem' and len(self.fwdata) > data.testnumber:
			m = re.match(self.firmwarefmt, self.fwdata[data.testnumber])
			if m:
				data.fwSuspend, data.fwResume = int(m.group('s')), int(m.group('r'))
				if(data.fwSuspend > 0 or data.fwResume > 0):
					data.fwValid = True
		# turbostat data
		if len(self.turbostat) > data.testnumber:
			m = re.match(self.tstatfmt, self.turbostat[data.testnumber])
			if m:
				data.turbostat = m.group('t')
		# wifi data
		if len(self.wifi) > data.testnumber:
			m = re.match(self.wififmt, self.wifi[data.testnumber])
			if m:
				data.wifi = {'dev': m.group('d'), 'stat': m.group('s'),
					'time': float(m.group('t'))}
				data.stamp['wifi'] = m.group('d')
		# sleep mode enter errors
		if len(self.testerror) > data.testnumber:
			m = re.match(self.testerrfmt, self.testerror[data.testnumber])
			if m:
				data.enterfail = m.group('e')
	def devprops(self, data):
		# parse a ';'-separated device properties string into a dict of
		# DevProps objects; each entry is "name,altname,isasync"
		props = dict()
		devlist = data.split(';')
		for dev in devlist:
			f = dev.split(',')
			if len(f) < 3:
				continue
			dev = f[0]
			props[dev] = DevProps()
			props[dev].altname = f[1]
			if int(f[2]):
				props[dev].isasync = True
			else:
				props[dev].isasync = False
		return props
	def parseDevprops(self, line, sv):
		# parse the legacy '# Device Properties:' header line
		idx = line.index(': ') + 2
		if idx >= len(line):
			return
		props = self.devprops(line[idx:])
		if sv.suspendmode == 'command' and 'testcommandstring' in props:
			sv.testcommand = props['testcommandstring'].altname
		sv.devprops = props
	def parsePlatformInfo(self, line, sv):
		# parse a '# platform-<name>:' header; devinfo and testcmd are
		# special-cased, everything else is stored as (name, cmd, output)
		m = re.match(self.pinfofmt, line)
		if not m:
			return
		name, info = m.group('val'), m.group('info')
		if name == 'devinfo':
			sv.devprops = self.devprops(sv.b64unzip(info))
			return
		elif name == 'testcmd':
			sv.testcommand = info
			return
		field = info.split('|')
		if len(field) < 2:
			return
		cmdline = field[0].strip()
		output = sv.b64unzip(field[1].strip())
		sv.platinfo.append([name, cmdline, output])
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
	# A container pairing one Data object with the per-test temporary
	# storage used while parsing: ftemp holds in-progress ftrace
	# callgraphs and ttemp holds trace event intervals.
	def __init__(self, dataobj):
		self.data = dataobj
		self.ftemp = {}
		self.ttemp = {}
class ProcessMonitor:
	# Background sampler that periodically reads every /proc/<pid>/stat
	# and writes a one-line summary of the processes that consumed cpu
	# time into the ftrace trace_marker, so process activity appears in
	# the timeline.
	def __init__(self):
		# pid -> {'name', 'user', 'kern'}: last sampled jiffy counters
		self.proclist = dict()
		self.running = False
	def procstat(self):
		# Take one sample of all processes and return a 'ps - ...'
		# summary string of those whose user+kernel jiffies increased
		# since the previous sample (empty suffix if none did).
		c = ['cat /proc/[1-9]*/stat 2>/dev/null']
		process = Popen(c, shell=True, stdout=PIPE)
		running = dict()
		for line in process.stdout:
			data = ascii(line).split()
			pid = data[0]
			# field 1 is "(comm)"; strip the parentheses
			name = re.sub('[()]', '', data[1])
			user = int(data[13])
			kern = int(data[14])
			kjiff = ujiff = 0
			if pid not in self.proclist:
				self.proclist[pid] = {'name' : name, 'user' : user, 'kern' : kern}
			else:
				val = self.proclist[pid]
				ujiff = user - val['user']
				kjiff = kern - val['kern']
				val['user'] = user
				val['kern'] = kern
			if ujiff > 0 or kjiff > 0:
				running[pid] = ujiff + kjiff
		process.wait()
		out = ''
		for pid in running:
			jiffies = running[pid]
			val = self.proclist[pid]
			if out:
				out += ','
			out += '%s-%s %d' % (val['name'], pid, jiffies)
		return 'ps - '+out
	def processMonitor(self, tid):
		# Thread body: keep sampling until stop() clears self.running.
		while self.running:
			out = self.procstat()
			if out:
				sysvals.fsetVal(out, 'trace_marker')
	def start(self):
		# NOTE(review): stop() does not join the thread, so one final
		# sample may still be written after stop() returns — presumably
		# harmless here, but worth confirming.
		self.thread = Thread(target=self.processMonitor, args=(0,))
		self.running = True
		self.thread.start()
	def stop(self):
		self.running = False
# ----------------- FUNCTIONS --------------------
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has all of the trace events,
# markers, and/or kprobes required for primary parsing.
def doesTraceLogHaveTraceEvents():
	"""Scan the ftrace log and record which data sources it contains.

	Sets sysvals.usekprobes (kprobe call/return markers present),
	sysvals.usetraceevents (at least one of the two required trace
	events present) and sysvals.usetracemarkers (both suspend/resume
	markers present). Reads the log named by sysvals.ftracefile.
	"""
	kpcheck = ['_cal: (', '_ret: (']
	techeck = ['suspend_resume', 'device_pm_callback']
	tmcheck = ['SUSPEND START', 'RESUME COMPLETE']
	sysvals.usekprobes = False
	fp = sysvals.openlog(sysvals.ftracefile, 'r')
	for line in fp:
		# check for kprobes
		if not sysvals.usekprobes:
			for i in kpcheck:
				if i in line:
					sysvals.usekprobes = True
		# drop each trace event and marker from its list once seen
		techeck = [i for i in techeck if i not in line]
		tmcheck = [i for i in tmcheck if i not in line]
	fp.close()
	# one of the two trace events is enough for event-based parsing
	sysvals.usetraceevents = len(techeck) < 2
	# both markers must be present to rely on them
	sysvals.usetracemarkers = len(tmcheck) == 0
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Adds callgraph data which lacks trace event data. This is only
# for timelines generated from 3.15 or older
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
	"""Attach legacy (pre-3.15) ftrace callgraph data to parsed dmesg Data objects.

	Parses the ftrace log line by line, builds FTraceCallGraph objects
	per pid, and after the scan assigns each finished callgraph to the
	device entry it overlaps in time and pid.
	"""
	# create TestRun vessels for ftrace parsing
	testcnt = len(testruns)
	testidx = 0
	testrun = []
	for data in testruns:
		testrun.append(TestRun(data))
	# extract the callgraph and traceevent data
	sysvals.vprint('Analyzing the ftrace data (%s)...' % \
		os.path.basename(sysvals.ftracefile))
	tp = TestProps()
	tf = sysvals.openlog(sysvals.ftracefile, 'r')
	data = 0
	for line in tf:
		# remove any latent carriage returns
		line = line.replace('\r\n', '')
		if tp.stampInfo(line, sysvals):
			continue
		# parse only valid lines, if this is not one move on
		m = re.match(tp.ftrace_line_fmt, line)
		if(not m):
			continue
		# gather the basic message data from the line
		m_time = m.group('time')
		m_pid = m.group('pid')
		m_msg = m.group('msg')
		if(tp.cgformat):
			m_param3 = m.group('dur')
		else:
			m_param3 = 'traceevent'
		if(m_time and m_pid and m_msg):
			t = FTraceLine(m_time, m_msg, m_param3)
			pid = int(m_pid)
		else:
			continue
		# the line should be a call, return, or event
		if(not t.fcall and not t.freturn and not t.fevent):
			continue
		# look for the suspend start marker
		if(t.startMarker()):
			data = testrun[testidx].data
			tp.parseStamp(data, sysvals)
			data.setStart(t.time, t.name)
			continue
		if(not data):
			continue
		# find the end of resume
		if(t.endMarker()):
			data.setEnd(t.time, t.name)
			testidx += 1
			if(testidx >= testcnt):
				break
			continue
		# trace event processing
		if(t.fevent):
			continue
		# call/return processing
		elif sysvals.usecallgraph:
			# create a callgraph object for the data
			if(pid not in testrun[testidx].ftemp):
				testrun[testidx].ftemp[pid] = []
				testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid, sysvals))
			# when the call is finished, see which device matches it
			cg = testrun[testidx].ftemp[pid][-1]
			res = cg.addLine(t)
			if(res != 0):
				testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid, sysvals))
			if(res == -1):
				testrun[testidx].ftemp[pid][-1].addLine(t)
	tf.close()
	for test in testrun:
		# add the callgraph data to the device hierarchy
		for pid in test.ftemp:
			for cg in test.ftemp[pid]:
				if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
					continue
				if(not cg.postProcess()):
					# NOTE(review): 'm' here is the match object left
					# over from the last ftrace line parsed in the loop
					# above — presumably used only for its 'cpu' group;
					# verify this is intentional.
					id = 'task %s cpu %s' % (pid, m.group('cpu'))
					sysvals.vprint('Sanity check failed for '+\
						id+', ignoring this callback')
					continue
				callstart = cg.start
				callend = cg.end
				# assign the callgraph to the first phase containing its
				# start, and within it to a device with a matching pid
				# whose interval it covers
				for p in test.data.sortedPhases():
					if(test.data.dmesg[p]['start'] <= callstart and
						callstart <= test.data.dmesg[p]['end']):
						list = test.data.dmesg[p]['list']
						for devname in list:
							dev = list[devname]
							if(pid == dev['pid'] and
								callstart <= dev['start'] and
								callend >= dev['end']):
								dev['ftrace'] = cg
								break
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog(live=False):
	"""Parse the primary ftrace log (sysvals.ftracefile) into Data objects.

	This is the main parser used when the log contains the
	suspend_resume and device_pm_callback trace events. It walks the
	log once, driving a phase state machine per test run, collecting
	trace events, kprobes and (optionally) callgraph data, then merges
	everything into the Data device hierarchy.

	Returns a tuple (testdata, errstring) where testdata is a list of
	Data objects and errstring joins any per-test failure messages.
	"""
	sysvals.vprint('Analyzing the ftrace data (%s)...' % \
		os.path.basename(sysvals.ftracefile))
	if(os.path.exists(sysvals.ftracefile) == False):
		doError('%s does not exist' % sysvals.ftracefile)
	if not live:
		sysvals.setupAllKprobes()
	ksuscalls = ['ksys_sync', 'pm_prepare_console']
	krescalls = ['pm_restore_console']
	tracewatch = ['irq_wakeup']
	if sysvals.usekprobes:
		tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
			'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON',
			'CPU_OFF', 'acpi_suspend']
	# extract the callgraph and traceevent data
	s2idle_enter = hwsus = False
	tp = TestProps()
	testruns, testdata = [], []
	# limbo is True between the end of one test and the start of the next
	testrun, data, limbo = 0, 0, True
	tf = sysvals.openlog(sysvals.ftracefile, 'r')
	phase = 'suspend_prepare'
	for line in tf:
		# remove any latent carriage returns
		line = line.replace('\r\n', '')
		if tp.stampInfo(line, sysvals):
			continue
		# ignore all other commented lines
		# NOTE(review): line[0] would raise IndexError on an empty line
		# (e.g. a bare '\r\n' collapsed by the replace above) — confirm
		# such lines cannot occur or are caught by stampInfo.
		if line[0] == '#':
			continue
		# ftrace line: parse only valid lines
		m = re.match(tp.ftrace_line_fmt, line)
		if(not m):
			continue
		# gather the basic message data from the line
		m_time = m.group('time')
		m_proc = m.group('proc')
		m_pid = m.group('pid')
		m_msg = m.group('msg')
		if(tp.cgformat):
			m_param3 = m.group('dur')
		else:
			m_param3 = 'traceevent'
		if(m_time and m_pid and m_msg):
			t = FTraceLine(m_time, m_msg, m_param3)
			pid = int(m_pid)
		else:
			continue
		# the line should be a call, return, or event
		if(not t.fcall and not t.freturn and not t.fevent):
			continue
		# find the start of suspend
		if(t.startMarker()):
			data, limbo = Data(len(testdata)), False
			testdata.append(data)
			testrun = TestRun(data)
			testruns.append(testrun)
			tp.parseStamp(data, sysvals)
			data.setStart(t.time, t.name)
			data.first_suspend_prepare = True
			phase = data.setPhase('suspend_prepare', t.time, True)
			continue
		if(not data or limbo):
			continue
		# process cpu exec line
		if t.type == 'tracing_mark_write':
			m = re.match(tp.procexecfmt, t.name)
			if(m):
				proclist = dict()
				for ps in m.group('ps').split(','):
					val = ps.split()
					if not val:
						continue
					name = val[0].replace('--', '-')
					proclist[name] = int(val[1])
				data.pstl[t.time] = proclist
				continue
		# find the end of resume
		if(t.endMarker()):
			if data.tKernRes == 0:
				data.tKernRes = t.time
			data.handleEndMarker(t.time, t.name)
			if(not sysvals.usetracemarkers):
				# no trace markers? then quit and be sure to finish recording
				# the event we used to trigger resume end
				if('thaw_processes' in testrun.ttemp and len(testrun.ttemp['thaw_processes']) > 0):
					# if an entry exists, assume this is its end
					testrun.ttemp['thaw_processes'][-1]['end'] = t.time
			limbo = True
			continue
		# trace event processing
		if(t.fevent):
			if(t.type == 'suspend_resume'):
				# suspend_resume trace events have two types, begin and end
				if(re.match('(?P<name>.*) begin$', t.name)):
					isbegin = True
				elif(re.match('(?P<name>.*) end$', t.name)):
					isbegin = False
				else:
					continue
				if '[' in t.name:
					m = re.match('(?P<name>.*)\[.*', t.name)
				else:
					m = re.match('(?P<name>.*) .*', t.name)
				name = m.group('name')
				# ignore these events
				if(name.split('[')[0] in tracewatch):
					continue
				# -- phase changes --
				# start of kernel suspend
				if(re.match('suspend_enter\[.*', t.name)):
					if(isbegin and data.tKernSus == 0):
						data.tKernSus = t.time
					continue
				# suspend_prepare start
				elif(re.match('dpm_prepare\[.*', t.name)):
					if isbegin and data.first_suspend_prepare:
						data.first_suspend_prepare = False
						if data.tKernSus == 0:
							data.tKernSus = t.time
						continue
					phase = data.setPhase('suspend_prepare', t.time, isbegin)
					continue
				# suspend start
				elif(re.match('dpm_suspend\[.*', t.name)):
					phase = data.setPhase('suspend', t.time, isbegin)
					continue
				# suspend_late start
				elif(re.match('dpm_suspend_late\[.*', t.name)):
					phase = data.setPhase('suspend_late', t.time, isbegin)
					continue
				# suspend_noirq start
				elif(re.match('dpm_suspend_noirq\[.*', t.name)):
					phase = data.setPhase('suspend_noirq', t.time, isbegin)
					continue
				# suspend_machine/resume_machine
				elif(re.match(tp.machinesuspend, t.name)):
					lp = data.lastPhase()
					if(isbegin):
						hwsus = True
						if lp.startswith('resume_machine'):
							# trim out s2idle loops, track time trying to freeze
							llp = data.lastPhase(2)
							if llp.startswith('suspend_machine'):
								if 'waking' not in data.dmesg[llp]:
									data.dmesg[llp]['waking'] = [0, 0.0]
								data.dmesg[llp]['waking'][0] += 1
								data.dmesg[llp]['waking'][1] += \
									t.time - data.dmesg[lp]['start']
							data.currphase = ''
							del data.dmesg[lp]
							continue
						phase = data.setPhase('suspend_machine', data.dmesg[lp]['end'], True)
						data.setPhase(phase, t.time, False)
						if data.tSuspended == 0:
							data.tSuspended = t.time
					else:
						if lp.startswith('resume_machine'):
							data.dmesg[lp]['end'] = t.time
							continue
						phase = data.setPhase('resume_machine', t.time, True)
						if(sysvals.suspendmode in ['mem', 'disk']):
							susp = phase.replace('resume', 'suspend')
							if susp in data.dmesg:
								data.dmesg[susp]['end'] = t.time
							data.tSuspended = t.time
						data.tResumed = t.time
					continue
				# resume_noirq start
				elif(re.match('dpm_resume_noirq\[.*', t.name)):
					phase = data.setPhase('resume_noirq', t.time, isbegin)
					continue
				# resume_early start
				elif(re.match('dpm_resume_early\[.*', t.name)):
					phase = data.setPhase('resume_early', t.time, isbegin)
					continue
				# resume start
				elif(re.match('dpm_resume\[.*', t.name)):
					phase = data.setPhase('resume', t.time, isbegin)
					continue
				# resume complete start
				elif(re.match('dpm_complete\[.*', t.name)):
					phase = data.setPhase('resume_complete', t.time, isbegin)
					continue
				# skip trace events inside devices calls
				if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
					continue
				# global events (outside device calls) are graphed
				if(name not in testrun.ttemp):
					testrun.ttemp[name] = []
				# special handling for s2idle_enter
				if name == 'machine_suspend':
					if hwsus:
						s2idle_enter = hwsus = False
					elif s2idle_enter and not isbegin:
						if(len(testrun.ttemp[name]) > 0):
							testrun.ttemp[name][-1]['end'] = t.time
							testrun.ttemp[name][-1]['loop'] += 1
					elif not s2idle_enter and isbegin:
						s2idle_enter = True
						testrun.ttemp[name].append({'begin': t.time,
							'end': t.time, 'pid': pid, 'loop': 0})
					continue
				if(isbegin):
					# create a new list entry
					testrun.ttemp[name].append(\
						{'begin': t.time, 'end': t.time, 'pid': pid})
				else:
					if(len(testrun.ttemp[name]) > 0):
						# if an entry exists, assume this is its end
						testrun.ttemp[name][-1]['end'] = t.time
			# device callback start
			elif(t.type == 'device_pm_callback_start'):
				if phase not in data.dmesg:
					continue
				m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
					t.name);
				if(not m):
					continue
				drv = m.group('drv')
				n = m.group('d')
				p = m.group('p')
				if(n and p):
					data.newAction(phase, n, pid, p, t.time, -1, drv)
					if pid not in data.devpids:
						data.devpids.append(pid)
			# device callback finish
			elif(t.type == 'device_pm_callback_end'):
				if phase not in data.dmesg:
					continue
				m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
				if(not m):
					continue
				n = m.group('d')
				dev = data.findDevice(phase, n)
				if dev:
					dev['length'] = t.time - dev['start']
					dev['end'] = t.time
		# kprobe event processing
		elif(t.fkprobe):
			kprobename = t.type
			kprobedata = t.name
			key = (kprobename, pid)
			# displayname is generated from kprobe data
			displayname = ''
			if(t.fcall):
				displayname = sysvals.kprobeDisplayName(kprobename, kprobedata)
				if not displayname:
					continue
				if(key not in tp.ktemp):
					tp.ktemp[key] = []
				tp.ktemp[key].append({
					'pid': pid,
					'begin': t.time,
					'end': -1,
					'name': displayname,
					'cdata': kprobedata,
					'proc': m_proc,
				})
				# start of kernel resume
				if(data.tKernSus == 0 and phase == 'suspend_prepare' \
					and kprobename in ksuscalls):
					data.tKernSus = t.time
			elif(t.freturn):
				if(key not in tp.ktemp) or len(tp.ktemp[key]) < 1:
					continue
				e = next((x for x in reversed(tp.ktemp[key]) if x['end'] < 0), 0)
				if not e:
					continue
				e['end'] = t.time
				e['rdata'] = kprobedata
				# end of kernel resume
				if(phase != 'suspend_prepare' and kprobename in krescalls):
					if phase in data.dmesg:
						data.dmesg[phase]['end'] = t.time
					data.tKernRes = t.time
		# callgraph processing
		elif sysvals.usecallgraph:
			# create a callgraph object for the data
			key = (m_proc, pid)
			if(key not in testrun.ftemp):
				testrun.ftemp[key] = []
				testrun.ftemp[key].append(FTraceCallGraph(pid, sysvals))
			# when the call is finished, see which device matches it
			cg = testrun.ftemp[key][-1]
			res = cg.addLine(t)
			if(res != 0):
				testrun.ftemp[key].append(FTraceCallGraph(pid, sysvals))
			if(res == -1):
				testrun.ftemp[key][-1].addLine(t)
	tf.close()
	if len(testdata) < 1:
		sysvals.vprint('WARNING: ftrace start marker is missing')
	if data and not data.devicegroups:
		sysvals.vprint('WARNING: ftrace end marker is missing')
		# NOTE(review): t here is the FTraceLine of the last line parsed
		# in the loop above — used as a stand-in for the missing marker.
		data.handleEndMarker(t.time, t.name)
	if sysvals.suspendmode == 'command':
		for test in testruns:
			for p in test.data.sortedPhases():
				if p == 'suspend_prepare':
					test.data.dmesg[p]['start'] = test.data.start
					test.data.dmesg[p]['end'] = test.data.end
				else:
					test.data.dmesg[p]['start'] = test.data.end
					test.data.dmesg[p]['end'] = test.data.end
			test.data.tSuspended = test.data.end
			test.data.tResumed = test.data.end
			test.data.fwValid = False
	# dev source and procmon events can be unreadable with mixed phase height
	if sysvals.usedevsrc or sysvals.useprocmon:
		sysvals.mixedphaseheight = False
	# expand phase boundaries so there are no gaps
	for data in testdata:
		lp = data.sortedPhases()[0]
		for p in data.sortedPhases():
			if(p != lp and not ('machine' in p and 'machine' in lp)):
				data.dmesg[lp]['end'] = data.dmesg[p]['start']
			lp = p
	for i in range(len(testruns)):
		test = testruns[i]
		data = test.data
		# find the total time range for this test (begin, end)
		tlb, tle = data.start, data.end
		if i < len(testruns) - 1:
			tle = testruns[i+1].data.start
		# add the process usage data to the timeline
		if sysvals.useprocmon:
			data.createProcessUsageEvents()
		# add the traceevent data to the device hierarchy
		if(sysvals.usetraceevents):
			# add actual trace funcs
			for name in sorted(test.ttemp):
				for event in test.ttemp[name]:
					if event['end'] - event['begin'] <= 0:
						continue
					title = name
					if name == 'machine_suspend' and 'loop' in event:
						title = 's2idle_enter_%dx' % event['loop']
					data.newActionGlobal(title, event['begin'], event['end'], event['pid'])
			# add the kprobe based virtual tracefuncs as actual devices
			for key in sorted(tp.ktemp):
				name, pid = key
				if name not in sysvals.tracefuncs:
					continue
				if pid not in data.devpids:
					data.devpids.append(pid)
				for e in tp.ktemp[key]:
					kb, ke = e['begin'], e['end']
					if ke - kb < 0.000001 or tlb > kb or tle <= kb:
						continue
					color = sysvals.kprobeColor(name)
					data.newActionGlobal(e['name'], kb, ke, pid, color)
			# add config base kprobes and dev kprobes
			if sysvals.usedevsrc:
				for key in sorted(tp.ktemp):
					name, pid = key
					if name in sysvals.tracefuncs or name not in sysvals.dev_tracefuncs:
						continue
					for e in tp.ktemp[key]:
						kb, ke = e['begin'], e['end']
						if ke - kb < 0.000001 or tlb > kb or tle <= kb:
							continue
						data.addDeviceFunctionCall(e['name'], name, e['proc'], pid, kb,
							ke, e['cdata'], e['rdata'])
		if sysvals.usecallgraph:
			# add the callgraph data to the device hierarchy
			sortlist = dict()
			for key in sorted(test.ftemp):
				proc, pid = key
				for cg in test.ftemp[key]:
					if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
						continue
					if(not cg.postProcess()):
						id = 'task %s' % (pid)
						sysvals.vprint('Sanity check failed for '+\
							id+', ignoring this callback')
						continue
					# match cg data to devices
					devname = ''
					if sysvals.suspendmode != 'command':
						devname = cg.deviceMatch(pid, data)
					if not devname:
						sortkey = '%f%f%d' % (cg.start, cg.end, pid)
						sortlist[sortkey] = cg
					elif len(cg.list) > 1000000 and cg.name != sysvals.ftopfunc:
						sysvals.vprint('WARNING: the callgraph for %s is massive (%d lines)' %\
							(devname, len(cg.list)))
			# create blocks for orphan cg data
			for sortkey in sorted(sortlist):
				cg = sortlist[sortkey]
				name = cg.name
				if sysvals.isCallgraphFunc(name):
					sysvals.vprint('Callgraph found for task %d: %.3fms, %s' % (cg.pid, (cg.end - cg.start)*1000, name))
					cg.newActionFromFunction(data)
	if sysvals.suspendmode == 'command':
		return (testdata, '')
	# fill in any missing phases
	error = []
	for data in testdata:
		tn = '' if len(testdata) == 1 else ('%d' % (data.testnumber + 1))
		terr = ''
		phasedef = data.phasedef
		lp = 'suspend_prepare'
		for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
			if p not in data.dmesg:
				if not terr:
					ph = p if 'machine' in p else lp
					terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, ph)
					pprint('TEST%s FAILED: %s' % (tn, terr))
					error.append(terr)
					if data.tSuspended == 0:
						data.tSuspended = data.dmesg[lp]['end']
					if data.tResumed == 0:
						data.tResumed = data.dmesg[lp]['end']
					data.fwValid = False
				sysvals.vprint('WARNING: phase "%s" is missing!' % p)
			lp = p
		if not terr and 'dev' in data.wifi and data.wifi['stat'] == 'timeout':
			terr = '%s%s failed in wifi_resume <i>(%s %.0fs timeout)</i>' % \
				(sysvals.suspendmode, tn, data.wifi['dev'], data.wifi['time'])
			error.append(terr)
		if not terr and data.enterfail:
			pprint('test%s FAILED: enter %s failed with %s' % (tn, sysvals.suspendmode, data.enterfail))
			terr = 'test%s failed to enter %s mode' % (tn, sysvals.suspendmode)
			error.append(terr)
		if data.tSuspended == 0:
			data.tSuspended = data.tKernRes
		if data.tResumed == 0:
			data.tResumed = data.tSuspended
		if(len(sysvals.devicefilter) > 0):
			data.deviceFilter(sysvals.devicefilter)
		data.fixupInitcallsThatDidntReturn()
		if sysvals.usedevsrc:
			data.optimizeDevSrc()
	# x2: merge any overlapping devices between test runs
	if sysvals.usedevsrc and len(testdata) > 1:
		tc = len(testdata)
		for i in range(tc - 1):
			devlist = testdata[i].overflowDevices()
			for j in range(i + 1, tc):
				testdata[j].mergeOverlapDevices(devlist)
		testdata[0].stitchTouchingThreads(testdata[1:])
	return (testdata, ', '.join(error))
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
	"""Load the dmesg log (sysvals.dmesgfile) into one Data object per test run.

	Legacy path for kernels older than 3.15. Splits the file at each
	'PM: Syncing filesystems' line, stores the raw lines in each Data's
	dmesgtext, and fixes call/return lines that share a timestamp but
	were logged in the wrong order. Returns the list of Data objects.
	"""
	sysvals.vprint('Analyzing the dmesg data (%s)...' % \
		os.path.basename(sysvals.dmesgfile))
	if(os.path.exists(sysvals.dmesgfile) == False):
		doError('%s does not exist' % sysvals.dmesgfile)
	# there can be multiple test runs in a single file
	tp = TestProps()
	tp.stamp = datetime.now().strftime('# suspend-%m%d%y-%H%M%S localhost mem unknown')
	testruns = []
	data = 0
	lf = sysvals.openlog(sysvals.dmesgfile, 'r')
	for line in lf:
		line = line.replace('\r\n', '')
		# strip any prefix before the '[timestamp]' bracket
		idx = line.find('[')
		if idx > 1:
			line = line[idx:]
		if tp.stampInfo(line, sysvals):
			continue
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if(not m):
			continue
		msg = m.group("msg")
		# a sync line marks the start of a new test run
		if(re.match('PM: Syncing filesystems.*', msg)):
			if(data):
				testruns.append(data)
			data = Data(len(testruns))
			tp.parseStamp(data, sysvals)
		if(not data):
			continue
		m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
		if(m):
			sysvals.stamp['kernel'] = m.group('k')
		m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
		if(m):
			sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
		data.dmesgtext.append(line)
	lf.close()
	if data:
		testruns.append(data)
	if len(testruns) < 1:
		doError('dmesg log has no suspend/resume data: %s' \
			% sysvals.dmesgfile)
	# fix lines with same timestamp/function with the call and return swapped
	for data in testruns:
		last = ''
		for line in data.dmesgtext:
			mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
				'(?P<f>.*)\+ @ .*, parent: .*', line)
			mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
				'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
			if(mc and mr and (mc.group('t') == mr.group('t')) and
				(mc.group('f') == mr.group('f'))):
				# swap the two lines in place
				i = data.dmesgtext.index(last)
				j = data.dmesgtext.index(line)
				data.dmesgtext[i] = line
				data.dmesgtext[j] = last
			last = line
	return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyse a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
	"""Fill a Data object with phases, devices and actions parsed from dmesg text.

	Legacy path for kernels older than 3.15 whose ftrace output lacks
	the suspend_resume/device_pm_callback events. Walks data.dmesgtext,
	drives a phase state machine from known dmesg messages, records
	device callback start/end lines, and (when trace events are
	unavailable) synthesizes coarse actions such as CPU on/off.
	Returns True.
	"""
	phase = 'suspend_runtime'
	if(data.fwValid):
		sysvals.vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
			(data.fwSuspend, data.fwResume))
	# dmesg phase match table
	dm = {
		'suspend_prepare': ['PM: Syncing filesystems.*'],
		        'suspend': ['PM: Entering [a-z]* sleep.*', 'Suspending console.*'],
		   'suspend_late': ['PM: suspend of devices complete after.*'],
		  'suspend_noirq': ['PM: late suspend of devices complete after.*'],
		'suspend_machine': ['PM: noirq suspend of devices complete after.*'],
		 'resume_machine': ['ACPI: Low-level resume complete.*'],
		   'resume_noirq': ['ACPI: Waking up from system sleep state.*'],
		   'resume_early': ['PM: noirq resume of devices complete after.*'],
		         'resume': ['PM: early resume of devices complete after.*'],
		'resume_complete': ['PM: resume of devices complete after.*'],
		    'post_resume': ['.*Restarting tasks \.\.\..*'],
	}
	# per-mode overrides for the phase match strings
	if(sysvals.suspendmode == 'standby'):
		dm['resume_machine'] = ['PM: Restoring platform NVS memory']
	elif(sysvals.suspendmode == 'disk'):
		dm['suspend_late'] = ['PM: freeze of devices complete after.*']
		dm['suspend_noirq'] = ['PM: late freeze of devices complete after.*']
		dm['suspend_machine'] = ['PM: noirq freeze of devices complete after.*']
		dm['resume_machine'] = ['PM: Restoring platform NVS memory']
		dm['resume_early'] = ['PM: noirq restore of devices complete after.*']
		dm['resume'] = ['PM: early restore of devices complete after.*']
		dm['resume_complete'] = ['PM: restore of devices complete after.*']
	elif(sysvals.suspendmode == 'freeze'):
		dm['resume_machine'] = ['ACPI: resume from mwait']
	# action table (expected events that occur and show up in dmesg)
	at = {
		'sync_filesystems': {
			'smsg': 'PM: Syncing filesystems.*',
			'emsg': 'PM: Preparing system for mem sleep.*' },
		'freeze_user_processes': {
			'smsg': 'Freezing user space processes .*',
			'emsg': 'Freezing remaining freezable tasks.*' },
		'freeze_tasks': {
			'smsg': 'Freezing remaining freezable tasks.*',
			'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
		'ACPI prepare': {
			'smsg': 'ACPI: Preparing to enter system sleep state.*',
			'emsg': 'PM: Saving platform NVS memory.*' },
		'PM vns': {
			'smsg': 'PM: Saving platform NVS memory.*',
			'emsg': 'Disabling non-boot CPUs .*' },
	}
	t0 = -1.0
	cpu_start = -1.0
	prevktime = -1.0
	actions = dict()
	for line in data.dmesgtext:
		# parse each dmesg line into the time and message
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if(m):
			val = m.group('ktime')
			try:
				ktime = float(val)
			except:
				continue
			msg = m.group('msg')
			# initialize data start to first line time
			if t0 < 0:
				data.setStart(ktime)
				t0 = ktime
		else:
			continue
		# check for a phase change line
		phasechange = False
		for p in dm:
			for s in dm[p]:
				if(re.match(s, msg)):
					phasechange, phase = True, p
					break
		# hack for determining resume_machine end for freeze
		if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
			and phase == 'resume_machine' and \
			re.match('calling  (?P<f>.*)\+ @ .*, parent: .*', msg)):
			data.setPhase(phase, ktime, False)
			phase = 'resume_noirq'
			data.setPhase(phase, ktime, True)
		if phasechange:
			if phase == 'suspend_prepare':
				data.setPhase(phase, ktime, True)
				data.setStart(ktime)
				data.tKernSus = ktime
			elif phase == 'suspend':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'suspend_late':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'suspend_noirq':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'suspend_machine':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'resume_machine':
				lp = data.lastPhase()
				if(sysvals.suspendmode in ['freeze', 'standby']):
					data.tSuspended = prevktime
					if lp:
						data.setPhase(lp, prevktime, False)
				else:
					data.tSuspended = ktime
					if lp:
						data.setPhase(lp, prevktime, False)
				data.tResumed = ktime
				data.setPhase(phase, ktime, True)
			elif phase == 'resume_noirq':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'resume_early':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'resume':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'resume_complete':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'post_resume':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setEnd(ktime)
				data.tKernRes = ktime
				break
		# -- device callbacks --
		if(phase in data.sortedPhases()):
			# device init call
			if(re.match('calling  (?P<f>.*)\+ @ .*, parent: .*', msg)):
				sm = re.match('calling  (?P<f>.*)\+ @ '+\
					'(?P<n>.*), parent: (?P<p>.*)', msg);
				f = sm.group('f')
				n = sm.group('n')
				p = sm.group('p')
				if(f and n and p):
					data.newAction(phase, f, int(n), p, ktime, -1, '')
			# device init return
			elif(re.match('call (?P<f>.*)\+ returned .* after '+\
				'(?P<t>.*) usecs', msg)):
				sm = re.match('call (?P<f>.*)\+ returned .* after '+\
					'(?P<t>.*) usecs(?P<a>.*)', msg);
				f = sm.group('f')
				t = sm.group('t')
				list = data.dmesg[phase]['list']
				if(f in list):
					dev = list[f]
					dev['length'] = int(t)
					dev['end'] = ktime
		# if trace events are not available, these are better than nothing
		if(not sysvals.usetraceevents):
			# look for known actions
			for a in sorted(at):
				if(re.match(at[a]['smsg'], msg)):
					if(a not in actions):
						actions[a] = []
					actions[a].append({'begin': ktime, 'end': ktime})
				if(re.match(at[a]['emsg'], msg)):
					if(a in actions):
						actions[a][-1]['end'] = ktime
			# now look for CPU on/off events
			if(re.match('Disabling non-boot CPUs .*', msg)):
				# start of first cpu suspend
				cpu_start = ktime
			elif(re.match('Enabling non-boot CPUs .*', msg)):
				# start of first cpu resume
				cpu_start = ktime
			elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
				# end of a cpu suspend, start of the next
				m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
				cpu = 'CPU'+m.group('cpu')
				if(cpu not in actions):
					actions[cpu] = []
				actions[cpu].append({'begin': cpu_start, 'end': ktime})
				cpu_start = ktime
			elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
				# end of a cpu resume, start of the next
				m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
				cpu = 'CPU'+m.group('cpu')
				if(cpu not in actions):
					actions[cpu] = []
				actions[cpu].append({'begin': cpu_start, 'end': ktime})
				cpu_start = ktime
		prevktime = ktime
	data.initDevicegroups()
	# fill in any missing phases
	phasedef = data.phasedef
	terr, lp = '', 'suspend_prepare'
	for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
		if p not in data.dmesg:
			if not terr:
				pprint('TEST FAILED: %s failed in %s phase' % (sysvals.suspendmode, lp))
				terr = '%s failed in %s phase' % (sysvals.suspendmode, lp)
				if data.tSuspended == 0:
					data.tSuspended = data.dmesg[lp]['end']
				if data.tResumed == 0:
					data.tResumed = data.dmesg[lp]['end']
			sysvals.vprint('WARNING: phase "%s" is missing!' % p)
		lp = p
	# expand phase boundaries so there are no gaps
	lp = data.sortedPhases()[0]
	for p in data.sortedPhases():
		if(p != lp and not ('machine' in p and 'machine' in lp)):
			data.dmesg[lp]['end'] = data.dmesg[p]['start']
		lp = p
	if data.tSuspended == 0:
		data.tSuspended = data.tKernRes
	if data.tResumed == 0:
		data.tResumed = data.tSuspended
	# fill in any actions we've found
	for name in sorted(actions):
		for event in actions[name]:
			data.newActionGlobal(name, event['begin'], event['end'])
	if(len(sysvals.devicefilter) > 0):
		data.deviceFilter(sysvals.devicefilter)
	data.fixupInitcallsThatDidntReturn()
	return True
def callgraphHTML(sv, hf, num, cg, title, color, devid):
	# Render one callgraph as nested html <article> blocks into hf and
	# return the updated checkbox id counter. Callgraphs shorter than
	# sv.mincglen milliseconds are skipped entirely.
	html_func_top = '<article id="{0}" class="atop" style="background:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
	html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
	html_func_end = '</article>\n'
	html_func_leaf = '<article>{0} {1}</article>\n'
	cgid = devid + cg.id if cg.id else devid
	duration = (cg.end - cg.start) * 1000
	if duration < sv.mincglen:
		return num
	hdrfmt = '<r>(%.3f ms @ '+sv.timeformat+' to '+sv.timeformat+')</r>'
	hf.write(html_func_top.format(cgid, color, num,
		title, hdrfmt % (duration, cg.start, cg.end)))
	num += 1
	linefmt = '<n>(%.3f ms @ '+sv.timeformat+')</n>'
	for entry in cg.list:
		if entry.length < 0.000000001:
			timetag = ''
		else:
			timetag = linefmt % (entry.length*1000, entry.time)
		if entry.isLeaf():
			hf.write(html_func_leaf.format(entry.name, timetag))
		elif entry.freturn:
			hf.write(html_func_end)
		else:
			hf.write(html_func_start.format(num, entry.name, timetag))
			num += 1
	hf.write(html_func_end)
	return num
def addCallgraphs(sv, hf, data):
	# Emit the callgraph <section> of the html report: one entry for
	# every device in every phase, honoring the phase filter
	# (sv.cgphase) and the device filter (sv.cgfilter). Checkbox ids
	# are numbered sequentially across the whole section.
	hf.write('<section id="callgraphs" class="callgraph">\n')
	num = 0
	for phase in data.sortedPhases():
		if sv.cgphase and phase != sv.cgphase:
			continue
		devdict = data.dmesg[phase]['list']
		for devname in data.sortedDevices(phase):
			if len(sv.cgfilter) > 0 and devname not in sv.cgfilter:
				continue
			dev = devdict[devname]
			# a device color overrides the phase color
			if 'color' in dev:
				color = dev['color']
			elif 'color' in data.dmesg[phase]:
				color = data.dmesg[phase]['color']
			else:
				color = 'white'
			if '[' in devname:
				name = devname.split('[')[0]
			else:
				name = devname
			if devname in sv.devprops:
				name = sv.devprops[devname].altName(devname)
			if 'drv' in dev and dev['drv']:
				name += ' {%s}' % dev['drv']
			if sv.suspendmode in suspendmodename:
				name += ' '+phase
			if 'ftrace' in dev:
				cg = dev['ftrace']
				if cg.name == sv.ftopfunc:
					name = 'top level suspend/resume call'
				num = callgraphHTML(sv, hf, num, cg,
					name, color, dev['id'])
			if 'ftraces' in dev:
				for cg in dev['ftraces']:
					num = callgraphHTML(sv, hf, num, cg,
						name+' → '+cg.name, color, dev['id'])
	hf.write('\n\n    </section>\n')
def summaryCSS(title, center=True):
	"""Return the common html header (head, css, open body) for summary pages.

	Arguments:
		title: string used as the html page title
		center: True to center-align table cell text, False to left-align
	"""
	# td text alignment is the only difference between the summary page styles
	tdcenter = 'text-align:center;' if center else ''
	out = '<!DOCTYPE html>\n<html>\n<head>\n\
	<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
	<title>'+title+'</title>\n\
	<style type=\'text/css\'>\n\
	.stamp {width: 100%;text-align:center;background:#888;line-height:30px;color:white;font: 25px Arial;}\n\
	table {width:100%;border-collapse: collapse;border:1px solid;}\n\
	th {border: 1px solid black;background:#222;color:white;}\n\
	td {font: 14px "Times New Roman";'+tdcenter+'}\n\
	tr.head td {border: 1px solid black;background:#aaa;}\n\
	tr.alt {background-color:#ddd;}\n\
	tr.notice {color:red;}\n\
	.minval {background-color:#BBFFBB;}\n\
	.medval {background-color:#BBBBFF;}\n\
	.maxval {background-color:#FFBBBB;}\n\
	.head a {color:#000;text-decoration: none;}\n\
	</style>\n</head>\n<body>\n'
	return out
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile, title):
	"""Create a summary html file for a series of tests.

	Arguments:
		testruns: array of test detail dicts (host, kernel, mode, times, ...)
		htmlfile: path of the output html file to write
		title: header title string shown on the summary page
	"""
	# write the html header first (html head, css code, up to body start)
	html = summaryCSS('Summary - SleepGraph')
	# extract the test data into list
	list = dict()
	# per-mode accumulators: index 0 is suspend, index 1 is resume;
	# tMed maps a time value to the data row it came from (for median lookup);
	# NOTE(review): duplicate time values overwrite each other in tMed — confirm
	# that is acceptable for the median/index calculation
	tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [dict(), dict()]
	iMin, iMed, iMax = [0, 0], [0, 0], [0, 0]
	num = 0
	useturbo = usewifi = False
	lastmode = ''
	cnt = dict()
	for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'], v['time'])):
		mode = data['mode']
		if mode not in list:
			list[mode] = {'data': [], 'avg': [0,0], 'min': [0,0], 'max': [0,0], 'med': [0,0]}
		if lastmode and lastmode != mode and num > 0:
			# the sort groups tests by mode, so a mode change means the
			# previous mode is complete: flush its stats into list[lastmode]
			for i in range(2):
				s = sorted(tMed[i])
				list[lastmode]['med'][i] = s[int(len(s)//2)]
				iMed[i] = tMed[i][list[lastmode]['med'][i]]
			list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num]
			list[lastmode]['min'] = tMin
			list[lastmode]['max'] = tMax
			list[lastmode]['idx'] = (iMin, iMed, iMax)
			tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [dict(), dict()]
			iMin, iMed, iMax = [0, 0], [0, 0], [0, 0]
			num = 0
		# optional columns only appear when the data provides them
		pkgpc10 = syslpi = wifi = ''
		if 'pkgpc10' in data and 'syslpi' in data:
			pkgpc10, syslpi, useturbo = data['pkgpc10'], data['syslpi'], True
		if 'wifi' in data:
			wifi, usewifi = data['wifi'], True
		res = data['result']
		tVal = [float(data['suspend']), float(data['resume'])]
		list[mode]['data'].append([data['host'], data['kernel'],
			data['time'], tVal[0], tVal[1], data['url'], res,
			data['issues'], data['sus_worst'], data['sus_worsttime'],
			data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi])
		idx = len(list[mode]['data']) - 1
		if res.startswith('fail in'):
			res = 'fail'
		if res not in cnt:
			cnt[res] = 1
		else:
			cnt[res] += 1
		if res == 'pass':
			# only passing tests contribute to the min/med/max/avg stats
			for i in range(2):
				tMed[i][tVal[i]] = idx
				tAvg[i] += tVal[i]
				if tMin[i] == 0 or tVal[i] < tMin[i]:
					iMin[i] = idx
					tMin[i] = tVal[i]
				if tMax[i] == 0 or tVal[i] > tMax[i]:
					iMax[i] = idx
					tMax[i] = tVal[i]
			num += 1
		lastmode = mode
	if lastmode and num > 0:
		# flush the stats for the final mode
		for i in range(2):
			s = sorted(tMed[i])
			list[lastmode]['med'][i] = s[int(len(s)//2)]
			iMed[i] = tMed[i][list[lastmode]['med'][i]]
		list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num]
		list[lastmode]['min'] = tMin
		list[lastmode]['max'] = tMax
		list[lastmode]['idx'] = (iMin, iMed, iMax)
	# group test header
	desc = []
	for ilk in sorted(cnt, reverse=True):
		if cnt[ilk] > 0:
			desc.append('%d %s' % (cnt[ilk], ilk))
	html += '<div class="stamp">%s (%d tests: %s)</div>\n' % (title, len(testruns), ', '.join(desc))
	th = '\t<th>{0}</th>\n'
	td = '\t<td>{0}</td>\n'
	tdh = '\t<td{1}>{0}</td>\n'
	tdlink = '\t<td><a href="{0}">html</a></td>\n'
	# column count grows when turbo/wifi data is present
	cols = 12
	if useturbo:
		cols += 2
	if usewifi:
		cols += 1
	colspan = '%d' % cols
	# table header
	html += '<table>\n<tr>\n' + th.format('#') +\
		th.format('Mode') + th.format('Host') + th.format('Kernel') +\
		th.format('Test Time') + th.format('Result') + th.format('Issues') +\
		th.format('Suspend') + th.format('Resume') +\
		th.format('Worst Suspend Device') + th.format('SD Time') +\
		th.format('Worst Resume Device') + th.format('RD Time')
	if useturbo:
		html += th.format('PkgPC10') + th.format('SysLPI')
	if usewifi:
		html += th.format('Wifi')
	html += th.format('Detail')+'</tr>\n'
	# export list into html
	head = '<tr class="head"><td>{0}</td><td>{1}</td>'+\
		'<td colspan='+colspan+' class="sus">Suspend Avg={2} '+\
		'<span class=minval><a href="#s{10}min">Min={3}</a></span> '+\
		'<span class=medval><a href="#s{10}med">Med={4}</a></span> '+\
		'<span class=maxval><a href="#s{10}max">Max={5}</a></span> '+\
		'Resume Avg={6} '+\
		'<span class=minval><a href="#r{10}min">Min={7}</a></span> '+\
		'<span class=medval><a href="#r{10}med">Med={8}</a></span> '+\
		'<span class=maxval><a href="#r{10}max">Max={9}</a></span></td>'+\
		'</tr>\n'
	headnone = '<tr class="head"><td>{0}</td><td>{1}</td><td colspan='+\
		colspan+'></td></tr>\n'
	for mode in sorted(list):
		# header line for each suspend mode
		num = 0
		tAvg, tMin, tMax, tMed = list[mode]['avg'], list[mode]['min'],\
			list[mode]['max'], list[mode]['med']
		count = len(list[mode]['data'])
		if 'idx' in list[mode]:
			iMin, iMed, iMax = list[mode]['idx']
			html += head.format('%d' % count, mode.upper(),
				'%.3f' % tAvg[0], '%.3f' % tMin[0], '%.3f' % tMed[0], '%.3f' % tMax[0],
				'%.3f' % tAvg[1], '%.3f' % tMin[1], '%.3f' % tMed[1], '%.3f' % tMax[1],
				mode.lower()
			)
		else:
			# no passing tests in this mode, so no stats are available
			iMin = iMed = iMax = [-1, -1, -1]
			html += headnone.format('%d' % count, mode.upper())
		for d in list[mode]['data']:
			# row classes - alternate row color
			rcls = ['alt'] if num % 2 == 1 else []
			if d[6] != 'pass':
				rcls.append('notice')
			html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
			# figure out if the line has sus or res highlighted
			idx = list[mode]['data'].index(d)
			tHigh = ['', '']
			for i in range(2):
				tag = 's%s' % mode if i == 0 else 'r%s' % mode
				if idx == iMin[i]:
					tHigh[i] = ' id="%smin" class=minval title="Minimum"' % tag
				elif idx == iMax[i]:
					tHigh[i] = ' id="%smax" class=maxval title="Maximum"' % tag
				elif idx == iMed[i]:
					tHigh[i] = ' id="%smed" class=medval title="Median"' % tag
			html += td.format("%d" % (list[mode]['data'].index(d) + 1)) # row
			html += td.format(mode) # mode
			html += td.format(d[0]) # host
			html += td.format(d[1]) # kernel
			html += td.format(d[2]) # time
			html += td.format(d[6]) # result
			html += td.format(d[7]) # issues
			html += tdh.format('%.3f ms' % d[3], tHigh[0]) if d[3] else td.format('') # suspend
			html += tdh.format('%.3f ms' % d[4], tHigh[1]) if d[4] else td.format('') # resume
			html += td.format(d[8]) # sus_worst
			html += td.format('%.3f ms' % d[9]) if d[9] else td.format('') # sus_worst time
			html += td.format(d[10]) # res_worst
			html += td.format('%.3f ms' % d[11]) if d[11] else td.format('') # res_worst time
			if useturbo:
				html += td.format(d[12]) # pkg_pc10
				html += td.format(d[13]) # syslpi
			if usewifi:
				html += td.format(d[14]) # wifi
			html += tdlink.format(d[5]) if d[5] else td.format('') # url
			html += '</tr>\n'
			num += 1
	# flush the data to file
	hf = open(htmlfile, 'w')
	hf.write(html+'</table>\n</body>\n</html>\n')
	hf.close()
def createHTMLDeviceSummary(testruns, htmlfile, title):
	"""Write an html table of device callback timings merged across tests.

	Devices are aggregated over all test runs per phase type so that average,
	count, and worst-case times can be reported, with a link to the test run
	which produced the worst time. Returns the aggregated device dict.
	"""
	html = summaryCSS('Device Summary - SleepGraph', False)
	# merge the per-test device lists into one global table per phase type
	devall = dict()
	for data in testruns:
		host, url = data['host'], data['url']
		for cat in data['devlist']:
			mdevlist = devall.setdefault(cat, dict())
			for name, length in data['devlist'][cat].items():
				entry = mdevlist.get(name)
				if entry is None:
					mdevlist[name] = {'name': name, 'host': host,
						'worst': length, 'total': length, 'count': 1,
						'url': url}
					continue
				if length > entry['worst']:
					# remember which host/test produced the worst time
					entry['worst'] = length
					entry['url'] = url
					entry['host'] = host
				entry['total'] += length
				entry['count'] += 1
	# generate the html
	th = '\t<th>{0}</th>\n'
	td = '\t<td align=center>{0}</td>\n'
	tdr = '\t<td align=right>{0}</td>\n'
	tdlink = '\t<td align=center><a href="{0}">html</a></td>\n'
	limit = 1
	for cat in sorted(devall, reverse=True):
		rownum = 0
		devlist = devall[cat]
		# table header for this phase type
		html += '<div class="stamp">%s (%s devices > %d ms)</div><table>\n' % \
			(title, cat.upper(), limit)
		html += '<tr>\n' + '<th align=right>Device Name</th>' +\
			th.format('Average Time') + th.format('Count') +\
			th.format('Worst Time') + th.format('Host (worst time)') +\
			th.format('Link (worst time)') + '</tr>\n'
		# list devices worst-first, skipping those under the average limit
		bytime = sorted(devlist, key=lambda k: (devlist[k]['worst'],
			devlist[k]['total'], devlist[k]['name']), reverse=True)
		for name in bytime:
			data = devlist[name]
			data['average'] = data['total'] / data['count']
			if data['average'] < limit:
				continue
			# row classes - alternate row color
			html += '<tr class="alt">\n' if rownum % 2 == 1 else '<tr>\n'
			html += tdr.format(data['name'])	# name
			html += td.format('%.3f ms' % data['average'])	# average
			html += td.format(data['count'])	# count
			html += td.format('%.3f ms' % data['worst'])	# worst
			html += td.format(data['host'])	# host
			html += tdlink.format(data['url'])	# url
			html += '</tr>\n'
			rownum += 1
		html += '</table>\n'
	# flush the data to file
	with open(htmlfile, 'w') as hf:
		hf.write(html+'</body>\n</html>\n')
	return devall
def createHTMLIssuesSummary(testruns, issues, htmlfile, title, extra=''):
	"""Write an html table summarizing the issues found across all tests.

	Each issue row shows its occurrence count, affected test rate, and a link
	to the first test instance per host. Returns the issues list unchanged.
	"""
	# only show a Hosts column when some issue spans more than one host
	multihost = any(len(entry['urls']) > 1 for entry in issues)
	html = summaryCSS('Issues Summary - SleepGraph', False)
	total = len(testruns)
	# generate the html
	th = '\t<th>{0}</th>\n'
	td = '\t<td align={0}>{1}</td>\n'
	tdlink = '<a href="{1}">{0}</a>'
	if issues:
		subtitle = '%d issues' % len(issues)
	else:
		subtitle = 'no issues'
	html += '<div class="stamp">%s (%s)</div><table>\n' % (title, subtitle)
	# table header
	headers = ['Issue', 'Count']
	if multihost:
		headers.append('Hosts')
	headers += ['Tests', 'Fail Rate', 'First Instance']
	html += '<tr>\n' + ''.join(th.format(h) for h in headers) + '</tr>\n'
	rownum = 0
	for entry in sorted(issues, key=lambda v: v['count'], reverse=True):
		# collect one link per host and count the affected tests
		links, testtotal = [], 0
		for host in sorted(entry['urls']):
			urls = entry['urls'][host]
			links.append(tdlink.format(host, urls[0]))
			testtotal += len(urls)
		rate = '%d/%d (%.2f%%)' % (testtotal, total, 100*float(testtotal)/float(total))
		# row classes - alternate row color
		html += '<tr class="alt">\n' if rownum % 2 == 1 else '<tr>\n'
		html += td.format('left', entry['line'])	# issue
		html += td.format('center', entry['count'])	# count
		if multihost:
			html += td.format('center', len(entry['urls']))	# hosts
		html += td.format('center', testtotal)	# test count
		html += td.format('center', rate)	# test rate
		html += td.format('center nowrap', '<br>'.join(links))	# links
		html += '</tr>\n'
		rownum += 1
	# flush the data to file
	with open(htmlfile, 'w') as hf:
		hf.write(html+'</table>\n'+extra+'</body>\n</html>\n')
	return issues
def ordinal(value):
	"""Return *value* with its English ordinal suffix (1st, 2nd, 3rd, 4th).

	Arguments:
		value: an integer
	Output:
		string of the number with its suffix, e.g. 21 -> '21st'
	"""
	suffix = 'th'
	# 11, 12, 13 always take 'th', in every century (11th, 112th, 213th);
	# the original 'value < 10 or value > 19' check wrongly produced
	# '111st', '112nd', '113rd' for teens above 100
	if value % 100 not in (11, 12, 13):
		if value % 10 == 1:
			suffix = 'st'
		elif value % 10 == 2:
			suffix = 'nd'
		elif value % 10 == 3:
			suffix = 'rd'
	return '%d%s' % (value, suffix)
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns, testfail):
	"""Create the output html file from the resident test data.

	Arguments:
		testruns: array of Data objects from parseKernelLog or parseTraceLog
		testfail: failure description to display, or empty if the test passed
	Output:
		True if the html file was created, false if it failed
	"""
	if len(testruns) < 1:
		pprint('ERROR: Not enough test data to build a timeline')
		return
	kerror = False
	for data in testruns:
		if data.kerror:
			kerror = True
		if(sysvals.suspendmode in ['freeze', 'standby']):
			data.trimFreezeTime(testruns[-1].tSuspended)
		else:
			data.getMemTime()
	# html function templates
	html_error = '<div id="{1}" title="kernel error/warning" class="err" style="right:{0}%">{2}→</div>\n'
	html_traceevent = '<div title="{0}" class="traceevent{6}" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;{7}">{5}</div>\n'
	html_cpuexec = '<div class="jiffie" style="left:{0}%;top:{1}px;height:{2}px;width:{3}%;background:{4};"></div>\n'
	html_timetotal = '<table class="time1">\n<tr>'\
		'<td class="green" title="{3}">{2} Suspend Time: <b>{0} ms</b></td>'\
		'<td class="yellow" title="{4}">{2} Resume Time: <b>{1} ms</b></td>'\
		'</tr>\n</table>\n'
	html_timetotal2 = '<table class="time1">\n<tr>'\
		'<td class="green" title="{4}">{3} Suspend Time: <b>{0} ms</b></td>'\
		'<td class="gray" title="time spent in low-power mode with clock running">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
		'<td class="yellow" title="{5}">{3} Resume Time: <b>{2} ms</b></td>'\
		'</tr>\n</table>\n'
	html_timetotal3 = '<table class="time1">\n<tr>'\
		'<td class="green">Execution Time: <b>{0} ms</b></td>'\
		'<td class="yellow">Command: <b>{1}</b></td>'\
		'</tr>\n</table>\n'
	html_fail = '<table class="testfail"><tr><td>{0}</td></tr></table>\n'
	html_kdesc = '<td class="{3}" title="time spent in kernel execution">{0}Kernel {2}: {1} ms</td>'
	html_fwdesc = '<td class="{3}" title="time spent in firmware">{0}Firmware {2}: {1} ms</td>'
	html_wifdesc = '<td class="yellow" title="time for wifi to reconnect after resume complete ({2})">{0}Wifi Resume: {1}</td>'
	# html format variables
	scaleH = 20
	if kerror:
		scaleH = 40
	# device timeline
	devtl = Timeline(30, scaleH)
	# write the test title and general info header
	devtl.createHeader(sysvals, testruns[0].stamp)
	# Generate the header for this timeline
	for data in testruns:
		tTotal = data.end - data.start
		if(tTotal == 0):
			doError('No timeline data')
		if sysvals.suspendmode == 'command':
			run_time = '%.0f' % (tTotal * 1000)
			if sysvals.testcommand:
				testdesc = sysvals.testcommand
			else:
				testdesc = 'unknown'
			if(len(testruns) > 1):
				testdesc = ordinal(data.testnumber+1)+' '+testdesc
			thtml = html_timetotal3.format(run_time, testdesc)
			devtl.html += thtml
			continue
		# typical full suspend/resume header
		stot, rtot = sktime, rktime = data.getTimeValues()
		ssrc, rsrc, testdesc, testdesc2 = ['kernel'], ['kernel'], 'Kernel', ''
		if data.fwValid:
			# firmware time contributes to the totals
			stot += (data.fwSuspend/1000000.0)
			rtot += (data.fwResume/1000000.0)
			ssrc.append('firmware')
			rsrc.append('firmware')
			testdesc = 'Total'
		if 'time' in data.wifi and data.wifi['stat'] != 'timeout':
			# wifi reconnect time contributes to the resume total
			rtot += data.end - data.tKernRes + (data.wifi['time'] * 1000.0)
			rsrc.append('wifi')
			testdesc = 'Total'
		suspend_time, resume_time = '%.3f' % stot, '%.3f' % rtot
		stitle = 'time from kernel suspend start to %s mode [%s time]' % \
			(sysvals.suspendmode, ' & '.join(ssrc))
		rtitle = 'time from %s mode to kernel resume complete [%s time]' % \
			(sysvals.suspendmode, ' & '.join(rsrc))
		if(len(testruns) > 1):
			testdesc = testdesc2 = ordinal(data.testnumber+1)
			testdesc2 += ' '
		if(len(data.tLow) == 0):
			thtml = html_timetotal.format(suspend_time, \
				resume_time, testdesc, stitle, rtitle)
		else:
			low_time = '+'.join(data.tLow)
			thtml = html_timetotal2.format(suspend_time, low_time, \
				resume_time, testdesc, stitle, rtitle)
		devtl.html += thtml
		if not data.fwValid and 'dev' not in data.wifi:
			continue
		# extra detail when the times come from multiple sources
		thtml = '<table class="time2">\n<tr>'
		thtml += html_kdesc.format(testdesc2, '%.3f'%sktime, 'Suspend', 'green')
		if data.fwValid:
			sftime = '%.3f'%(data.fwSuspend / 1000000.0)
			rftime = '%.3f'%(data.fwResume / 1000000.0)
			thtml += html_fwdesc.format(testdesc2, sftime, 'Suspend', 'green')
			thtml += html_fwdesc.format(testdesc2, rftime, 'Resume', 'yellow')
		thtml += html_kdesc.format(testdesc2, '%.3f'%rktime, 'Resume', 'yellow')
		if 'time' in data.wifi:
			if data.wifi['stat'] != 'timeout':
				wtime = '%.0f ms'%(data.end - data.tKernRes + (data.wifi['time'] * 1000.0))
			else:
				wtime = 'TIMEOUT'
			thtml += html_wifdesc.format(testdesc2, wtime, data.wifi['dev'])
		thtml += '</tr>\n</table>\n'
		devtl.html += thtml
	if testfail:
		devtl.html += html_fail.format(testfail)
	# time scale for potentially multiple datasets
	t0 = testruns[0].start
	tMax = testruns[-1].end
	tTotal = tMax - t0
	# determine the maximum number of rows we need to draw
	fulllist = []
	threadlist = []
	pscnt = 0
	devcnt = 0
	for data in testruns:
		data.selectTimelineDevices('%f', tTotal, sysvals.mindevlen)
		for group in data.devicegroups:
			devlist = []
			for phase in group:
				for devname in sorted(data.tdevlist[phase]):
					d = DevItem(data.testnumber, phase, data.dmesg[phase]['list'][devname])
					devlist.append(d)
					# kernel threads are drawn in a separate row block from
					# user processes and device pm callbacks
					if d.isa('kth'):
						threadlist.append(d)
					else:
						if d.isa('ps'):
							pscnt += 1
						else:
							devcnt += 1
						fulllist.append(d)
			if sysvals.mixedphaseheight:
				devtl.getPhaseRows(devlist)
	if not sysvals.mixedphaseheight:
		if len(threadlist) > 0 and len(fulllist) > 0:
			if pscnt > 0 and devcnt > 0:
				msg = 'user processes & device pm callbacks'
			elif pscnt > 0:
				msg = 'user processes'
			else:
				msg = 'device pm callbacks'
			d = testruns[0].addHorizontalDivider(msg, testruns[-1].end)
			fulllist.insert(0, d)
		devtl.getPhaseRows(fulllist)
		if len(threadlist) > 0:
			d = testruns[0].addHorizontalDivider('asynchronous kernel threads', testruns[-1].end)
			threadlist.insert(0, d)
			devtl.getPhaseRows(threadlist, devtl.rows)
	devtl.calcTotalRows()
	# draw the full timeline
	devtl.createZoomBox(sysvals.suspendmode, len(testruns))
	for data in testruns:
		# draw each test run and block chronologically
		phases = {'suspend':[],'resume':[]}
		for phase in data.sortedPhases():
			if data.dmesg[phase]['start'] >= data.tSuspended:
				phases['resume'].append(phase)
			else:
				phases['suspend'].append(phase)
		# now draw the actual timeline blocks
		for dir in phases:
			# draw suspend and resume blocks separately
			bname = '%s%d' % (dir[0], data.testnumber)
			if dir == 'suspend':
				m0 = data.start
				mMax = data.tSuspended
				left = '%f' % (((m0-t0)*100.0)/tTotal)
			else:
				m0 = data.tSuspended
				mMax = data.end
				# in an x2 run, remove any gap between blocks
				if len(testruns) > 1 and data.testnumber == 0:
					mMax = testruns[1].start
				left = '%f' % ((((m0-t0)*100.0)+sysvals.srgap/2)/tTotal)
			mTotal = mMax - m0
			# if a timeline block is 0 length, skip altogether
			if mTotal == 0:
				continue
			width = '%f' % (((mTotal*100.0)-sysvals.srgap/2)/tTotal)
			devtl.html += devtl.html_tblock.format(bname, left, width, devtl.scaleH)
			for b in phases[dir]:
				# draw the phase color background
				phase = data.dmesg[b]
				length = phase['end']-phase['start']
				left = '%f' % (((phase['start']-m0)*100.0)/mTotal)
				width = '%f' % ((length*100.0)/mTotal)
				devtl.html += devtl.html_phase.format(left, width, \
					'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
					data.dmesg[b]['color'], '')
			for e in data.errorinfo[dir]:
				# draw red lines for any kernel errors found
				type, t, idx1, idx2 = e
				id = '%d_%d' % (idx1, idx2)
				right = '%f' % (((mMax-t)*100.0)/mTotal)
				devtl.html += html_error.format(right, id, type)
			for b in phases[dir]:
				# draw the devices for this phase
				phaselist = data.dmesg[b]['list']
				for d in sorted(data.tdevlist[b]):
					dname = d if ('[' not in d or 'CPU' in d) else d.split('[')[0]
					name, dev = dname, phaselist[d]
					drv = xtraclass = xtrainfo = xtrastyle = ''
					if 'htmlclass' in dev:
						xtraclass = dev['htmlclass']
					if 'color' in dev:
						xtrastyle = 'background:%s;' % dev['color']
					if(d in sysvals.devprops):
						name = sysvals.devprops[d].altName(d)
						xtraclass = sysvals.devprops[d].xtraClass()
						xtrainfo = sysvals.devprops[d].xtraInfo()
					elif xtraclass == ' kth':
						xtrainfo = ' kernel_thread'
					if('drv' in dev and dev['drv']):
						drv = ' {%s}' % dev['drv']
					rowheight = devtl.phaseRowHeight(data.testnumber, b, dev['row'])
					rowtop = devtl.phaseRowTop(data.testnumber, b, dev['row'])
					top = '%.3f' % (rowtop + devtl.scaleH)
					left = '%f' % (((dev['start']-m0)*100)/mTotal)
					width = '%f' % (((dev['end']-dev['start'])*100)/mTotal)
					length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
					title = name+drv+xtrainfo+length
					if sysvals.suspendmode == 'command':
						title += sysvals.testcommand
					elif xtraclass == ' ps':
						if 'suspend' in b:
							title += 'pre_suspend_process'
						else:
							title += 'post_resume_process'
					else:
						title += b
					devtl.html += devtl.html_device.format(dev['id'], \
						title, left, top, '%.3f'%rowheight, width, \
						dname+drv, xtraclass, xtrastyle)
					if('cpuexec' in dev):
						# draw cpu exec load bars, opacity scales with load
						for t in sorted(dev['cpuexec']):
							start, end = t
							j = float(dev['cpuexec'][t]) / 5
							if j > 1.0:
								j = 1.0
							height = '%.3f' % (rowheight/3)
							top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
							left = '%f' % (((start-m0)*100)/mTotal)
							width = '%f' % ((end-start)*100/mTotal)
							color = 'rgba(255, 0, 0, %f)' % j
							devtl.html += \
								html_cpuexec.format(left, top, height, width, color)
					if('src' not in dev):
						continue
					# draw any trace events for this device
					for e in dev['src']:
						if e.length == 0:
							continue
						height = '%.3f' % devtl.rowH
						top = '%.3f' % (rowtop + devtl.scaleH + (e.row*devtl.rowH))
						left = '%f' % (((e.time-m0)*100)/mTotal)
						width = '%f' % (e.length*100/mTotal)
						xtrastyle = ''
						if e.color:
							xtrastyle = 'background:%s;' % e.color
						devtl.html += \
							html_traceevent.format(e.title(), \
								left, top, height, width, e.text(), '', xtrastyle)
			# draw the time scale, try to make the number of labels readable
			devtl.createTimeScale(m0, mMax, tTotal, dir)
			devtl.html += '</div>\n'
	# timeline is finished
	devtl.html += '</div>\n</div>\n'
	# draw a legend which describes the phases by color
	if sysvals.suspendmode != 'command':
		phasedef = testruns[-1].phasedef
		devtl.html += '<div class="legend">\n'
		pdelta = 100.0/len(phasedef.keys())
		pmargin = pdelta / 4.0
		for phase in sorted(phasedef, key=lambda k:phasedef[k]['order']):
			id, p = '', phasedef[phase]
			# the legend id is the first letter of each word in the phase name
			for word in phase.split('_'):
				id += word[0]
			order = '%.2f' % ((p['order'] * pdelta) + pmargin)
			name = phase.replace('_', ' ')
			devtl.html += devtl.html_legend.format(order, p['color'], name, id)
		devtl.html += '</div>\n'
	hf = open(sysvals.htmlfile, 'w')
	addCSS(hf, sysvals, len(testruns), kerror)
	# write the device timeline
	hf.write(devtl.html)
	hf.write('<div id="devicedetailtitle"></div>\n')
	hf.write('<div id="devicedetail" style="display:none;">\n')
	# draw the colored boxes for the device detail section
	for data in testruns:
		hf.write('<div id="devicedetail%d">\n' % data.testnumber)
		pscolor = 'linear-gradient(to top left, #ccc, #eee)'
		hf.write(devtl.html_phaselet.format('pre_suspend_process', \
			'0', '0', pscolor))
		for b in data.sortedPhases():
			phase = data.dmesg[b]
			length = phase['end']-phase['start']
			left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
			width = '%.3f' % ((length*100.0)/tTotal)
			hf.write(devtl.html_phaselet.format(b, left, width, \
				data.dmesg[b]['color']))
		hf.write(devtl.html_phaselet.format('post_resume_process', \
			'0', '0', pscolor))
		if sysvals.suspendmode == 'command':
			hf.write(devtl.html_phaselet.format('cmdexec', '0', '0', pscolor))
		hf.write('</div>\n')
	hf.write('</div>\n')
	# write the ftrace data (callgraph)
	if sysvals.cgtest >= 0 and len(testruns) > sysvals.cgtest:
		data = testruns[sysvals.cgtest]
	else:
		data = testruns[-1]
	if sysvals.usecallgraph:
		addCallgraphs(sysvals, hf, data)
	# add the test log as a hidden div
	if sysvals.testlog and sysvals.logmsg:
		hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
	# add the dmesg log as a hidden div
	if sysvals.dmesglog and sysvals.dmesgfile:
		hf.write('<div id="dmesglog" style="display:none;">\n')
		lf = sysvals.openlog(sysvals.dmesgfile, 'r')
		for line in lf:
			# NOTE(review): these replace() calls are no-ops as written; this
			# line presumably should escape to html entities (&lt; / &gt;) —
			# the literals look mangled, confirm against the upstream source
			line = line.replace('<', '<').replace('>', '>')
			hf.write(line)
		lf.close()
		hf.write('</div>\n')
	# add the ftrace log as a hidden div
	if sysvals.ftracelog and sysvals.ftracefile:
		hf.write('<div id="ftracelog" style="display:none;">\n')
		lf = sysvals.openlog(sysvals.ftracefile, 'r')
		for line in lf:
			hf.write(line)
		lf.close()
		hf.write('</div>\n')
	# write the footer and close
	addScriptCode(hf, testruns)
	hf.write('</body>\n</html>\n')
	hf.close()
	return True
def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
	"""Write the html header and css for a timeline page to the open file.

	Arguments:
		hf: the open html file pointer
		sv: the SystemValues object (hostname, suspendmode, display flags)
		testcount: number of test runs in the timeline
		kerror: True if kernel errors will be shown (uses a taller time scale)
		extra: extra css rules to append to the style block
	"""
	kernel = sv.stamp['kernel']
	host = sv.hostname[0].upper()+sv.hostname[1:]
	mode = sv.suspendmode
	if sv.suspendmode in suspendmodename:
		mode = suspendmodename[sv.suspendmode]
	title = host+' '+mode+' '+kernel
	# various format changes by flags
	cgchk = 'checked'
	cgnchk = 'not(:checked)'
	if sv.cgexp:
		cgchk = 'not(:checked)'
		cgnchk = 'checked'
	hoverZ = 'z-index:8;'
	if sv.usedevsrc:
		hoverZ = ''
	devlistpos = 'absolute'
	if testcount > 1:
		devlistpos = 'relative'
	scaleTH = 20
	if kerror:
		scaleTH = 60
	# write the html header first (html head, css code, up to body start)
	html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
	<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
	<title>'+title+'</title>\n\
	<style type=\'text/css\'>\n\
	body {overflow-y:scroll;}\n\
	.stamp {width:100%;text-align:center;background:gray;line-height:30px;color:white;font:25px Arial;}\n\
	.stamp.sysinfo {font:10px Arial;}\n\
	.callgraph {margin-top:30px;box-shadow:5px 5px 20px black;}\n\
	.callgraph article * {padding-left:28px;}\n\
	h1 {color:black;font:bold 30px Times;}\n\
	t0 {color:black;font:bold 30px Times;}\n\
	t1 {color:black;font:30px Times;}\n\
	t2 {color:black;font:25px Times;}\n\
	t3 {color:black;font:20px Times;white-space:nowrap;}\n\
	t4 {color:black;font:bold 30px Times;line-height:60px;white-space:nowrap;}\n\
	cS {font:bold 13px Times;}\n\
	table {width:100%;}\n\
	.gray {background:rgba(80,80,80,0.1);}\n\
	.green {background:rgba(204,255,204,0.4);}\n\
	.purple {background:rgba(128,0,128,0.2);}\n\
	.yellow {background:rgba(255,255,204,0.4);}\n\
	.blue {background:rgba(169,208,245,0.4);}\n\
	.time1 {font:22px Arial;border:1px solid;}\n\
	.time2 {font:15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
	.testfail {font:bold 22px Arial;color:red;border:1px dashed;}\n\
	td {text-align:center;}\n\
	r {color:#500000;font:15px Tahoma;}\n\
	n {color:#505050;font:15px Tahoma;}\n\
	.tdhl {color:red;}\n\
	.hide {display:none;}\n\
	.pf {display:none;}\n\
	.pf:'+cgchk+' + label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
	.pf:'+cgnchk+' ~ label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
	.pf:'+cgchk+' ~ *:not(:nth-child(2)) {display:none;}\n\
	.zoombox {position:relative;width:100%;overflow-x:scroll;-webkit-user-select:none;-moz-user-select:none;user-select:none;}\n\
	.timeline {position:relative;font-size:14px;cursor:pointer;width:100%; overflow:hidden;background:linear-gradient(#cccccc, white);}\n\
	.thread {position:absolute;height:0%;overflow:hidden;z-index:7;line-height:30px;font-size:14px;border:1px solid;text-align:center;white-space:nowrap;}\n\
	.thread.ps {border-radius:3px;background:linear-gradient(to top, #ccc, #eee);}\n\
	.thread:hover {background:white;border:1px solid red;'+hoverZ+'}\n\
	.thread.sec,.thread.sec:hover {background:black;border:0;color:white;line-height:15px;font-size:10px;}\n\
	.hover {background:white;border:1px solid red;'+hoverZ+'}\n\
	.hover.sync {background:white;}\n\
	.hover.bg,.hover.kth,.hover.sync,.hover.ps {background:white;}\n\
	.jiffie {position:absolute;pointer-events: none;z-index:8;}\n\
	.traceevent {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
	.traceevent:hover {color:white;font-weight:bold;border:1px solid white;}\n\
	.phase {position:absolute;overflow:hidden;border:0px;text-align:center;}\n\
	.phaselet {float:left;overflow:hidden;border:0px;text-align:center;min-height:100px;font-size:24px;}\n\
	.t {position:absolute;line-height:'+('%d'%scaleTH)+'px;pointer-events:none;top:0;height:100%;border-right:1px solid black;z-index:6;}\n\
	.err {position:absolute;top:0%;height:100%;border-right:3px solid red;color:red;font:bold 14px Times;line-height:18px;}\n\
	.legend {position:relative; width:100%; height:40px; text-align:center;margin-bottom:20px}\n\
	.legend .square {position:absolute;cursor:pointer;top:10px; width:0px;height:20px;border:1px solid;padding-left:20px;}\n\
	button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
	.btnfmt {position:relative;float:right;height:25px;width:auto;margin-top:3px;margin-bottom:0;font-size:10px;text-align:center;}\n\
	.devlist {position:'+devlistpos+';width:190px;}\n\
	a:link {color:white;text-decoration:none;}\n\
	a:visited {color:white;}\n\
	a:hover {color:white;}\n\
	a:active {color:white;}\n\
	.version {position:relative;float:left;color:white;font-size:10px;line-height:30px;margin-left:10px;}\n\
	#devicedetail {min-height:100px;box-shadow:5px 5px 20px black;}\n\
	.tblock {position:absolute;height:100%;background:#ddd;}\n\
	.tback {position:absolute;width:100%;background:linear-gradient(#ccc, #ddd);}\n\
	.bg {z-index:1;}\n\
	'+extra+'\
	</style>\n</head>\n<body>\n'
	hf.write(html_header)
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
	"""Write the interactive javascript engine into the output html.

	Emits a <script> block implementing timeline zoom/pan, timescale
	redraw, device hover/selection, per-device detail panes, callgraph
	selection, and popup windows for device lists and dmesg errors.

	Arguments:
		hf: the open html file pointer
		testruns: array of Data objects from parseKernelLog or parseTraceLog
	"""
	# overall timeline bounds in ms, spanning all test runs
	t0 = testruns[0].start * 1000
	tMax = testruns[-1].end * 1000
	# create an array in javascript memory with the device details
	detail = '	var devtable = [];\n'
	for data in testruns:
		topo = data.deviceTopology()
		detail += '	devtable[%d] = "%s";\n' % (data.testnumber, topo)
	detail += '	var bounds = [%f,%f];\n' % (t0, tMax)
	# add the code which will manipulate the data in the browser
	script_code = \
	'<script type="text/javascript">\n'+detail+\
	'	var resolution = -1;\n'\
	'	var dragval = [0, 0];\n'\
	'	function redrawTimescale(t0, tMax, tS) {\n'\
	'		var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
	'		var tTotal = tMax - t0;\n'\
	'		var list = document.getElementsByClassName("tblock");\n'\
	'		for (var i = 0; i < list.length; i++) {\n'\
	'			var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
	'			var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
	'			var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
	'			var mMax = m0 + mTotal;\n'\
	'			var html = "";\n'\
	'			var divTotal = Math.floor(mTotal/tS) + 1;\n'\
	'			if(divTotal > 1000) continue;\n'\
	'			var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
	'			var pos = 0.0, val = 0.0;\n'\
	'			for (var j = 0; j < divTotal; j++) {\n'\
	'				var htmlline = "";\n'\
	'				var mode = list[i].id[5];\n'\
	'				if(mode == "s") {\n'\
	'					pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
	'					val = (j-divTotal+1)*tS;\n'\
	'					if(j == divTotal - 1)\n'\
	'						htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S→</cS></div>\';\n'\
	'					else\n'\
	'						htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
	'				} else {\n'\
	'					pos = 100 - (((j)*tS*100)/mTotal);\n'\
	'					val = (j)*tS;\n'\
	'					htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
	'					if(j == 0)\n'\
	'						if(mode == "r")\n'\
	'							htmlline = rline+"<cS>←R</cS></div>";\n'\
	'						else\n'\
	'							htmlline = rline+"<cS>0ms</div>";\n'\
	'				}\n'\
	'				html += htmlline;\n'\
	'			}\n'\
	'			timescale.innerHTML = html;\n'\
	'		}\n'\
	'	}\n'\
	'	function zoomTimeline() {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var zoombox = document.getElementById("dmesgzoombox");\n'\
	'		var left = zoombox.scrollLeft;\n'\
	'		var val = parseFloat(dmesg.style.width);\n'\
	'		var newval = 100;\n'\
	'		var sh = window.outerWidth / 2;\n'\
	'		if(this.id == "zoomin") {\n'\
	'			newval = val * 1.2;\n'\
	'			if(newval > 910034) newval = 910034;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
	'		} else if (this.id == "zoomout") {\n'\
	'			newval = val / 1.2;\n'\
	'			if(newval < 100) newval = 100;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
	'		} else {\n'\
	'			zoombox.scrollLeft = 0;\n'\
	'			dmesg.style.width = "100%";\n'\
	'		}\n'\
	'		var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
	'		var t0 = bounds[0];\n'\
	'		var tMax = bounds[1];\n'\
	'		var tTotal = tMax - t0;\n'\
	'		var wTotal = tTotal * 100.0 / newval;\n'\
	'		var idx = 7*window.innerWidth/1100;\n'\
	'		for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
	'		if(i >= tS.length) i = tS.length - 1;\n'\
	'		if(tS[i] == resolution) return;\n'\
	'		resolution = tS[i];\n'\
	'		redrawTimescale(t0, tMax, tS[i]);\n'\
	'	}\n'\
	'	function deviceName(title) {\n'\
	'		var name = title.slice(0, title.indexOf(" ("));\n'\
	'		return name;\n'\
	'	}\n'\
	'	function deviceHover() {\n'\
	'		var name = deviceName(this.title);\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = deviceName(dev[i].title);\n'\
	'			var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				dev[i].className = "hover "+cname;\n'\
	'			} else {\n'\
	'				dev[i].className = cname;\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceUnhover() {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceTitle(title, total, cpu) {\n'\
	'		var prefix = "Total";\n'\
	'		if(total.length > 3) {\n'\
	'			prefix = "Average";\n'\
	'			total[1] = (total[1]+total[3])/2;\n'\
	'			total[2] = (total[2]+total[4])/2;\n'\
	'		}\n'\
	'		var devtitle = document.getElementById("devicedetailtitle");\n'\
	'		var name = deviceName(title);\n'\
	'		if(cpu >= 0) name = "CPU"+cpu;\n'\
	'		var driver = "";\n'\
	'		var tS = "<t2>(</t2>";\n'\
	'		var tR = "<t2>)</t2>";\n'\
	'		if(total[1] > 0)\n'\
	'			tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
	'		if(total[2] > 0)\n'\
	'			tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
	'		var s = title.indexOf("{");\n'\
	'		var e = title.indexOf("}");\n'\
	'		if((s >= 0) && (e >= 0))\n'\
	'			driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
	'		if(total[1] > 0 && total[2] > 0)\n'\
	'			devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
	'		else\n'\
	'			devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
	'		return name;\n'\
	'	}\n'\
	'	function deviceDetail() {\n'\
	'		var devinfo = document.getElementById("devicedetail");\n'\
	'		devinfo.style.display = "block";\n'\
	'		var name = deviceName(this.title);\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var idlist = [];\n'\
	'		var pdata = [[]];\n'\
	'		if(document.getElementById("devicedetail1"))\n'\
	'			pdata = [[], []];\n'\
	'		var pd = pdata[0];\n'\
	'		var total = [0.0, 0.0, 0.0];\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = deviceName(dev[i].title);\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				idlist[idlist.length] = dev[i].id;\n'\
	'				var tidx = 1;\n'\
	'				if(dev[i].id[0] == "a") {\n'\
	'					pd = pdata[0];\n'\
	'				} else {\n'\
	'					if(pdata.length == 1) pdata[1] = [];\n'\
	'					if(total.length == 3) total[3]=total[4]=0.0;\n'\
	'					pd = pdata[1];\n'\
	'					tidx = 3;\n'\
	'				}\n'\
	'				var info = dev[i].title.split(" ");\n'\
	'				var pname = info[info.length-1];\n'\
	'				pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
	'				total[0] += pd[pname];\n'\
	'				if(pname.indexOf("suspend") >= 0)\n'\
	'					total[tidx] += pd[pname];\n'\
	'				else\n'\
	'					total[tidx+1] += pd[pname];\n'\
	'			}\n'\
	'		}\n'\
	'		var devname = deviceTitle(this.title, total, cpu);\n'\
	'		var left = 0.0;\n'\
	'		for (var t = 0; t < pdata.length; t++) {\n'\
	'			pd = pdata[t];\n'\
	'			devinfo = document.getElementById("devicedetail"+t);\n'\
	'			var phases = devinfo.getElementsByClassName("phaselet");\n'\
	'			for (var i = 0; i < phases.length; i++) {\n'\
	'				if(phases[i].id in pd) {\n'\
	'					var w = 100.0*pd[phases[i].id]/total[0];\n'\
	'					var fs = 32;\n'\
	'					if(w < 8) fs = 4*w | 0;\n'\
	'					var fs2 = fs*3/4;\n'\
	'					phases[i].style.width = w+"%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'					phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
	'					left += w;\n'\
	'					var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
	'					var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
	'					phases[i].innerHTML = time+pname;\n'\
	'				} else {\n'\
	'					phases[i].style.width = "0%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'				}\n'\
	'			}\n'\
	'		}\n'\
	'		if(typeof devstats !== \'undefined\')\n'\
	'			callDetail(this.id, this.title);\n'\
	'		var cglist = document.getElementById("callgraphs");\n'\
	'		if(!cglist) return;\n'\
	'		var cg = cglist.getElementsByClassName("atop");\n'\
	'		if(cg.length < 10) return;\n'\
	'		for (var i = 0; i < cg.length; i++) {\n'\
	'			cgid = cg[i].id.split("x")[0]\n'\
	'			if(idlist.indexOf(cgid) >= 0) {\n'\
	'				cg[i].style.display = "block";\n'\
	'			} else {\n'\
	'				cg[i].style.display = "none";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function callDetail(devid, devtitle) {\n'\
	'		if(!(devid in devstats) || devstats[devid].length < 1)\n'\
	'			return;\n'\
	'		var list = devstats[devid];\n'\
	'		var tmp = devtitle.split(" ");\n'\
	'		var name = tmp[0], phase = tmp[tmp.length-1];\n'\
	'		var dd = document.getElementById(phase);\n'\
	'		var total = parseFloat(tmp[1].slice(1));\n'\
	'		var mlist = [];\n'\
	'		var maxlen = 0;\n'\
	'		var info = []\n'\
	'		for(var i in list) {\n'\
	'			if(list[i][0] == "@") {\n'\
	'				info = list[i].split("|");\n'\
	'				continue;\n'\
	'			}\n'\
	'			var tmp = list[i].split("|");\n'\
	'			var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
	'			var p = (t*100.0/total).toFixed(2);\n'\
	'			mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
	'			if(f.length > maxlen)\n'\
	'				maxlen = f.length;\n'\
	'		}\n'\
	'		var pad = 5;\n'\
	'		if(mlist.length == 0) pad = 30;\n'\
	'		var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
	'		if(info.length > 2)\n'\
	'			html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
	'		if(info.length > 3)\n'\
	'			html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
	'		if(info.length > 4)\n'\
	'			html += ", return=<b>"+info[4]+"</b>";\n'\
	'		html += "</t3></div>";\n'\
	'		if(mlist.length > 0) {\n'\
	'			html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
	'			for(var i in mlist)\n'\
	'				html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
	'			html += "</tr><tr><th>Calls</th>";\n'\
	'			for(var i in mlist)\n'\
	'				html += "<td>"+mlist[i][1]+"</td>";\n'\
	'			html += "</tr><tr><th>Time(ms)</th>";\n'\
	'			for(var i in mlist)\n'\
	'				html += "<td>"+mlist[i][2]+"</td>";\n'\
	'			html += "</tr><tr><th>Percent</th>";\n'\
	'			for(var i in mlist)\n'\
	'				html += "<td>"+mlist[i][3]+"</td>";\n'\
	'			html += "</tr></table>";\n'\
	'		}\n'\
	'		dd.innerHTML = html;\n'\
	'		var height = (maxlen*5)+100;\n'\
	'		dd.style.height = height+"px";\n'\
	'		document.getElementById("devicedetail").style.height = height+"px";\n'\
	'	}\n'\
	'	function callSelect() {\n'\
	'		var cglist = document.getElementById("callgraphs");\n'\
	'		if(!cglist) return;\n'\
	'		var cg = cglist.getElementsByClassName("atop");\n'\
	'		for (var i = 0; i < cg.length; i++) {\n'\
	'			if(this.id == cg[i].id) {\n'\
	'				cg[i].style.display = "block";\n'\
	'			} else {\n'\
	'				cg[i].style.display = "none";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function devListWindow(e) {\n'\
	'		var win = window.open();\n'\
	'		var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
	'			"<style type=\\"text/css\\">"+\n'\
	'			"   ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
	'			"</style>"\n'\
	'		var dt = devtable[0];\n'\
	'		if(e.target.id != "devlist1")\n'\
	'			dt = devtable[1];\n'\
	'		win.document.write(html+dt);\n'\
	'	}\n'\
	'	function errWindow() {\n'\
	'		var range = this.id.split("_");\n'\
	'		var idx1 = parseInt(range[0]);\n'\
	'		var idx2 = parseInt(range[1]);\n'\
	'		var win = window.open();\n'\
	'		var log = document.getElementById("dmesglog");\n'\
	'		var title = "<title>dmesg log</title>";\n'\
	'		var text = log.innerHTML.split("\\n");\n'\
	'		var html = "";\n'\
	'		for(var i = 0; i < text.length; i++) {\n'\
	'			if(i == idx1) {\n'\
	'				html += "<e id=target>"+text[i]+"</e>\\n";\n'\
	'			} else if(i > idx1 && i <= idx2) {\n'\
	'				html += "<e>"+text[i]+"</e>\\n";\n'\
	'			} else {\n'\
	'				html += text[i]+"\\n";\n'\
	'			}\n'\
	'		}\n'\
	'		win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");\n'\
	'		win.location.hash = "#target";\n'\
	'		win.document.close();\n'\
	'	}\n'\
	'	function logWindow(e) {\n'\
	'		var name = e.target.id.slice(4);\n'\
	'		var win = window.open();\n'\
	'		var log = document.getElementById(name+"log");\n'\
	'		var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
	'		win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
	'		win.document.close();\n'\
	'	}\n'\
	'	function onMouseDown(e) {\n'\
	'		dragval[0] = e.clientX;\n'\
	'		dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
	'		document.onmousemove = onMouseMove;\n'\
	'	}\n'\
	'	function onMouseMove(e) {\n'\
	'		var zoombox = document.getElementById("dmesgzoombox");\n'\
	'		zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
	'	}\n'\
	'	function onMouseUp(e) {\n'\
	'		document.onmousemove = null;\n'\
	'	}\n'\
	'	function onKeyPress(e) {\n'\
	'		var c = e.charCode;\n'\
	'		if(c != 42 && c != 43 && c != 45) return;\n'\
	'		var click = document.createEvent("Events");\n'\
	'		click.initEvent("click", true, false);\n'\
	'		if(c == 43)  \n'\
	'			document.getElementById("zoomin").dispatchEvent(click);\n'\
	'		else if(c == 45)\n'\
	'			document.getElementById("zoomout").dispatchEvent(click);\n'\
	'		else if(c == 42)\n'\
	'			document.getElementById("zoomdef").dispatchEvent(click);\n'\
	'	}\n'\
	'	window.addEventListener("resize", function () {zoomTimeline();});\n'\
	'	window.addEventListener("load", function () {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		dmesg.style.width = "100%"\n'\
	'		dmesg.onmousedown = onMouseDown;\n'\
	'		document.onmouseup = onMouseUp;\n'\
	'		document.onkeypress = onKeyPress;\n'\
	'		document.getElementById("zoomin").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomout").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
	'		var list = document.getElementsByClassName("err");\n'\
	'		for (var i = 0; i < list.length; i++)\n'\
	'			list[i].onclick = errWindow;\n'\
	'		var list = document.getElementsByClassName("logbtn");\n'\
	'		for (var i = 0; i < list.length; i++)\n'\
	'			list[i].onclick = logWindow;\n'\
	'		list = document.getElementsByClassName("devlist");\n'\
	'		for (var i = 0; i < list.length; i++)\n'\
	'			list[i].onclick = devListWindow;\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].onclick = deviceDetail;\n'\
	'			dev[i].onmouseover = deviceHover;\n'\
	'			dev[i].onmouseout = deviceUnhover;\n'\
	'		}\n'\
	'		var dev = dmesg.getElementsByClassName("srccall");\n'\
	'		for (var i = 0; i < dev.length; i++)\n'\
	'			dev[i].onclick = callSelect;\n'\
	'		zoomTimeline();\n'\
	'	});\n'\
	'</script>\n'
	# write the finished script block into the html file
	hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend(quiet=False):
	"""Execute one or more system suspend/resume cycles via sysfs.

	Prepares the system (display, sync, dmesg, ftrace), runs
	sysvals.execcount suspend/resume cycles (or the user's test command),
	then captures the dmesg and ftrace output into the test directory.

	Arguments:
		quiet: True to suppress most progress messages
	"""
	sv, tp, pm = sysvals, sysvals.tpath, ProcessMonitor()
	if sv.wifi:
		wifi = sv.checkWifi()
		sv.dlog('wifi check, connected device is "%s"' % wifi)
	testdata = []
	# run these commands to prepare the system for suspend
	if sv.display:
		if not quiet:
			pprint('SET DISPLAY TO %s' % sv.display.upper())
		ret = sv.displayControl(sv.display)
		sv.dlog('xset display %s, ret = %d' % (sv.display, ret))
		time.sleep(1)
	if sv.sync:
		if not quiet:
			pprint('SYNCING FILESYSTEMS')
		sv.dlog('syncing filesystems')
		call('sync', shell=True)
	sv.dlog('read dmesg')
	sv.initdmesg()
	# start ftrace
	if(sv.usecallgraph or sv.usetraceevents):
		if not quiet:
			pprint('START TRACING')
		sv.dlog('start ftrace tracing')
		sv.fsetVal('1', 'tracing_on')
		if sv.useprocmon:
			sv.dlog('start the process monitor')
			pm.start()
	sv.dlog('run the cmdinfo list before')
	sv.cmdinfo(True)
	# execute however many s/r runs requested
	for count in range(1,sv.execcount+1):
		# x2delay in between test runs
		if(count > 1 and sv.x2delay > 0):
			sv.fsetVal('WAIT %d' % sv.x2delay, 'trace_marker')
			time.sleep(sv.x2delay/1000.0)
			sv.fsetVal('WAIT END', 'trace_marker')
		# start message
		if sv.testcommand != '':
			pprint('COMMAND START')
		else:
			if(sv.rtcwake):
				pprint('SUSPEND START')
			else:
				pprint('SUSPEND START (press a key to resume)')
		# set rtcwake
		if(sv.rtcwake):
			if not quiet:
				pprint('will issue an rtcwake in %d seconds' % sv.rtcwaketime)
			sv.dlog('enable RTC wake alarm')
			sv.rtcWakeAlarmOn()
		# start of suspend trace marker
		if(sv.usecallgraph or sv.usetraceevents):
			sv.fsetVal(datetime.now().strftime(sv.tmstart), 'trace_marker')
		# predelay delay
		if(count == 1 and sv.predelay > 0):
			sv.fsetVal('WAIT %d' % sv.predelay, 'trace_marker')
			time.sleep(sv.predelay/1000.0)
			sv.fsetVal('WAIT END', 'trace_marker')
		# initiate suspend or command
		sv.dlog('system executing a suspend')
		tdata = {'error': ''}
		if sv.testcommand != '':
			res = call(sv.testcommand+' 2>&1', shell=True);
			if res != 0:
				tdata['error'] = 'cmd returned %d' % res
		else:
			mode = sv.suspendmode
			if sv.memmode and os.path.exists(sv.mempowerfile):
				mode = 'mem'
				sv.testVal(sv.mempowerfile, 'radio', sv.memmode)
			if sv.diskmode and os.path.exists(sv.diskpowerfile):
				mode = 'disk'
				sv.testVal(sv.diskpowerfile, 'radio', sv.diskmode)
			if sv.acpidebug:
				sv.testVal(sv.acpipath, 'acpi', '0xe')
			if mode == 'freeze' and sv.haveTurbostat():
				# execution will pause here
				turbo = sv.turbostat()
				if turbo:
					tdata['turbo'] = turbo
			else:
				# writing the mode to /sys/power/state triggers the suspend
				pf = open(sv.powerfile, 'w')
				pf.write(mode)
				# execution will pause here
				try:
					pf.close()
				except Exception as e:
					tdata['error'] = str(e)
		sv.dlog('system returned from resume')
		# reset everything
		sv.testVal('restoreall')
		if(sv.rtcwake):
			sv.dlog('disable RTC wake alarm')
			sv.rtcWakeAlarmOff()
		# postdelay delay
		if(count == sv.execcount and sv.postdelay > 0):
			sv.fsetVal('WAIT %d' % sv.postdelay, 'trace_marker')
			time.sleep(sv.postdelay/1000.0)
			sv.fsetVal('WAIT END', 'trace_marker')
		# return from suspend
		pprint('RESUME COMPLETE')
		if(sv.usecallgraph or sv.usetraceevents):
			sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
		if sv.wifi and wifi:
			tdata['wifi'] = sv.pollWifi(wifi)
			sv.dlog('wifi check, %s' % tdata['wifi'])
		if(sv.suspendmode == 'mem' or sv.suspendmode == 'command'):
			sv.dlog('read the ACPI FPDT')
			tdata['fw'] = getFPDT(False)
		testdata.append(tdata)
	sv.dlog('run the cmdinfo list after')
	cmdafter = sv.cmdinfo(False)
	# stop ftrace
	if(sv.usecallgraph or sv.usetraceevents):
		if sv.useprocmon:
			sv.dlog('stop the process monitor')
			pm.stop()
		sv.fsetVal('0', 'tracing_on')
	# grab a copy of the dmesg output
	if not quiet:
		pprint('CAPTURING DMESG')
	sysvals.dlog('EXECUTION TRACE END')
	sv.getdmesg(testdata)
	# grab a copy of the ftrace output
	if(sv.usecallgraph or sv.usetraceevents):
		if not quiet:
			pprint('CAPTURING TRACE')
		op = sv.writeDatafileHeader(sv.ftracefile, testdata)
		fp = open(tp+'trace', 'r')
		for line in fp:
			op.write(line)
		op.close()
		sv.fsetVal('', 'trace')
		sv.platforminfo(cmdafter)
def readFile(file):
	"""Return the stripped content of a sysfs file.

	For a symlink, return the basename of the link target instead
	(useful for e.g. the 'driver' and 'subsystem' links).
	"""
	if not os.path.islink(file):
		return sysvals.getVal(file).strip()
	target = os.readlink(file)
	return target.split('/')[-1]
# Function: ms2nice
# Description:
# Print out a very concise time string in minutes and seconds
# Output:
#	 The time string, e.g. "1:54:12", "05:33", or "45s"
def ms2nice(val):
	"""Convert a millisecond count into a concise time string.

	Returns 'h:mm:ss' when hours are present, 'mm:ss' when only
	minutes are, and plain seconds ('Ns') otherwise.
	"""
	total_sec = int(val) // 1000
	mins, secs = divmod(total_sec, 60)
	hours, mins = divmod(mins, 60)
	if hours > 0:
		return '%d:%02d:%02d' % (hours, mins, secs)
	if mins > 0:
		return '%02d:%02d' % (mins, secs)
	return '%ds' % secs
def yesno(val):
	"""Map a runtime-PM sysfs attribute value to its one-letter column code.

	Used by deviceInfo to render the A/R/S columns; returns a single
	space for any unrecognized value.
	"""
	# fix: don't shadow the builtin 'list'; use dict.get over an LBYL
	# membership test (single lookup)
	codes = {'enabled':'A', 'disabled':'S', 'auto':'E', 'on':'D',
		'active':'A', 'suspended':'S', 'suspending':'S'}
	return codes.get(val, ' ')
# Function: deviceInfo
# Description:
# Detect all the USB hosts and devices currently connected and add
# a list of USB device names to sysvals for better timeline readability
def deviceInfo(output=''):
	"""Scan /sys/devices for devices that support runtime power management.

	With no argument, print a legend followed by one formatted status line
	per runtime-PM capable device. When output is a control value (e.g.
	'on'/'auto'), skip the printout and instead collect the power/control
	sysfs paths whose current value matches.

	Arguments:
		output: '' for a printed report, or a control value to match
	Returns:
		list of matching power/control file paths (empty in report mode)
	"""
	if not output:
		pprint('LEGEND\n'\
		'---------------------------------------------------------------------------------------------\n'\
		'  A = async/sync PM queue (A/S)               C = runtime active children\n'\
		'  R = runtime suspend enabled/disabled (E/D)  rACTIVE = runtime active (min/sec)\n'\
		'  S = runtime status active/suspended (A/S)   rSUSPEND = runtime suspend (min/sec)\n'\
		'  U = runtime usage count\n'\
		'---------------------------------------------------------------------------------------------\n'\
		'DEVICE                     NAME                       A R S U C    rACTIVE   rSUSPEND\n'\
		'---------------------------------------------------------------------------------------------')
	res = []
	tgtval = 'runtime_status'
	lines = dict()
	# look for */power directories that expose control and runtime_status
	for dirname, dirnames, filenames in os.walk('/sys/devices'):
		if(not re.match('.*/power', dirname) or
			'control' not in filenames or
			tgtval not in filenames):
			continue
		name = ''
		# strip the trailing '/power' to get the device directory
		dirname = dirname[:-6]
		device = dirname.split('/')[-1]
		power = dict()
		power[tgtval] = readFile('%s/power/%s' % (dirname, tgtval))
		# only list devices which support runtime suspend
		if power[tgtval] not in ['active', 'suspended', 'suspending']:
			continue
		# best-effort human-readable name: product, then driver, then subsystem
		for i in ['product', 'driver', 'subsystem']:
			file = '%s/%s' % (dirname, i)
			if os.path.exists(file):
				name = readFile(file)
				break
		for i in ['async', 'control', 'runtime_status', 'runtime_usage',
			'runtime_active_kids', 'runtime_active_time',
			'runtime_suspended_time']:
			if i in filenames:
				power[i] = readFile('%s/power/%s' % (dirname, i))
		if output:
			if power['control'] == output:
				res.append('%s/power/control' % dirname)
			continue
		lines[dirname] = '%-26s %-26s %1s %1s %1s %1s %1s %10s %10s' % \
			(device[:26], name[:26],
			yesno(power['async']), \
			yesno(power['control']), \
			yesno(power['runtime_status']), \
			power['runtime_usage'], \
			power['runtime_active_kids'], \
			ms2nice(power['runtime_active_time']), \
			ms2nice(power['runtime_suspended_time']))
	# print sorted by sysfs path for a stable report
	for i in sorted(lines):
		print(lines[i])
	return res
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
	"""Determine the supported power modes on this system.

	Reads /sys/power/state plus the mem_sleep and disk mode files and
	returns the available mode names as a string list (e.g. 'freeze',
	'mem', 'mem-s2idle', 'disk-platform').
	"""
	modes = []
	if os.path.exists(sysvals.powerfile):
		with open(sysvals.powerfile, 'r') as fp:
			modes = fp.read().split()
	if os.path.exists(sysvals.mempowerfile):
		deep = False
		with open(sysvals.mempowerfile, 'r') as fp:
			for m in fp.read().split():
				memmode = m.strip('[]')
				if memmode == 'deep':
					deep = True
				else:
					modes.append('mem-%s' % memmode)
		# without 'deep' support, plain 'mem' is not a real option
		if 'mem' in modes and not deep:
			modes.remove('mem')
	if 'disk' in modes and os.path.exists(sysvals.diskpowerfile):
		with open(sysvals.diskpowerfile, 'r') as fp:
			for m in fp.read().split():
				modes.append('disk-%s' % m.strip('[]'))
	return modes
# Function: dmidecode
# Description:
# Read the bios tables and pull out system info
# Arguments:
# mempath: /dev/mem or custom mem path
# fatal: True to exit on error, False to return empty dict
# Output:
# A dict object with all available key/values
def dmidecode(mempath, fatal=False):
	"""Read the SMBIOS/DMI tables from memory and pull out system info.

	Locates the table anchor (via the EFI systab when available, else a
	legacy scan of 0xf0000-0xfffff) and decodes the string fields listed
	in the hardcoded (type, offset) map below.

	Arguments:
		mempath: /dev/mem or custom mem path
		fatal: True to exit on error, False to return empty dict
	Returns:
		A dict object with all available key/values
	"""
	out = dict()
	# the list of values to retrieve, with hardcoded (type, idx)
	info = {
		'bios-vendor': (0, 4),
		'bios-version': (0, 5),
		'bios-release-date': (0, 8),
		'system-manufacturer': (1, 4),
		'system-product-name': (1, 5),
		'system-version': (1, 6),
		'system-serial-number': (1, 7),
		'baseboard-manufacturer': (2, 4),
		'baseboard-product-name': (2, 5),
		'baseboard-version': (2, 6),
		'baseboard-serial-number': (2, 7),
		'chassis-manufacturer': (3, 4),
		'chassis-type': (3, 5),
		'chassis-version': (3, 6),
		'chassis-serial-number': (3, 7),
		'processor-manufacturer': (4, 7),
		'processor-version': (4, 16),
	}
	if(not os.path.exists(mempath)):
		if(fatal):
			doError('file does not exist: %s' % mempath)
		return out
	if(not os.access(mempath, os.R_OK)):
		if(fatal):
			doError('file is not readable: %s' % mempath)
		return out
	# by default use legacy scan, but try to use EFI first
	memaddr = 0xf0000
	memsize = 0x10000
	for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']:
		if not os.path.exists(ep) or not os.access(ep, os.R_OK):
			continue
		fp = open(ep, 'r')
		buf = fp.read()
		fp.close()
		# the systab lists the physical address of the SMBIOS entry point
		i = buf.find('SMBIOS=')
		if i >= 0:
			try:
				memaddr = int(buf[i+7:], 16)
				memsize = 0x20
			except:
				continue
	# read in the memory for scanning
	try:
		fp = open(mempath, 'rb')
		fp.seek(memaddr)
		buf = fp.read(memsize)
	except:
		if(fatal):
			doError('DMI table is unreachable, sorry')
		else:
			pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
			return out
	fp.close()
	# search for either an SM table or DMI table
	i = base = length = num = 0
	while(i < memsize):
		# entry points are paragraph (16-byte) aligned
		if buf[i:i+4] == b'_SM_' and i < memsize - 16:
			length = struct.unpack('H', buf[i+22:i+24])[0]
			base, num = struct.unpack('IH', buf[i+24:i+30])
			break
		elif buf[i:i+5] == b'_DMI_':
			length = struct.unpack('H', buf[i+6:i+8])[0]
			base, num = struct.unpack('IH', buf[i+8:i+14])
			break
		i += 16
	if base == 0 and length == 0 and num == 0:
		if(fatal):
			doError('Neither SMBIOS nor DMI were found')
		else:
			return out
	# read in the SM or DMI table
	try:
		fp = open(mempath, 'rb')
		fp.seek(base)
		buf = fp.read(length)
	except:
		if(fatal):
			doError('DMI table is unreachable, sorry')
		else:
			pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
			return out
	fp.close()
	# scan the table for the values we want
	count = i = 0
	while(count < num and i <= len(buf) - 4):
		# each structure: 4-byte header (type, formatted-area size, handle),
		# then the formatted area, then a double-NUL terminated string set
		type, size, handle = struct.unpack('BBH', buf[i:i+4])
		n = i + size
		while n < len(buf) - 1:
			if 0 == struct.unpack('H', buf[n:n+2])[0]:
				break
			n += 1
		data = buf[i+size:n+2].split(b'\0')
		for name in info:
			itype, idxadr = info[name]
			if itype == type:
				# the byte at idxadr is a 1-based index into the string set
				idx = struct.unpack('B', buf[i+idxadr:i+idxadr+1])[0]
				if idx > 0 and idx < len(data) - 1:
					s = data[idx-1].decode('utf-8')
					if s.strip() and s.strip().lower() != 'to be filled by o.e.m.':
						out[name] = s
		i = n + 2
		count += 1
	return out
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
	"""Read the ACPI FPDT (Firmware Performance Data Table).

	Parses the FPDT header from sysfs, then follows its record pointers
	into /dev/mem to extract the firmware boot (FBPT) and S3
	suspend/resume (S3PT) performance records.

	Arguments:
		output: True to print the table contents to stdout and treat
			access problems as fatal, False to fail quietly
	Returns:
		[suspend_time_ns, resume_time_ns] on success, False on failure
		(or [0, 0] if a record points at an unreadable address)
	"""
	rectype = {}
	rectype[0] = 'Firmware Basic Boot Performance Record'
	rectype[1] = 'S3 Performance Table Record'
	prectype = {}
	prectype[0] = 'Basic S3 Resume Performance Record'
	prectype[1] = 'Basic S3 Suspend Performance Record'
	sysvals.rootCheck(True)
	# both the FPDT sysfs file and /dev/mem must exist and be readable
	if(not os.path.exists(sysvals.fpdtpath)):
		if(output):
			doError('file does not exist: %s' % sysvals.fpdtpath)
		return False
	if(not os.access(sysvals.fpdtpath, os.R_OK)):
		if(output):
			doError('file is not readable: %s' % sysvals.fpdtpath)
		return False
	if(not os.path.exists(sysvals.mempath)):
		if(output):
			doError('file does not exist: %s' % sysvals.mempath)
		return False
	if(not os.access(sysvals.mempath, os.R_OK)):
		if(output):
			doError('file is not readable: %s' % sysvals.mempath)
		return False
	fp = open(sysvals.fpdtpath, 'rb')
	buf = fp.read()
	fp.close()
	# the standard ACPI table header is 36 bytes
	if(len(buf) < 36):
		if(output):
			doError('Invalid FPDT table data, should '+\
				'be at least 36 bytes')
		return False
	table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
	if(output):
		pprint('\n'\
		'Firmware Performance Data Table (%s)\n'\
		'                  Signature : %s\n'\
		'               Table Length : %u\n'\
		'                   Revision : %u\n'\
		'                   Checksum : 0x%x\n'\
		'                     OEM ID : %s\n'\
		'               OEM Table ID : %s\n'\
		'               OEM Revision : %u\n'\
		'                 Creator ID : %s\n'\
		'           Creator Revision : 0x%x\n'\
		'' % (ascii(table[0]), ascii(table[0]), table[1], table[2],
			table[3], ascii(table[4]), ascii(table[5]), table[6],
			ascii(table[7]), table[8]))
	if(table[0] != b'FPDT'):
		if(output):
			doError('Invalid FPDT table')
		return False
	if(len(buf) <= 36):
		return False
	i = 0
	fwData = [0, 0]
	records = buf[36:]
	try:
		fp = open(sysvals.mempath, 'rb')
	except:
		pprint('WARNING: /dev/mem is not readable, ignoring the FPDT data')
		return False
	while(i < len(records)):
		# fix: guard against a truncated or zero-length record header,
		# which previously raised struct.error or spun forever
		if(i + 4 > len(records)):
			break
		header = struct.unpack('HBB', records[i:i+4])
		if(header[1] == 0):
			break
		if(header[0] not in rectype):
			i += header[1]
			continue
		if(header[1] != 16):
			i += header[1]
			continue
		addr = struct.unpack('Q', records[i+8:i+16])[0]
		try:
			fp.seek(addr)
			first = fp.read(8)
		except:
			if(output):
				pprint('Bad address 0x%x in %s' % (addr, sysvals.mempath))
			# fix: close the /dev/mem handle before the early return
			fp.close()
			return [0, 0]
		rechead = struct.unpack('4sI', first)
		recdata = fp.read(rechead[1]-8)
		if(rechead[0] == b'FBPT'):
			record = struct.unpack('HBBIQQQQQ', recdata[:48])
			if(output):
				pprint('%s (%s)\n'\
				'                  Reset END : %u ns\n'\
				'  OS Loader LoadImage Start : %u ns\n'\
				' OS Loader StartImage Start : %u ns\n'\
				'     ExitBootServices Entry : %u ns\n'\
				'      ExitBootServices Exit : %u ns'\
				'' % (rectype[header[0]], ascii(rechead[0]), record[4], record[5],
					record[6], record[7], record[8]))
		elif(rechead[0] == b'S3PT'):
			if(output):
				pprint('%s (%s)' % (rectype[header[0]], ascii(rechead[0])))
			j = 0
			while(j < len(recdata)):
				if(j + 4 > len(recdata)):
					break
				prechead = struct.unpack('HBB', recdata[j:j+4])
				# fix: a zero-length sub-record can never advance j
				if(prechead[1] == 0):
					break
				if(prechead[0] not in prectype):
					# fix: was a bare 'continue' that never advanced j,
					# looping forever on an unknown sub-record type
					j += prechead[1]
					continue
				if(prechead[0] == 0):
					record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
					fwData[1] = record[2]
					if(output):
						pprint('    %s\n'\
						'                 Resume Count : %u\n'\
						'                   FullResume : %u ns\n'\
						'                AverageResume : %u ns'\
						'' % (prectype[prechead[0]], record[1],
								record[2], record[3]))
				elif(prechead[0] == 1):
					record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
					fwData[0] = record[1] - record[0]
					if(output):
						pprint('    %s\n'\
						'                 SuspendStart : %u ns\n'\
						'                   SuspendEnd : %u ns\n'\
						'                  SuspendTime : %u ns'\
						'' % (prectype[prechead[0]], record[0],
								record[1], fwData[0]))
				j += prechead[1]
		if(output):
			pprint('')
		i += header[1]
	fp.close()
	return fwData
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck(probecheck=False):
	"""Verify that the requested command and options will work on this host.

	Checks root access, sysfs, the chosen suspend mode, ftrace, kprobes,
	rtcwake, and optional helper commands, printing each result to the
	terminal as it goes.

	Arguments:
		probecheck: True to also register and verify the kprobes
	Returns:
		'' if the test will work, otherwise a string describing the problem
	"""
	status = ''
	pprint('Checking this system (%s)...' % platform.node())
	# check we have root access
	res = sysvals.colorText('NO (No features of this tool will work!)')
	if(sysvals.rootCheck(False)):
		res = 'YES'
	pprint('    have root access: %s' % res)
	if(res != 'YES'):
		pprint('    Try running this script with sudo')
		return 'missing root access'
	# check sysfs is mounted
	res = sysvals.colorText('NO (No features of this tool will work!)')
	if(os.path.exists(sysvals.powerfile)):
		res = 'YES'
	pprint('    is sysfs mounted: %s' % res)
	if(res != 'YES'):
		return 'sysfs is missing'
	# check target mode is a valid mode
	if sysvals.suspendmode != 'command':
		res = sysvals.colorText('NO')
		modes = getModes()
		if(sysvals.suspendmode in modes):
			res = 'YES'
		else:
			status = '%s mode is not supported' % sysvals.suspendmode
		pprint('    is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
		if(res == 'NO'):
			pprint('      valid power modes are: %s' % modes)
			pprint('      please choose one with -m')
	# check if ftrace is available
	res = sysvals.colorText('NO')
	ftgood = sysvals.verifyFtrace()
	if(ftgood):
		res = 'YES'
	elif(sysvals.usecallgraph):
		# ftrace is only a hard requirement for callgraph mode
		status = 'ftrace is not properly supported'
	pprint('    is ftrace supported: %s' % res)
	# check if kprobes are available
	if sysvals.usekprobes:
		res = sysvals.colorText('NO')
		sysvals.usekprobes = sysvals.verifyKprobes()
		if(sysvals.usekprobes):
			res = 'YES'
		else:
			sysvals.usedevsrc = False
		pprint('    are kprobes supported: %s' % res)
	# what data source are we using
	res = 'DMESG'
	if(ftgood):
		sysvals.usetraceevents = True
		# all trace events must be present to use ftrace as the source
		for e in sysvals.traceevents:
			if not os.path.exists(sysvals.epath+e):
				sysvals.usetraceevents = False
		if(sysvals.usetraceevents):
			res = 'FTRACE (all trace events found)'
	pprint('    timeline data source: %s' % res)
	# check if rtcwake
	res = sysvals.colorText('NO')
	if(sysvals.rtcpath != ''):
		res = 'YES'
	elif(sysvals.rtcwake):
		status = 'rtcwake is not properly supported'
	pprint('    is rtcwake supported: %s' % res)
	# check info commands
	pprint('    optional commands this tool may use for info:')
	no = sysvals.colorText('MISSING')
	yes = sysvals.colorText('FOUND', 32)
	for c in ['turbostat', 'mcelog', 'lspci', 'lsusb']:
		if c == 'turbostat':
			res = yes if sysvals.haveTurbostat() else no
		else:
			res = yes if sysvals.getExec(c) else no
		pprint('      %s: %s' % (c, res))
	if not probecheck:
		return status
	# verify kprobes
	if sysvals.usekprobes:
		for name in sysvals.tracefuncs:
			sysvals.defaultKprobe(name, sysvals.tracefuncs[name])
		if sysvals.usedevsrc:
			for name in sysvals.dev_tracefuncs:
				sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name])
		sysvals.addKprobes(True)
	return status
# Function: doError
# Description:
#	 generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
	"""Report a fatal error, record it in the result output, and exit.

	Arguments:
		msg: the error message to print
		help: True if printHelp should be called first, False otherwise
	"""
	# fix: idiomatic truthiness test instead of '== True'
	if help:
		printHelp()
	pprint('ERROR: %s\n' % msg)
	sysvals.outputResult({'error': msg})
	sys.exit(1)
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max, main=True):
	"""Pull an integer argument from the command line with checks.

	Arguments:
		name: the option name (for error messages)
		args: an argument iterator when main is True, else the raw value
		min/max: the inclusive range the value must fall in
	Returns:
		the validated integer value (calls doError on any failure)
	"""
	arg = args
	if main:
		try:
			arg = next(args)
		except:
			doError(name+': no argument supplied', True)
	try:
		val = int(arg)
	except:
		doError(name+': non-integer value given', True)
	if not min <= val <= max:
		doError(name+': value should be between %d and %d' % (min, max), True)
	return val
# Function: getArgFloat
# Description:
# pull out a float argument from the command line with checks
def getArgFloat(name, args, min, max, main=True):
	# Pull a float argument from the command line (or a config value) and
	# verify it lies in [min, max]; exits via doError() on any problem.
	#   name: option name used in error messages
	#   args: iterator over remaining argv words when main=True, otherwise
	#         the raw value string itself
	#   min/max: inclusive bounds (names shadow builtins; kept so the
	#            existing call signature stays unchanged)
	#   main: True when parsing sys.argv, False for config file values
	if main:
		try:
			arg = next(args)
		except StopIteration:
			# fix: was a bare "except:", which also swallowed
			# KeyboardInterrupt/SystemExit raised mid-parse
			doError(name+': no argument supplied', True)
	else:
		arg = args
	try:
		val = float(arg)
	except (ValueError, TypeError):
		doError(name+': non-numerical value given', True)
	if(val < min or val > max):
		doError(name+': value should be between %f and %f' % (min, max), True)
	return val
def processData(live=False, quiet=False):
	# Parse the captured ftrace/dmesg logs into test run data and build the
	# html timeline.
	#   live: True when called right after a live test run
	#   quiet: suppress progress output
	# Returns a (testruns, stamp) tuple; on failure the stamp dict carries
	# an 'error' entry describing why the timeline could not be generated.
	if not quiet:
		pprint('PROCESSING: %s' % sysvals.htmlfile)
	sysvals.vprint('usetraceevents=%s, usetracemarkers=%s, usekprobes=%s' % \
		(sysvals.usetraceevents, sysvals.usetracemarkers, sysvals.usekprobes))
	error = ''
	if(sysvals.usetraceevents):
		# ftrace is the primary data source when trace events are available
		testruns, error = parseTraceLog(live)
		if sysvals.dmesgfile:
			for data in testruns:
				data.extractErrorInfo()
	else:
		# otherwise fall back to the dmesg log, optionally augmented with
		# whatever partial ftrace data exists
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)):
			appendIncompleteTraceLog(testruns)
	if not sysvals.stamp:
		# the stamp identifies the test run; without it the data is unusable
		pprint('ERROR: data does not include the expected stamp')
		return (testruns, {'error': 'timeline generation failed'})
	shown = ['bios', 'biosdate', 'cpu', 'host', 'kernel', 'man', 'memfr',
		'memsz', 'mode', 'numcpu', 'plat', 'time', 'wifi']
	sysvals.vprint('System Info:')
	for key in sorted(sysvals.stamp):
		if key in shown:
			sysvals.vprint(' %-8s : %s' % (key.upper(), sysvals.stamp[key]))
	sysvals.vprint('Command:\n %s' % sysvals.cmdline)
	for data in testruns:
		if data.turbostat:
			# wrap the turbostat output at ~80 columns for readability
			idx, s = 0, 'Turbostat:\n '
			for val in data.turbostat.split('|'):
				idx += len(val) + 1
				if idx >= 80:
					idx = 0
					s += '\n '
				s += val + ' '
			sysvals.vprint(s)
		data.printDetails()
	if len(sysvals.platinfo) > 0:
		sysvals.vprint('\nPlatform Info:')
		for info in sysvals.platinfo:
			sysvals.vprint('[%s - %s]' % (info[0], info[1]))
			sysvals.vprint(info[2])
		sysvals.vprint('')
	if sysvals.cgdump:
		# -cgdump: print the raw callgraph data and stop here
		for data in testruns:
			data.debugPrint()
		sys.exit(0)
	if len(testruns) < 1:
		pprint('ERROR: Not enough test data to build a timeline')
		return (testruns, {'error': 'timeline generation failed'})
	sysvals.vprint('Creating the html timeline (%s)...' % sysvals.htmlfile)
	createHTML(testruns, error)
	if not quiet:
		pprint('DONE: %s' % sysvals.htmlfile)
	# summarize the first test run in the returned stamp
	data = testruns[0]
	stamp = data.stamp
	stamp['suspend'], stamp['resume'] = data.getTimeValues()
	if data.fwValid:
		stamp['fwsuspend'], stamp['fwresume'] = data.fwSuspend, data.fwResume
	if error:
		stamp['error'] = error
	return (testruns, stamp)
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest(htmlfile=''):
	# Regenerate the html timeline from previously captured ftrace/dmesg
	# logs (no suspend/resume is executed).
	#   htmlfile: optional explicit output file; otherwise derived by sysvals
	# Returns the stamp dict produced by processData().
	if sysvals.ftracefile:
		doesTraceLogHaveTraceEvents()
	if not (sysvals.dmesgfile or sysvals.usetraceevents):
		doError('recreating this html output requires a dmesg file')
	if htmlfile:
		sysvals.htmlfile = htmlfile
	else:
		sysvals.setOutputFile()
	# refuse to clobber a directory or a file we cannot write
	target = sysvals.htmlfile
	if os.path.exists(target):
		if not os.path.isfile(target):
			doError('a directory already exists with this name: %s' % target)
		elif not os.access(target, os.W_OK):
			doError('missing permission to write to %s' % target)
	testruns, stamp = processData()
	sysvals.resetlog()
	return stamp
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest(n=0, quiet=False):
	# Execute one suspend/resume cycle, gather the logs, and generate the
	# html output.
	#   n: test number within a -multi run (0 for a single test)
	#   quiet: suppress progress output
	# Returns 0 on success, 2 when the processed data contains an error,
	# or None when -skiphtml skips processing entirely.
	# prepare for the test
	sysvals.initTestOutput('suspend')
	op = sysvals.writeDatafileHeader(sysvals.dmesgfile, [])
	op.write('# EXECUTION TRACE START\n')
	op.close()
	if n <= 1:
		# one-time setup for the first (or only) test of a run
		if sysvals.rs != 0:
			sysvals.dlog('%sabling runtime suspend' % ('en' if sysvals.rs > 0 else 'dis'))
			sysvals.setRuntimeSuspend(True)
		if sysvals.display:
			ret = sysvals.displayControl('init')
			sysvals.dlog('xset display init, ret = %d' % ret)
	sysvals.dlog('initialize ftrace')
	sysvals.initFtrace(quiet)
	# execute the test
	executeSuspend(quiet)
	sysvals.cleanupFtrace()
	if sysvals.skiphtml:
		# capture-only mode: record an empty result and stop
		sysvals.outputResult({}, n)
		sysvals.sudoUserchown(sysvals.testdir)
		return
	testruns, stamp = processData(True, quiet)
	for data in testruns:
		# NOTE(review): del only unbinds the loop name each iteration; the
		# testruns list itself stays alive — confirm the intent here
		del data
	sysvals.sudoUserchown(sysvals.testdir)
	sysvals.outputResult(stamp, n)
	if 'error' in stamp:
		return 2
	return 0
def find_in_html(html, start, end, firstonly=True):
	# Extract the text found between a start regex and an end regex in an
	# html document.
	#   html: the document text to search
	#   start: regex marking the beginning of the span (text after its match)
	#   end: regex marking the end of the span; the special value 'ms' also
	#        reduces the result to the first number found (or 'NaN')
	#   firstonly: return only the first hit as a string ('' if none);
	#              otherwise return a list of all hits
	# idiom fix: locals were named "list" and "str", shadowing the builtins
	cnt, out, matches = len(html), [], []
	if firstonly:
		m = re.search(start, html)
		if m:
			matches.append(m)
	else:
		matches = re.finditer(start, html)
	for match in matches:
		s = match.end()
		# limit the scan window to 10000 chars past the match (except for
		# the first hit, which may scan to the end of the document)
		e = cnt if (len(out) < 1 or s + 10000 > cnt) else s + 10000
		m = re.search(end, html[s:e])
		if not m:
			break
		e = s + m.start()
		text = html[s:e]
		if end == 'ms':
			num = re.search(r'[-+]?\d*\.\d+|\d+', text)
			text = num.group() if num else 'NaN'
		if firstonly:
			return text
		out.append(text)
	if firstonly:
		return ''
	return out
def data_from_html(file, outpath, issues, fulldetail=False):
	# Scrape a previously generated timeline html file for summary data.
	#   file: path to the html file
	#   outpath: base folder, used to make the stored url relative
	#   issues: shared list of issue dicts, updated in place
	#   fulldetail: also collect the full traceevent function list
	# Returns a dict of test values, or False if the file is unusable.
	html = open(file, 'r').read()
	sysvals.htmlfile = os.path.relpath(file, outpath)
	# extract general info
	suspend = find_in_html(html, 'Kernel Suspend', 'ms')
	resume = find_in_html(html, 'Kernel Resume', 'ms')
	sysinfo = find_in_html(html, '<div class="stamp sysinfo">', '</div>')
	line = find_in_html(html, '<div class="stamp">', '</div>')
	stmp = line.split()
	# the stamp line must have exactly 8 fields: host kernel mode + 5 date
	if not suspend or not resume or len(stmp) != 8:
		return False
	try:
		dt = datetime.strptime(' '.join(stmp[3:]), '%B %d %Y, %I:%M:%S %p')
	except:
		return False
	sysvals.hostname = stmp[0]
	tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
	error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>')
	if error:
		m = re.match('[a-z0-9]* failed in (?P<p>\S*).*', error)
		if m:
			result = 'fail in %s' % m.group('p')
		else:
			result = 'fail'
	else:
		result = 'pass'
	# extract error info
	tp, ilist = False, []
	extra = dict()
	log = find_in_html(html, '<div id="dmesglog" style="display:none;">',
		'</div>').strip()
	if log:
		# re-run error extraction on the embedded dmesg log
		d = Data(0)
		d.end = 999999999
		d.dmesgtext = log.split('\n')
		tp = d.extractErrorInfo()
		for msg in tp.msglist:
			sysvals.errorSummary(issues, msg)
		if stmp[2] == 'freeze':
			extra = d.turbostatInfo()
		# count occurrences of each distinct error type
		elist = dict()
		for dir in d.errorinfo:
			for err in d.errorinfo[dir]:
				if err[0] not in elist:
					elist[err[0]] = 0
				elist[err[0]] += 1
		for i in elist:
			ilist.append('%sx%d' % (i, elist[i]) if elist[i] > 1 else i)
		wifi = find_in_html(html, 'Wifi Resume: ', '</td>')
		if wifi:
			extra['wifi'] = wifi
	# detect suspend-to-idle wake loops and count them as issues
	low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
	for lowstr in ['waking', '+']:
		if not low:
			break
		if lowstr not in low:
			continue
		if lowstr == '+':
			issue = 'S2LOOPx%d' % len(low.split('+'))
		else:
			m = re.match('.*waking *(?P<n>[0-9]*) *times.*', low)
			issue = 'S2WAKEx%s' % m.group('n') if m else 'S2WAKExNaN'
		match = [i for i in issues if i['match'] == issue]
		if len(match) > 0:
			# known issue: bump the count and record this file's url
			match[0]['count'] += 1
			if sysvals.hostname not in match[0]['urls']:
				match[0]['urls'][sysvals.hostname] = [sysvals.htmlfile]
			elif sysvals.htmlfile not in match[0]['urls'][sysvals.hostname]:
				match[0]['urls'][sysvals.hostname].append(sysvals.htmlfile)
		else:
			issues.append({
				'match': issue, 'count': 1, 'line': issue,
				'urls': {sysvals.hostname: [sysvals.htmlfile]},
			})
		ilist.append(issue)
	# extract device info
	devices = dict()
	for line in html.split('\n'):
		m = re.match(' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
		if not m or 'thread kth' in line or 'thread sec' in line:
			continue
		m = re.match('(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
		if not m:
			continue
		name, time, phase = m.group('n'), m.group('t'), m.group('p')
		if ' async' in name or ' sync' in name:
			# strip the trailing async/sync marker from the device name
			name = ' '.join(name.split(' ')[:-1])
		if phase.startswith('suspend'):
			d = 'suspend'
		elif phase.startswith('resume'):
			d = 'resume'
		else:
			continue
		if d not in devices:
			devices[d] = dict()
		if name not in devices[d]:
			devices[d][name] = 0.0
		devices[d][name] += float(time)
	# create worst device info
	worst = dict()
	for d in ['suspend', 'resume']:
		worst[d] = {'name':'', 'time': 0.0}
		dev = devices[d] if d in devices else 0
		if dev and len(dev.keys()) > 0:
			# sort by (total time, name) descending; take the slowest device
			n = sorted(dev, key=lambda k:(dev[k], k), reverse=True)[0]
			worst[d]['name'], worst[d]['time'] = n, dev[n]
	data = {
		'mode': stmp[2],
		'host': stmp[0],
		'kernel': stmp[1],
		'sysinfo': sysinfo,
		'time': tstr,
		'result': result,
		'issues': ' '.join(ilist),
		'suspend': suspend,
		'resume': resume,
		'devlist': devices,
		'sus_worst': worst['suspend']['name'],
		'sus_worsttime': worst['suspend']['time'],
		'res_worst': worst['resume']['name'],
		'res_worsttime': worst['resume']['time'],
		'url': sysvals.htmlfile,
	}
	for key in extra:
		data[key] = extra[key]
	if fulldetail:
		data['funclist'] = find_in_html(html, '<div title="', '" class="traceevent"', False)
	if tp:
		# recover the original -multi/-info target from the saved cmdline
		for arg in ['-multi ', '-info ']:
			if arg in tp.cmdline:
				data['target'] = tp.cmdline[tp.cmdline.find(arg):].split()[1]
				break
	return data
def genHtml(subdir, force=False):
	# Walk subdir and (re)build the html timeline for every test folder
	# that contains raw dmesg/ftrace logs.
	#   subdir: folder tree to scan
	#   force: rebuild even when a usable html file already exists
	for root, dirs, files in os.walk(subdir):
		# reset the per-folder file selections before scanning
		sysvals.dmesgfile = sysvals.ftracefile = sysvals.htmlfile = ''
		for fname in files:
			path = os.path.join(root, fname)
			if not sysvals.usable(path):
				continue
			if re.match('.*_dmesg.txt', fname):
				sysvals.dmesgfile = path
			elif re.match('.*_ftrace.txt', fname):
				sysvals.ftracefile = path
		sysvals.setOutputFile()
		if not (sysvals.dmesgfile or sysvals.ftracefile):
			continue
		if not sysvals.htmlfile:
			continue
		if force or not sysvals.usable(sysvals.htmlfile):
			pprint('FTRACE: %s' % sysvals.ftracefile)
			if sysvals.dmesgfile:
				pprint('DMESG : %s' % sysvals.dmesgfile)
			rerunTest()
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, local=True, genhtml=False):
	# Create summary html pages for all tests found under a directory.
	#   subdir: folder to scan recursively for test html output
	#   local: write the summaries to the current directory, not subdir
	#   genhtml: first regenerate any missing timeline html from raw logs
	inpath = os.path.abspath(subdir)
	outpath = os.path.abspath('.') if local else inpath
	pprint('Generating a summary of folder:\n %s' % inpath)
	if genhtml:
		genHtml(subdir)
	target, issues, testruns = '', [], []
	desc = {'host':[],'mode':[],'kernel':[]}
	for dirname, dirnames, filenames in os.walk(subdir):
		for filename in filenames:
			if(not re.match('.*.html', filename)):
				continue
			# scrape each timeline html file for its test data
			data = data_from_html(os.path.join(dirname, filename), outpath, issues)
			if(not data):
				continue
			if 'target' in data:
				target = data['target']
			testruns.append(data)
			# track the distinct hosts/modes/kernels seen
			for key in desc:
				if data[key] not in desc[key]:
					desc[key].append(data[key])
	pprint('Summary files:')
	if len(desc['host']) == len(desc['mode']) == len(desc['kernel']) == 1:
		# a homogeneous set of tests gets a descriptive title
		title = '%s %s %s' % (desc['host'][0], desc['kernel'][0], desc['mode'][0])
		if target:
			title += ' %s' % target
	else:
		title = inpath
	createHTMLSummarySimple(testruns, os.path.join(outpath, 'summary.html'), title)
	pprint(' summary.html - tabular list of test data found')
	createHTMLDeviceSummary(testruns, os.path.join(outpath, 'summary-devices.html'), title)
	pprint(' summary-devices.html - kernel device list sorted by total execution time')
	createHTMLIssuesSummary(testruns, issues, os.path.join(outpath, 'summary-issues.html'), title)
	pprint(' summary-issues.html - kernel issues found sorted by frequency')
# Function: checkArgBool
# Description:
# check if a boolean string value is true or false
def checkArgBool(name, value):
	# Interpret a config-file string as a boolean.
	#   name: option name, used only in the error message
	#   value: the string to interpret
	# Returns True/False for recognized values; exits via doError()
	# for anything else.
	if value not in switchvalues:
		doError('invalid boolean --> (%s: %s), use "true/false" or "1/0"' % (name, value), True)
		return False
	return value not in switchoff
# Function: configFromFile
# Description:
# Configure the script via the info in a config file
def configFromFile(file):
	# Configure the tool from an ini-style config file.
	# The [Settings] section mirrors the command line options; the
	# [timeline_functions_<arch>] and [dev_timeline_functions_<arch>]
	# sections define kprobes for the timeline.
	#   file: path to an existing config file
	Config = configparser.ConfigParser()
	Config.read(file)
	sections = Config.sections()
	overridekprobes = False
	overridedevkprobes = False
	if 'Settings' in sections:
		for opt in Config.options('Settings'):
			value = Config.get('Settings', opt).lower()
			option = opt.lower()
			if(option == 'verbose'):
				sysvals.verbose = checkArgBool(option, value)
			elif(option == 'addlogs'):
				sysvals.dmesglog = sysvals.ftracelog = checkArgBool(option, value)
			elif(option == 'dev'):
				sysvals.usedevsrc = checkArgBool(option, value)
			elif(option == 'proc'):
				sysvals.useprocmon = checkArgBool(option, value)
			elif(option == 'x2'):
				if checkArgBool(option, value):
					sysvals.execcount = 2
			elif(option == 'callgraph'):
				sysvals.usecallgraph = checkArgBool(option, value)
			elif(option == 'override-timeline-functions'):
				overridekprobes = checkArgBool(option, value)
			elif(option == 'override-dev-timeline-functions'):
				overridedevkprobes = checkArgBool(option, value)
			elif(option == 'skiphtml'):
				sysvals.skiphtml = checkArgBool(option, value)
			elif(option == 'sync'):
				sysvals.sync = checkArgBool(option, value)
			elif(option == 'rs' or option == 'runtimesuspend'):
				if value in switchvalues:
					if value in switchoff:
						sysvals.rs = -1
					else:
						sysvals.rs = 1
				else:
					doError('invalid value --> (%s: %s), use "enable/disable"' % (option, value), True)
			elif(option == 'display'):
				disopt = ['on', 'off', 'standby', 'suspend']
				if value not in disopt:
					doError('invalid value --> (%s: %s), use %s' % (option, value, disopt), True)
				sysvals.display = value
			elif(option == 'gzip'):
				sysvals.gzip = checkArgBool(option, value)
			elif(option == 'cgfilter'):
				sysvals.setCallgraphFilter(value)
			elif(option == 'cgskip'):
				if value in switchoff:
					sysvals.cgskip = ''
				else:
					# FIX: was sysvals.configFile(val); 'val' is undefined in
					# this scope and raised a NameError for any cgskip value
					sysvals.cgskip = sysvals.configFile(value)
					if(not sysvals.cgskip):
						# FIX: report the configured name; sysvals.cgskip is
						# empty here, so the old message printed a blank value
						doError('%s does not exist' % value)
			elif(option == 'cgtest'):
				sysvals.cgtest = getArgInt('cgtest', value, 0, 1, False)
			elif(option == 'cgphase'):
				d = Data(0)
				if value not in d.phasedef:
					doError('invalid phase --> (%s: %s), valid phases are %s'\
						% (option, value, d.phasedef.keys()), True)
				sysvals.cgphase = value
			elif(option == 'fadd'):
				file = sysvals.configFile(value)
				if(not file):
					doError('%s does not exist' % value)
				sysvals.addFtraceFilterFunctions(file)
			elif(option == 'result'):
				sysvals.result = value
			elif(option == 'multi'):
				nums = value.split()
				if len(nums) != 2:
					doError('multi requires 2 integers (exec_count and delay)', True)
				sysvals.multiinit(nums[0], nums[1])
			elif(option == 'devicefilter'):
				sysvals.setDeviceFilter(value)
			elif(option == 'expandcg'):
				sysvals.cgexp = checkArgBool(option, value)
			elif(option == 'srgap'):
				if checkArgBool(option, value):
					sysvals.srgap = 5
			elif(option == 'mode'):
				sysvals.suspendmode = value
			elif(option == 'command' or option == 'cmd'):
				sysvals.testcommand = value
			elif(option == 'x2delay'):
				sysvals.x2delay = getArgInt('x2delay', value, 0, 60000, False)
			elif(option == 'predelay'):
				sysvals.predelay = getArgInt('predelay', value, 0, 60000, False)
			elif(option == 'postdelay'):
				sysvals.postdelay = getArgInt('postdelay', value, 0, 60000, False)
			elif(option == 'maxdepth'):
				sysvals.max_graph_depth = getArgInt('maxdepth', value, 0, 1000, False)
			elif(option == 'rtcwake'):
				if value in switchoff:
					sysvals.rtcwake = False
				else:
					sysvals.rtcwake = True
					sysvals.rtcwaketime = getArgInt('rtcwake', value, 0, 3600, False)
			elif(option == 'timeprec'):
				sysvals.setPrecision(getArgInt('timeprec', value, 0, 6, False))
			elif(option == 'mindev'):
				sysvals.mindevlen = getArgFloat('mindev', value, 0.0, 10000.0, False)
			elif(option == 'callloop-maxgap'):
				sysvals.callloopmaxgap = getArgFloat('callloop-maxgap', value, 0.0, 1.0, False)
			elif(option == 'callloop-maxlen'):
				# FIX: was assigned to callloopmaxgap, silently clobbering
				# the 'callloop-maxgap' setting; matches the
				# -callloop-maxlen command line handler
				sysvals.callloopmaxlen = getArgFloat('callloop-maxlen', value, 0.0, 1.0, False)
			elif(option == 'mincg'):
				sysvals.mincglen = getArgFloat('mincg', value, 0.0, 10000.0, False)
			elif(option == 'bufsize'):
				sysvals.bufsize = getArgInt('bufsize', value, 1, 1024*1024*8, False)
			elif(option == 'output-dir'):
				sysvals.outdir = sysvals.setOutputFolder(value)
	if sysvals.suspendmode == 'command' and not sysvals.testcommand:
		doError('No command supplied for mode "command"')
	# compatibility errors
	if sysvals.usedevsrc and sysvals.usecallgraph:
		doError('-dev is not compatible with -f')
	if sysvals.usecallgraph and sysvals.useprocmon:
		doError('-proc is not compatible with -f')
	if overridekprobes:
		sysvals.tracefuncs = dict()
	if overridedevkprobes:
		sysvals.dev_tracefuncs = dict()
	# collect kprobe definitions for this architecture; dev functions first
	kprobes = dict()
	kprobesec = 'dev_timeline_functions_'+platform.machine()
	if kprobesec in sections:
		for name in Config.options(kprobesec):
			text = Config.get(kprobesec, name)
			kprobes[name] = (text, True)
	kprobesec = 'timeline_functions_'+platform.machine()
	if kprobesec in sections:
		for name in Config.options(kprobesec):
			if name in kprobes:
				doError('Duplicate timeline function found "%s"' % (name))
			text = Config.get(kprobesec, name)
			kprobes[name] = (text, False)
	for name in kprobes:
		function = name
		format = name
		color = ''
		args = dict()
		text, dev = kprobes[name]
		data = text.split()
		i = 0
		for val in data:
			# bracketted strings are special formatting, read them separately
			if val[0] == '[' and val[-1] == ']':
				for prop in val[1:-1].split(','):
					p = prop.split('=')
					if p[0] == 'color':
						try:
							color = int(p[1], 16)
							color = '#'+p[1]
						except:
							color = p[1]
				continue
			# first real arg should be the format string
			if i == 0:
				format = val
			# all other args are actual function args
			else:
				d = val.split('=')
				args[d[0]] = d[1]
			i += 1
		if not function or not format:
			doError('Invalid kprobe: %s' % name)
		# every {placeholder} in the format must have a matching arg
		for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', format):
			if arg not in args:
				doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
		if (dev and name in sysvals.dev_tracefuncs) or (not dev and name in sysvals.tracefuncs):
			doError('Duplicate timeline function found "%s"' % (name))
		kp = {
			'name': name,
			'func': function,
			'format': format,
			sysvals.archargs: args
		}
		if color:
			kp['color'] = color
		if dev:
			sysvals.dev_tracefuncs[name] = kp
		else:
			sysvals.tracefuncs[name] = kp
# Function: printHelp
# Description:
# print out the help text
def printHelp():
	# Print the tool's usage/help text to stdout; always returns True.
	pprint('\n%s v%s\n'\
	'Usage: sudo sleepgraph <options> <commands>\n'\
	'\n'\
	'Description:\n'\
	' This tool is designed to assist kernel and OS developers in optimizing\n'\
	' their linux stack\'s suspend/resume time. Using a kernel image built\n'\
	' with a few extra options enabled, the tool will execute a suspend and\n'\
	' capture dmesg and ftrace data until resume is complete. This data is\n'\
	' transformed into a device timeline and an optional callgraph to give\n'\
	' a detailed view of which devices/subsystems are taking the most\n'\
	' time in suspend/resume.\n'\
	'\n'\
	' If no specific command is given, the default behavior is to initiate\n'\
	' a suspend/resume and capture the dmesg/ftrace output as an html timeline.\n'\
	'\n'\
	' Generates output files in subdirectory: suspend-yymmdd-HHMMSS\n'\
	' HTML output: <hostname>_<mode>.html\n'\
	' raw dmesg output: <hostname>_<mode>_dmesg.txt\n'\
	' raw ftrace output: <hostname>_<mode>_ftrace.txt\n'\
	'\n'\
	'Options:\n'\
	' -h Print this help text\n'\
	' -v Print the current tool version\n'\
	' -config fn Pull arguments and config options from file fn\n'\
	' -verbose Print extra information during execution and analysis\n'\
	' -m mode Mode to initiate for suspend (default: %s)\n'\
	' -o name Overrides the output subdirectory name when running a new test\n'\
	' default: suspend-{date}-{time}\n'\
	' -rtcwake t Wakeup t seconds after suspend, set t to "off" to disable (default: 15)\n'\
	' -addlogs Add the dmesg and ftrace logs to the html output\n'\
	' -noturbostat Dont use turbostat in freeze mode (default: disabled)\n'\
	' -srgap Add a visible gap in the timeline between sus/res (default: disabled)\n'\
	' -skiphtml Run the test and capture the trace logs, but skip the timeline (default: disabled)\n'\
	' -result fn Export a results table to a text file for parsing.\n'\
	' -wifi If a wifi connection is available, check that it reconnects after resume.\n'\
	' [testprep]\n'\
	' -sync Sync the filesystems before starting the test\n'\
	' -rs on/off Enable/disable runtime suspend for all devices, restore all after test\n'\
	' -display m Change the display mode to m for the test (on/off/standby/suspend)\n'\
	' [advanced]\n'\
	' -gzip Gzip the trace and dmesg logs to save space\n'\
	' -cmd {s} Run the timeline over a custom command, e.g. "sync -d"\n'\
	' -proc Add usermode process info into the timeline (default: disabled)\n'\
	' -dev Add kernel function calls and threads to the timeline (default: disabled)\n'\
	' -x2 Run two suspend/resumes back to back (default: disabled)\n'\
	' -x2delay t Include t ms delay between multiple test runs (default: 0 ms)\n'\
	' -predelay t Include t ms delay before 1st suspend (default: 0 ms)\n'\
	' -postdelay t Include t ms delay after last resume (default: 0 ms)\n'\
	' -mindev ms Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)\n'\
	' -multi n d Execute <n> consecutive tests at <d> seconds intervals. If <n> is followed\n'\
	' by a "d", "h", or "m" execute for <n> days, hours, or mins instead.\n'\
	' The outputs will be created in a new subdirectory with a summary page.\n'\
	' -maxfail n Abort a -multi run after n consecutive fails (default is 0 = never abort)\n'\
	' [debug]\n'\
	' -f Use ftrace to create device callgraphs (default: disabled)\n'\
	' -ftop Use ftrace on the top level call: "%s" (default: disabled)\n'\
	' -maxdepth N limit the callgraph data to N call levels (default: 0=all)\n'\
	' -expandcg pre-expand the callgraph data in the html output (default: disabled)\n'\
	' -fadd file Add functions to be graphed in the timeline from a list in a text file\n'\
	' -filter "d1,d2,..." Filter out all but this comma-delimited list of device names\n'\
	' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)\n'\
	' -cgphase P Only show callgraph data for phase P (e.g. suspend_late)\n'\
	' -cgtest N Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)\n'\
	' -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)\n'\
	' -cgfilter S Filter the callgraph output in the timeline\n'\
	' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\
	' -bufsize N Set trace buffer size to N kilo-bytes (default: all of free memory)\n'\
	' -devdump Print out all the raw device data for each phase\n'\
	' -cgdump Print out all the raw callgraph data\n'\
	'\n'\
	'Other commands:\n'\
	' -modes List available suspend modes\n'\
	' -status Test to see if the system is enabled to run this tool\n'\
	' -fpdt Print out the contents of the ACPI Firmware Performance Data Table\n'\
	' -wificheck Print out wifi connection info\n'\
	' -x<mode> Test xset by toggling the given mode (on/off/standby/suspend)\n'\
	' -sysinfo Print out system info extracted from BIOS\n'\
	' -devinfo Print out the pm settings of all devices which support runtime suspend\n'\
	' -cmdinfo Print out all the platform info collected before and after suspend/resume\n'\
	' -flist Print the list of functions currently being captured in ftrace\n'\
	' -flistall Print all functions capable of being captured in ftrace\n'\
	' -summary dir Create a summary of tests in this dir [-genhtml builds missing html]\n'\
	' [redo]\n'\
	' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)\n'\
	' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)\n'\
	'' % (sysvals.title, sysvals.version, sysvals.suspendmode, sysvals.ftopfunc))
	return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
	# Top-level driver: parse the command line, run utility commands,
	# reprocess old logs, or execute one or more suspend/resume tests.
	genhtml = False
	cmd = ''
	# commands that need no argument; dispatched after the parse loop
	simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall',
		'-devinfo', '-status', '-xon', '-xoff', '-xstandby', '-xsuspend',
		'-xinit', '-xreset', '-xstat', '-wificheck', '-cmdinfo']
	if '-f' in sys.argv:
		sysvals.cgskip = sysvals.configFile('cgskip.txt')
	# loop through the command line arguments
	args = iter(sys.argv[1:])
	for arg in args:
		if(arg == '-m'):
			try:
				val = next(args)
			except:
				doError('No mode supplied', True)
			if val == 'command' and not sysvals.testcommand:
				doError('No command supplied for mode "command"', True)
			sysvals.suspendmode = val
		elif(arg in simplecmds):
			cmd = arg[1:]
		elif(arg == '-h'):
			printHelp()
			sys.exit(0)
		elif(arg == '-v'):
			pprint("Version %s" % sysvals.version)
			sys.exit(0)
		elif(arg == '-x2'):
			sysvals.execcount = 2
		elif(arg == '-x2delay'):
			sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
		elif(arg == '-predelay'):
			sysvals.predelay = getArgInt('-predelay', args, 0, 60000)
		elif(arg == '-postdelay'):
			sysvals.postdelay = getArgInt('-postdelay', args, 0, 60000)
		elif(arg == '-f'):
			sysvals.usecallgraph = True
		elif(arg == '-ftop'):
			sysvals.usecallgraph = True
			sysvals.ftop = True
			sysvals.usekprobes = False
		elif(arg == '-skiphtml'):
			sysvals.skiphtml = True
		elif(arg == '-cgdump'):
			sysvals.cgdump = True
		elif(arg == '-devdump'):
			sysvals.devdump = True
		elif(arg == '-genhtml'):
			genhtml = True
		elif(arg == '-addlogs'):
			sysvals.dmesglog = sysvals.ftracelog = True
		elif(arg == '-nologs'):
			sysvals.dmesglog = sysvals.ftracelog = False
		elif(arg == '-addlogdmesg'):
			sysvals.dmesglog = True
		elif(arg == '-addlogftrace'):
			sysvals.ftracelog = True
		elif(arg == '-noturbostat'):
			sysvals.tstat = False
		elif(arg == '-verbose'):
			sysvals.verbose = True
		elif(arg == '-proc'):
			sysvals.useprocmon = True
		elif(arg == '-dev'):
			sysvals.usedevsrc = True
		elif(arg == '-sync'):
			sysvals.sync = True
		elif(arg == '-wifi'):
			sysvals.wifi = True
		elif(arg == '-gzip'):
			sysvals.gzip = True
		elif(arg == '-info'):
			# NOTE(review): the value is consumed for validation but not
			# stored anywhere here — confirm whether that is intentional
			try:
				val = next(args)
			except:
				doError('-info requires one string argument', True)
		elif(arg == '-desc'):
			# NOTE(review): same as -info, the value is consumed but unused
			try:
				val = next(args)
			except:
				doError('-desc requires one string argument', True)
		elif(arg == '-rs'):
			try:
				val = next(args)
			except:
				doError('-rs requires "enable" or "disable"', True)
			if val.lower() in switchvalues:
				if val.lower() in switchoff:
					sysvals.rs = -1
				else:
					sysvals.rs = 1
			else:
				doError('invalid option: %s, use "enable/disable" or "on/off"' % val, True)
		elif(arg == '-display'):
			try:
				val = next(args)
			except:
				doError('-display requires an mode value', True)
			disopt = ['on', 'off', 'standby', 'suspend']
			if val.lower() not in disopt:
				doError('valid display mode values are %s' % disopt, True)
			sysvals.display = val.lower()
		elif(arg == '-maxdepth'):
			sysvals.max_graph_depth = getArgInt('-maxdepth', args, 0, 1000)
		elif(arg == '-rtcwake'):
			try:
				val = next(args)
			except:
				doError('No rtcwake time supplied', True)
			if val.lower() in switchoff:
				sysvals.rtcwake = False
			else:
				sysvals.rtcwake = True
				sysvals.rtcwaketime = getArgInt('-rtcwake', val, 0, 3600, False)
		elif(arg == '-timeprec'):
			sysvals.setPrecision(getArgInt('-timeprec', args, 0, 6))
		elif(arg == '-mindev'):
			sysvals.mindevlen = getArgFloat('-mindev', args, 0.0, 10000.0)
		elif(arg == '-mincg'):
			sysvals.mincglen = getArgFloat('-mincg', args, 0.0, 10000.0)
		elif(arg == '-bufsize'):
			sysvals.bufsize = getArgInt('-bufsize', args, 1, 1024*1024*8)
		elif(arg == '-cgtest'):
			sysvals.cgtest = getArgInt('-cgtest', args, 0, 1)
		elif(arg == '-cgphase'):
			try:
				val = next(args)
			except:
				doError('No phase name supplied', True)
			d = Data(0)
			if val not in d.phasedef:
				doError('invalid phase --> (%s: %s), valid phases are %s'\
					% (arg, val, d.phasedef.keys()), True)
			sysvals.cgphase = val
		elif(arg == '-cgfilter'):
			try:
				val = next(args)
			except:
				doError('No callgraph functions supplied', True)
			sysvals.setCallgraphFilter(val)
		elif(arg == '-skipkprobe'):
			try:
				val = next(args)
			except:
				doError('No kprobe functions supplied', True)
			sysvals.skipKprobes(val)
		elif(arg == '-cgskip'):
			try:
				val = next(args)
			except:
				doError('No file supplied', True)
			if val.lower() in switchoff:
				sysvals.cgskip = ''
			else:
				sysvals.cgskip = sysvals.configFile(val)
				if(not sysvals.cgskip):
					doError('%s does not exist' % sysvals.cgskip)
		elif(arg == '-callloop-maxgap'):
			sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', args, 0.0, 1.0)
		elif(arg == '-callloop-maxlen'):
			sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', args, 0.0, 1.0)
		elif(arg == '-cmd'):
			try:
				val = next(args)
			except:
				doError('No command string supplied', True)
			sysvals.testcommand = val
			sysvals.suspendmode = 'command'
		elif(arg == '-expandcg'):
			sysvals.cgexp = True
		elif(arg == '-srgap'):
			sysvals.srgap = 5
		elif(arg == '-maxfail'):
			sysvals.maxfail = getArgInt('-maxfail', args, 0, 1000000)
		elif(arg == '-multi'):
			try:
				c, d = next(args), next(args)
			except:
				doError('-multi requires two values', True)
			sysvals.multiinit(c, d)
		elif(arg == '-o'):
			try:
				val = next(args)
			except:
				doError('No subdirectory name supplied', True)
			sysvals.outdir = sysvals.setOutputFolder(val)
		elif(arg == '-config'):
			try:
				val = next(args)
			except:
				doError('No text file supplied', True)
			file = sysvals.configFile(val)
			if(not file):
				doError('%s does not exist' % val)
			configFromFile(file)
		elif(arg == '-fadd'):
			try:
				val = next(args)
			except:
				doError('No text file supplied', True)
			file = sysvals.configFile(val)
			if(not file):
				doError('%s does not exist' % val)
			sysvals.addFtraceFilterFunctions(file)
		elif(arg == '-dmesg'):
			try:
				val = next(args)
			except:
				doError('No dmesg file supplied', True)
			sysvals.notestrun = True
			sysvals.dmesgfile = val
			if(os.path.exists(sysvals.dmesgfile) == False):
				doError('%s does not exist' % sysvals.dmesgfile)
		elif(arg == '-ftrace'):
			try:
				val = next(args)
			except:
				doError('No ftrace file supplied', True)
			sysvals.notestrun = True
			sysvals.ftracefile = val
			if(os.path.exists(sysvals.ftracefile) == False):
				doError('%s does not exist' % sysvals.ftracefile)
		elif(arg == '-summary'):
			try:
				val = next(args)
			except:
				doError('No directory supplied', True)
			cmd = 'summary'
			sysvals.outdir = val
			sysvals.notestrun = True
			if(os.path.isdir(val) == False):
				doError('%s is not accessible' % val)
		elif(arg == '-filter'):
			try:
				val = next(args)
			except:
				doError('No devnames supplied', True)
			sysvals.setDeviceFilter(val)
		elif(arg == '-result'):
			try:
				val = next(args)
			except:
				doError('No result file supplied', True)
			sysvals.result = val
			sysvals.signalHandlerInit()
		else:
			doError('Invalid argument: '+arg, True)
	# compatibility errors
	if(sysvals.usecallgraph and sysvals.usedevsrc):
		doError('-dev is not compatible with -f')
	if(sysvals.usecallgraph and sysvals.useprocmon):
		doError('-proc is not compatible with -f')
	if sysvals.usecallgraph and sysvals.cgskip:
		sysvals.vprint('Using cgskip file: %s' % sysvals.cgskip)
		sysvals.setCallgraphBlacklist(sysvals.cgskip)
	# callgraph size cannot exceed device size
	if sysvals.mincglen < sysvals.mindevlen:
		sysvals.mincglen = sysvals.mindevlen
	# remove existing buffers before calculating memory
	if(sysvals.usecallgraph or sysvals.usedevsrc):
		sysvals.fsetVal('16', 'buffer_size_kb')
	sysvals.cpuInfo()
	# just run a utility command and exit
	if(cmd != ''):
		ret = 0
		if(cmd == 'status'):
			if not statusCheck(True):
				ret = 1
		elif(cmd == 'fpdt'):
			if not getFPDT(True):
				ret = 1
		elif(cmd == 'sysinfo'):
			sysvals.printSystemInfo(True)
		elif(cmd == 'devinfo'):
			deviceInfo()
		elif(cmd == 'modes'):
			pprint(getModes())
		elif(cmd == 'flist'):
			sysvals.getFtraceFilterFunctions(True)
		elif(cmd == 'flistall'):
			sysvals.getFtraceFilterFunctions(False)
		elif(cmd == 'summary'):
			runSummary(sysvals.outdir, True, genhtml)
		elif(cmd in ['xon', 'xoff', 'xstandby', 'xsuspend', 'xinit', 'xreset']):
			sysvals.verbose = True
			ret = sysvals.displayControl(cmd[1:])
		elif(cmd == 'xstat'):
			pprint('Display Status: %s' % sysvals.displayControl('stat').upper())
		elif(cmd == 'wificheck'):
			dev = sysvals.checkWifi()
			if dev:
				print('%s is connected' % sysvals.wifiDetails(dev))
			else:
				print('No wifi connection found')
		elif(cmd == 'cmdinfo'):
			for out in sysvals.cmdinfo(False, True):
				print('[%s - %s]\n%s\n' % out)
		sys.exit(ret)
	# if instructed, re-analyze existing data files
	if(sysvals.notestrun):
		stamp = rerunTest(sysvals.outdir)
		sysvals.outputResult(stamp)
		sys.exit(0)
	# verify that we can run a test
	error = statusCheck()
	if(error):
		doError(error)
	# extract mem/disk extra modes and convert
	mode = sysvals.suspendmode
	if mode.startswith('mem'):
		memmode = mode.split('-', 1)[-1] if '-' in mode else 'deep'
		if memmode == 'shallow':
			mode = 'standby'
		elif memmode == 's2idle':
			mode = 'freeze'
		else:
			mode = 'mem'
		sysvals.memmode = memmode
		sysvals.suspendmode = mode
	if mode.startswith('disk-'):
		sysvals.diskmode = mode.split('-', 1)[-1]
		sysvals.suspendmode = 'disk'
	sysvals.systemInfo(dmidecode(sysvals.mempath))
	failcnt, ret = 0, 0
	if sysvals.multitest['run']:
		# run multiple tests in a separate subdirectory
		if not sysvals.outdir:
			if 'time' in sysvals.multitest:
				s = '-%dm' % sysvals.multitest['time']
			else:
				s = '-x%d' % sysvals.multitest['count']
			sysvals.outdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S'+s)
		if not os.path.isdir(sysvals.outdir):
			os.makedirs(sysvals.outdir)
		sysvals.sudoUserchown(sysvals.outdir)
		finish = datetime.now()
		if 'time' in sysvals.multitest:
			finish += timedelta(minutes=sysvals.multitest['time'])
		for i in range(sysvals.multitest['count']):
			sysvals.multistat(True, i, finish)
			if i != 0 and sysvals.multitest['delay'] > 0:
				pprint('Waiting %d seconds...' % (sysvals.multitest['delay']))
				time.sleep(sysvals.multitest['delay'])
			fmt = 'suspend-%y%m%d-%H%M%S'
			sysvals.testdir = os.path.join(sysvals.outdir, datetime.now().strftime(fmt))
			ret = runTest(i+1, True)
			# track consecutive failures for the -maxfail abort check
			failcnt = 0 if not ret else failcnt + 1
			if sysvals.maxfail > 0 and failcnt >= sysvals.maxfail:
				pprint('Maximum fail count of %d reached, aborting multitest' % (sysvals.maxfail))
				break
			time.sleep(5)
			sysvals.resetlog()
			sysvals.multistat(False, i, finish)
			if 'time' in sysvals.multitest and datetime.now() >= finish:
				break
		if not sysvals.skiphtml:
			runSummary(sysvals.outdir, False, False)
		sysvals.sudoUserchown(sysvals.outdir)
	else:
		if sysvals.outdir:
			sysvals.testdir = sysvals.outdir
		# run the test in the current directory
		ret = runTest()
	# reset to default values after testing
	if sysvals.display:
		sysvals.displayControl('reset')
	if sysvals.rs != 0:
		sysvals.setRuntimeSuspend(False)
	sys.exit(ret)
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/power/pm-graph/sleepgraph.py
|
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only
# -*- coding: utf-8 -*-
#
""" This utility can be used to debug and tune the performance of the
intel_pstate driver. This utility can be used in two ways:
- If there is Linux trace file with pstate_sample events enabled, then
this utility can parse the trace file and generate performance plots.
- If user has not specified a trace file as input via command line parameters,
then this utility enables and collects trace data for a user specified interval
and generates performance plots.
Prerequisites:
Python version 2.7.x or higher
gnuplot 5.0 or higher
gnuplot-py 1.8 or higher
(Most of the distributions have these required packages. They may be called
gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
HWP (Hardware P-States are disabled)
Kernel config for Linux trace is enabled
see print_help(): for Usage and Output details
"""
from __future__ import print_function
from datetime import datetime
import subprocess
import os
import time
import re
import signal
import sys
import getopt
import Gnuplot
from numpy import *
from decimal import *
__author__ = "Srinivas Pandruvada"
__copyright__ = " Copyright (c) 2017, Intel Corporation. "
__license__ = "GPL version 2"
# Hard upper bound on the number of CPUs this script can track.
MAX_CPUS = 256
# Define the csv file columns (1-based indices, as consumed by gnuplot "using")
C_COMM = 18
C_GHZ = 17
C_ELAPSED = 16
C_SAMPLE = 15
C_DURATION = 14
C_LOAD = 13
C_BOOST = 12
C_FREQ = 11
C_TSC = 10
C_APERF = 9
C_MPERF = 8
C_TO = 7
C_FROM = 6
C_SCALED = 5
C_CORE = 4
C_USEC = 3
C_SEC = 2
C_CPU = 1
# Module-level state shared between read_trace_data() and the main code.
# NOTE(review): a "global" statement at module scope is a no-op; kept as-is.
global sample_num, last_sec_cpu, last_usec_cpu, start_time, testname, trace_file
# 11 digits covers uptime to 115 days
getcontext().prec = 11
# Running count of parsed trace samples (across all CPUs).
sample_num =0
# Per-CPU seconds/microseconds timestamp of the previous sample.
last_sec_cpu = [0] * MAX_CPUS
last_usec_cpu = [0] * MAX_CPUS
def print_help(driver_name):
    """ Print usage, optional arguments and output description.

    driver_name is interpolated into the script name shown in the
    usage examples (e.g. 'intel_pstate' -> intel_pstate_tracer.py).
    """
    print('%s_tracer.py:'%driver_name)
    print(' Usage:')
    print(' If the trace file is available, then to simply parse and plot, use (sudo not required):')
    print(' ./%s_tracer.py [-c cpus] -t <trace_file> -n <test_name>'%driver_name)
    print(' Or')
    # Fixed: long option previously printed as "---trace_file" (three dashes).
    print(' ./%s_tracer.py [--cpu cpus] --trace_file <trace_file> --name <test_name>'%driver_name)
    print(' To generate trace file, parse and plot, use (sudo required):')
    print(' sudo ./%s_tracer.py [-c cpus] -i <interval> -n <test_name> -m <kbytes>'%driver_name)
    print(' Or')
    print(' sudo ./%s_tracer.py [--cpu cpus] --interval <interval> --name <test_name> --memory <kbytes>'%driver_name)
    print(' Optional argument:')
    print(' cpus: comma separated list of CPUs')
    print(' kbytes: Kilo bytes of memory per CPU to allocate to the trace buffer. Default: 10240')
    print(' Output:')
    print(' If not already present, creates a "results/test_name" folder in the current working directory with:')
    # Fixed: "seperated" -> "separated" in the two lines below.
    print(' cpu.csv - comma separated values file with trace contents and some additional calculations.')
    print(' cpu???.csv - comma separated values file for CPU number ???.')
    print(' *.png - a variety of PNG format plot files created from the trace contents and the additional calculations.')
    print(' Notes:')
    print(' Avoid the use of _ (underscore) in test names, because in gnuplot it is a subscript directive.')
    print(' Maximum number of CPUs is {0:d}. If there are more the script will abort with an error.'.format(MAX_CPUS))
    print(' Off-line CPUs cause the script to list some warnings, and create some empty files. Use the CPU mask feature for a clean run.')
    print(' Empty y range warnings for autoscaled plots can occur and can be ignored.')
def plot_perf_busy_with_sample(cpu_index):
    """ Per-CPU graph: performance, scaled-busy, io-boost and P-state vs. sample number. """
    csv_file = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_file):
        # Nothing was recorded for this CPU (off-line or masked out).
        return
    gp = common_all_gnuplot_settings("cpu%03d_perf_busy_vs_samples.png" % cpu_index)
    # y1 stays autoscaled on purpose; only the secondary axis gets a fixed range.
    gp('set y2range [0:200]')
    gp('set y2tics 0, 10')
    gp('set title "{} : cpu perf busy vs. sample : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
    # Override the common x label: this plot is indexed by sample, not seconds.
    gp('set xlabel "Samples"')
    gp('set ylabel "P-State"')
    gp('set y2label "Scaled Busy/performance/io-busy(%)"')
    set_4_plot_linestyles(gp)
    # A single gnuplot "plot" command, continued across calls via trailing ",\".
    gp('plot "' + csv_file + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_SAMPLE, C_CORE))
    gp('"' + csv_file + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_SAMPLE, C_SCALED))
    gp('"' + csv_file + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_SAMPLE, C_BOOST))
    gp('"' + csv_file + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_SAMPLE, C_TO))
def plot_perf_busy(cpu_index):
    """ Per-CPU graph: performance, scaled-busy, io-boost and P-state vs. elapsed time. """
    csv_file = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_file):
        # Nothing was recorded for this CPU (off-line or masked out).
        return
    gp = common_all_gnuplot_settings("cpu%03d_perf_busy.png" % cpu_index)
    # y1 stays autoscaled on purpose; only the secondary axis gets a fixed range.
    gp('set y2range [0:200]')
    gp('set y2tics 0, 10')
    gp('set title "{} : perf busy : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
    gp('set ylabel "P-State"')
    gp('set y2label "Scaled Busy/performance/io-busy(%)"')
    set_4_plot_linestyles(gp)
    # A single gnuplot "plot" command, continued across calls via trailing ",\".
    gp('plot "' + csv_file + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_ELAPSED, C_CORE))
    gp('"' + csv_file + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_ELAPSED, C_SCALED))
    gp('"' + csv_file + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_ELAPSED, C_BOOST))
    gp('"' + csv_file + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_ELAPSED, C_TO))
def plot_durations(cpu_index):
    """ Per-CPU graph of the timer duration between samples vs. elapsed time. """
    csv_file = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_file):
        # Nothing was recorded for this CPU (off-line or masked out).
        return
    gp = common_all_gnuplot_settings("cpu%03d_durations.png" % cpu_index)
    # y axis deliberately autoscaled.
    gp('set title "{} : durations : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
    gp('set ylabel "Timer Duration (MilliSeconds)"')
    # Single-series plot: the shared key from the common settings is noise here.
    gp('set key off')
    set_4_plot_linestyles(gp)
    gp('plot "' + csv_file + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_DURATION))
def plot_loads(cpu_index):
    """ Per-CPU graph of CPU load (percent) vs. elapsed time. """
    csv_file = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_file):
        # Nothing was recorded for this CPU (off-line or masked out).
        return
    gp = common_all_gnuplot_settings("cpu%03d_loads.png" % cpu_index)
    gp('set yrange [0:100]')
    gp('set ytics 0, 10')
    gp('set title "{} : loads : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
    gp('set ylabel "CPU load (percent)"')
    # Single-series plot: the shared key from the common settings is noise here.
    gp('set key off')
    set_4_plot_linestyles(gp)
    gp('plot "' + csv_file + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_LOAD))
def plot_pstate_cpu_with_sample():
    """ One graph with every CPU's P-state plotted against sample number. """
    if not os.path.exists('cpu.csv'):
        return
    gp = common_all_gnuplot_settings('all_cpu_pstates_vs_samples.png')
    # y axis deliberately autoscaled; x axis is the sample index, not seconds.
    gp('set xlabel "Samples"')
    gp('set ylabel "P-State"')
    gp('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
    # Enumerate only the per-CPU csv files that actually exist so that
    # masked-out CPUs do not abort the gnuplot iteration on a missing file.
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO))
def plot_pstate_cpu():
    """ One graph with every CPU's P-state plotted against elapsed time. """
    gp = common_all_gnuplot_settings('all_cpu_pstates.png')
    # y axis deliberately autoscaled.
    gp('set ylabel "P-State"')
    gp('set title "{} : cpu pstates : {:%F %H:%M}"'.format(testname, datetime.now()))
    # A gnuplot for-loop over cpu indices would abort on the first missing
    # file when the CPU masking option is used, so enumerate the csv files
    # that actually exist instead.
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO))
def plot_load_cpu():
    """ One graph with every CPU's load plotted against elapsed time. """
    gp = common_all_gnuplot_settings('all_cpu_loads.png')
    gp('set yrange [0:100]')
    gp('set ylabel "CPU load (percent)"')
    gp('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
    # Enumerate existing per-CPU csv files; see plot_pstate_cpu() rationale.
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD))
def plot_frequency_cpu():
    """ One graph with every CPU's frequency plotted against elapsed time. """
    gp = common_all_gnuplot_settings('all_cpu_frequencies.png')
    # y axis deliberately autoscaled.
    gp('set ylabel "CPU Frequency (GHz)"')
    gp('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
    # Enumerate existing per-CPU csv files; see plot_pstate_cpu() rationale.
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ))
def plot_duration_cpu():
    """ One graph with every CPU's sample duration plotted against elapsed time. """
    gp = common_all_gnuplot_settings('all_cpu_durations.png')
    # y axis deliberately autoscaled.
    gp('set ylabel "Timer Duration (MilliSeconds)"')
    gp('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
    # Enumerate existing per-CPU csv files; see plot_pstate_cpu() rationale.
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION))
def plot_scaled_cpu():
    """ One graph with every CPU's scaled-busy value plotted against elapsed time. """
    gp = common_all_gnuplot_settings('all_cpu_scaled.png')
    # y axis deliberately autoscaled.
    gp('set ylabel "Scaled Busy (Unitless)"')
    gp('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
    # Enumerate existing per-CPU csv files; see plot_pstate_cpu() rationale.
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED))
def plot_boost_cpu():
    """ One graph with every CPU's IO boost percentage plotted against elapsed time. """
    gp = common_all_gnuplot_settings('all_cpu_boost.png')
    gp('set yrange [0:100]')
    gp('set ylabel "CPU IO Boost (percent)"')
    gp('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
    # Enumerate existing per-CPU csv files; see plot_pstate_cpu() rationale.
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST))
def plot_ghz_cpu():
    """ One graph with every CPU's computed TSC frequency (a sanity check). """
    gp = common_all_gnuplot_settings('all_cpu_ghz.png')
    # y axis deliberately autoscaled.
    gp('set ylabel "TSC Frequency (GHz)"')
    gp('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
    # Enumerate existing per-CPU csv files; see plot_pstate_cpu() rationale.
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ))
def common_all_gnuplot_settings(output_png):
    """ Shared setup for the multi-CPU (all on one graph) plots: common
    settings plus the output file name. """
    gp = common_gnuplot_settings()
    gp('set output "' + output_png + '"')
    return gp
def common_gnuplot_settings():
    """ Create a Gnuplot instance preloaded with the settings shared by all plots. """
    gp = Gnuplot.Gnuplot(persist=1)
    # The first setting is for rigor only: comma separation seems to be
    # assumed for .csv files anyway.
    for setting in (
            'set datafile separator ","',
            'set ytics nomirror',
            'set xtics nomirror',
            'set xtics font ", 10"',
            'set ytics font ", 10"',
            'set tics out scale 1.0',
            'set grid',
            'set key out horiz',
            'set key bot center',
            'set key samplen 2 spacing .8 font ", 9"',
            'set term png size 1200, 600',
            'set title font ", 11"',
            'set ylabel font ", 10"',
            'set xlabel font ", 10"',
            'set xlabel offset 0, 0.5',
            'set xlabel "Elapsed Time (Seconds)"',
    ):
        gp(setting)
    return gp
def set_4_plot_linestyles(g_plot):
    """ Define the four line styles used by the graphs that draw 4 series. """
    # Style index 1..4 maps to green, red, purple, blue respectively.
    for index, color in enumerate(('green', 'red', 'purple', 'blue'), start=1):
        g_plot('set style line {:d} linetype 1 linecolor rgb "{}" pointtype -1'.format(index, color))
def store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz, cpu_mask):
    """ Append one sample row for cpu_int to the master cpu.csv file.

    Rows for CPUs whose cpu_mask entry is 0 are silently dropped.
    Sets the module-level graph_data_present flag once a row is written.
    """
    global graph_data_present
    if cpu_mask[cpu_int] == 0:
        return
    try:
        # Context manager guarantees the handle is closed even if write fails
        # (the original open/close pair leaked the handle on error).
        with open('cpu.csv', 'a') as f_handle:
            string_buffer = "CPU_%03u, %05u, %06u, %u, %u, %u, %u, %u, %u, %u, %.4f, %u, %.2f, %.3f, %u, %.3f, %.3f, %s\n" % (cpu_int, int(time_pre_dec), int(time_post_dec), int(core_busy), int(scaled), int(_from), int(_to), int(mperf), int(aperf), int(tsc), freq_ghz, int(io_boost), load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm)
            f_handle.write(string_buffer)
    except OSError:
        # Narrowed from a bare except: only I/O failures are expected here.
        print('IO error cpu.csv')
        return
    graph_data_present = True
def split_csv(current_max_cpu, cpu_mask):
    """ Separate the all-CPU cpu.csv file into per-CPU cpuNNN.csv files.

    Each per-CPU file gets the header row (the first line containing
    'common_cpu') followed by every line mentioning that CPU's tag.
    Pure-Python replacement for the former os.system grep pipeline:
    portable, no shell dependency, and preserves the substring-match
    semantics of grep.
    """
    if not os.path.exists('cpu.csv'):
        return
    with open('cpu.csv') as f_handle:
        lines = f_handle.readlines()
    # Equivalent of 'grep -m 1 common_cpu': first matching line only.
    header = next((line for line in lines if 'common_cpu' in line), None)
    for index in range(0, current_max_cpu + 1):
        if cpu_mask[int(index)] != 0:
            tag = 'CPU_{:0>3}'.format(index)
            with open('cpu{:0>3}.csv'.format(index), 'w') as out:
                if header is not None:
                    out.write(header)
                for line in lines:
                    if tag in line:
                        out.write(line)
def fix_ownership(path):
    """Change the owner of the file to SUDO_UID:SUDO_GID, if required.

    When the script runs under sudo the created files would otherwise be
    owned by root; chown them back to the invoking user. A no-op when not
    running under sudo.
    """
    uid = os.environ.get('SUDO_UID')
    gid = os.environ.get('SUDO_GID')
    # Require BOTH ids: the original only checked SUDO_UID and crashed with
    # TypeError (int(None)) when SUDO_GID was absent from the environment.
    if uid is not None and gid is not None:
        os.chown(path, int(uid), int(gid))
def cleanup_data_files():
    """ Remove any stale cpu.csv and write a fresh one containing only the
    header row. Left as "cleanup" for potential future re-run ability. """
    if os.path.exists('cpu.csv'):
        os.remove('cpu.csv')
    # Context manager closes the handle even if a write raises.
    with open('cpu.csv', 'a') as f_handle:
        f_handle.write('common_cpu, common_secs, common_usecs, core_busy, scaled_busy, from, to, mperf, aperf, tsc, freq, boost, load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm')
        f_handle.write('\n')
def clear_trace_file():
    """ Clear the kernel trace buffer by truncating the trace file. """
    try:
        # Opening for write truncates the buffer; nothing needs to be written.
        with open('/sys/kernel/debug/tracing/trace', 'w'):
            pass
    except OSError:
        # Narrowed from a bare except, which also swallowed KeyboardInterrupt.
        print('IO error clearing trace file ')
        sys.exit(2)
def enable_trace(trace_file):
    """ Enable the trace event by writing "1" to its sysfs enable file. """
    try:
        # Context manager closes the handle the original left dangling.
        with open(trace_file, 'w') as f_handle:
            f_handle.write("1")
    except OSError:
        # Narrowed from a bare except, which also swallowed KeyboardInterrupt.
        print('IO error enabling trace ')
        sys.exit(2)
def disable_trace(trace_file):
    """ Disable the trace event by writing "0" to its sysfs enable file. """
    try:
        # Context manager closes the handle the original left dangling.
        with open(trace_file, 'w') as f_handle:
            f_handle.write("0")
    except OSError:
        # Narrowed from a bare except, which also swallowed KeyboardInterrupt.
        print('IO error disabling trace ')
        sys.exit(2)
def set_trace_buffer_size(memory):
    """ Set the per-CPU trace buffer size (kilobytes, given as a string). """
    try:
        with open('/sys/kernel/debug/tracing/buffer_size_kb', 'w') as fp:
            fp.write(memory)
    except OSError:
        # Narrowed from a bare except, which also swallowed KeyboardInterrupt.
        print('IO error setting trace buffer size ')
        sys.exit(2)
def free_trace_buffer():
    """ Free the trace buffer memory by shrinking it to its 1 KB minimum. """
    try:
        # Context manager closes the handle the original left dangling.
        with open('/sys/kernel/debug/tracing/buffer_size_kb', 'w') as fp:
            fp.write("1")
    except OSError:
        # Narrowed from a bare except, which also swallowed KeyboardInterrupt.
        print('IO error freeing trace buffer ')
        sys.exit(2)
def read_trace_data(filename, cpu_mask):
    """ Read the ftrace output and append one csv row per pstate_sample event.

    filename: path of the raw trace file.
    cpu_mask: per-CPU 0/1 array; store_csv() drops rows for masked-out CPUs.
    Updates the module globals sample_num, last_sec_cpu, last_usec_cpu,
    start_time and current_max_cpu, then splits cpu.csv into per-CPU files.
    """
    global current_max_cpu
    global sample_num, last_sec_cpu, last_usec_cpu, start_time
    try:
        data = open(filename, 'r').read()
    except:
        # NOTE(review): bare except also hides non-I/O errors; kept as-is.
        print('Error opening ', filename)
        sys.exit(2)
    for line in data.splitlines():
        # Single regex pass extracting all pstate_sample fields; numbered
        # groups below depend on this exact grouping.
        search_obj = \
            re.search(r'(^(.*?)\[)((\d+)[^\]])(.*?)(\d+)([.])(\d+)(.*?core_busy=)(\d+)(.*?scaled=)(\d+)(.*?from=)(\d+)(.*?to=)(\d+)(.*?mperf=)(\d+)(.*?aperf=)(\d+)(.*?tsc=)(\d+)(.*?freq=)(\d+)'
            , line)
        if search_obj:
            cpu = search_obj.group(3)
            cpu_int = int(cpu)
            cpu = str(cpu_int)
            # Timestamp split at the decimal point: seconds and microseconds.
            time_pre_dec = search_obj.group(6)
            time_post_dec = search_obj.group(8)
            core_busy = search_obj.group(10)
            scaled = search_obj.group(12)
            _from = search_obj.group(14)
            _to = search_obj.group(16)
            mperf = search_obj.group(18)
            aperf = search_obj.group(20)
            tsc = search_obj.group(22)
            freq = search_obj.group(24)
            common_comm = search_obj.group(2).replace(' ', '')
            # Not all kernel versions have io_boost field
            io_boost = '0'
            search_obj = re.search(r'.*?io_boost=(\d+)', line)
            if search_obj:
                io_boost = search_obj.group(1)
            if sample_num == 0 :
                # First sample overall anchors the elapsed-time origin.
                start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000)
            sample_num += 1
            if last_sec_cpu[cpu_int] == 0 :
                # First sample for this CPU: a duration needs two samples,
                # so only remember the timestamp; nothing is stored yet.
                last_sec_cpu[cpu_int] = time_pre_dec
                last_usec_cpu[cpu_int] = time_post_dec
            else :
                duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int]))
                duration_ms = Decimal(duration_us) / Decimal(1000)
                last_sec_cpu[cpu_int] = time_pre_dec
                last_usec_cpu[cpu_int] = time_post_dec
                elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time
                load = Decimal(int(mperf)*100)/ Decimal(tsc)
                freq_ghz = Decimal(freq)/Decimal(1000000)
                # Sanity check calculation, typically anomalies indicate missed samples
                # However, check for 0 (should never occur)
                tsc_ghz = Decimal(0)
                if duration_ms != Decimal(0) :
                    tsc_ghz = Decimal(tsc)/duration_ms/Decimal(1000000)
                store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz, cpu_mask)
            if cpu_int > current_max_cpu:
                current_max_cpu = cpu_int
    # End of for each trace line loop
    # Now separate the main overall csv file into per CPU csv files.
    split_csv(current_max_cpu, cpu_mask)
def signal_handler(signal, frame):
    # SIGINT handler: undo any tracing state before exiting mid-collection.
    print(' SIGINT: Forcing cleanup before exit.')
    # 'interval' (module global) is only non-empty when this script enabled
    # tracing itself; only then must the event be disabled and the buffer freed.
    if interval:
        disable_trace(trace_file)
        clear_trace_file()
        # Free the memory
        free_trace_buffer()
    sys.exit(0)
if __name__ == "__main__":
    # sysfs switch that enables the intel_pstate pstate_sample trace event.
    trace_file = "/sys/kernel/debug/tracing/events/power/pstate_sample/enable"
    signal.signal(signal.SIGINT, signal_handler)
    # Defaults for the command line options.
    interval = ""
    filename = ""
    cpu_list = ""
    testname = ""
    memory = "10240"
    graph_data_present = False;
    # valid1: a trace file OR a collection interval was given;
    # valid2: a test name was given. Both are mandatory.
    valid1 = False
    valid2 = False
    cpu_mask = zeros((MAX_CPUS,), dtype=int)
    try:
        opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:m:",["help","trace_file=","interval=","cpu=","name=","memory="])
    except getopt.GetoptError:
        print_help('intel_pstate')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print_help('intel_pstate')
            sys.exit()
        elif opt in ("-t", "--trace_file"):
            valid1 = True
            # Resolve the trace file relative to the script's directory.
            location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
            filename = os.path.join(location, arg)
        elif opt in ("-i", "--interval"):
            valid1 = True
            interval = arg
        elif opt in ("-c", "--cpu"):
            cpu_list = arg
        elif opt in ("-n", "--name"):
            valid2 = True
            testname = arg
        elif opt in ("-m", "--memory"):
            memory = arg
    if not (valid1 and valid2):
        print_help('intel_pstate')
        sys.exit()
    # Build the CPU mask: either the comma separated list, or all CPUs.
    if cpu_list:
        for p in re.split("[,]", cpu_list):
            if int(p) < MAX_CPUS :
                cpu_mask[int(p)] = 1
    else:
        for i in range (0, MAX_CPUS):
            cpu_mask[i] = 1
    if not os.path.exists('results'):
        os.mkdir('results')
        # The regular user needs to own the directory, not root.
        fix_ownership('results')
    os.chdir('results')
    if os.path.exists(testname):
        print('The test name directory already exists. Please provide a unique test name. Test re-run not supported, yet.')
        sys.exit()
    os.mkdir(testname)
    # The regular user needs to own the directory, not root.
    fix_ownership(testname)
    os.chdir(testname)
    # Temporary (or perhaps not)
    cur_version = sys.version_info
    print('python version (should be >= 2.7):')
    print(cur_version)
    # Left as "cleanup" for potential future re-run ability.
    cleanup_data_files()
    # With an interval, capture a fresh trace; otherwise parse the given file.
    if interval:
        filename = "/sys/kernel/debug/tracing/trace"
        clear_trace_file()
        set_trace_buffer_size(memory)
        enable_trace(trace_file)
        print('Sleeping for ', interval, 'seconds')
        time.sleep(int(interval))
        disable_trace(trace_file)
    current_max_cpu = 0
    read_trace_data(filename, cpu_mask)
    if interval:
        clear_trace_file()
        # Free the memory
        free_trace_buffer()
    if graph_data_present == False:
        print('No valid data to plot')
        sys.exit(2)
    # Per-CPU plots first, then the combined (all CPUs on one graph) plots.
    for cpu_no in range(0, current_max_cpu + 1):
        plot_perf_busy_with_sample(cpu_no)
        plot_perf_busy(cpu_no)
        plot_durations(cpu_no)
        plot_loads(cpu_no)
    plot_pstate_cpu_with_sample()
    plot_pstate_cpu()
    plot_load_cpu()
    plot_frequency_cpu()
    plot_duration_cpu()
    plot_scaled_cpu()
    plot_boost_cpu()
    plot_ghz_cpu()
    # It is preferable, but not necessary, that the regular user owns the files, not root.
    for root, dirs, files in os.walk('.'):
        for f in files:
            fix_ownership(f)
    # Back out of results/<testname>.
    os.chdir('../../')
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
# -*- coding: utf-8 -*-
#
""" This utility can be used to debug and tune the performance of the
AMD P-State driver. It imports intel_pstate_tracer to analyze AMD P-State
trace event.
Prerequisites:
Python version 2.7.x or higher
gnuplot 5.0 or higher
gnuplot-py 1.8 or higher
(Most of the distributions have these required packages. They may be called
gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
Kernel config for Linux trace is enabled
see print_help(): for Usage and Output details
"""
from __future__ import print_function
from datetime import datetime
import subprocess
import os
import time
import re
import signal
import sys
import getopt
import Gnuplot
from numpy import *
from decimal import *
sys.path.append('../intel_pstate_tracer')
#import intel_pstate_tracer
import intel_pstate_tracer as ipt
__license__ = "GPL version 2"
# Hard upper bound on the number of CPUs this script can track.
MAX_CPUS = 256
# Define the csv file columns (1-based indices, as consumed by gnuplot "using")
C_COMM = 15
C_ELAPSED = 14
C_SAMPLE = 13
C_DURATION = 12
C_LOAD = 11
C_TSC = 10
C_APERF = 9
C_MPERF = 8
C_FREQ = 7
C_MAX_PERF = 6
C_DES_PERF = 5
C_MIN_PERF = 4
C_USEC = 3
C_SEC = 2
C_CPU = 1
# Module-level state shared between read_trace_data() and the top-level code.
# NOTE(review): a "global" statement at module scope is a no-op; kept as-is.
global sample_num, last_sec_cpu, last_usec_cpu, start_time, test_name, trace_file
# 11 significant digits is enough for the elapsed-time Decimal arithmetic.
getcontext().prec = 11
# Running count of parsed trace samples (across all CPUs).
sample_num =0
# Per-CPU seconds/microseconds timestamp of the previous sample.
last_sec_cpu = [0] * MAX_CPUS
last_usec_cpu = [0] * MAX_CPUS
def plot_per_cpu_freq(cpu_index):
    """ Per-CPU graph of CPU frequency (GHz) vs. elapsed time. """
    csv_file = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_file):
        # Nothing was recorded for this CPU (off-line or masked out).
        return
    gp = ipt.common_gnuplot_settings()
    gp('set output "' + "cpu%03d_frequency.png" % cpu_index + '"')
    gp('set yrange [0:7]')
    gp('set ytics 0, 1')
    gp('set ylabel "CPU Frequency (GHz)"')
    gp('set title "{} : frequency : CPU {:0>3} : {:%F %H:%M}"'.format(test_name, cpu_index, datetime.now()))
    # Second ylabel wins in gnuplot; both kept to match the original output.
    gp('set ylabel "CPU frequency"')
    gp('set key off')
    ipt.set_4_plot_linestyles(gp)
    gp('plot "' + csv_file + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_FREQ))
def plot_per_cpu_des_perf(cpu_index):
    """ Per-CPU graph of the desired performance level vs. elapsed time. """
    csv_file = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_file):
        # Nothing was recorded for this CPU (off-line or masked out).
        return
    gp = ipt.common_gnuplot_settings()
    gp('set output "' + "cpu%03d_des_perf.png" % cpu_index + '"')
    gp('set yrange [0:255]')
    gp('set ylabel "des perf"')
    gp('set title "{} : cpu des perf : CPU {:0>3} : {:%F %H:%M}"'.format(test_name, cpu_index, datetime.now()))
    gp('set key off')
    ipt.set_4_plot_linestyles(gp)
    gp('plot "' + csv_file + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_DES_PERF))
def plot_per_cpu_load(cpu_index):
    """ Per-CPU graph of CPU load (percent) vs. elapsed time. """
    csv_file = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_file):
        # Nothing was recorded for this CPU (off-line or masked out).
        return
    gp = ipt.common_gnuplot_settings()
    gp('set output "' + "cpu%03d_load.png" % cpu_index + '"')
    gp('set yrange [0:100]')
    gp('set ytics 0, 10')
    gp('set ylabel "CPU load (percent)"')
    gp('set title "{} : cpu load : CPU {:0>3} : {:%F %H:%M}"'.format(test_name, cpu_index, datetime.now()))
    gp('set key off')
    ipt.set_4_plot_linestyles(gp)
    gp('plot "' + csv_file + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_LOAD))
def plot_all_cpu_frequency():
    """ One graph with every CPU's frequency plotted against elapsed time. """
    gp = ipt.common_gnuplot_settings()
    gp('set output "' + 'all_cpu_frequencies.png' + '"')
    gp('set ylabel "CPU Frequency (GHz)"')
    gp('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(test_name, datetime.now()))
    # Enumerate existing per-CPU csv files so a CPU mask cannot abort the
    # gnuplot iteration on a missing file.
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ))
def plot_all_cpu_des_perf():
    """ One graph with every CPU's desired performance level over time. """
    gp = ipt.common_gnuplot_settings()
    gp('set output "' + 'all_cpu_des_perf.png' + '"')
    gp('set ylabel "des perf"')
    gp('set title "{} : cpu des perf : {:%F %H:%M}"'.format(test_name, datetime.now()))
    # Enumerate existing per-CPU csv files; see plot_all_cpu_frequency().
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    # NOTE(review): point type 255 (vs. 7 elsewhere) looks unusual — confirm.
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 255 ps 1 title i".format(C_ELAPSED, C_DES_PERF))
def plot_all_cpu_load():
    """ One graph with every CPU's load plotted against elapsed time. """
    gp = ipt.common_gnuplot_settings()
    gp('set output "' + 'all_cpu_load.png' + '"')
    gp('set yrange [0:100]')
    gp('set ylabel "CPU load (percent)"')
    gp('set title "{} : cpu load : {:%F %H:%M}"'.format(test_name, datetime.now()))
    # Enumerate existing per-CPU csv files; see plot_all_cpu_frequency().
    names = subprocess.check_output("ls cpu???.csv | sed -e 's/.csv//'", shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 255 ps 1 title i".format(C_ELAPSED, C_LOAD))
def store_csv(cpu_int, time_pre_dec, time_post_dec, min_perf, des_perf, max_perf, freq_ghz, mperf, aperf, tsc, common_comm, load, duration_ms, sample_num, elapsed_time, cpu_mask):
""" Store master csv file information """
global graph_data_present
if cpu_mask[cpu_int] == 0:
return
try:
f_handle = open('cpu.csv', 'a')
string_buffer = "CPU_%03u, %05u, %06u, %u, %u, %u, %.4f, %u, %u, %u, %.2f, %.3f, %u, %.3f, %s\n" % (cpu_int, int(time_pre_dec), int(time_post_dec), int(min_perf), int(des_perf), int(max_perf), freq_ghz, int(mperf), int(aperf), int(tsc), load, duration_ms, sample_num, elapsed_time, common_comm)
f_handle.write(string_buffer)
f_handle.close()
except:
print('IO error cpu.csv')
return
graph_data_present = True;
def cleanup_data_files():
    """ Remove any stale cpu.csv and write a fresh one containing only the
    header row. """
    if os.path.exists('cpu.csv'):
        os.remove('cpu.csv')
    # Context manager closes the handle even if a write raises.
    with open('cpu.csv', 'a') as f_handle:
        f_handle.write('common_cpu, common_secs, common_usecs, min_perf, des_perf, max_perf, freq, mperf, aperf, tsc, load, duration_ms, sample_num, elapsed_time, common_comm')
        f_handle.write('\n')
def read_trace_data(file_name, cpu_mask):
    """ Read the ftrace output and append one csv row per amd_cpu event.

    file_name: path of the raw trace file.
    cpu_mask: per-CPU 0/1 array; store_csv() drops rows for masked-out CPUs.
    Updates the module globals sample_num, last_sec_cpu, last_usec_cpu,
    start_time and current_max_cpu, then splits cpu.csv into per-CPU files.
    """
    global current_max_cpu
    global sample_num, last_sec_cpu, last_usec_cpu, start_time
    try:
        data = open(file_name, 'r').read()
    except:
        # NOTE(review): bare except also hides non-I/O errors; kept as-is.
        print('Error opening ', file_name)
        sys.exit(2)
    for line in data.splitlines():
        # Single regex pass extracting all amd_cpu trace fields; numbered
        # groups below depend on this exact grouping.
        search_obj = \
            re.search(r'(^(.*?)\[)((\d+)[^\]])(.*?)(\d+)([.])(\d+)(.*?amd_min_perf=)(\d+)(.*?amd_des_perf=)(\d+)(.*?amd_max_perf=)(\d+)(.*?freq=)(\d+)(.*?mperf=)(\d+)(.*?aperf=)(\d+)(.*?tsc=)(\d+)'
            , line)
        if search_obj:
            cpu = search_obj.group(3)
            cpu_int = int(cpu)
            cpu = str(cpu_int)
            # Timestamp split at the decimal point: seconds and microseconds.
            time_pre_dec = search_obj.group(6)
            time_post_dec = search_obj.group(8)
            min_perf = search_obj.group(10)
            des_perf = search_obj.group(12)
            max_perf = search_obj.group(14)
            freq = search_obj.group(16)
            mperf = search_obj.group(18)
            aperf = search_obj.group(20)
            tsc = search_obj.group(22)
            common_comm = search_obj.group(2).replace(' ', '')
            if sample_num == 0 :
                # First sample overall anchors the elapsed-time origin.
                start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000)
            sample_num += 1
            if last_sec_cpu[cpu_int] == 0 :
                # First sample for this CPU: a duration needs two samples,
                # so only remember the timestamp; nothing is stored yet.
                last_sec_cpu[cpu_int] = time_pre_dec
                last_usec_cpu[cpu_int] = time_post_dec
            else :
                duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int]))
                duration_ms = Decimal(duration_us) / Decimal(1000)
                last_sec_cpu[cpu_int] = time_pre_dec
                last_usec_cpu[cpu_int] = time_post_dec
                elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time
                load = Decimal(int(mperf)*100)/ Decimal(tsc)
                freq_ghz = Decimal(freq)/Decimal(1000000)
                store_csv(cpu_int, time_pre_dec, time_post_dec, min_perf, des_perf, max_perf, freq_ghz, mperf, aperf, tsc, common_comm, load, duration_ms, sample_num, elapsed_time, cpu_mask)
            if cpu_int > current_max_cpu:
                current_max_cpu = cpu_int
    # Now separate the main overall csv file into per CPU csv files.
    ipt.split_csv(current_max_cpu, cpu_mask)
def signal_handler(signal, frame):
    # SIGINT handler: undo any tracing state before exiting mid-collection.
    print(' SIGINT: Forcing cleanup before exit.')
    # 'interval' (module global) is only non-empty when this script enabled
    # tracing itself; only then must the event be disabled and the buffer freed.
    if interval:
        ipt.disable_trace(trace_file)
        ipt.clear_trace_file()
        ipt.free_trace_buffer()
    sys.exit(0)
# sysfs switch that enables the amd_cpu trace events.
trace_file = "/sys/kernel/debug/tracing/events/amd_cpu/enable"
signal.signal(signal.SIGINT, signal_handler)
# Defaults for the command line options.
interval = ""
file_name = ""
cpu_list = ""
test_name = ""
memory = "10240"
graph_data_present = False;
# valid1: a trace file OR a collection interval was given;
# valid2: a test name was given. Both are mandatory.
valid1 = False
valid2 = False
cpu_mask = zeros((MAX_CPUS,), dtype=int)
try:
    opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:m:",["help","trace_file=","interval=","cpu=","name=","memory="])
except getopt.GetoptError:
    ipt.print_help('amd_pstate')
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        # NOTE(review): prints only a blank line; ipt.print_help('amd_pstate')
        # was probably intended here — confirm against upstream.
        print()
        sys.exit()
    elif opt in ("-t", "--trace_file"):
        valid1 = True
        # Resolve the trace file relative to the script's directory.
        location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        file_name = os.path.join(location, arg)
    elif opt in ("-i", "--interval"):
        valid1 = True
        interval = arg
    elif opt in ("-c", "--cpu"):
        cpu_list = arg
    elif opt in ("-n", "--name"):
        valid2 = True
        test_name = arg
    elif opt in ("-m", "--memory"):
        memory = arg
if not (valid1 and valid2):
    ipt.print_help('amd_pstate')
    sys.exit()
# Build the CPU mask: either the comma separated list, or all CPUs.
if cpu_list:
    for p in re.split("[,]", cpu_list):
        if int(p) < MAX_CPUS :
            cpu_mask[int(p)] = 1
else:
    for i in range (0, MAX_CPUS):
        cpu_mask[i] = 1
if not os.path.exists('results'):
    os.mkdir('results')
    # The regular user needs to own the directory, not root.
    ipt.fix_ownership('results')
os.chdir('results')
if os.path.exists(test_name):
    print('The test name directory already exists. Please provide a unique test name. Test re-run not supported, yet.')
    sys.exit()
os.mkdir(test_name)
# The regular user needs to own the directory, not root.
ipt.fix_ownership(test_name)
os.chdir(test_name)
cur_version = sys.version_info
print('python version (should be >= 2.7):')
print(cur_version)
cleanup_data_files()
# With an interval, capture a fresh trace; otherwise parse the given file.
if interval:
    file_name = "/sys/kernel/debug/tracing/trace"
    ipt.clear_trace_file()
    ipt.set_trace_buffer_size(memory)
    ipt.enable_trace(trace_file)
    time.sleep(int(interval))
    ipt.disable_trace(trace_file)
current_max_cpu = 0
read_trace_data(file_name, cpu_mask)
if interval:
    ipt.clear_trace_file()
    ipt.free_trace_buffer()
if graph_data_present == False:
    print('No valid data to plot')
    sys.exit(2)
# Per-CPU plots first, then the combined (all CPUs on one graph) plots.
for cpu_no in range(0, current_max_cpu + 1):
    plot_per_cpu_freq(cpu_no)
    plot_per_cpu_des_perf(cpu_no)
    plot_per_cpu_load(cpu_no)
plot_all_cpu_des_perf()
plot_all_cpu_frequency()
plot_all_cpu_load()
# Let the invoking (sudo) user own the generated files, not root.
for root, dirs, files in os.walk('.'):
    for f in files:
        ipt.fix_ownership(f)
# Back out of results/<test_name>.
os.chdir('../../')
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
|
#!/usr/bin/env drgn
#
# Copyright (C) 2020 Roman Gushchin <guro@fb.com>
# Copyright (C) 2020 Facebook
from os import stat
import argparse
import sys
from drgn.helpers.linux import list_for_each_entry, list_empty
from drgn.helpers.linux import for_each_page
from drgn.helpers.linux.cpumask import for_each_online_cpu
from drgn.helpers.linux.percpu import per_cpu_ptr
from drgn import container_of, FaultError, Object, cast
DESC = """
This is a drgn script to provide slab statistics for memory cgroups.
It supports cgroup v2 and v1 and can emulate memory.kmem.slabinfo
interface of cgroup v1.
For drgn, visit https://github.com/osandov/drgn.
"""
# Map of cgroup kernfs node id -> struct mem_cgroup, filled by find_memcg_ids().
MEMCGS = {}
# kmem_cache.oo packs allocation order (high bits) and objects-per-slab
# (low OO_SHIFT bits) into a single word.
OO_SHIFT = 16
OO_MASK = ((1 << OO_SHIFT) - 1)
def err(s):
    """ Report a fatal error on stderr and abort with exit status 1. """
    message = 'slabinfo.py: error: %s' % s
    print(message, file=sys.stderr, flush=True)
    sys.exit(1)
def find_memcg_ids(css=prog['root_mem_cgroup'].css, prefix=''):
    """Recursively walk the cgroup tree and record every memory cgroup
    in MEMCGS, keyed by its kernfs node id.

    NOTE(review): the default argument is evaluated once, at import time,
    against the global drgn 'prog' object.
    """
    if not list_empty(css.children.address_of_()):
        for css in list_for_each_entry('struct cgroup_subsys_state',
                                       css.children.address_of_(),
                                       'sibling'):
            name = prefix + '/' + css.cgroup.kn.name.string_().decode('utf-8')
            memcg = container_of(css, 'struct mem_cgroup', 'css')
            MEMCGS[css.cgroup.kn.id.value_()] = memcg
            find_memcg_ids(css, name)
def is_root_cache(s):
    """Return True unless cache *s* is a memcg child cache.

    Kernels without per-memcg child caches have no memcg_params member,
    in which case every cache counts as a root cache.
    """
    try:
        root = s.memcg_params.root_cache
    except AttributeError:
        return True
    return not root
def cache_name(s):
    """Return the printable name of cache *s*.

    A memcg child cache is reported under its root cache's name.
    """
    if is_root_cache(s):
        target = s
    else:
        target = s.memcg_params.root_cache
    return target.name.string_().decode('utf-8')
# SLUB
def oo_order(s):
    """Extract the page allocation order from the packed kmem_cache.oo field."""
    packed = s.oo.x
    return packed >> OO_SHIFT
def oo_objects(s):
    """Extract the objects-per-slab count from the packed kmem_cache.oo field."""
    packed = s.oo.x
    return packed & OO_MASK
def count_partial(n, fn):
    """Sum fn(slab) over every slab on node *n*'s partial list."""
    nr_objs = 0
    for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
                                    'slab_list'):
        nr_objs += fn(slab)
    return nr_objs
def count_free(slab):
    """Number of free object slots on *slab*."""
    total, used = slab.objects, slab.inuse
    return total - used
def slub_get_slabinfo(s, cfg):
    """Collect /proc/slabinfo-style counters for SLUB cache *s*.

    Returns a dict mirroring the slabinfo columns; the cache-tuning
    fields (limit/batchcount/shared*) are always 0 under SLUB.
    """
    nr_slabs = 0
    nr_objs = 0
    nr_free = 0
    # aggregate the per-NUMA-node counters
    for node in range(cfg['nr_nodes']):
        n = s.node[node]
        nr_slabs += n.nr_slabs.counter.value_()
        nr_objs += n.total_objects.counter.value_()
        nr_free += count_partial(n, count_free)
    return {'active_objs': nr_objs - nr_free,
            'num_objs': nr_objs,
            'active_slabs': nr_slabs,
            'num_slabs': nr_slabs,
            'objects_per_slab': oo_objects(s),
            'cache_order': oo_order(s),
            'limit': 0,
            'batchcount': 0,
            'shared': 0,
            'shared_avail': 0}
def cache_show(s, cfg, objs):
    """Print one slabinfo-formatted line for cache *s*.

    *objs* overrides both object counts when objects are accounted per
    obj_cgroup (shared slab pages) instead of per cache.
    """
    if cfg['allocator'] == 'SLUB':
        sinfo = slub_get_slabinfo(s, cfg)
    else:
        err('SLAB isn\'t supported yet')
    if cfg['shared_slab_pages']:
        sinfo['active_objs'] = objs
        sinfo['num_objs'] = objs
    print('%-17s %6lu %6lu %6u %4u %4d'
          ' : tunables %4u %4u %4u'
          ' : slabdata %6lu %6lu %6lu' % (
          cache_name(s), sinfo['active_objs'], sinfo['num_objs'],
          s.size, sinfo['objects_per_slab'], 1 << sinfo['cache_order'],
          sinfo['limit'], sinfo['batchcount'], sinfo['shared'],
          sinfo['active_slabs'], sinfo['num_slabs'],
          sinfo['shared_avail']))
def detect_kernel_config():
    """Probe the debugged kernel for the config bits this script depends on.

    Returns a dict with:
      nr_nodes          -- number of online NUMA nodes
      allocator         -- 'SLUB' or 'SLAB' (exits via err() if neither)
      shared_slab_pages -- True when objects are accounted per obj_cgroup
    """
    cfg = {}
    cfg['nr_nodes'] = prog['nr_online_nodes'].value_()
    # Distinguish the allocators by the second member of struct kmem_cache:
    # SLUB has 'flags' there, SLAB has 'batchcount'.  (Hoisted the repeated
    # prog.type() lookup.)
    second_member = prog.type('struct kmem_cache').members[1].name
    if second_member == 'flags':
        cfg['allocator'] = 'SLUB'
    elif second_member == 'batchcount':
        cfg['allocator'] = 'SLAB'
    else:
        err('Can\'t determine the slab allocator')
    cfg['shared_slab_pages'] = False
    try:
        # struct obj_cgroup only exists on kernels with shared slab pages
        if prog.type('struct obj_cgroup'):
            cfg['shared_slab_pages'] = True
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed
        pass
    return cfg
def for_each_slab(prog):
    """Yield every slab page in the system as a 'struct slab *'.

    Removed the unused PGHead local that was computed but never read.
    """
    PGSlab = 1 << prog.constant('PG_slab')
    for page in for_each_page(prog):
        try:
            if page.flags.value_() & PGSlab:
                yield cast('struct slab *', page)
        except FaultError:
            # sparse memory maps: some page structs are unreadable
            pass
def main():
    """Entry point: print slabinfo for the memory cgroup named on the command line."""
    parser = argparse.ArgumentParser(description=DESC,
                                     formatter_class=
                                     argparse.RawTextHelpFormatter)
    parser.add_argument('cgroup', metavar='CGROUP',
                        help='Target memory cgroup')
    args = parser.parse_args()
    try:
        # the cgroupfs inode number equals the kernfs id used as MEMCGS key
        cgroup_id = stat(args.cgroup).st_ino
        find_memcg_ids()
        memcg = MEMCGS[cgroup_id]
    except KeyError:
        err('Can\'t find the memory cgroup')
    cfg = detect_kernel_config()
    print('# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>'
          ' : tunables <limit> <batchcount> <sharedfactor>'
          ' : slabdata <active_slabs> <num_slabs> <sharedavail>')
    if cfg['shared_slab_pages']:
        obj_cgroups = set()
        stats = {}
        caches = {}
        # find memcg pointers belonging to the specified cgroup
        obj_cgroups.add(memcg.objcg.value_())
        for ptr in list_for_each_entry('struct obj_cgroup',
                                       memcg.objcg_list.address_of_(),
                                       'list'):
            obj_cgroups.add(ptr.value_())
        # look over all slab folios and look for objects belonging
        # to the given memory cgroup
        for slab in for_each_slab(prog):
            objcg_vec_raw = slab.memcg_data.value_()
            if objcg_vec_raw == 0:
                continue
            cache = slab.slab_cache
            if not cache:
                continue
            addr = cache.value_()
            caches[addr] = cache
            # clear the lowest bit to get the true obj_cgroups
            objcg_vec = Object(prog, 'struct obj_cgroup **',
                               value=objcg_vec_raw & ~1)
            if addr not in stats:
                stats[addr] = 0
            for i in range(oo_objects(cache)):
                if objcg_vec[i].value_() in obj_cgroups:
                    stats[addr] += 1
        for addr in caches:
            if stats[addr] > 0:
                cache_show(caches[addr], cfg, stats[addr])
    else:
        # older layout: per-memcg child caches are linked off the memcg
        for s in list_for_each_entry('struct kmem_cache',
                                     memcg.kmem_caches.address_of_(),
                                     'memcg_params.kmem_caches_node'):
            cache_show(s, cfg, None)
# drgn executes this file as a script; run unconditionally
main()
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/cgroup/memcg_slabinfo.py
|
#!/usr/bin/env python3
#
# Copyright (C) 2019 Tejun Heo <tj@kernel.org>
# Copyright (C) 2019 Andy Newell <newella@fb.com>
# Copyright (C) 2019 Facebook
# Command-line help text; also documents what the script measures.
desc = """
Generate linear IO cost model coefficients used by the blk-iocost
controller. If the target raw testdev is specified, destructive tests
are performed against the whole device; otherwise, on
./iocost-coef-fio.testfile. The result can be written directly to
/sys/fs/cgroup/io.cost.model.
On high performance devices, --numjobs > 1 is needed to achieve
saturation.
See Documentation/admin-guide/cgroup-v2.rst and block/blk-iocost.c
for more details.
"""
import argparse
import re
import json
import glob
import os
import sys
import atexit
import shutil
import tempfile
import subprocess

parser = argparse.ArgumentParser(description=desc,
                                 formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--testdev', metavar='DEV',
                    help='Raw block device to use for testing, ignores --testfile-size')
parser.add_argument('--testfile-size-gb', type=float, metavar='GIGABYTES', default=16,
                    help='Testfile size in gigabytes (default: %(default)s)')
parser.add_argument('--duration', type=int, metavar='SECONDS', default=120,
                    help='Individual test run duration in seconds (default: %(default)s)')
parser.add_argument('--seqio-block-mb', metavar='MEGABYTES', type=int, default=128,
                    help='Sequential test block size in megabytes (default: %(default)s)')
parser.add_argument('--seq-depth', type=int, metavar='DEPTH', default=64,
                    help='Sequential test queue depth (default: %(default)s)')
parser.add_argument('--rand-depth', type=int, metavar='DEPTH', default=64,
                    help='Random test queue depth (default: %(default)s)')
parser.add_argument('--numjobs', type=int, metavar='JOBS', default=1,
                    help='Number of parallel fio jobs to run (default: %(default)s)')
parser.add_argument('--quiet', action='store_true')
parser.add_argument('--verbose', action='store_true')
def info(msg):
    """Print *msg* unless --quiet was given."""
    if args.quiet:
        return
    print(msg)
def dbg(msg):
    """Print *msg* only when --verbose is set and --quiet is not."""
    if not args.quiet and args.verbose:
        print(msg)
# determine ('DEVNAME', 'MAJ:MIN') for @path
def dir_to_dev(path):
    """Map *path* to the whole block device backing its filesystem."""
    # find the block device the current directory is on
    devname = subprocess.run(f'findmnt -nvo SOURCE -T{path}',
                             stdout=subprocess.PIPE, shell=True).stdout
    devname = os.path.basename(devname).decode('utf-8').strip()
    # partition -> whole device
    parents = glob.glob('/sys/block/*/' + devname)
    if len(parents):
        devname = os.path.basename(os.path.dirname(parents[0]))
    rdev = os.stat(f'/dev/{devname}').st_rdev
    return (devname, f'{os.major(rdev)}:{os.minor(rdev)}')
def create_testfile(path, size):
    """Create a *size*-byte random-content test file at *path*.

    An existing file of the right size is reused to skip the slow fill.
    """
    global args
    if os.path.isfile(path) and os.stat(path).st_size == size:
        return
    info(f'Creating testfile {path}')
    subprocess.check_call(f'rm -f {path}', shell=True)
    subprocess.check_call(f'touch {path}', shell=True)
    # best-effort: disable copy-on-write (e.g. btrfs); failure is tolerated
    subprocess.call(f'chattr +C {path}', shell=True)
    subprocess.check_call(
        f'pv -s {size} -pr /dev/urandom {"-q" if args.quiet else ""} | '
        f'dd of={path} count={size} '
        f'iflag=count_bytes,fullblock oflag=direct bs=16M status=none',
        shell=True)
def run_fio(testfile, duration, iotype, iodepth, blocksize, jobs):
    """Run one fio workload and return total bandwidth in bytes/sec.

    The bandwidth is the sum of read+write bw_bytes over all parallel jobs,
    taken from fio's JSON output.
    """
    global args
    eta = 'never' if args.quiet else 'always'
    outfile = tempfile.NamedTemporaryFile()
    cmd = (f'fio --direct=1 --ioengine=libaio --name=coef '
           f'--filename={testfile} --runtime={round(duration)} '
           f'--readwrite={iotype} --iodepth={iodepth} --blocksize={blocksize} '
           f'--eta={eta} --output-format json --output={outfile.name} '
           f'--time_based --numjobs={jobs}')
    if args.verbose:
        dbg(f'Running {cmd}')
    subprocess.check_call(cmd, shell=True)
    with open(outfile.name, 'r') as f:
        d = json.loads(f.read())
    return sum(j['read']['bw_bytes'] + j['write']['bw_bytes'] for j in d['jobs'])
def restore_elevator_nomerges():
    """Restore the scheduler and nomerges sysfs values saved before the run.

    Registered with atexit so the settings come back even on abnormal exit.
    """
    global elevator_path, nomerges_path, elevator, nomerges
    info(f'Restoring elevator to {elevator} and nomerges to {nomerges}')
    with open(elevator_path, 'w') as f:
        f.write(elevator)
    with open(nomerges_path, 'w') as f:
        f.write(nomerges)
args = parser.parse_args()

# bail out early if any required external tool is missing
missing = False
for cmd in [ 'findmnt', 'pv', 'dd', 'fio' ]:
    if not shutil.which(cmd):
        print(f'Required command "{cmd}" is missing', file=sys.stderr)
        missing = True
if missing:
    sys.exit(1)

# pick the test target: a raw device, or a test file on the current fs
if args.testdev:
    devname = os.path.basename(args.testdev)
    rdev = os.stat(f'/dev/{devname}').st_rdev
    devno = f'{os.major(rdev)}:{os.minor(rdev)}'
    testfile = f'/dev/{devname}'
    info(f'Test target: {devname}({devno})')
else:
    devname, devno = dir_to_dev('.')
    testfile = 'iocost-coef-fio.testfile'
    testfile_size = int(args.testfile_size_gb * 2 ** 30)
    create_testfile(testfile, testfile_size)
    info(f'Test target: {testfile} on {devname}({devno})')

# save the current scheduler and nomerges so they can be restored on exit
elevator_path = f'/sys/block/{devname}/queue/scheduler'
nomerges_path = f'/sys/block/{devname}/queue/nomerges'
with open(elevator_path, 'r') as f:
    elevator = re.sub(r'.*\[(.*)\].*', r'\1', f.read().strip())
with open(nomerges_path, 'r') as f:
    nomerges = f.read().strip()
info(f'Temporarily disabling elevator and merges')
atexit.register(restore_elevator_nomerges)
with open(elevator_path, 'w') as f:
    f.write('none')
with open(nomerges_path, 'w') as f:
    f.write('1')

# six measurements: sequential/random x read/write bandwidth and iops
info('Determining rbps...')
rbps = run_fio(testfile, args.duration, 'read',
               1, args.seqio_block_mb * (2 ** 20), args.numjobs)
info(f'\nrbps={rbps}, determining rseqiops...')
rseqiops = round(run_fio(testfile, args.duration, 'read',
                         args.seq_depth, 4096, args.numjobs) / 4096)
info(f'\nrseqiops={rseqiops}, determining rrandiops...')
rrandiops = round(run_fio(testfile, args.duration, 'randread',
                          args.rand_depth, 4096, args.numjobs) / 4096)
info(f'\nrrandiops={rrandiops}, determining wbps...')
wbps = run_fio(testfile, args.duration, 'write',
               1, args.seqio_block_mb * (2 ** 20), args.numjobs)
info(f'\nwbps={wbps}, determining wseqiops...')
wseqiops = round(run_fio(testfile, args.duration, 'write',
                         args.seq_depth, 4096, args.numjobs) / 4096)
info(f'\nwseqiops={wseqiops}, determining wrandiops...')
wrandiops = round(run_fio(testfile, args.duration, 'randwrite',
                          args.rand_depth, 4096, args.numjobs) / 4096)
info(f'\nwrandiops={wrandiops}')

restore_elevator_nomerges()
atexit.unregister(restore_elevator_nomerges)
info('')
# final line is in the format accepted by /sys/fs/cgroup/io.cost.model
print(f'{devno} rbps={rbps} rseqiops={rseqiops} rrandiops={rrandiops} '
      f'wbps={wbps} wseqiops={wseqiops} wrandiops={wrandiops}')
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/cgroup/iocost_coef_gen.py
|
#!/usr/bin/env drgn
#
# Copyright (C) 2019 Tejun Heo <tj@kernel.org>
# Copyright (C) 2019 Facebook
# Command-line help text for the monitor.
desc = """
This is a drgn script to monitor the blk-iocost cgroup controller.
See the comment at the top of block/blk-iocost.c for more details.
For drgn, visit https://github.com/osandov/drgn.
"""
import sys
import re
import time
import json
import math
import drgn
from drgn import container_of
from drgn.helpers.linux.list import list_for_each_entry,list_empty
from drgn.helpers.linux.radixtree import radix_tree_for_each,radix_tree_lookup
import argparse

parser = argparse.ArgumentParser(description=desc,
                                 formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('devname', metavar='DEV',
                    help='Target block device name (e.g. sda)')
parser.add_argument('--cgroup', action='append', metavar='REGEX',
                    help='Regex for target cgroups, ')
parser.add_argument('--interval', '-i', metavar='SECONDS', type=float, default=1,
                    help='Monitoring interval in seconds (0 exits immediately '
                    'after checking requirements)')
parser.add_argument('--json', action='store_true',
                    help='Output in json')
args = parser.parse_args()
def err(s):
    """Write *s* to stderr (flushed) and abort with exit status 1."""
    print(s, flush=True, file=sys.stderr)
    sys.exit(1)
# Fail fast when the kernel lacks CONFIG_BLK_CGROUP_IOCOST.
try:
    blkcg_root = prog['blkcg_root']
    plid = prog['blkcg_policy_iocost'].plid.value_()
except:
    err('The kernel does not have iocost enabled')

# Kernel-side constants, resolved once from the debugged kernel image.
IOC_RUNNING = prog['IOC_RUNNING'].value_()
WEIGHT_ONE = prog['WEIGHT_ONE'].value_()
VTIME_PER_SEC = prog['VTIME_PER_SEC'].value_()
VTIME_PER_USEC = prog['VTIME_PER_USEC'].value_()
AUTOP_SSD_FAST = prog['AUTOP_SSD_FAST'].value_()
AUTOP_SSD_DFL = prog['AUTOP_SSD_DFL'].value_()
AUTOP_SSD_QD1 = prog['AUTOP_SSD_QD1'].value_()
AUTOP_HDD = prog['AUTOP_HDD'].value_()
# auto-parameter index -> human readable name
autop_names = {
    AUTOP_SSD_FAST: 'ssd_fast',
    AUTOP_SSD_DFL: 'ssd_dfl',
    AUTOP_SSD_QD1: 'ssd_qd1',
    AUTOP_HDD: 'hdd',
}
class BlkgIterator:
    """Depth-first iterator over (cgroup path, blkcg_gq) pairs for one queue."""
    # NOTE(review): invoked as BlkgIterator.blkcg_name(blkcg) -- a plain
    # function stored on the class, not an instance method.
    def blkcg_name(blkcg):
        return blkcg.css.cgroup.kn.name.string_().decode('utf-8')

    def walk(self, blkcg, q_id, parent_path):
        # skip offline cgroups unless include_dying was requested
        if not self.include_dying and \
           not (blkcg.css.flags.value_() & prog['CSS_ONLINE'].value_()):
            return
        name = BlkgIterator.blkcg_name(blkcg)
        path = parent_path + '/' + name if parent_path else name
        blkg = drgn.Object(prog, 'struct blkcg_gq',
                           address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
        if not blkg.address_:
            return
        self.blkgs.append((path if path else '/', blkg))
        # recurse into child cgroups
        for c in list_for_each_entry('struct blkcg',
                blkcg.css.children.address_of_(), 'css.sibling'):
            self.walk(c, q_id, path)

    def __init__(self, root_blkcg, q_id, include_dying=False):
        self.include_dying = include_dying
        self.blkgs = []
        self.walk(root_blkcg, q_id, '')

    def __iter__(self):
        return iter(self.blkgs)
class IocStat:
    """Snapshot of the device-wide iocost controller state (struct ioc)."""
    def __init__(self, ioc):
        global autop_names
        self.enabled = ioc.enabled.value_()
        self.running = ioc.running.value_() == IOC_RUNNING
        self.period_ms = ioc.period_us.value_() / 1_000
        self.period_at = ioc.period_at.value_() / 1_000_000
        self.vperiod_at = ioc.period_at_vtime.value_() / VTIME_PER_SEC
        self.vrate_pct = ioc.vtime_base_rate.value_() * 100 / VTIME_PER_USEC
        self.busy_level = ioc.busy_level.value_()
        self.autop_idx = ioc.autop_idx.value_()
        self.user_cost_model = ioc.user_cost_model.value_()
        self.user_qos_params = ioc.user_qos_params.value_()
        # map the active auto-parameter index to a readable name
        if self.autop_idx in autop_names:
            self.autop_name = autop_names[self.autop_idx]
        else:
            self.autop_name = '?'

    def dict(self, now):
        """Return a JSON-serializable dict of the snapshot taken at *now*."""
        return { 'device'           : devname,
                 'timestamp'        : now,
                 'enabled'          : self.enabled,
                 'running'          : self.running,
                 'period_ms'        : self.period_ms,
                 'period_at'        : self.period_at,
                 'period_vtime_at'  : self.vperiod_at,
                 'busy_level'       : self.busy_level,
                 'vrate_pct'        : self.vrate_pct, }

    def table_preamble_str(self):
        """One-line device summary printed above the per-cgroup table."""
        state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF'
        output = f'{devname} {state:4} ' \
                 f'per={self.period_ms}ms ' \
                 f'cur_per={self.period_at:.3f}:v{self.vperiod_at:.3f} ' \
                 f'busy={self.busy_level:+3} ' \
                 f'vrate={self.vrate_pct:6.2f}% ' \
                 f'params={self.autop_name}'
        if self.user_cost_model or self.user_qos_params:
            output += f'({"C" if self.user_cost_model else ""}{"Q" if self.user_qos_params else ""})'
        return output

    def table_header_str(self):
        """Column header row for the per-cgroup table."""
        return f'{"":25} active {"weight":>9} {"hweight%":>13} {"inflt%":>6} ' \
               f'{"debt":>7} {"delay":>7} {"usage%"}'
class IocgStat:
    """Per-cgroup iocost statistics derived from one struct ioc_gq."""
    def __init__(self, iocg):
        ioc = iocg.ioc
        blkg = iocg.pd.blkg
        self.is_active = not list_empty(iocg.active_list.address_of_())
        # weights are stored in the kernel scaled by WEIGHT_ONE
        self.weight = iocg.weight.value_() / WEIGHT_ONE
        self.active = iocg.active.value_() / WEIGHT_ONE
        self.inuse = iocg.inuse.value_() / WEIGHT_ONE
        self.hwa_pct = iocg.hweight_active.value_() * 100 / WEIGHT_ONE
        self.hwi_pct = iocg.hweight_inuse.value_() * 100 / WEIGHT_ONE
        self.address = iocg.value_()
        vdone = iocg.done_vtime.counter.value_()
        vtime = iocg.vtime.counter.value_()
        vrate = ioc.vtime_rate.counter.value_()
        period_vtime = ioc.period_us.value_() * vrate
        # in-flight vtime as a percentage of one period's worth of vtime
        if period_vtime:
            self.inflight_pct = (vtime - vdone) * 100 / period_vtime
        else:
            self.inflight_pct = 0
        self.usage = (100 * iocg.usage_delta_us.value_() /
                      ioc.period_us.value_()) if self.active else 0
        self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
        if blkg.use_delay.counter.value_() != 0:
            self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
        else:
            self.delay_ms = 0

    def dict(self, now, path):
        """Return a JSON-serializable dict of the stats for cgroup *path*."""
        out = { 'cgroup'             : path,
                'timestamp'          : now,
                'is_active'          : self.is_active,
                'weight'             : self.weight,
                'weight_active'      : self.active,
                'weight_inuse'       : self.inuse,
                'hweight_active_pct' : self.hwa_pct,
                'hweight_inuse_pct'  : self.hwi_pct,
                'inflight_pct'       : self.inflight_pct,
                'debt_ms'            : self.debt_ms,
                'delay_ms'           : self.delay_ms,
                'usage_pct'          : self.usage,
                'address'            : self.address }
        return out

    def table_row_str(self, path):
        """Format one fixed-width table row for cgroup *path*."""
        out = f'{path[-28:]:28} ' \
              f'{"*" if self.is_active else " "} ' \
              f'{round(self.inuse):5}/{round(self.active):5} ' \
              f'{self.hwi_pct:6.2f}/{self.hwa_pct:6.2f} ' \
              f'{self.inflight_pct:6.2f} ' \
              f'{self.debt_ms:7.2f} ' \
              f'{self.delay_ms:7.2f} '\
              f'{min(self.usage, 999):6.2f}'
        # NOTE(review): rstrip(':') looks like a leftover -- the row never
        # ends with ':' as formatted above; confirm before removing
        out = out.rstrip(':')
        return out
# handle args
table_fmt = not args.json
interval = args.interval
devname = args.devname
if args.json:
    table_fmt = False

# combine all --cgroup regexes into one alternation
re_str = None
if args.cgroup:
    for r in args.cgroup:
        if re_str is None:
            re_str = r
        else:
            re_str += '|' + r
filter_re = re.compile(re_str) if re_str else None

# Locate the roots
q_id = None
root_iocg = None
ioc = None
for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree.address_of_()):
    blkg = drgn.Object(prog, 'struct blkcg_gq', address=ptr)
    try:
        if devname == blkg.q.kobj.parent.name.string_().decode('utf-8'):
            q_id = blkg.q.id.value_()
            if blkg.pd[plid]:
                root_iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd')
                ioc = root_iocg.ioc
            break
    except:
        # live-kernel races can make any of the reads above fault; skip
        pass

if ioc is None:
    err(f'Could not find ioc for {devname}');
# interval 0 means "just verify requirements" and exit successfully
if interval == 0:
    sys.exit(0)

# Keep printing
while True:
    now = time.time()
    iocstat = IocStat(ioc)
    output = ''
    if table_fmt:
        output += '\n' + iocstat.table_preamble_str()
        output += '\n' + iocstat.table_header_str()
    else:
        output += json.dumps(iocstat.dict(now))
    for path, blkg in BlkgIterator(blkcg_root, q_id):
        if filter_re and not filter_re.match(path):
            continue
        if not blkg.pd[plid]:
            continue
        iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd')
        iocg_stat = IocgStat(iocg)
        # without an explicit filter, only show currently active cgroups
        if not filter_re and not iocg_stat.is_active:
            continue
        if table_fmt:
            output += '\n' + iocg_stat.table_row_str(path)
        else:
            output += '\n' + json.dumps(iocg_stat.dict(now, path))
    print(output)
    sys.stdout.flush()
    time.sleep(interval)
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/cgroup/iocost_monitor.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
from subprocess import PIPE, Popen
import json
import time
import argparse
import collections
import sys
#
# Test port split configuration using devlink-port lanes attribute.
# The test is skipped in case the attribute is not available.
#
# First, check that all the ports with 1 lane fail to split.
# Second, check that all the ports with more than 1 lane can be split
# to all valid configurations (e.g., split to 2, split to 4 etc.)
#
# Kselftest framework requirement - SKIP code is 4
KSFT_SKIP=4
# One physical devlink port: bus_info is "bus/port", name is its netdev name.
Port = collections.namedtuple('Port', 'bus_info name')
def run_command(cmd, should_fail=False):
    """
    Run a command in subprocess.
    Return: Tuple of (stdout, stderr).
    """
    proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out_bytes, err_bytes = proc.communicate()
    out = out_bytes.decode()
    err = err_bytes.decode()
    # unexpected stderr output is logged but not fatal
    if err != "" and not should_fail:
        print("Error sending command: %s" % cmd)
        print(out)
        print(err)
    return out, err
class devlink_ports(object):
    """
    Class that holds information on the devlink ports, required to the tests;
    if_names: A list of interfaces in the devlink ports.
    """
    # NOTE(review): used as devlink_ports.get_if_names(dev) -- a plain
    # function stored on the class, not an instance method.
    def get_if_names(dev):
        """
        Get a list of physical devlink ports.
        Return: Array of tuples (bus_info/port, if_name).
        """
        arr = []
        cmd = "devlink -j port show"
        stdout, stderr = run_command(cmd)
        assert stderr == ""
        ports = json.loads(stdout)['port']
        for port in ports:
            if dev in port:
                if ports[port]['flavour'] == 'physical':
                    arr.append(Port(bus_info=port, name=ports[port]['netdev']))
        return arr

    def __init__(self, dev):
        self.if_names = devlink_ports.get_if_names(dev)
def get_max_lanes(port):
    """
    Get the $port's maximum number of lanes.
    Return: number of lanes, e.g. 1, 2, 4 and 8.
    """
    cmd = "devlink -j port show %s" % port
    stdout, stderr = run_command(cmd)
    assert stderr == ""
    values = list(json.loads(stdout)['port'].values())[0]
    if 'lanes' in values:
        lanes = values['lanes']
    else:
        # lanes attribute not supported by this device/kernel
        lanes = 0
    return lanes
def get_split_ability(port):
    """
    Get the $port split ability.
    Return: split ability, true or false.
    """
    cmd = "devlink -j port show %s" % port.name
    stdout, stderr = run_command(cmd)
    assert stderr == ""
    values = list(json.loads(stdout)['port'].values())[0]
    return values['splittable']
def split(k, port, should_fail=False):
    """
    Split $port into $k ports.
    If should_fail == True, the split should fail. Otherwise, should pass.
    Return: Array of sub ports after splitting.
    If the $port wasn't split, the array will be empty.
    """
    cmd = "devlink port split %s count %s" % (port.bus_info, k)
    stdout, stderr = run_command(cmd, should_fail=should_fail)
    if should_fail:
        # split unexpectedly succeeded; return the group so the caller
        # can unsplit it again
        if not test(stderr != "", "%s is unsplittable" % port.name):
            print("split an unsplittable port %s" % port.name)
            return create_split_group(port, k)
    else:
        if stderr == "":
            return create_split_group(port, k)
        print("didn't split a splittable port %s" % port.name)
    return []
def unsplit(port):
    """
    Unsplit $port.
    """
    cmd = "devlink port unsplit %s" % port
    stdout, stderr = run_command(cmd)
    test(stderr == "", "Unsplit port %s" % port)
def exists(port, dev):
    """
    Check if $port exists in the devlink ports.
    Return: True is so, False otherwise.
    """
    for dev_port in devlink_ports.get_if_names(dev):
        if dev_port.name == port:
            return True
    return False
def exists_and_lanes(ports, lanes, dev):
    """
    Check if every port in the list $ports exists in the devlink ports and has
    $lanes number of lanes after splitting.
    Return: True if both are True, False otherwise.
    """
    for port in ports:
        max_lanes = get_max_lanes(port)
        if not exists(port, dev):
            print("port %s doesn't exist in devlink ports" % port)
            return False
        if max_lanes != lanes:
            # bug fix: the port HAS max_lanes; $lanes were expected
            # (the original printed the two values in swapped order)
            print("port %s has %d lanes, but %s were expected"
                  % (port, max_lanes, lanes))
            return False
    return True
def test(cond, msg):
"""
Check $cond and print a message accordingly.
Return: True is pass, False otherwise.
"""
if cond:
print("TEST: %-60s [ OK ]" % msg)
else:
print("TEST: %-60s [FAIL]" % msg)
return cond
def create_split_group(port, k):
    """
    Create the split group for $port.
    Return: Array with $k elements, which are the split port group.
    """
    return [port.name + "s" + str(index) for index in range(k)]
def split_unsplittable_port(port, k):
    """
    Test that splitting of unsplittable port fails.
    """
    # split to max
    new_split_group = split(k, port, should_fail=True)
    # if the split unexpectedly succeeded, undo it
    if new_split_group != []:
        unsplit(port.bus_info)
def split_splittable_port(port, k, lanes, dev):
    """
    Test that splitting of splittable port passes correctly.
    """
    new_split_group = split(k, port)
    # Once the split command ends, it takes some time to the sub ifaces'
    # to get their names. Use udevadm to continue only when all current udev
    # events are handled.
    cmd = "udevadm settle"
    stdout, stderr = run_command(cmd)
    assert stderr == ""
    if new_split_group != []:
        # NOTE(review): lanes/k is true division (float); the later integer
        # comparison still holds because 2.0 == 2 in Python
        test(exists_and_lanes(new_split_group, lanes/k, dev),
             "split port %s into %s" % (port.name, k))
    unsplit(port.bus_info)
def make_parser():
    """Build and return the command-line parser for this selftest."""
    cli = argparse.ArgumentParser(description='A test for port splitting.')
    cli.add_argument('--dev',
                     help='The devlink handle of the device under test. ' +
                          'The default is the first registered devlink ' +
                          'handle.')
    return cli
def main(cmdline=None):
    """Run the port-split test over every physical port of the chosen device."""
    parser = make_parser()
    args = parser.parse_args(cmdline)
    dev = args.dev
    if not dev:
        # default to the first registered devlink device, or skip
        cmd = "devlink -j dev show"
        stdout, stderr = run_command(cmd)
        assert stderr == ""
        devs = json.loads(stdout)['dev']
        if devs:
            dev = list(devs.keys())[0]
        else:
            print("no devlink device was found, test skipped")
            sys.exit(KSFT_SKIP)
    cmd = "devlink dev show %s" % dev
    stdout, stderr = run_command(cmd)
    if stderr != "":
        print("devlink device %s can not be found" % dev)
        sys.exit(1)
    ports = devlink_ports(dev)
    for port in ports.if_names:
        max_lanes = get_max_lanes(port.name)
        # If max lanes is 0, do not test port splitting at all
        if max_lanes == 0:
            continue
        # If 1 lane, shouldn't be able to split
        elif max_lanes == 1:
            test(not get_split_ability(port),
                 "%s should not be able to split" % port.name)
            split_unsplittable_port(port, max_lanes)
        # Else, splitting should pass and all the split ports should exist.
        else:
            lane = max_lanes
            test(get_split_ability(port),
                 "%s should be able to split" % port.name)
            # try every valid split count: max, max/2, ... down to 2
            while lane > 1:
                split_splittable_port(port, lane, max_lanes, dev)
                lane //= 2
# standard script entry guard
if __name__ == "__main__":
    main()
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/net/devlink_port_split.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
from struct import pack
from time import sleep
import errno
import glob
import os
import subprocess
# pytest is a hard requirement; explain how to install it and bail out
try:
    import pytest
except ImportError:
    print("Unable to import pytest python module.")
    print("\nIf not already installed, you may do so with:")
    print("\t\tpip3 install pytest")
    exit(1)

# One auxiliary SDSi device is exposed per CPU socket.
SOCKETS = glob.glob('/sys/bus/auxiliary/devices/intel_vsec.sdsi.*')
NUM_SOCKETS = len(SOCKETS)
MODULE_NAME = 'intel_sdsi'
DEV_PREFIX = 'intel_vsec.sdsi'
CLASS_DIR = '/sys/bus/auxiliary/devices'
GUID = "0x6dd191"
def read_bin_file(file):
    """Return the entire contents of *file* as bytes."""
    with open(file, mode='rb') as handle:
        return handle.read()
def get_dev_file_path(socket, file):
    """Return the sysfs path of *file* under the given socket's SDSi device."""
    device = DEV_PREFIX + '.' + str(socket)
    return CLASS_DIR + '/' + device + '/' + file
def kmemleak_enabled():
    """True when the kernel exposes the kmemleak debugfs interface."""
    return os.path.isfile("/sys/kernel/debug/kmemleak")
class TestSDSiDriver:
    def test_driver_loaded(self):
        # grep -q exits non-zero (raising CalledProcessError) when the
        # module name is absent from lsmod output
        lsmod_p = subprocess.Popen(('lsmod'), stdout=subprocess.PIPE)
        result = subprocess.check_output(('grep', '-q', MODULE_NAME), stdin=lsmod_p.stdout)
@pytest.mark.parametrize('socket', range(0, NUM_SOCKETS))
class TestSDSiFilesClass:
    """Per-socket checks of the intel_sdsi sysfs attribute files."""
    def read_value(self, file):
        # helper: read one sysfs value, stripping the trailing newline
        f = open(file, "r")
        value = f.read().strip("\n")
        return value

    def get_dev_folder(self, socket):
        # sysfs directory of this socket's SDSi auxiliary device
        return CLASS_DIR + '/' + DEV_PREFIX + '.' + str(socket) + '/'

    def test_sysfs_files_exist(self, socket):
        folder = self.get_dev_folder(socket)
        print (folder)
        assert os.path.isfile(folder + "guid") == True
        assert os.path.isfile(folder + "provision_akc") == True
        assert os.path.isfile(folder + "provision_cap") == True
        assert os.path.isfile(folder + "state_certificate") == True
        assert os.path.isfile(folder + "registers") == True

    def test_sysfs_file_permissions(self, socket):
        folder = self.get_dev_folder(socket)
        mode = os.stat(folder + "guid").st_mode & 0o777
        assert mode == 0o444    # Read all
        mode = os.stat(folder + "registers").st_mode & 0o777
        assert mode == 0o400    # Read owner
        mode = os.stat(folder + "provision_akc").st_mode & 0o777
        assert mode == 0o200    # Write owner
        mode = os.stat(folder + "provision_cap").st_mode & 0o777
        assert mode == 0o200    # Write owner
        mode = os.stat(folder + "state_certificate").st_mode & 0o777
        assert mode == 0o400    # Read owner

    def test_sysfs_file_ownership(self, socket):
        # every attribute must be owned by root:root
        folder = self.get_dev_folder(socket)
        st = os.stat(folder + "guid")
        assert st.st_uid == 0
        assert st.st_gid == 0
        st = os.stat(folder + "registers")
        assert st.st_uid == 0
        assert st.st_gid == 0
        st = os.stat(folder + "provision_akc")
        assert st.st_uid == 0
        assert st.st_gid == 0
        st = os.stat(folder + "provision_cap")
        assert st.st_uid == 0
        assert st.st_gid == 0
        st = os.stat(folder + "state_certificate")
        assert st.st_uid == 0
        assert st.st_gid == 0

    def test_sysfs_file_sizes(self, socket):
        folder = self.get_dev_folder(socket)
        # the registers size check only applies to the known GUID layout
        if self.read_value(folder + "guid") == GUID:
            st = os.stat(folder + "registers")
            assert st.st_size == 72
        st = os.stat(folder + "provision_akc")
        assert st.st_size == 1024
        st = os.stat(folder + "provision_cap")
        assert st.st_size == 1024
        st = os.stat(folder + "state_certificate")
        assert st.st_size == 4096

    def test_no_seek_allowed(self, socket):
        # the provisioning attributes must reject writes at a nonzero offset
        folder = self.get_dev_folder(socket)
        rand_file = bytes(os.urandom(8))
        f = open(folder + "provision_cap", "wb", 0)
        f.seek(1)
        with pytest.raises(OSError) as error:
            f.write(rand_file)
        assert error.value.errno == errno.ESPIPE
        f.close()
        f = open(folder + "provision_akc", "wb", 0)
        f.seek(1)
        with pytest.raises(OSError) as error:
            f.write(rand_file)
        assert error.value.errno == errno.ESPIPE
        f.close()

    def test_registers_seek(self, socket):
        folder = self.get_dev_folder(socket)
        # Check that the value read from an offset of the entire
        # file is none-zero and the same as the value read
        # from seeking to the same location
        f = open(folder + "registers", "rb")
        data = f.read()
        f.seek(64)
        id = f.read()
        assert id != bytes(0)
        assert data[64:] == id
        f.close()
@pytest.mark.parametrize('socket', range(0, NUM_SOCKETS))
class TestSDSiMailboxCmdsClass:
    def test_provision_akc_eoverflow_1017_bytes(self, socket):
        # The buffer for writes is 1k, of which 8 bytes must be
        # reserved for the command, leaving 1016 bytes max.
        # Check that we get an overflow error for 1017 bytes.
        node = get_dev_file_path(socket, "provision_akc")
        rand_file = bytes(os.urandom(1017))
        f = open(node, 'wb', 0)
        with pytest.raises(OSError) as error:
            f.write(rand_file)
        assert error.value.errno == errno.EOVERFLOW
        f.close()
@pytest.mark.parametrize('socket', range(0, NUM_SOCKETS))
class TestSdsiDriverLocksClass:
    def test_enodev_when_pci_device_removed(self, socket):
        # handles opened before an unbind must fail writes with ENODEV
        node = get_dev_file_path(socket, "provision_akc")
        dev_name = DEV_PREFIX + '.' + str(socket)
        driver_dir = CLASS_DIR + '/' + dev_name + "/driver/"
        rand_file = bytes(os.urandom(8))
        f = open(node, 'wb', 0)
        g = open(node, 'wb', 0)
        with open(driver_dir + 'unbind', 'w') as k:
            print(dev_name, file = k)
        with pytest.raises(OSError) as error:
            f.write(rand_file)
        assert error.value.errno == errno.ENODEV
        with pytest.raises(OSError) as error:
            g.write(rand_file)
        assert error.value.errno == errno.ENODEV
        f.close()
        g.close()
        # Short wait needed to allow file to close before pulling driver
        sleep(1)
        p = subprocess.Popen(('modprobe', '-r', 'intel_sdsi'))
        p.wait()
        p = subprocess.Popen(('modprobe', '-r', 'intel_vsec'))
        p.wait()
        p = subprocess.Popen(('modprobe', 'intel_vsec'))
        p.wait()
        # Short wait needed to allow driver time to get inserted
        # before continuing tests
        sleep(1)

    def test_memory_leak(self, socket):
        # unbind + remove the modules, then a kmemleak scan must stay empty
        if not kmemleak_enabled():
            pytest.skip("kmemleak not enabled in kernel")
        dev_name = DEV_PREFIX + '.' + str(socket)
        driver_dir = CLASS_DIR + '/' + dev_name + "/driver/"
        with open(driver_dir + 'unbind', 'w') as k:
            print(dev_name, file = k)
        sleep(1)
        subprocess.check_output(('modprobe', '-r', 'intel_sdsi'))
        subprocess.check_output(('modprobe', '-r', 'intel_vsec'))
        with open('/sys/kernel/debug/kmemleak', 'w') as f:
            print('scan', file = f)
        sleep(5)
        assert os.stat('/sys/kernel/debug/kmemleak').st_size == 0
        subprocess.check_output(('modprobe', 'intel_vsec'))
        sleep(1)
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/drivers/sdsi/sdsi_test.py
|
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
import subprocess
import json as j
import random
class SkipTest(Exception):
    """Raised when a test case should be skipped rather than failed."""
    pass
class RandomValuePicker:
    """
    Class for storing shared buffer configuration. Can handle 3 different
    objects, pool, tcbind and portpool. Provide an interface to get random
    values for a specific object type as the follow:
    1. Pool:
    - random size
    2. TcBind:
    - random pool number
    - random threshold
    3. PortPool:
    - random threshold
    """
    def __init__(self, pools):
        self._pools = []
        for pool in pools:
            self._pools.append(pool)

    def _cell_size(self):
        # all pools share the device cell size; read it off the first one
        return self._pools[0]["cell_size"]

    def _get_static_size(self, th):
        # For threshold of 16, this works out to be about 12MB on Spectrum-1,
        # and about 17MB on Spectrum-2.
        return th * 8000 * self._cell_size()

    def _get_size(self):
        return self._get_static_size(16)

    def _get_thtype(self):
        return "static"

    def _get_th(self, pool):
        # Threshold value could be any integer between 3 to 16
        th = random.randint(3, 16)
        if pool["thtype"] == "dynamic":
            return th
        else:
            # static thresholds are expressed in bytes, not a multiplier
            return self._get_static_size(th)

    def _get_pool(self, direction):
        # pick a random pool matching the requested traffic direction
        ing_pools = []
        egr_pools = []
        for pool in self._pools:
            if pool["type"] == "ingress":
                ing_pools.append(pool)
            else:
                egr_pools.append(pool)
        if direction == "ingress":
            arr = ing_pools
        else:
            arr = egr_pools
        return arr[random.randint(0, len(arr) - 1)]

    def get_value(self, objid):
        """Return a random valid value tuple for *objid* (Pool, TcBind or PortPool)."""
        if isinstance(objid, Pool):
            if objid["pool"] in [4, 8, 9, 10]:
                # The threshold type of pools 4, 8, 9 and 10 cannot be changed
                raise SkipTest()
            else:
                return (self._get_size(), self._get_thtype())
        if isinstance(objid, TcBind):
            if objid["tc"] >= 8:
                # Multicast TCs cannot be changed
                raise SkipTest()
            else:
                pool = self._get_pool(objid["type"])
                th = self._get_th(pool)
                pool_n = pool["pool"]
                return (pool_n, th)
        if isinstance(objid, PortPool):
            pool_n = objid["pool"]
            pool = self._pools[pool_n]
            assert pool["pool"] == pool_n
            th = self._get_th(pool)
            return (th,)
class RecordValuePickerException(Exception):
    # Raised when no recorded value exists for the requested object.
    pass
class RecordValuePicker:
    """
    Class for storing shared buffer configuration. Can handle 2 different
    objects, pool and tcbind. Provide an interface to get the stored values per
    object type.
    """
    def __init__(self, objlist):
        # Remember each object's current mutable values so that the original
        # configuration can be restored after the randomized run.
        self._recs = [{"objid": item, "value": item.var_tuple()}
                      for item in objlist]

    def get_value(self, objid):
        """Return the recorded value tuple for *objid*.

        Raises SkipTest for objects whose settings cannot be changed and
        RecordValuePickerException when no record matches.
        """
        if isinstance(objid, Pool) and objid["pool"] in [4, 8, 9, 10]:
            # The threshold type of pools 4, 8, 9 and 10 cannot be changed
            raise SkipTest()
        if isinstance(objid, TcBind) and objid["tc"] >= 8:
            # Multicast TCs cannot be changed
            raise SkipTest()
        for rec in self._recs:
            if rec["objid"].weak_eq(objid):
                return rec["value"]
        raise RecordValuePickerException()
def run_cmd(cmd, json=False):
    """Run *cmd* through the shell and return its stdout as raw bytes,
    or as a decoded JSON object when *json* is True.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    out = subprocess.check_output(cmd, shell=True)
    return j.loads(out) if json else out


def run_json_cmd(cmd):
    """Shorthand for run_cmd(cmd, json=True)."""
    return run_cmd(cmd, json=True)
def log_test(test_name, err_msg=None):
    """Print a one-line PASS/FAIL verdict for *test_name*.

    A truthy *err_msg* marks the test as failed and is printed first.
    """
    if not err_msg:
        print("TEST: %-80s [ OK ]" % test_name)
    else:
        print("\t%s" % err_msg)
        print("TEST: %-80s [FAIL]" % test_name)
class CommonItem(dict):
    """A devlink object record (a dict) whose *varitems* keys are the
    mutable settings; all other keys identify the object."""

    # Keys a test run may change; subclasses override this.
    varitems = []

    def var_tuple(self):
        """Return the mutable values as a tuple, in sorted key order."""
        # Sort in place so the order is stable across calls (matches the
        # original behavior of mutating varitems).
        self.varitems.sort()
        return tuple(self[key] for key in self.varitems)

    def weak_eq(self, other):
        """Compare on identity keys only, ignoring the mutable varitems."""
        return all(self[key] == other[key]
                   for key in self if key not in self.varitems)
class CommonList(list):
    """List of CommonItem-like objects, looked up by weak equality."""

    def get_by(self, by_obj):
        """Return the first element weak-equal to *by_obj*, or None."""
        return next((item for item in self if item.weak_eq(by_obj)), None)

    def del_by(self, by_obj):
        """Remove elements weak-equal to *by_obj*.

        NOTE(review): removing while iterating skips the element right after
        each removal, so duplicates may survive — matches the original code,
        which assumes at most one match per identity.
        """
        for item in self:
            if item.weak_eq(by_obj):
                self.remove(item)
class Pool(CommonItem):
    # Mutable settings of a shared-buffer pool.
    varitems = ["size", "thtype"]

    def dl_set(self, dlname, size, thtype):
        """Apply *size*/*thtype* to this pool via 'devlink sb pool set'."""
        run_cmd("devlink sb pool set {} sb {} pool {} size {} thtype {}".format(dlname, self["sb"],
                                                                                self["pool"],
                                                                                size, thtype))
class PoolList(CommonList):
    # CommonList specialized (by name only) for Pool objects.
    pass
def get_pools(dlname, direction=None):
    """Return the shared-buffer pools of devlink device *dlname*.

    When *direction* is given ("ingress"/"egress"), only pools of that
    type are returned.
    """
    d = run_json_cmd("devlink sb pool show -j")
    pools = PoolList()
    for pooldict in d["pool"][dlname]:
        if not direction or direction == pooldict["type"]:
            pools.append(Pool(pooldict))
    return pools
def do_check_pools(dlname, pools, vp):
    """For every pool, apply the settings supplied by value picker *vp*,
    then verify via devlink that only the targeted pool changed.

    Verdicts are reported through log_test(); objects *vp* refuses to
    handle (SkipTest) are silently skipped.
    """
    for pool in pools:
        pre_pools = get_pools(dlname)
        try:
            (size, thtype) = vp.get_value(pool)
        except SkipTest:
            # Read-only pool: nothing to set or verify.
            continue
        pool.dl_set(dlname, size, thtype)
        post_pools = get_pools(dlname)
        # Re-read the pool we just set, by identity.
        pool = post_pools.get_by(pool)

        err_msg = None
        if pool["size"] != size:
            err_msg = "Incorrect pool size (got {}, expected {})".format(pool["size"], size)
        if pool["thtype"] != thtype:
            err_msg = "Incorrect pool threshold type (got {}, expected {})".format(pool["thtype"], thtype)

        # With the target removed from both snapshots, the rest must match.
        pre_pools.del_by(pool)
        post_pools.del_by(pool)
        if pre_pools != post_pools:
            err_msg = "Other pool setup changed as well"
        log_test("pool {} of sb {} set verification".format(pool["pool"],
                                                            pool["sb"]), err_msg)
def check_pools(dlname, pools):
    """Randomize every pool's settings, verify, then restore the defaults."""
    # Save defaults
    record_vp = RecordValuePicker(pools)

    # For each pool, set random size and static threshold type
    do_check_pools(dlname, pools, RandomValuePicker(pools))

    # Restore defaults
    do_check_pools(dlname, pools, record_vp)
class TcBind(CommonItem):
    # Mutable settings of a TC-to-pool binding.
    varitems = ["pool", "threshold"]

    def __init__(self, port, d):
        super(TcBind, self).__init__(d)
        # Remember which devlink port this binding belongs to.
        self["dlportname"] = port.name

    def dl_set(self, pool, th):
        """Bind this TC to *pool* with threshold *th* via devlink."""
        run_cmd("devlink sb tc bind set {} sb {} tc {} type {} pool {} th {}".format(self["dlportname"],
                                                                                     self["sb"],
                                                                                     self["tc"],
                                                                                     self["type"],
                                                                                     pool, th))
class TcBindList(CommonList):
    # CommonList specialized (by name only) for TcBind objects.
    pass
def get_tcbinds(ports, verify_existence=False):
    """Return the TC bindings of *ports* from 'devlink sb tc bind show'.

    With *verify_existence*, a log_test() verdict is emitted per port,
    failing for ports that report no bindings.
    """
    d = run_json_cmd("devlink sb tc bind show -j -n")
    tcbinds = TcBindList()
    for port in ports:
        err_msg = None
        if port.name not in d["tc_bind"] or len(d["tc_bind"][port.name]) == 0:
            err_msg = "No tc bind for port"
        else:
            for tcbinddict in d["tc_bind"][port.name]:
                tcbinds.append(TcBind(port, tcbinddict))
        if verify_existence:
            log_test("tc bind existence for port {} verification".format(port.name), err_msg)
    return tcbinds
def do_check_tcbind(ports, tcbinds, vp):
    """For every TC binding, apply the pool/threshold supplied by *vp* and
    verify via devlink that only the targeted binding changed.

    Verdicts are reported through log_test(); objects *vp* refuses to
    handle (SkipTest, e.g. multicast TCs) are silently skipped.
    """
    for tcbind in tcbinds:
        pre_tcbinds = get_tcbinds(ports)
        try:
            (pool, th) = vp.get_value(tcbind)
        except SkipTest:
            continue
        tcbind.dl_set(pool, th)
        post_tcbinds = get_tcbinds(ports)
        # Re-read the binding we just set, by identity.
        tcbind = post_tcbinds.get_by(tcbind)

        err_msg = None
        if tcbind["pool"] != pool:
            err_msg = "Incorrect pool (got {}, expected {})".format(tcbind["pool"], pool)
        if tcbind["threshold"] != th:
            err_msg = "Incorrect threshold (got {}, expected {})".format(tcbind["threshold"], th)

        # With the target removed from both snapshots, the rest must match.
        pre_tcbinds.del_by(tcbind)
        post_tcbinds.del_by(tcbind)
        if pre_tcbinds != post_tcbinds:
            err_msg = "Other tc bind setup changed as well"
        log_test("tc bind {}-{} of sb {} set verification".format(tcbind["dlportname"],
                                                                  tcbind["tc"],
                                                                  tcbind["sb"]), err_msg)
def check_tcbind(dlname, ports, pools):
    """Randomize every TC binding, verify, then restore the defaults."""
    tcbinds = get_tcbinds(ports, verify_existence=True)

    # Save defaults
    record_vp = RecordValuePicker(tcbinds)

    # Bind each port and unicast TC (TCs < 8) to a random pool and a random
    # threshold
    do_check_tcbind(ports, tcbinds, RandomValuePicker(pools))

    # Restore defaults
    do_check_tcbind(ports, tcbinds, record_vp)
class PortPool(CommonItem):
    # Mutable settings of a per-port pool quota.
    varitems = ["threshold"]

    def __init__(self, port, d):
        super(PortPool, self).__init__(d)
        # Remember which devlink port this quota belongs to.
        self["dlportname"] = port.name

    def dl_set(self, th):
        """Set this port-pool's threshold via devlink."""
        run_cmd("devlink sb port pool set {} sb {} pool {} th {}".format(self["dlportname"],
                                                                         self["sb"],
                                                                         self["pool"], th))
class PortPoolList(CommonList):
    # CommonList specialized (by name only) for PortPool objects.
    pass
def get_portpools(ports, verify_existence=False):
    """Return the per-port pool quotas of *ports* from 'devlink sb port pool'.

    With *verify_existence*, a log_test() verdict is emitted per port,
    failing for ports that report no port pools.
    """
    d = run_json_cmd("devlink sb port pool -j -n")
    portpools = PortPoolList()
    for port in ports:
        err_msg = None
        if port.name not in d["port_pool"] or len(d["port_pool"][port.name]) == 0:
            err_msg = "No port pool for port"
        else:
            for portpooldict in d["port_pool"][port.name]:
                portpools.append(PortPool(port, portpooldict))
        if verify_existence:
            log_test("port pool existence for port {} verification".format(port.name), err_msg)
    return portpools
def do_check_portpool(ports, portpools, vp):
    """For every port pool, apply the threshold supplied by *vp* and verify
    via devlink that only the targeted port pool changed.

    No SkipTest handling is needed here: neither value picker raises it for
    PortPool objects.
    """
    for portpool in portpools:
        pre_portpools = get_portpools(ports)
        (th,) = vp.get_value(portpool)
        portpool.dl_set(th)
        post_portpools = get_portpools(ports)
        # Re-read the port pool we just set, by identity.
        portpool = post_portpools.get_by(portpool)

        err_msg = None
        if portpool["threshold"] != th:
            err_msg = "Incorrect threshold (got {}, expected {})".format(portpool["threshold"], th)

        # With the target removed from both snapshots, the rest must match.
        pre_portpools.del_by(portpool)
        post_portpools.del_by(portpool)
        if pre_portpools != post_portpools:
            err_msg = "Other port pool setup changed as well"
        log_test("port pool {}-{} of sb {} set verification".format(portpool["dlportname"],
                                                                    portpool["pool"],
                                                                    portpool["sb"]), err_msg)
def check_portpool(dlname, ports, pools):
    """Randomize every port pool threshold, verify, then restore defaults."""
    portpools = get_portpools(ports, verify_existence=True)

    # Save defaults
    record_vp = RecordValuePicker(portpools)

    # For each port pool, set a random threshold
    do_check_portpool(ports, portpools, RandomValuePicker(pools))

    # Restore defaults
    do_check_portpool(ports, portpools, record_vp)
class Port:
    """Thin wrapper holding the identifier of one devlink port."""

    def __init__(self, name):
        # Full devlink port name as reported by 'devlink port show'.
        self.name = name
class PortList(list):
    # Plain list specialized (by name only) for Port objects.
    pass
def get_ports(dlname):
    """Return the physical ports of devlink device *dlname*."""
    d = run_json_cmd("devlink port show -j")
    ports = PortList()
    for name in d["port"]:
        # Keep only ports that belong to this device and are physical.
        if name.find(dlname) == 0 and d["port"][name]["flavour"] == "physical":
            ports.append(Port(name))
    return ports
def get_device():
    """Return the devlink handle of the first mlxsw_spectrum device,
    or None when no such device is present."""
    devices_info = run_json_cmd("devlink -j dev info")["info"]
    for d in devices_info:
        if "mlxsw_spectrum" in devices_info[d]["driver"]:
            return d
    return None
class UnavailableDevlinkNameException(Exception):
    # Raised when no mlxsw_spectrum devlink device could be found.
    pass
def test_sb_configuration():
    """Entry point: exercise pool, tcbind and portpool shared-buffer
    settings of the first mlxsw_spectrum devlink device."""
    # Use static seed
    random.seed(0)

    dlname = get_device()
    if not dlname:
        raise UnavailableDevlinkNameException()

    ports = get_ports(dlname)
    pools = get_pools(dlname)

    check_pools(dlname, pools)
    check_tcbind(dlname, ports, pools)
    check_portpool(dlname, ports, pools)

# Runs immediately when the script is executed (no __main__ guard).
test_sb_configuration()
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py
|
#!/usr/bin/env python3
from enum import Enum
class ResultState(Enum):
    """Outcome of a single test case."""
    noresult = -1
    skip = 0
    success = 1
    fail = 2
class TestResult:
    """Holds the outcome, messages and executed steps of one test case."""

    def __init__(self, test_id="", test_name=""):
        self.test_id = test_id
        self.test_name = test_name
        self.result = ResultState.noresult
        self.failmsg = ""
        self.errormsg = ""
        self.steps = []

    def set_result(self, result):
        """Set the result state; raises TypeError for non-ResultState values."""
        if isinstance(result, ResultState):
            self.result = result
            return True
        else:
            raise TypeError('Unknown result type, must be type ResultState')

    def get_result(self):
        return self.result

    def set_errormsg(self, errormsg):
        self.errormsg = errormsg
        return True

    def append_errormsg(self, errormsg):
        self.errormsg = '{}\n{}'.format(self.errormsg, errormsg)

    def get_errormsg(self):
        return self.errormsg

    def set_failmsg(self, failmsg):
        self.failmsg = failmsg
        return True

    def append_failmsg(self, failmsg):
        self.failmsg = '{}\n{}'.format(self.failmsg, failmsg)

    def get_failmsg(self):
        return self.failmsg

    def add_steps(self, newstep):
        """Record executed command step(s); accepts a list or a single str."""
        if isinstance(newstep, list):
            self.steps.extend(newstep)
        elif isinstance(newstep, str):
            # Bug fix: the str branch appended the undefined name 'step',
            # raising NameError; it must append 'newstep'.
            self.steps.append(newstep)
        else:
            raise TypeError('TdcResults.add_steps() requires a list or str')

    def get_executed_steps(self):
        return self.steps
class TestSuiteReport():
    """Collects TestResult objects and renders TAP or xUnit reports."""

    def __init__(self):
        # Bug fix: _testsuite used to be a class attribute, so every
        # instance shared (and accumulated into) the same list. Make it
        # per-instance state instead.
        self._testsuite = []

    def add_resultdata(self, result_data):
        """Append a TestResult; silently ignores other types."""
        if isinstance(result_data, TestResult):
            self._testsuite.append(result_data)
            return True

    def count_tests(self):
        return len(self._testsuite)

    def count_failures(self):
        return sum(1 for t in self._testsuite if t.result == ResultState.fail)

    def count_skips(self):
        return sum(1 for t in self._testsuite if t.result == ResultState.skip)

    def find_result(self, test_id):
        """Return the stored result with *test_id*, or None."""
        return next((tr for tr in self._testsuite if tr.test_id == test_id), None)

    def update_result(self, result_data):
        """Replace the result with the same test_id, or append a new one."""
        orig = self.find_result(result_data.test_id)
        if orig is not None:
            idx = self._testsuite.index(orig)
            self._testsuite[idx] = result_data
        else:
            self.add_resultdata(result_data)

    def format_tap(self):
        """Render the suite as TAP output."""
        ftap = ""
        ftap += '1..{}\n'.format(self.count_tests())
        index = 1
        for t in self._testsuite:
            if t.result == ResultState.fail:
                ftap += 'not '
            ftap += 'ok {} {} - {}'.format(str(index), t.test_id, t.test_name)
            if t.result == ResultState.skip or t.result == ResultState.noresult:
                ftap += ' # skipped - {}\n'.format(t.errormsg)
            elif t.result == ResultState.fail:
                if len(t.steps) > 0:
                    ftap += '\tCommands executed in this test case:'
                    for step in t.steps:
                        ftap += '\n\t\t{}'.format(step)
                ftap += '\n\t{}'.format(t.failmsg)
            ftap += '\n'
            index += 1
        return ftap

    def format_xunit(self):
        """Render the suite as xUnit XML."""
        from xml.sax.saxutils import escape
        xunit = "<testsuites>\n"
        xunit += '\t<testsuite tests=\"{}\" skips=\"{}\">\n'.format(self.count_tests(), self.count_skips())
        for t in self._testsuite:
            xunit += '\t\t<testcase classname=\"{}\" '.format(escape(t.test_id))
            xunit += 'name=\"{}\">\n'.format(escape(t.test_name))
            if t.failmsg:
                xunit += '\t\t\t<failure>\n'
                if len(t.steps) > 0:
                    xunit += 'Commands executed in this test case:\n'
                    for step in t.steps:
                        xunit += '\t{}\n'.format(escape(step))
                xunit += 'FAILURE: {}\n'.format(escape(t.failmsg))
                xunit += '\t\t\t</failure>\n'
            if t.errormsg:
                xunit += '\t\t\t<error>\n{}\n'.format(escape(t.errormsg))
                xunit += '\t\t\t</error>\n'
            if t.result == ResultState.skip:
                xunit += '\t\t\t<skipped/>\n'
            xunit += '\t\t</testcase>\n'
        xunit += '\t</testsuite>\n'
        xunit += '</testsuites>\n'
        return xunit
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/TdcResults.py
|
"""
tdc_config_local.py - tdc plugin-writer-specified values
Copyright (C) 2017 bjb@mojatatu.com
"""
import os
ENVIR = os.environ.copy()
ENV_LD_LIBRARY_PATH = os.getenv('LD_LIBRARY_PATH', '')
ENV_OTHER_LIB = os.getenv('OTHER_LIB', '')
# example adding value to NAMES, without editing tdc_config.py
EXTRA_NAMES = dict()
EXTRA_NAMES['SOME_BIN'] = os.path.join(os.getenv('OTHER_BIN', ''), 'some_bin')
# example adding values to ENVIR, without editing tdc_config.py
ENVIR['VALGRIND_LIB'] = '/usr/lib/valgrind'
ENVIR['VALGRIND_BIN'] = '/usr/bin/valgrind'
ENVIR['VGDB_BIN'] = '/usr/bin/vgdb'
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/tdc_config_local_template.py
|
#!/usr/bin/env python3
class TdcPlugin:
    """Base class for tdc plugins; subclasses set self.sub_class and
    override the hooks they need."""

    def __init__(self):
        super().__init__()
        print(' -- {}.__init__'.format(self.sub_class))

    def _trace(self, what):
        # Emit a ' -- <sub_class>.<what>' trace line at high verbosity.
        if self.args.verbose > 1:
            print(' -- {}.{}'.format(self.sub_class, what))

    def pre_suite(self, testcount, testidlist):
        '''run commands before test_runner goes into a test loop'''
        self.testcount = testcount
        self.testidlist = testidlist
        self._trace('pre_suite')

    def post_suite(self, index):
        '''run commands after test_runner completes the test loop
        index is the last ordinal number of test that was attempted'''
        self._trace('post_suite')

    def pre_case(self, caseinfo, test_skip):
        '''run commands before test_runner does one test'''
        self._trace('pre_case')
        self.args.caseinfo = caseinfo
        self.args.test_skip = test_skip

    def post_case(self):
        '''run commands after test_runner does one test'''
        self._trace('post_case')

    def pre_execute(self):
        '''run command before test-runner does the execute step'''
        self._trace('pre_execute')

    def post_execute(self):
        '''run command after test-runner does the execute step'''
        self._trace('post_execute')

    def adjust_command(self, stage, command):
        '''adjust the command'''
        self._trace('adjust_command {}'.format(stage))

        # if stage == 'pre':
        #     pass
        # elif stage == 'setup':
        #     pass
        # elif stage == 'execute':
        #     pass
        # elif stage == 'verify':
        #     pass
        # elif stage == 'teardown':
        #     pass
        # elif stage == 'post':
        #     pass
        # else:
        #     pass

        return command

    def add_args(self, parser):
        '''Get the plugin args from the command line'''
        self.argparser = parser
        return self.argparser

    def check_args(self, args, remaining):
        '''Check that the args are set correctly'''
        self.args = args
        self._trace('check_args')
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/TdcPlugin.py
|
"""
# SPDX-License-Identifier: GPL-2.0
tdc_helper.py - tdc helper functions
Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
"""
def get_categorized_testlist(alltests, ucat):
    """ Sort the master test list into categories. """
    return {category: [tc for tc in alltests if category in tc['category']]
            for category in ucat}
def get_unique_item(lst):
    """ For a list, return a list of the unique items in the list.

    Uses dict.fromkeys so de-duplication preserves first-seen order;
    the previous set()-based version produced a different ordering on
    every run (string hash randomization), making report ordering
    nondeterministic.
    """
    return list(dict.fromkeys(lst))
def get_test_categories(alltests):
    """ Discover all unique test categories present in the test case file. """
    ucat = []
    for tc in alltests:
        ucat.extend(get_unique_item(tc['category']))
    return get_unique_item(ucat)
def list_test_cases(testlist):
    """ Print IDs and names of all test cases. """
    for curcase in testlist:
        print('{}: ({}) {}'.format(curcase['id'],
                                   ', '.join(curcase['category']),
                                   curcase['name']))
def list_categories(testlist):
    """ Show all categories that are present in a test case file. """
    # Bug fix: 'category' is a list per test case (see get_test_categories),
    # and the old set(map(lambda x: x['category'], ...)) raised TypeError
    # on those unhashable lists. Collect the individual categories instead.
    categories = set()
    for tc in testlist:
        cat = tc['category']
        categories.update(cat if isinstance(cat, list) else [cat])
    print("Available categories:")
    print(", ".join(str(s) for s in categories))
    print("")
def print_list(cmdlist):
    """ Print a list of strings prepended with a tab.

    A list-valued entry prints only its first element (the command; any
    trailing elements are expected exit codes)."""
    for entry in cmdlist:
        text = entry[0] if isinstance(entry, list) else entry
        print("\t" + str(text))
def print_sll(items):
    """Print the items joined one per line (single print call)."""
    lines = [str(entry) for entry in items]
    print("\n".join(lines))
def print_test_case(tcase):
    """ Pretty-printing of a given test case. """
    print('\n==============\nTest {}\t{}\n'.format(tcase['id'], tcase['name']))
    for key, val in tcase.items():
        if isinstance(val, list):
            # list-valued fields (setup/teardown/etc.) get one line per entry
            print(key + ":")
            print_list(val)
        elif key not in ('id', 'name'):
            # id and name were already printed in the header
            print(key + ": " + str(val))
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/tdc_helper.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
"""
tdc_multibatch.py - a thin wrapper over tdc_batch.py to generate multiple batch
files
Copyright (C) 2019 Vlad Buslov <vladbu@mellanox.com>
"""
import argparse
import os

# Command line: target device, output directory, batch sizing and the tc
# operation each generated line performs.
parser = argparse.ArgumentParser(
    description='TC multiple batch file generator')
parser.add_argument("device", help="device name")
parser.add_argument("dir", help="where to put batch files")
parser.add_argument(
    "num_filters", type=int, help="how many lines per batch file")
parser.add_argument("num_files", type=int, help="how many batch files")
parser.add_argument(
    "operation",
    choices=['add', 'del', 'replace'],
    help="operation to perform on filters")
parser.add_argument(
    "-x",
    "--file_prefix",
    default="",
    help="prefix for generated batch file names")
parser.add_argument(
    "-d",
    "--duplicate_handles",
    action="store_true",
    help="duplicate filter handle range in all files")
parser.add_argument(
    "-a",
    "--handle_start",
    type=int,
    default=1,
    help="start handle range from (default: 1)")
parser.add_argument(
    "-m",
    "--mac_prefix",
    type=int,
    default=0,
    choices=range(0, 256),
    help="add this value to third byte of source MAC address of flower filter"
    "(default: 0)")
args = parser.parse_args()

device = args.device
# NOTE(review): 'dir' and (below) 'file' shadow Python builtins; kept as-is.
dir = args.dir
file_prefix = args.file_prefix + args.operation + "_"
num_filters = args.num_filters
num_files = args.num_files
operation = args.operation
duplicate_handles = args.duplicate_handles
handle = args.handle_start
mac_prefix = args.mac_prefix

# Delegate each batch file to tdc_batch.py. Unless --duplicate_handles was
# given, each file gets its own non-overlapping handle range; the loop index
# is added to the MAC prefix so every file uses distinct source MACs.
# NOTE(review): arguments are interpolated unquoted into a shell command
# (os.system) -- assumes trusted, shell-safe argument values.
for i in range(num_files):
    file = dir + '/' + file_prefix + str(i)
    os.system("./tdc_batch.py -n {} -a {} -e {} -m {} {} {}".format(
        num_filters, handle, operation, i + mac_prefix, device, file))
    if not duplicate_handles:
        handle += num_filters
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/tdc_multibatch.py
|
#!/usr/bin/env python3
"""
tdc_batch.py - a script to generate TC batch file
Copyright (C) 2017 Chris Mi <chrism@mellanox.com>
"""
import argparse

# Command line: target device, output file, and knobs controlling how the
# generated tc filter lines look.
parser = argparse.ArgumentParser(description='TC batch file generator')
parser.add_argument("device", help="device name")
parser.add_argument("file", help="batch file name")
parser.add_argument("-n", "--number", type=int,
                    help="how many lines in batch file")
parser.add_argument(
    "-a",
    "--handle_start",
    type=int,
    default=1,
    help="start handle range from (default: 1)")
parser.add_argument("-o", "--skip_sw",
                    help="skip_sw (offload), by default skip_hw",
                    action="store_true")
parser.add_argument("-s", "--share_action",
                    help="all filters share the same action",
                    action="store_true")
parser.add_argument("-p", "--prio",
                    help="all filters have different prio",
                    action="store_true")
parser.add_argument(
    "-e",
    "--operation",
    choices=['add', 'del', 'replace'],
    default='add',
    help="operation to perform on filters"
    "(default: add filter)")
parser.add_argument(
    "-m",
    "--mac_prefix",
    type=int,
    default=0,
    choices=range(0, 256),
    help="third byte of source MAC address of flower filter"
    "(default: 0)")
args = parser.parse_args()

device = args.device
# NOTE(review): 'file' shadows the builtin; closed by the generator loop
# below right before exit().
file = open(args.file, 'w')

number = 1
if args.number:
    number = args.number

handle_start = args.handle_start

skip = "skip_hw"
if args.skip_sw:
    skip = "skip_sw"

share_action = ""
if args.share_action:
    share_action = "index 1"

prio = "prio 1"
if args.prio:
    prio = ""
    # with per-filter priorities, cap the number of generated lines
    if number > 0x4000:
        number = 0x4000

mac_prefix = args.mac_prefix
def format_add_filter(device, prio, handle, skip, src_mac, dst_mac,
                      share_action):
    """Return one 'tc filter add' flower drop-rule line."""
    template = ("filter add dev {} {} protocol ip ingress handle {} "
                " flower {} src_mac {} dst_mac {} action drop {}")
    return template.format(device, prio, handle, skip, src_mac, dst_mac,
                           share_action)
def format_rep_filter(device, prio, handle, skip, src_mac, dst_mac,
                      share_action):
    """Return one 'tc filter replace' flower drop-rule line."""
    template = ("filter replace dev {} {} protocol ip ingress handle {} "
                " flower {} src_mac {} dst_mac {} action drop {}")
    return template.format(device, prio, handle, skip, src_mac, dst_mac,
                           share_action)
def format_del_filter(device, prio, handle, skip, src_mac, dst_mac,
                      share_action):
    """Return one 'tc filter del' line.

    Only device/prio/handle are used; the remaining parameters exist so all
    three formatters share one call signature."""
    return "filter del dev {} {} protocol ip ingress handle {} flower".format(
        device, prio, handle)
# Pick the per-line formatter for the requested operation (default: add).
formatter = format_add_filter
if args.operation == "del":
    formatter = format_del_filter
elif args.operation == "replace":
    formatter = format_rep_filter

# Enumerate unique MAC suffixes (up to 2^24) and emit one tc command per
# line; stop (close the file and exit) once 'number' lines were written.
index = 0
for i in range(0x100):
    for j in range(0x100):
        for k in range(0x100):
            mac = ("{:02x}:{:02x}:{:02x}".format(i, j, k))
            src_mac = "e4:11:{:02x}:{}".format(mac_prefix, mac)
            dst_mac = "e4:12:00:" + mac
            cmd = formatter(device, prio, handle_start + index, skip, src_mac,
                            dst_mac, share_action)
            file.write("{}\n".format(cmd))
            index += 1
            if index >= number:
                file.close()
                exit(0)
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/tdc_batch.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
"""
tdc.py - Linux tc (Traffic Control) unit test driver
Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
"""
import re
import os
import sys
import argparse
import importlib
import json
import subprocess
import time
import traceback
from collections import OrderedDict
from string import Template
from tdc_config import *
from tdc_helper import *
import TdcPlugin
from TdcResults import *
class PluginDependencyException(Exception):
    """Raised when one or more plugins required by test cases cannot be found."""

    def __init__(self, missing_pg):
        # file names of the plugins that could not be located
        self.missing_pg = missing_pg
class PluginMgrTestFail(Exception):
    """Raised when a setup/teardown stage of a test case fails."""

    def __init__(self, stage, output, message):
        # stage: which phase failed; output: accumulated command output;
        # message: human-readable description for the report
        self.stage = stage
        self.output = output
        self.message = message
class PluginMgr:
    """Discovers, loads and drives tdc plugins, fanning each test-runner
    hook out to every plugin instance."""

    def __init__(self, argparser):
        super().__init__()
        self.plugins = {}
        self.plugin_instances = []
        self.failed_plugins = {}
        self.argparser = argparser

        # TODO, put plugins in order
        plugindir = os.getenv('TDC_PLUGIN_DIR', './plugins')
        for dirpath, dirnames, filenames in os.walk(plugindir):
            for fn in filenames:
                if (fn.endswith('.py') and
                    not fn == '__init__.py' and
                    not fn.startswith('#') and
                    not fn.startswith('.#')):
                    mn = fn[0:-3]
                    foo = importlib.import_module('plugins.' + mn)
                    self.plugins[mn] = foo
                    self.plugin_instances.append(foo.SubPlugin())

    def load_plugin(self, pgdir, pgname):
        """Import one plugin module and register an instance of it."""
        pgname = pgname[0:-3]
        foo = importlib.import_module('{}.{}'.format(pgdir, pgname))
        self.plugins[pgname] = foo
        self.plugin_instances.append(foo.SubPlugin())
        self.plugin_instances[-1].check_args(self.args, None)

    def get_required_plugins(self, testlist):
        '''
        Get all required plugins from the list of test cases and return
        all unique items.
        '''
        reqs = []
        for t in testlist:
            try:
                if 'requires' in t['plugins']:
                    if isinstance(t['plugins']['requires'], list):
                        reqs.extend(t['plugins']['requires'])
                    else:
                        reqs.append(t['plugins']['requires'])
            except KeyError:
                continue
        reqs = get_unique_item(reqs)
        return reqs

    def load_required_plugins(self, reqs, parser, args, remaining):
        '''
        Get all required plugins from the list of test cases and load any plugin
        that is not already enabled.
        '''
        pgd = ['plugin-lib', 'plugin-lib-custom']
        pnf = []

        for r in reqs:
            if r not in self.plugins:
                fname = '{}.py'.format(r)
                source_path = []
                for d in pgd:
                    pgpath = '{}/{}'.format(d, fname)
                    if os.path.isfile(pgpath):
                        source_path.append(pgpath)
                if len(source_path) == 0:
                    print('ERROR: unable to find required plugin {}'.format(r))
                    pnf.append(fname)
                    continue
                elif len(source_path) > 1:
                    # Bug fix: the '{}' placeholder was printed literally;
                    # the plugin name was never substituted in.
                    print('WARNING: multiple copies of plugin {} found, using version found'.format(r))
                    print('at {}'.format(source_path[0]))
                pgdir = source_path[0]
                pgdir = pgdir.split('/')[0]
                self.load_plugin(pgdir, fname)
        if len(pnf) > 0:
            raise PluginDependencyException(pnf)

        parser = self.call_add_args(parser)
        (args, remaining) = parser.parse_known_args(args=remaining, namespace=args)
        return args

    def call_pre_suite(self, testcount, testidlist):
        for pgn_inst in self.plugin_instances:
            pgn_inst.pre_suite(testcount, testidlist)

    def call_post_suite(self, index):
        # Tear down in reverse registration order.
        for pgn_inst in reversed(self.plugin_instances):
            pgn_inst.post_suite(index)

    def call_pre_case(self, caseinfo, *, test_skip=False):
        for pgn_inst in self.plugin_instances:
            try:
                pgn_inst.pre_case(caseinfo, test_skip)
            except Exception as ee:
                print('exception {} in call to pre_case for {} plugin'.
                      format(ee, pgn_inst.__class__))
                # Bug fix: 'test_ordinal' is not defined anywhere in this
                # module; the bare reference raised NameError inside this
                # handler and masked the original exception's re-raise.
                print('test_ordinal is {}'.format(globals().get('test_ordinal', '<unset>')))
                print('testid is {}'.format(caseinfo['id']))
                raise

    def call_post_case(self):
        for pgn_inst in reversed(self.plugin_instances):
            pgn_inst.post_case()

    def call_pre_execute(self):
        for pgn_inst in self.plugin_instances:
            pgn_inst.pre_execute()

    def call_post_execute(self):
        for pgn_inst in reversed(self.plugin_instances):
            pgn_inst.post_execute()

    def call_add_args(self, parser):
        """Let every plugin extend the argument parser; return the result."""
        for pgn_inst in self.plugin_instances:
            parser = pgn_inst.add_args(parser)
        return parser

    def call_check_args(self, args, remaining):
        for pgn_inst in self.plugin_instances:
            pgn_inst.check_args(args, remaining)

    def call_adjust_command(self, stage, command):
        """Give every plugin a chance to rewrite *command*; return the result."""
        for pgn_inst in self.plugin_instances:
            command = pgn_inst.adjust_command(stage, command)
        return command

    def set_args(self, args):
        self.args = args

    @staticmethod
    def _make_argparser(args):
        # Bug fix: this staticmethod assigned to 'self.argparser', which is
        # undefined in a staticmethod and raised NameError when called;
        # return the freshly built parser instead.
        return argparse.ArgumentParser(description='Linux TC unit tests')
def replace_keywords(cmd):
    """
    For a given executable command, substitute any known
    variables contained within NAMES with the correct values
    """
    # safe_substitute leaves unknown $-keys untouched instead of raising.
    return Template(cmd).safe_substitute(NAMES)
def exec_cmd(args, pm, stage, command):
    """
    Perform any required modifications on an executable command, then run
    it in a subprocess and return the results.

    Returns (proc, foutput): the Popen object (returncode forced to 255 on
    timeout) and decoded stderr on failure, else decoded stdout. Returns
    (None, None) for an empty command.
    """
    if len(command.strip()) == 0:
        return None, None
    if '$' in command:
        # Substitute $NAME placeholders from the NAMES config table.
        command = replace_keywords(command)

    command = pm.call_adjust_command(stage, command)
    if args.verbose > 0:
        print('command "{}"'.format(command))
    proc = subprocess.Popen(command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=ENVIR)

    try:
        # NAMES['TIMEOUT'] comes from the tdc_config import.
        (rawout, serr) = proc.communicate(timeout=NAMES['TIMEOUT'])
        if proc.returncode != 0 and len(serr) > 0:
            foutput = serr.decode("utf-8", errors="ignore")
        else:
            foutput = rawout.decode("utf-8", errors="ignore")
    except subprocess.TimeoutExpired:
        foutput = "Command \"{}\" timed out\n".format(command)
        proc.returncode = 255

    proc.stdout.close()
    proc.stderr.close()
    return proc, foutput
def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
    """
    Execute the setup/teardown commands for a test case.
    Optionally terminate test execution if the command fails.

    Each cmdlist entry is either a bare command string (expected exit code
    0) or a list [cmd, allowed_exit_code, ...]. Raises PluginMgrTestFail
    when a command exits with a code outside its allowed set.
    """
    if args.verbose > 0:
        print('{}'.format(prefix))
    for cmdinfo in cmdlist:
        if isinstance(cmdinfo, list):
            exit_codes = cmdinfo[1:]
            cmd = cmdinfo[0]
        else:
            exit_codes = [0]
            cmd = cmdinfo

        if not cmd:
            continue

        (proc, foutput) = exec_cmd(args, pm, stage, cmd)

        if proc and (proc.returncode not in exit_codes):
            print('', file=sys.stderr)
            print("{} *** Could not execute: \"{}\"".format(prefix, cmd),
                  file=sys.stderr)
            print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
                  file=sys.stderr)
            print("returncode {}; expected {}".format(proc.returncode,
                                                      exit_codes))
            print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
            print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
            print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
            raise PluginMgrTestFail(
                stage, output,
                '"{}" did not complete successfully'.format(prefix))
def run_one_test(pm, args, index, tidx):
    """Run one test case dict *tidx* through setup / execute / verify /
    teardown and return its TestResult.

    The command under test passes when its exit code matches expExitCode
    and the verify command's output matches matchPattern exactly
    matchCount times.
    """
    global NAMES
    # NOTE(review): result/tresult/tap are never read afterwards.
    result = True
    tresult = ""
    tap = ""
    res = TestResult(tidx['id'], tidx['name'])
    if args.verbose > 0:
        print("\t====================\n=====> ", end="")
        print("Test " + tidx["id"] + ": " + tidx["name"])

    if 'skip' in tidx:
        if tidx['skip'] == 'yes':
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg('Test case designated as skipped.')
            # Plugins still see the case so they can account for it.
            pm.call_pre_case(tidx, test_skip=True)
            pm.call_post_execute()
            return res

    # populate NAMES with TESTID for this test
    NAMES['TESTID'] = tidx['id']

    pm.call_pre_case(tidx)
    prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"])

    if (args.verbose > 0):
        print('-----> execute stage')
    pm.call_pre_execute()
    (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
    if p:
        exit_code = p.returncode
    else:
        exit_code = None

    pm.call_post_execute()

    if (exit_code is None or exit_code != int(tidx["expExitCode"])):
        print("exit: {!r}".format(exit_code))
        print("exit: {}".format(int(tidx["expExitCode"])))
        #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
        res.set_result(ResultState.fail)
        res.set_failmsg('Command exited with {}, expected {}\n{}'.format(exit_code, tidx["expExitCode"], procout))
        print(procout)
    else:
        if args.verbose > 0:
            print('-----> verify stage')
        match_pattern = re.compile(
            str(tidx["matchPattern"]), re.DOTALL | re.MULTILINE)
        (p, procout) = exec_cmd(args, pm, 'verify', tidx["verifyCmd"])
        if procout:
            # The number of pattern matches must equal matchCount exactly.
            match_index = re.findall(match_pattern, procout)
            if len(match_index) != int(tidx["matchCount"]):
                res.set_result(ResultState.fail)
                res.set_failmsg('Could not match regex pattern. Verify command output:\n{}'.format(procout))
            else:
                res.set_result(ResultState.success)
        elif int(tidx["matchCount"]) != 0:
            res.set_result(ResultState.fail)
            res.set_failmsg('No output generated by verify command.')
        else:
            res.set_result(ResultState.success)

    prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
    pm.call_post_case()

    # NOTE(review): local increment; the caller's index is unaffected.
    index += 1

    # remove TESTID from NAMES
    del(NAMES['TESTID'])
    return res
def test_runner(pm, args, filtered_tests):
    """
    Driver function for the unit tests.

    Prints information about the tests being run, executes the setup and
    teardown commands and the command under test itself. Also determines
    success/failure based on the information in the test case and generates
    TAP output accordingly.

    Returns a TestSuiteReport, or an emergency-exit message string when
    the pre_suite plugin hooks fail.
    """
    testlist = filtered_tests
    tcount = len(testlist)
    index = 1
    tap = ''
    badtest = None
    stage = None
    emergency_exit = False
    emergency_exit_message = ''

    tsr = TestSuiteReport()

    try:
        pm.call_pre_suite(tcount, [tidx['id'] for tidx in testlist])
    except Exception as ee:
        ex_type, ex, ex_tb = sys.exc_info()
        print('Exception {} {} (caught in pre_suite).'.
              format(ex_type, ex))
        traceback.print_tb(ex_tb)
        emergency_exit_message = 'EMERGENCY EXIT, call_pre_suite failed with exception {} {}\n'.format(ex_type, ex)
        emergency_exit = True
        stage = 'pre-SUITE'

    if emergency_exit:
        pm.call_post_suite(index)
        return emergency_exit_message

    if args.verbose > 1:
        print('give test rig 2 seconds to stabilize')
    time.sleep(2)

    for tidx in testlist:
        # flower tests need a real NIC supplied via -d / DEV2.
        if "flower" in tidx["category"] and args.device == None:
            errmsg = "Tests using the DEV2 variable must define the name of a "
            errmsg += "physical NIC with the -d option when running tdc.\n"
            errmsg += "Test has been skipped."
            if args.verbose > 1:
                print(errmsg)
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg(errmsg)
            tsr.add_resultdata(res)
            continue
        try:
            badtest = tidx  # in case it goes bad
            res = run_one_test(pm, args, index, tidx)
            tsr.add_resultdata(res)
        except PluginMgrTestFail as pmtf:
            # A setup/teardown stage failed: record this test as skipped,
            # report diagnostics, and abort the remainder of the run.
            ex_type, ex, ex_tb = sys.exc_info()
            stage = pmtf.stage
            message = pmtf.message
            output = pmtf.output
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg(pmtf.message)
            res.set_failmsg(pmtf.output)
            tsr.add_resultdata(res)
            index += 1
            print(message)
            print('Exception {} {} (caught in test_runner, running test {} {} {} stage {})'.
                  format(ex_type, ex, index, tidx['id'], tidx['name'], stage))
            print('---------------')
            print('traceback')
            traceback.print_tb(ex_tb)
            print('---------------')
            if stage == 'teardown':
                print('accumulated output for this test:')
                if pmtf.output:
                    print(pmtf.output)
                print('---------------')
            break
        index += 1

    # if we failed in setup or teardown,
    # fill in the remaining tests with ok-skipped
    count = index

    if tcount + 1 != count:
        for tidx in testlist[count - 1:]:
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            msg = 'skipped - previous {} failed {} {}'.format(stage,
                index, badtest.get('id', '--Unknown--'))
            res.set_errormsg(msg)
            tsr.add_resultdata(res)
            count += 1

    if args.pause:
        print('Want to pause\nPress enter to continue ...')
        # NOTE(review): input() treats its argument as a prompt string;
        # passing sys.stdin here is odd but harmless — confirm intent.
        if input(sys.stdin):
            print('got something on stdin')

    pm.call_post_suite(index)

    return tsr
def has_blank_ids(idlist):
    """
    Return True if any entry in idlist is blank (falsy), False otherwise.
    An empty list has no blank IDs, so it yields False.
    """
    return any(not ident for ident in idlist)
def load_from_file(filename):
    """
    Read the JSON test-case file *filename* and return its contents as a
    list of ordered dictionaries.

    A file that fails to parse is reported and skipped (an empty list is
    returned).  When any test in a successfully parsed file has a blank
    ID, every test in it is tagged with the source filename so the IDs
    can be regenerated later.
    """
    try:
        with open(filename) as test_data:
            testlist = json.load(test_data, object_pairs_hook=OrderedDict)
    except json.JSONDecodeError as jde:
        print('IGNORING test case file {}\n\tBECAUSE: {}'.format(filename, jde))
        return list()
    if has_blank_ids(get_id_list(testlist)):
        for tc in testlist:
            tc['filename'] = filename
    return testlist
def args_parse():
    """Build and return the top-level argument parser for tdc."""
    return argparse.ArgumentParser(description='Linux TC unit tests')
def set_args(parser):
    """
    Register all of tdc's command line options on *parser* and return it.

    Options fall into two argparse groups: 'selection' (which test cases
    to consider) and 'action' (what to do with them); the rest are
    general behavior switches.
    """
    parser.add_argument(
        '--outfile', type=str,
        help='Path to the file in which results should be saved. ' +
        'Default target is the current directory.')
    parser.add_argument(
        '-p', '--path', type=str,
        help='The full path to the tc executable to use')
    sel_group = parser.add_argument_group(
        'selection', 'select which test cases: ' +
        'files plus directories; filtered by categories plus testids')
    act_group = parser.add_argument_group(
        'action', 'select action to perform on selected test cases')
    sel_group.add_argument(
        '-D', '--directory', nargs='+', metavar='DIR',
        help='Collect tests from the specified directory(ies) ' +
        '(default [tc-tests])')
    sel_group.add_argument(
        '-f', '--file', nargs='+', metavar='FILE',
        help='Run tests from the specified file(s)')
    sel_group.add_argument(
        '-c', '--category', nargs='*', metavar='CATG', default=['+c'],
        help='Run tests only from the specified category/ies, ' +
        'or if no category/ies is/are specified, list known categories.')
    sel_group.add_argument(
        '-e', '--execute', nargs='+', metavar='ID',
        help='Execute the specified test cases with specified IDs')
    act_group.add_argument(
        '-l', '--list', action='store_true',
        help='List all test cases, or those only within the specified category')
    act_group.add_argument(
        '-s', '--show', action='store_true', dest='showID',
        help='Display the selected test cases')
    act_group.add_argument(
        '-i', '--id', action='store_true', dest='gen_id',
        help='Generate ID numbers for new test cases')
    parser.add_argument(
        '-v', '--verbose', action='count', default=0,
        help='Show the commands that are being run')
    parser.add_argument(
        '--format', default='tap', const='tap', nargs='?',
        choices=['none', 'xunit', 'tap'],
        help='Specify the format for test results. (Default: TAP)')
    parser.add_argument(
        '-d', '--device',
        help='Execute test cases that use a physical device, ' +
        'where DEVICE is its name. (If not defined, tests ' +
        'that require a physical device will be skipped)')
    parser.add_argument(
        '-P', '--pause', action='store_true',
        help='Pause execution just before post-suite stage')
    return parser
def check_default_settings(args, remaining, pm):
    """
    Process any arguments overriding the default settings,
    and ensure the settings are correct.

    Mutates the module-level NAMES dict from the -p/--path and
    -d/--device options, and exits with status 1 when the tc binary
    does not exist.  Finally gives plugins a chance to validate the
    arguments via pm.call_check_args().
    """
    # Allow for overriding specific settings
    global NAMES

    # PEP 8: compare against None with 'is not', not '!='.
    if args.path is not None:
        NAMES['TC'] = args.path
    if args.device is not None:
        NAMES['DEV2'] = args.device
    # Normalize so later lookups can rely on the TIMEOUT key existing
    # even when the local config does not define it.
    if 'TIMEOUT' not in NAMES:
        NAMES['TIMEOUT'] = None
    if not os.path.isfile(NAMES['TC']):
        print("The specified tc path " + NAMES['TC'] + " does not exist.")
        exit(1)

    pm.call_check_args(args, remaining)
def get_id_list(alltests):
    """Return the 'id' field of every test case in *alltests*, in order."""
    return [testcase['id'] for testcase in alltests]
def check_case_id(alltests):
    """
    Check for duplicate test case IDs.

    Returns every occurrence of an ID that appears more than once (so a
    doubled ID shows up twice in the result); an empty list means all
    IDs are unique.
    """
    from collections import Counter

    idl = get_id_list(alltests)
    # Counting all IDs once up front avoids the O(n^2) cost of calling
    # list.count() for every element; the returned list is identical.
    occurrences = Counter(idl)
    return [x for x in idl if occurrences[x] > 1]
def does_id_exist(alltests, newid):
    """
    Check if a given ID already exists in the list of test cases.
    """
    return any(testcase['id'] == newid for testcase in alltests)
def generate_case_ids(alltests):
    """
    If a test case has a blank ID field, generate a random hex ID for it
    and then write the test cases back to disk.

    Returns the (mutated) list of test cases so that callers that rebind
    their list to this function's result keep a valid value.
    """
    import random

    for c in alltests:
        if (c["id"] == ""):
            # Keep drawing 4-hex-digit IDs until one is found that no
            # other test case already uses.
            while True:
                newid = str('{:04x}'.format(random.randrange(16**4)))
                if (does_id_exist(alltests, newid)):
                    continue
                else:
                    c['id'] = newid
                    break

    # Collect the unique set of files the tests were loaded from ...
    ufilename = []
    for c in alltests:
        if ('filename' in c):
            ufilename.append(c['filename'])
    ufilename = get_unique_item(ufilename)

    # ... and rewrite each file with its tests, dropping the bookkeeping
    # 'filename' key before serializing.
    for f in ufilename:
        testlist = []
        for t in alltests:
            if 'filename' in t:
                if t['filename'] == f:
                    del t['filename']
                    testlist.append(t)
        # Context manager guarantees the file is closed even if
        # json.dump raises (previously the handle could leak).
        with open(f, "w") as outfile:
            json.dump(testlist, outfile, indent=4)
            outfile.write("\n")

    # BUG FIX: this function used to return None, so the caller's
    # "alltests = generate_case_ids(alltests)" wiped out the test list.
    return alltests
def filter_tests_by_id(args, testlist):
    '''
    Keep only the tests whose 'id' appears in args.execute.
    If either the test list or the requested id list is empty,
    return an empty list.
    '''
    if not (testlist and args.execute):
        return list()
    wanted = args.execute
    if not isinstance(wanted, list) or len(wanted) == 0:
        return list()
    return [tc for tc in testlist if tc['id'] in wanted]
def filter_tests_by_category(args, testlist):
    '''
    Keep only the tests belonging to at least one category named in
    args.category.  The placeholder '+c' (the option's default) is
    ignored; each matching test appears once even if it matches
    several categories.
    '''
    answer = list()
    if not (args.category and testlist):
        return answer

    seen_ids = list()
    for catg in set(args.category):
        if catg == '+c':
            continue
        print('considering category {}'.format(catg))
        for tc in testlist:
            if catg in tc['category'] and tc['id'] not in seen_ids:
                answer.append(tc)
                seen_ids.append(tc['id'])
    return answer
def get_test_cases(args):
    """
    If a test case file is specified, retrieve tests from that file.
    Otherwise, glob for all json files in subdirectories and load from
    each one.
    Also, if requested, filter by category, and add tests matching
    certain ids.

    Returns a tuple (all category names, all test ids, tests grouped by
    category, the selected test cases).
    """
    import fnmatch

    flist = []
    testdirs = ['tc-tests']

    if args.file:
        # at least one file was specified - remove the default directory
        testdirs = []

        for ff in args.file:
            if not os.path.isfile(ff):
                print("IGNORING file " + ff + "\n\tBECAUSE does not exist.")
            else:
                flist.append(os.path.abspath(ff))

    if args.directory:
        testdirs = args.directory

    # Walk every test directory and pick up each *.json file found.
    for testdir in testdirs:
        for root, dirnames, filenames in os.walk(testdir):
            for filename in fnmatch.filter(filenames, '*.json'):
                candidate = os.path.abspath(os.path.join(root, filename))
                # NOTE(review): this membership test checks testdirs, not
                # flist, so it never deduplicates candidate files -- looks
                # like it was meant to be 'candidate not in flist'; confirm.
                if candidate not in testdirs:
                    flist.append(candidate)

    alltestcases = list()
    for casefile in flist:
        alltestcases = alltestcases + (load_from_file(casefile))

    allcatlist = get_test_categories(alltestcases)
    allidlist = get_id_list(alltestcases)

    testcases_by_cats = get_categorized_testlist(alltestcases, allcatlist)
    idtestcases = filter_tests_by_id(args, alltestcases)
    cattestcases = filter_tests_by_category(args, alltestcases)

    cat_ids = [x['id'] for x in cattestcases]
    if args.execute:
        if args.category:
            # Explicitly requested ids run in addition to the category
            # selection, without duplicating tests already selected.
            alltestcases = cattestcases + [x for x in idtestcases if x['id'] not in cat_ids]
        else:
            alltestcases = idtestcases
    else:
        if cat_ids:
            alltestcases = cattestcases
        else:
            # just accept the existing value of alltestcases,
            # which has been filtered by file/directory
            pass

    return allcatlist, allidlist, testcases_by_cats, alltestcases
def set_operation_mode(pm, parser, args, remaining):
    """
    Load the test case data and process remaining arguments to determine
    what the script should do for this run, and call the appropriate
    function.

    Several modes (-i, -s, empty -c, -l, duplicate-id detection) print
    and exit immediately; otherwise the selected tests are executed and
    the process exits with a kselftest-style status code.
    """
    ucat, idlist, testcases, alltests = get_test_cases(args)

    # -i/--id mode: fill in blank IDs (if any) and exit.
    if args.gen_id:
        if (has_blank_ids(idlist)):
            alltests = generate_case_ids(alltests)
        else:
            print("No empty ID fields found in test files.")
        exit(0)

    duplicate_ids = check_case_id(alltests)
    if (len(duplicate_ids) > 0):
        print("The following test case IDs are not unique:")
        print(str(set(duplicate_ids)))
        print("Please correct them before continuing.")
        exit(1)

    # -s/--show mode: dump the selected test cases and exit.
    if args.showID:
        for atest in alltests:
            print_test_case(atest)
        exit(0)

    # A bare -c (empty category list) means "list the known categories".
    if isinstance(args.category, list) and (len(args.category) == 0):
        print("Available categories:")
        print_sll(ucat)
        exit(0)

    if args.list:
        list_test_cases(alltests)
        exit(0)

    exit_code = 0 # KSFT_PASS
    if len(alltests):
        req_plugins = pm.get_required_plugins(alltests)
        try:
            args = pm.load_required_plugins(req_plugins, parser, args, remaining)
        except PluginDependencyException as pde:
            # Report missing plugins but still fall through to the run.
            print('The following plugins were not found:')
            print('{}'.format(pde.missing_pg))
        catresults = test_runner(pm, args, alltests)
        if catresults.count_failures() != 0:
            exit_code = 1 # KSFT_FAIL
        if args.format == 'none':
            print('Test results output suppression requested\n')
        else:
            print('\nAll test results: \n')
            if args.format == 'xunit':
                suffix = 'xml'
                res = catresults.format_xunit()
            elif args.format == 'tap':
                suffix = 'tap'
                res = catresults.format_tap()
            print(res)
            print('\n\n')
            if not args.outfile:
                fname = 'test-results.{}'.format(suffix)
            else:
                fname = args.outfile
            with open(fname, 'w') as fh:
                fh.write(res)
                # NOTE(review): fh.close() is redundant inside 'with' (the
                # context manager closes the file); harmless as nothing
                # below writes to fh.
                fh.close()
                if os.getenv('SUDO_UID') is not None:
                    # When run under sudo, hand the results file back to
                    # the invoking user.
                    # NOTE(review): os.chown's uid/gid are passed as
                    # keywords here but may be positional-only on some
                    # Python builds -- confirm.
                    os.chown(fname, uid=int(os.getenv('SUDO_UID')),
                        gid=int(os.getenv('SUDO_GID')))
    else:
        print('No tests found\n')
        exit_code = 4 # KSFT_SKIP
    exit(exit_code)
def main():
    """
    Start of execution; set up argument parser and get the arguments,
    and start operations.
    """
    arg_parser = set_args(args_parse())
    pmgr = PluginMgr(arg_parser)
    arg_parser = pmgr.call_add_args(arg_parser)

    args, remaining = arg_parser.parse_known_args()
    args.NAMES = NAMES
    pmgr.set_args(args)
    check_default_settings(args, remaining, pmgr)
    if args.verbose > 2:
        print('args is {}'.format(args))

    set_operation_mode(pmgr, arg_parser, args, remaining)
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/tdc.py
|
"""
# SPDX-License-Identifier: GPL-2.0
tdc_config.py - tdc user-specified values
Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
"""
# Dictionary containing all values that can be substituted in executable
# commands.
# Default substitution values; tdc may override TC and DEV2 from its
# -p/--path and -d/--device command line options.
NAMES = {
    # Substitute your own tc path here
    'TC': '/sbin/tc',
    # Substitute your own ip path here
    'IP': '/sbin/ip',
    # Name of veth devices to be created for the namespace
    'DEV0': 'v0p0',
    'DEV1': 'v0p1',
    # Physical NIC under test; empty by default and filled in from tdc's
    # -d option when device tests are requested.
    'DEV2': '',
    'DUMMY': 'dummy1',
    'ETH': 'eth0',
    # Batch-mode command file and scratch directory used by tdc
    'BATCH_FILE': './batch.txt',
    'BATCH_DIR': 'tmp',
    # Length of time in seconds to wait before terminating a command
    'TIMEOUT': 24,
    # Name of the namespace to use
    'NS': 'tcut',
    # Directory containing eBPF test programs
    'EBPFDIR': './'
}
# Environment passed to subprocesses spawned by plugins (e.g. nsPlugin's
# _exec_cmd uses it as the env= for subprocess.Popen).
ENVIR = { }

# put customizations in tdc_config_local.py
try:
    from tdc_config_local import *
except ImportError as ie:
    # No local configuration present; the defaults above are used as-is.
    pass

try:
    # A local configuration may define EXTRA_NAMES to extend or override
    # the NAMES defaults.
    NAMES.update(EXTRA_NAMES)
except NameError as ne:
    pass
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/tdc_config.py
|
'''
build ebpf program
'''
import os
import signal
from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin
from tdc_config import *
class SubPlugin(TdcPlugin):
    """Plugin that builds the eBPF sample programs before the suite runs
    and removes the build artifacts afterwards."""

    def __init__(self):
        self.sub_class = 'buildebpf/SubPlugin'
        self.tap = ''
        super().__init__()

    def pre_suite(self, testcount, testidlist):
        """Compile the eBPF programs before any test runs."""
        super().pre_suite(testcount, testidlist)
        if self.args.buildebpf:
            self._ebpf_makeall()

    def post_suite(self, index):
        """Clean the eBPF build once the suite is done."""
        super().post_suite(index)
        self._ebpf_makeclean()

    def add_args(self, parser):
        """Register the --nobuildebpf option and return the parser."""
        super().add_args(parser)
        self.argparser_group = self.argparser.add_argument_group(
            'buildebpf',
            'options for buildebpfPlugin')
        self.argparser_group.add_argument(
            '--nobuildebpf', action='store_false', default=True,
            dest='buildebpf',
            help="Don't build eBPF programs")
        return self.argparser

    def _ebpf_makeall(self):
        # Build everything under EBPFDIR.
        if self.args.buildebpf:
            self._make('all')

    def _ebpf_makeclean(self):
        # Remove build artifacts under EBPFDIR.
        if self.args.buildebpf:
            self._make('clean')

    def _make(self, target):
        """Run 'make <target>' in the eBPF directory.

        Returns (process, output) where output is the decoded stderr on
        failure with diagnostics, otherwise the decoded stdout.
        """
        cmd = 'make -C {} {}'.format(self.args.NAMES['EBPFDIR'], target)
        proc = subprocess.Popen(cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=os.environ.copy())
        (stdout_data, stderr_data) = proc.communicate()

        if proc.returncode != 0 and len(stderr_data) > 0:
            output = stderr_data.decode("utf-8")
        else:
            output = stdout_data.decode("utf-8")

        proc.stdout.close()
        proc.stderr.close()
        return proc, output
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
|
import os
import signal
from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin
from tdc_config import *
class SubPlugin(TdcPlugin):
    # Plugin that (by default) creates a network namespace plus a veth
    # pair before the suite runs, prefixes test commands so they execute
    # inside that namespace, and tears everything down afterwards.

    def __init__(self):
        self.sub_class = 'ns/SubPlugin'
        super().__init__()

    def pre_suite(self, testcount, testidlist):
        '''run commands before test_runner goes into a test loop'''
        super().pre_suite(testcount, testidlist)

        if self.args.namespace:
            self._ns_create()
        else:
            self._ports_create()

    def post_suite(self, index):
        '''run commands after test_runner goes into a test loop'''
        super().post_suite(index)
        if self.args.verbose:
            print('{}.post_suite'.format(self.sub_class))

        if self.args.namespace:
            self._ns_destroy()
        else:
            self._ports_destroy()

    def add_args(self, parser):
        # Register -N/--no-namespace; namespace mode defaults to on.
        super().add_args(parser)
        self.argparser_group = self.argparser.add_argument_group(
            'netns',
            'options for nsPlugin(run commands in net namespace)')
        self.argparser_group.add_argument(
            '-N', '--no-namespace', action='store_false', default=True,
            dest='namespace', help='Don\'t run commands in namespace')
        return self.argparser

    def adjust_command(self, stage, command):
        # Prefix the command with '<ip> netns exec <NS>' for the setup,
        # execute, verify and teardown stages when namespace mode is on.
        # NOTE: when 'command' arrives as a list, the insert() calls below
        # mutate the caller's list in place.
        super().adjust_command(stage, command)
        cmdform = 'list'
        cmdlist = list()

        if not self.args.namespace:
            return command

        if self.args.verbose:
            print('{}.adjust_command'.format(self.sub_class))

        # Normalize to a list for the inserts; remember the original form
        # so the same shape can be returned.
        if not isinstance(command, list):
            cmdform = 'str'
            cmdlist = command.split()
        else:
            cmdlist = command
        if stage == 'setup' or stage == 'execute' or stage == 'verify' or stage == 'teardown':
            if self.args.verbose:
                print('adjust_command: stage is {}; inserting netns stuff in command [{}] list [{}]'.format(stage, command, cmdlist))
            # Build the prefix front-to-back by inserting at index 0 in
            # reverse order: result is '<IP> netns exec <NS> <command...>'.
            cmdlist.insert(0, self.args.NAMES['NS'])
            cmdlist.insert(0, 'exec')
            cmdlist.insert(0, 'netns')
            cmdlist.insert(0, self.args.NAMES['IP'])
        else:
            pass

        if cmdform == 'str':
            command = ' '.join(cmdlist)
        else:
            command = cmdlist

        if self.args.verbose:
            print('adjust_command: return command [{}]'.format(command))
        return command

    def _ports_create(self):
        # Create the veth pair and bring the host-side end up; the peer
        # is only brought up here when it will not be moved into a
        # namespace later.
        cmd = '$IP link add $DEV0 type veth peer name $DEV1'
        self._exec_cmd('pre', cmd)
        cmd = '$IP link set $DEV0 up'
        self._exec_cmd('pre', cmd)
        if not self.args.namespace:
            cmd = '$IP link set $DEV1 up'
            self._exec_cmd('pre', cmd)

    def _ports_destroy(self):
        # Deleting DEV0 removes the veth pair.
        cmd = '$IP link del $DEV0'
        self._exec_cmd('post', cmd)

    def _ns_create(self):
        '''
        Create the network namespace in which the tests will be run and set up
        the required network devices for it.
        '''
        self._ports_create()
        if self.args.namespace:
            cmd = '$IP netns add {}'.format(self.args.NAMES['NS'])
            self._exec_cmd('pre', cmd)
            cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
            self._exec_cmd('pre', cmd)
            cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
            self._exec_cmd('pre', cmd)
            # Move the physical device (if one was given via -d) into the
            # namespace as well.
            if self.args.device:
                cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
                self._exec_cmd('pre', cmd)
                cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
                self._exec_cmd('pre', cmd)

    def _ns_destroy(self):
        '''
        Destroy the network namespace for testing (and any associated network
        devices as well)
        '''
        if self.args.namespace:
            cmd = '$IP netns delete {}'.format(self.args.NAMES['NS'])
            self._exec_cmd('post', cmd)

    def _exec_cmd(self, stage, command):
        '''
        Perform any required modifications on an executable command, then run
        it in a subprocess and return the results.
        '''
        if '$' in command:
            command = self._replace_keywords(command)

        # NOTE(review): adjust_command's return value is discarded here,
        # and for a string command the split() inside it creates a new
        # list, so the netns prefix never reaches the Popen below -- the
        # commands issued from this plugin run outside the namespace;
        # confirm whether that is intended.
        self.adjust_command(stage, command)
        if self.args.verbose:
            print('_exec_cmd: command "{}"'.format(command))
        proc = subprocess.Popen(command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=ENVIR)
        (rawout, serr) = proc.communicate()

        # On failure prefer stderr (when non-empty) as the diagnostic
        # output; otherwise return stdout.
        if proc.returncode != 0 and len(serr) > 0:
            foutput = serr.decode("utf-8")
        else:
            foutput = rawout.decode("utf-8")

        proc.stdout.close()
        proc.stderr.close()
        return proc, foutput

    def _replace_keywords(self, cmd):
        """
        For a given executable command, substitute any known
        variables contained within NAMES with the correct values
        """
        tcmd = Template(cmd)
        subcmd = tcmd.safe_substitute(self.args.NAMES)
        return subcmd
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
|
#!/usr/bin/env python3
import os
import signal
from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin
from tdc_config import *
# scapy is a hard requirement for this plugin; bail out with install
# instructions when it is missing.
try:
    from scapy.all import *
except ImportError:
    print("Unable to import the scapy python module.")
    print("\nIf not already installed, you may do so with:")
    print("\t\tpip3 install scapy==2.4.2")
    exit(1)
class SubPlugin(TdcPlugin):
    """Plugin that sends scapy-crafted packets after a test's execute
    stage, driven by the optional 'scapy' block in the test case."""

    def __init__(self):
        self.sub_class = 'scapy/SubPlugin'
        super().__init__()

    def post_execute(self):
        """Send the packet(s) described by the test case's 'scapy'
        block(s); tests without such a block are left alone."""
        if 'scapy' not in self.args.caseinfo:
            if self.args.verbose:
                print('{}.post_execute: no scapy info in test case'.format(self.sub_class))
            return

        # A test may carry a single scapy block or a list of them;
        # normalize to a list.
        lscapyinfo = self.args.caseinfo['scapy']
        if type(lscapyinfo) != list:
            lscapyinfo = [ lscapyinfo, ]

        for scapyinfo in lscapyinfo:
            # Check for required fields
            scapy_keys = ['iface', 'count', 'packet']
            missing_keys = []
            keyfail = False
            for k in scapy_keys:
                if k not in scapyinfo:
                    keyfail = True
                    missing_keys.append(k)
            if keyfail:
                print('{}: Scapy block present in the test, but is missing info:'
                    .format(self.sub_class))
                print('{}'.format(missing_keys))
                # BUG FIX: previously execution fell through and crashed
                # with a KeyError on the missing field(s); skip this
                # malformed block instead.
                continue

            # SECURITY: eval() executes arbitrary Python from the
            # test-case JSON -- only run test files from trusted sources.
            pkt = eval(scapyinfo['packet'])
            if '$' in scapyinfo['iface']:
                tpl = Template(scapyinfo['iface'])
                scapyinfo['iface'] = tpl.safe_substitute(NAMES)
            for count in range(scapyinfo['count']):
                sendp(pkt, iface=scapyinfo['iface'])
|
nvtrust-main
|
infrastructure/linux/linux_source/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.