repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
pythongssapi/python-gssapi
|
gssapi/mechs.py
|
Mechanism.from_attrs
|
python
|
def from_attrs(cls, desired_attrs=None, except_attrs=None,
               critical_attrs=None):
    """Get a generator of mechanisms supporting the given attributes.

    See RFC 5587's ``indicate_mechs_by_attrs`` for more information.

    Args:
        desired_attrs ([OID]): attributes the mechanisms should have
        except_attrs ([OID]): attributes the mechanisms should not have
        critical_attrs ([OID]): attributes that must be understood

    Returns:
        generator of Mechanism: mechanisms with the desired features

    Raises:
        GSSError

    :requires-ext:`rfc5587`
    """
    # accept a bare OID as shorthand for a one-element set
    if isinstance(desired_attrs, roids.OID):
        desired_attrs = {desired_attrs}
    if isinstance(except_attrs, roids.OID):
        except_attrs = {except_attrs}
    if isinstance(critical_attrs, roids.OID):
        critical_attrs = {critical_attrs}

    if rfc5587 is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for RFC 5587")

    matching = rfc5587.indicate_mechs_by_attrs(desired_attrs,
                                               except_attrs,
                                               critical_attrs)
    return (cls(mech) for mech in matching)
|
Get a generator of mechanisms supporting the specified attributes. See
RFC 5587's :func:`indicate_mechs_by_attrs` for more information.
Args:
desired_attrs ([OID]): Desired attributes
except_attrs ([OID]): Except attributes
critical_attrs ([OID]): Critical attributes
Returns:
[Mechanism]: A set of mechanisms having the desired features.
Raises:
GSSError
:requires-ext:`rfc5587`
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/mechs.py#L167-L200
| null |
class Mechanism(roids.OID):
"""
A GSSAPI Mechanism
This class represents a mechanism and centralizes functions dealing with
mechanisms and can be used with any calls.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.oids.OID` class,
and thus can be used with both low-level and high-level API calls.
"""
def __new__(cls, cpy=None, elements=None):
    # Delegate straight to OID construction: copy an existing OID via
    # `cpy`, or build one from raw `elements` bytes.
    return super(Mechanism, cls).__new__(cls, cpy, elements)
@property
def name_types(self):
    """
    Get the set of name types supported by this mechanism.

    Returns the result of :func:`inquire_names_for_mech` for this OID.
    """
    return rmisc.inquire_names_for_mech(self)
@property
def _saslname(self):
    # Raw SASL-name info for this mechanism (RFC 5801 extension);
    # fails loudly when the underlying implementation lacks the extension.
    if rfc5801 is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for RFC 5801")
    return rfc5801.inquire_saslname_for_mech(self)
@property
def _attrs(self):
    # Raw attribute info for this mechanism (RFC 5587 extension);
    # fails loudly when the underlying implementation lacks the extension.
    if rfc5587 is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for RFC 5587")
    return rfc5587.inquire_attrs_for_mech(self)
def __str__(self):
    # six-style str(): text on Python 3, native bytes on Python 2
    if issubclass(str, six.text_type):
        # Python 3 -- we should return unicode
        return self._bytes_desc().decode(_utils._get_encoding())
    else:
        return self._bytes_desc()
def __unicode__(self):
    # Python 2 text representation, decoded with the configured encoding
    return self._bytes_desc().decode(_utils._get_encoding())
def _bytes_desc(self):
    """Return a bytes description of the mechanism: the SASL mech name
    when rfc5801 is available and reports one, otherwise the dotted
    OID form."""
    base = self.dotted_form
    if rfc5801 is not None and self._saslname and self._saslname.mech_name:
        base = self._saslname.mech_name
    # normalize to bytes regardless of which source provided the text
    if isinstance(base, six.text_type):
        base = base.encode(_utils._get_encoding())
    return base
def __repr__(self):
    """
    Get a name representing the mechanism; always safe to call
    """
    base = "<Mechanism (%s)>" % self.dotted_form
    if rfc5801 is not None:
        # NOTE(review): _saslname can raise GSSError, which would make
        # the "always safe to call" claim above inaccurate -- confirm
        base = "<Mechanism %s (%s)>" % (
            self._saslname.mech_name.decode('UTF-8'),
            self.dotted_form
        )
    return base
@property
def sasl_name(self):
    """
    Get the SASL name for the mechanism

    Returns:
        str: the SASL mechanism name, decoded as UTF-8

    :requires-ext:`rfc5801`
    """
    return self._saslname.sasl_mech_name.decode('UTF-8')
@property
def description(self):
    """
    Get the description of the mechanism

    Returns:
        str: the mechanism description, decoded as UTF-8

    :requires-ext:`rfc5801`
    """
    return self._saslname.mech_description.decode('UTF-8')
@property
def known_attrs(self):
    """
    Get the known attributes of the mechanism; returns a set of OIDs
    ([OID])

    :requires-ext:`rfc5587`
    """
    return self._attrs.known_mech_attrs
@property
def attrs(self):
    """
    Get the attributes of the mechanism; returns a set of OIDs ([OID])

    :requires-ext:`rfc5587`
    """
    return self._attrs.mech_attrs
@classmethod
def all_mechs(cls):
    """
    Get a generator of all mechanisms supported by GSSAPI
    """
    # wrap each raw OID reported by indicate_mechs() in a Mechanism
    return (cls(mech) for mech in rmisc.indicate_mechs())
@classmethod
def from_name(cls, name=None):
    """
    Get a generator of mechanisms that may be able to process the name

    Args:
        name (Name): a name to inquire about

    Returns:
        [Mechanism]: a generator of mechanisms which support this name

    Raises:
        GSSError
    """
    return (cls(mech) for mech in rmisc.inquire_mechs_for_name(name))
@classmethod
def from_sasl_name(cls, name=None):
    """
    Create a Mechanism from its SASL name

    Args:
        name (str): SASL name of the desired mechanism

    Returns:
        Mechanism: the desired mechanism

    Raises:
        GSSError

    :requires-ext:`rfc5801`
    """
    if rfc5801 is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for RFC 5801")
    # the C layer expects the SASL name as bytes; encode text input first
    if isinstance(name, six.text_type):
        name = name.encode(_utils._get_encoding())
    m = rfc5801.inquire_mech_for_saslname(name)
    return cls(m)
@classmethod
|
pythongssapi/python-gssapi
|
gssapi/_utils.py
|
import_gssapi_extension
|
python
|
def import_gssapi_extension(name):
    """Import a GSSAPI extension module by its short name.

    The extension is looked up at ``gssapi.raw.ext_<name>``.  If it
    cannot be imported (e.g. the underlying GSSAPI implementation does
    not support it), None is returned instead of raising.

    Args:
        name (str): the name of the extension (without the 'ext_' prefix)

    Returns:
        module: the extension module, or None if unavailable
    """
    module_path = 'gssapi.raw.ext_{0}'.format(name)
    try:
        __import__(module_path)
    except ImportError:
        return None
    return sys.modules[module_path]
|
Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method returns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L10-L30
| null |
import sys
import types
import six
import decorator as deco
from gssapi.raw.misc import GSSError
def flag_property(flag):
    """Create a boolean property backed by membership in ``self.flags``.

    Reading the property reports whether *flag* is currently present in
    the instance's ``flags`` set; assigning a truthy value adds it and a
    falsy value removes it.
    """
    def read_flag(self):
        return flag in self.flags

    def write_flag(self, value):
        if value:
            self.flags.add(flag)
        else:
            # discard (not remove): clearing an unset flag is a no-op
            self.flags.discard(flag)

    return property(read_flag, write_flag)
def inquire_property(name, doc=None):
    """Creates a property based on an inquire result

    This method creates a property that calls the
    :python:`_inquire` method, and returns the value of the
    requested information.

    Args:
        name (str): the name of the 'inquire' result information

    Returns:
        property: the created property
    """
    def inquire_property(self):
        if not self._started:
            msg = ("Cannot read {0} from a security context whose "
                   "establishment has not yet been started.")
            # Interpolate the property name so the error identifies which
            # attribute was accessed (previously the literal '{0}'
            # placeholder leaked into the message).
            raise AttributeError(msg.format(name))

        return getattr(self._inquire(**{name: True}), name)

    return property(inquire_property, doc=doc)
# use UTF-8 as the default encoding, like Python 3
_ENCODING = 'UTF-8'
def _get_encoding():
"""Gets the current encoding used for strings.
This value is used to encode and decode string
values like names.
Returns:
str: the current encoding
"""
return _ENCODING
def set_encoding(enc):
    """Set the encoding used for string handling.

    This value is used to encode and decode string
    values like names.

    Args:
        enc: the encoding to use
    """
    global _ENCODING

    _ENCODING = enc
def _encode_dict(d):
    """Encodes any relevant strings in a dict"""
    def enc(value):
        if isinstance(value, six.text_type):
            return value.encode(_ENCODING)
        return value

    return {enc(key): enc(val) for key, val in six.iteritems(d)}
# in case of Python 3, just use exception chaining
@deco.decorator
def catch_and_return_token(func, self, *args, **kwargs):
    """Optionally defer exceptions and return a token instead

    When `__DEFER_STEP_ERRORS__` is set on the implementing class
    or instance, methods wrapped with this wrapper will
    catch and save their :python:`GSSError` exceptions and
    instead return the result token attached to the exception.

    The exception can be later retrieved through :python:`_last_err`
    (and :python:`_last_tb` when Python 2 is in use).
    """
    try:
        return func(self, *args, **kwargs)
    except GSSError as e:
        if e.token is not None and self.__DEFER_STEP_ERRORS__:
            # stash the error so check_last_err can re-raise it later
            self._last_err = e
            # skip the "return func" line above in the traceback
            if six.PY2:
                self._last_tb = sys.exc_info()[2].tb_next.tb_next
            else:
                self._last_err.__traceback__ = e.__traceback__.tb_next
            return e.token
        else:
            # no token to hand back (or deferral disabled): propagate
            raise
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
    """Check and raise deferred errors before running the function

    This method checks :python:`_last_err` before running the wrapped
    function.  If present and not None, the exception will be raised
    with its original traceback.
    """
    if self._last_err is not None:
        try:
            if six.PY2:
                # reattach the traceback captured by catch_and_return_token
                six.reraise(type(self._last_err), self._last_err,
                            self._last_tb)
            else:
                # NB(directxman12): not using six.reraise in Python 3 leads
                # to cleaner tracebacks, and raise x is valid
                # syntax in Python 3 (unlike raise x, y, z)
                raise self._last_err
        finally:
            if six.PY2:
                del self._last_tb  # in case of cycles, break glass
            # deferred error is consumed exactly once
            self._last_err = None
    else:
        return func(self, *args, **kwargs)
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
    """Re-raise a deferred error before running the function (simple variant).

    NOTE(review): this definition shadows the six-aware version above;
    upstream these two appear to be alternatives selected by a Python-2
    check that this extract has dropped -- confirm against the original
    module.
    """
    if self._last_err is not None:
        try:
            # plain raise keeps the chained traceback on Python 3
            raise self._last_err
        finally:
            # deferred error is consumed exactly once
            self._last_err = None
    else:
        return func(self, *args, **kwargs)
class CheckLastError(type):
    """Check for a deferred error on all methods

    This metaclass applies the :python:`check_last_err` decorator
    to all methods not prefixed by '_'.

    Additionally, it enables `__DEFER_STEP_ERRORS__` by default.
    """
    def __new__(cls, name, parents, attrs):
        # opt classes into token/error deferral by default
        attrs['__DEFER_STEP_ERRORS__'] = True
        for attr_name in attrs:
            attr = attrs[attr_name]
            # wrap only methods
            if not isinstance(attr, types.FunctionType):
                continue
            # public methods only; private/dunder names are left untouched
            if attr_name[0] != '_':
                attrs[attr_name] = check_last_err(attr)
        return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
|
pythongssapi/python-gssapi
|
gssapi/_utils.py
|
inquire_property
|
python
|
def inquire_property(name, doc=None):
def inquire_property(self):
if not self._started:
msg = ("Cannot read {0} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc)
|
Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and return the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L46-L68
| null |
import sys
import types
import six
import decorator as deco
from gssapi.raw.misc import GSSError
def import_gssapi_extension(name):
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method retuns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None
def flag_property(flag):
def setter(self, val):
if val:
self.flags.add(flag)
else:
self.flags.discard(flag)
def getter(self):
return flag in self.flags
return property(getter, setter)
# use UTF-8 as the default encoding, like Python 3
_ENCODING = 'UTF-8'
def _get_encoding():
"""Gets the current encoding used for strings.
This value is used to encode and decode string
values like names.
Returns:
str: the current encoding
"""
return _ENCODING
def set_encoding(enc):
"""Sets the current encoding used for strings
This value is used to encode and decode string
values like names.
Args:
enc: the encoding to use
"""
global _ENCODING
_ENCODING = enc
def _encode_dict(d):
"""Encodes any relevant strings in a dict"""
def enc(x):
if isinstance(x, six.text_type):
return x.encode(_ENCODING)
else:
return x
return dict((enc(k), enc(v)) for k, v in six.iteritems(d))
# in case of Python 3, just use exception chaining
@deco.decorator
def catch_and_return_token(func, self, *args, **kwargs):
"""Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
The exception can be later retrived through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use).
"""
try:
return func(self, *args, **kwargs)
except GSSError as e:
if e.token is not None and self.__DEFER_STEP_ERRORS__:
self._last_err = e
# skip the "return func" line above in the traceback
if six.PY2:
self._last_tb = sys.exc_info()[2].tb_next.tb_next
else:
self._last_err.__traceback__ = e.__traceback__.tb_next
return e.token
else:
raise
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
"""Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
"""
if self._last_err is not None:
try:
if six.PY2:
six.reraise(type(self._last_err), self._last_err,
self._last_tb)
else:
# NB(directxman12): not using six.reraise in Python 3 leads
# to cleaner tracebacks, and raise x is valid
# syntax in Python 3 (unlike raise x, y, z)
raise self._last_err
finally:
if six.PY2:
del self._last_tb # in case of cycles, break glass
self._last_err = None
else:
return func(self, *args, **kwargs)
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self, *args, **kwargs)
class CheckLastError(type):
"""Check for a deferred error on all methods
This metaclass applies the :python:`check_last_err` decorator
to all methods not prefixed by '_'.
Additionally, it enabled `__DEFER_STEP_ERRORS__` by default.
"""
def __new__(cls, name, parents, attrs):
attrs['__DEFER_STEP_ERRORS__'] = True
for attr_name in attrs:
attr = attrs[attr_name]
# wrap only methods
if not isinstance(attr, types.FunctionType):
continue
if attr_name[0] != '_':
attrs[attr_name] = check_last_err(attr)
return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
|
pythongssapi/python-gssapi
|
gssapi/_utils.py
|
_encode_dict
|
python
|
def _encode_dict(d):
def enc(x):
if isinstance(x, six.text_type):
return x.encode(_ENCODING)
else:
return x
return dict((enc(k), enc(v)) for k, v in six.iteritems(d))
|
Encodes any relevant strings in a dict
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L101-L109
| null |
import sys
import types
import six
import decorator as deco
from gssapi.raw.misc import GSSError
def import_gssapi_extension(name):
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method retuns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None
def flag_property(flag):
def setter(self, val):
if val:
self.flags.add(flag)
else:
self.flags.discard(flag)
def getter(self):
return flag in self.flags
return property(getter, setter)
def inquire_property(name, doc=None):
"""Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and return the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
"""
def inquire_property(self):
if not self._started:
msg = ("Cannot read {0} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc)
# use UTF-8 as the default encoding, like Python 3
_ENCODING = 'UTF-8'
def _get_encoding():
"""Gets the current encoding used for strings.
This value is used to encode and decode string
values like names.
Returns:
str: the current encoding
"""
return _ENCODING
def set_encoding(enc):
"""Sets the current encoding used for strings
This value is used to encode and decode string
values like names.
Args:
enc: the encoding to use
"""
global _ENCODING
_ENCODING = enc
# in case of Python 3, just use exception chaining
@deco.decorator
def catch_and_return_token(func, self, *args, **kwargs):
"""Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
The exception can be later retrived through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use).
"""
try:
return func(self, *args, **kwargs)
except GSSError as e:
if e.token is not None and self.__DEFER_STEP_ERRORS__:
self._last_err = e
# skip the "return func" line above in the traceback
if six.PY2:
self._last_tb = sys.exc_info()[2].tb_next.tb_next
else:
self._last_err.__traceback__ = e.__traceback__.tb_next
return e.token
else:
raise
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
"""Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
"""
if self._last_err is not None:
try:
if six.PY2:
six.reraise(type(self._last_err), self._last_err,
self._last_tb)
else:
# NB(directxman12): not using six.reraise in Python 3 leads
# to cleaner tracebacks, and raise x is valid
# syntax in Python 3 (unlike raise x, y, z)
raise self._last_err
finally:
if six.PY2:
del self._last_tb # in case of cycles, break glass
self._last_err = None
else:
return func(self, *args, **kwargs)
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self, *args, **kwargs)
class CheckLastError(type):
"""Check for a deferred error on all methods
This metaclass applies the :python:`check_last_err` decorator
to all methods not prefixed by '_'.
Additionally, it enabled `__DEFER_STEP_ERRORS__` by default.
"""
def __new__(cls, name, parents, attrs):
attrs['__DEFER_STEP_ERRORS__'] = True
for attr_name in attrs:
attr = attrs[attr_name]
# wrap only methods
if not isinstance(attr, types.FunctionType):
continue
if attr_name[0] != '_':
attrs[attr_name] = check_last_err(attr)
return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
|
pythongssapi/python-gssapi
|
gssapi/_utils.py
|
catch_and_return_token
|
python
|
def catch_and_return_token(func, self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except GSSError as e:
if e.token is not None and self.__DEFER_STEP_ERRORS__:
self._last_err = e
# skip the "return func" line above in the traceback
if six.PY2:
self._last_tb = sys.exc_info()[2].tb_next.tb_next
else:
self._last_err.__traceback__ = e.__traceback__.tb_next
return e.token
else:
raise
|
Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
The exception can be later retrieved through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use).
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L114-L139
| null |
import sys
import types
import six
import decorator as deco
from gssapi.raw.misc import GSSError
def import_gssapi_extension(name):
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method retuns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None
def flag_property(flag):
def setter(self, val):
if val:
self.flags.add(flag)
else:
self.flags.discard(flag)
def getter(self):
return flag in self.flags
return property(getter, setter)
def inquire_property(name, doc=None):
"""Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and return the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
"""
def inquire_property(self):
if not self._started:
msg = ("Cannot read {0} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc)
# use UTF-8 as the default encoding, like Python 3
_ENCODING = 'UTF-8'
def _get_encoding():
"""Gets the current encoding used for strings.
This value is used to encode and decode string
values like names.
Returns:
str: the current encoding
"""
return _ENCODING
def set_encoding(enc):
"""Sets the current encoding used for strings
This value is used to encode and decode string
values like names.
Args:
enc: the encoding to use
"""
global _ENCODING
_ENCODING = enc
def _encode_dict(d):
"""Encodes any relevant strings in a dict"""
def enc(x):
if isinstance(x, six.text_type):
return x.encode(_ENCODING)
else:
return x
return dict((enc(k), enc(v)) for k, v in six.iteritems(d))
# in case of Python 3, just use exception chaining
@deco.decorator
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
"""Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
"""
if self._last_err is not None:
try:
if six.PY2:
six.reraise(type(self._last_err), self._last_err,
self._last_tb)
else:
# NB(directxman12): not using six.reraise in Python 3 leads
# to cleaner tracebacks, and raise x is valid
# syntax in Python 3 (unlike raise x, y, z)
raise self._last_err
finally:
if six.PY2:
del self._last_tb # in case of cycles, break glass
self._last_err = None
else:
return func(self, *args, **kwargs)
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self, *args, **kwargs)
class CheckLastError(type):
"""Check for a deferred error on all methods
This metaclass applies the :python:`check_last_err` decorator
to all methods not prefixed by '_'.
Additionally, it enabled `__DEFER_STEP_ERRORS__` by default.
"""
def __new__(cls, name, parents, attrs):
attrs['__DEFER_STEP_ERRORS__'] = True
for attr_name in attrs:
attr = attrs[attr_name]
# wrap only methods
if not isinstance(attr, types.FunctionType):
continue
if attr_name[0] != '_':
attrs[attr_name] = check_last_err(attr)
return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
|
pythongssapi/python-gssapi
|
gssapi/_utils.py
|
check_last_err
|
python
|
def check_last_err(func, self, *args, **kwargs):
if self._last_err is not None:
try:
if six.PY2:
six.reraise(type(self._last_err), self._last_err,
self._last_tb)
else:
# NB(directxman12): not using six.reraise in Python 3 leads
# to cleaner tracebacks, and raise x is valid
# syntax in Python 3 (unlike raise x, y, z)
raise self._last_err
finally:
if six.PY2:
del self._last_tb # in case of cycles, break glass
self._last_err = None
else:
return func(self, *args, **kwargs)
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self, *args, **kwargs)
|
Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L143-L177
| null |
import sys
import types
import six
import decorator as deco
from gssapi.raw.misc import GSSError
def import_gssapi_extension(name):
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method retuns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None
def flag_property(flag):
def setter(self, val):
if val:
self.flags.add(flag)
else:
self.flags.discard(flag)
def getter(self):
return flag in self.flags
return property(getter, setter)
def inquire_property(name, doc=None):
"""Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and return the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
"""
def inquire_property(self):
if not self._started:
msg = ("Cannot read {0} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc)
# use UTF-8 as the default encoding, like Python 3
_ENCODING = 'UTF-8'
def _get_encoding():
"""Gets the current encoding used for strings.
This value is used to encode and decode string
values like names.
Returns:
str: the current encoding
"""
return _ENCODING
def set_encoding(enc):
"""Sets the current encoding used for strings
This value is used to encode and decode string
values like names.
Args:
enc: the encoding to use
"""
global _ENCODING
_ENCODING = enc
def _encode_dict(d):
"""Encodes any relevant strings in a dict"""
def enc(x):
if isinstance(x, six.text_type):
return x.encode(_ENCODING)
else:
return x
return dict((enc(k), enc(v)) for k, v in six.iteritems(d))
# in case of Python 3, just use exception chaining
@deco.decorator
def catch_and_return_token(func, self, *args, **kwargs):
"""Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
The exception can be later retrived through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use).
"""
try:
return func(self, *args, **kwargs)
except GSSError as e:
if e.token is not None and self.__DEFER_STEP_ERRORS__:
self._last_err = e
# skip the "return func" line above in the traceback
if six.PY2:
self._last_tb = sys.exc_info()[2].tb_next.tb_next
else:
self._last_err.__traceback__ = e.__traceback__.tb_next
return e.token
else:
raise
@deco.decorator
class CheckLastError(type):
"""Check for a deferred error on all methods
This metaclass applies the :python:`check_last_err` decorator
to all methods not prefixed by '_'.
Additionally, it enabled `__DEFER_STEP_ERRORS__` by default.
"""
def __new__(cls, name, parents, attrs):
attrs['__DEFER_STEP_ERRORS__'] = True
for attr_name in attrs:
attr = attrs[attr_name]
# wrap only methods
if not isinstance(attr, types.FunctionType):
continue
if attr_name[0] != '_':
attrs[attr_name] = check_last_err(attr)
return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_actual_get_cpu_info_from_cpuid
|
python
|
def _actual_get_cpu_info_from_cpuid(queue):
    '''
    Warning! This function has the potential to crash the Python runtime.
    Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
    It will safely call this function in another process.

    The result is pushed onto *queue* as a base64-encoded object (via
    _obj_to_b64); an empty dict signals that no info could be gathered.
    '''
    # Pipe all output to nothing -- this runs in a throwaway subprocess,
    # so any noise from the CPUID probing is deliberately discarded
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')
    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)
    # Return none if this is not an X86 CPU (CPUID is x86-only)
    if not arch in ['X86_32', 'X86_64']:
        queue.put(_obj_to_b64({}))
        return
    # Return none if SE Linux is in enforcing mode
    # (it would block executing the generated machine code)
    cpuid = CPUID()
    if cpuid.is_selinux_enforcing:
        queue.put(_obj_to_b64({}))
        return
    # Get the cpu info from the CPUID register
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()
    processor_brand = cpuid.get_processor_brand(max_extension_support)
    # Get the measured Hz (scale 0: value is already in plain Hz)
    hz_actual = cpuid.get_raw_hz()
    hz_actual = _to_decimal_string(hz_actual)
    # Get the advertised Hz and scale from the brand string
    hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
    info = {
    'vendor_id_raw' : cpuid.get_vendor_id(),
    'hardware_raw' : '',
    'brand_raw' : processor_brand,
    'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
    'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
    'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
    'hz_actual' : _hz_short_to_full(hz_actual, 0),
    'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
    'l2_cache_line_size' : cache_info['line_size_b'],
    'l2_cache_associativity' : hex(cache_info['associativity']),
    'stepping' : info['stepping'],
    'model' : info['model'],
    'family' : info['family'],
    'processor_type' : info['processor_type'],
    'extended_model' : info['extended_model'],
    'extended_family' : info['extended_family'],
    'flags' : cpuid.get_flags(max_extension_support)
    }
    # Drop empty/falsy fields so callers only see data that was found
    info = {k: v for k, v in info.items() if v}
    queue.put(_obj_to_b64(info))
|
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1294-L1356
| null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''
	Central access point for every raw data source py-cpuinfo can read:
	values from the platform module, helper programs (dmesg, lscpu,
	sysctl, kstat, wmic, ...), /proc files and the Windows registry.

	The has_*() predicates report whether a source is available; the
	fetcher methods return a (returncode, stdout) tuple from
	_run_and_get_stdout(), except the winreg_*() methods which return
	registry values directly.
	'''
	# Facts gathered once, at import time, from the platform module.
	bits = platform.architecture()[0]        # '32bit' or '64bit'
	cpu_count = multiprocessing.cpu_count()  # logical CPU count
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()     # e.g. 'x86_64', 'armv7l'
	uname_string_raw = platform.uname()[5]   # "processor" field of uname
	# Whether the cpuid querying path may be attempted.
	can_cpuid = True
	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')
	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0
	@staticmethod
	def has_var_run_dmesg_boot():
		# Only meaningful on Linux-like systems that keep a boot log file.
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0
	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0
	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0
	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0
	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0
	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0
	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0
	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0
	@staticmethod
	def has_wmic():
		# Actually run wmic rather than just checking PATH, since it can
		# be present but broken.
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0
	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])
	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])
	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])
	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])
	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])
	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])
	@staticmethod
	def ibm_pa_features():
		# Dump the first 'ibm,pa-features' device-tree property found
		# (present on IBM POWER systems). Returns None when none exist.
		import glob
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])
	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
	@staticmethod
	def winreg_processor_brand():
		# Windows registry: human readable CPU name for processor 0.
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()
	@staticmethod
	def winreg_vendor_id_raw():
		# Windows registry: vendor string (e.g. the VendorIdentifier value).
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw
	@staticmethod
	def winreg_arch_string_raw():
		# Windows registry: PROCESSOR_ARCHITECTURE environment value.
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw
	@staticmethod
	def winreg_hz_actual():
		# Windows registry: clock speed in MHz, normalized to a decimal string.
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual
	@staticmethod
	def winreg_feature_bits():
		# Windows registry: raw FeatureSet bitmask for processor 0.
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''
	Run *command*, optionally piping its stdout into *pipe_command*,
	and return (returncode, stdout) of the final process.  The output
	bytes are decoded as UTF-8 on Python 3.
	'''
	from subprocess import Popen, PIPE
	p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
		# Close our handle to p1's stdout now that p2 owns it
		p1.stdout.close()
		last = p2
	else:
		last = p1
	output = last.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last.returncode, output
def _check_arch():
	'''Raise an Exception when running on an unsupported architecture.'''
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	supported = ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''
	Recursively encode unicode values to UTF-8 byte strings on
	Python 2; containers are rebuilt with converted contents.  On
	Python 3 every value passes through unchanged.
	'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(entry) for entry in input]
	if isinstance(input, dict):
		return {_utf_to_str(k): _utf_to_str(v) for k, v in input.items()}
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''
	Look up one of *field_names* in *raw_string* via
	_get_field_actual(), optionally converting the raw value with
	*convert_to*.  Falls back to *default_value* when the field is
	absent or the conversion fails.
	'''
	value = _get_field_actual(cant_be_number, raw_string, field_names)
	# Convert the value when requested; a failed conversion yields the default
	if value and convert_to:
		try:
			value = convert_to(value)
		except:
			value = default_value
	return default_value if value is None else value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''
	Parse a human readable frequency such as '2.80GHz' into the full
	(hz, fraction) integer pair used internally.  Returns (0, 0) when
	parsing fails.
	'''
	try:
		text = hz_string.strip().lower()
		# Map the unit suffix to a power-of-ten scale
		scale = None
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0
		# Keep only the numeric part, guaranteeing a decimal point
		digits = "".join(c for c in text if c.isdigit() or c == '.').strip()
		if '.' not in digits:
			digits += '.0'
		return _hz_short_to_full(digits, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''
	Render a short decimal Hz value as a human readable string such as
	'2.8000 GHz'.  Returns '0.0000 Hz' when the input cannot be parsed.
	'''
	try:
		# Expand to the raw Hz digit string
		left, right = _hz_short_to_full(ticks, scale)
		raw = '{0}.{1}'.format(left, right)
		# Note where the decimal point sits, then drop it
		dot_index = raw.index('.')
		digits = raw.replace('.', '')
		# Pick the largest unit the magnitude supports
		symbol, symbol_scale = "Hz", 0
		if dot_index > 9:
			symbol, symbol_scale = "GHz", 9
		elif dot_index > 6:
			symbol, symbol_scale = "MHz", 6
		elif dot_index > 3:
			symbol, symbol_scale = "KHz", 3
		# Re-insert the decimal point at the scaled position
		scaled = '{0}.{1}'.format(digits[:-symbol_scale-1], digits[-symbol_scale-1:])
		# Format with four decimals and append the unit
		friendly = '{0:.4f} {1}'.format(float(scaled), symbol)
		friendly = friendly.rstrip('0')
		return friendly
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''
	Pull the advertised frequency out of a CPU brand string such as
	'Intel(R) Core(TM) i5 CPU @ 2.80GHz'.  Returns a (decimal string,
	scale) pair, or ('0.0', 0) when no frequency is present.
	'''
	lowered = cpu_string.lower()
	# Just return 0 if the processor brand does not have the Hz
	if 'hz' not in lowered:
		return ('0.0', 0)
	# Map the unit suffix to a power-of-ten scale
	scale = 0
	if lowered.endswith('mhz'):
		scale = 6
	elif lowered.endswith('ghz'):
		scale = 9
	# The frequency follows an '@' when present, else it is the last token
	if '@' in lowered:
		hz = lowered.split('@')[1]
	else:
		hz = lowered.rsplit(None, 1)[1]
	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	return (_to_decimal_string(hz), scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''
	Parse an extended CPU brand string of the form used by BSD dmesg,
	e.g. 'Brand Name (Origin="X", stepping: 0x9) (2.80GHz)'.

	Returns a 7 tuple:
	(hz string, scale, brand, vendor_id, stepping, model, family)
	where any field that could not be found is None (or '0.0'/0 for
	the frequency pair).
	'''
	import re
	# Find all the strings inside brackets ()
	# (raw strings: '\(' in a normal literal is an invalid escape)
	starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
	ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]
	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				# int(x, 16) understands an optional '0x' prefix itself.
				# The previous value.lstrip('0x') stripped a *character
				# set* and crashed on values like '0x0' (int('', 16)).
				elif name == 'stepping':
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)
	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True
	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)
	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break
	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Parse BSD style dmesg output into a CPU info dict.

	Extracts the brand string, vendor, stepping/model/family, feature
	flags and advertised/actual frequencies.  Returns {} when nothing
	usable is found or parsing fails (deliberate best effort).
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]
		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string
		# If no CPU string was found, return {}
		if not best_string:
			return {}
		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
		# Origin
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]
				if name == 'origin':
					vendor_id = value.strip('"')
				# int(x, 16) handles the '0x' prefix; the previous
				# value.lstrip('0x') broke on values like '0x0'.
				elif name == 'stepping':
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)
		# Features
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])
		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
		# Drop any empty fields
		return {k: v for k, v in info.items() if v}
	# Narrowed from a bare except: still deliberate best effort
	except Exception:
		pass
	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''
	Best effort detection of SELinux enforcing mode via the sestatus
	tool.  Used to decide whether executable memory for the cpuid
	routines may be set up.
	'''
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False
	# Run the sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False
	lines = [line.strip().lower() for line in output.splitlines()]
	# An explicit "current mode:" line is authoritative
	for line in lines:
		if line.startswith("current mode:"):
			return line.endswith("enforcing")
	# Otherwise infer from whether heap/memory execution is allowed
	can_exec_heap = any(l.startswith("allow_execheap") and l.endswith("on") for l in lines)
	can_exec_memory = any(l.startswith("allow_execmem") and l.endswith("on") for l in lines)
	return (not can_exec_heap or not can_exec_memory)
class CPUID(object):
	'''
	Queries the x86 CPUID instruction (and the TSC via rdtsc) directly,
	by placing tiny hand-assembled machine code routines into executable
	memory and calling them through ctypes.

	Only meaningful on X86 CPUs; callers check the architecture before
	using it (see _get_cpu_info_from_cpuid).
	'''
	def __init__(self):
		# Windows process handle, created lazily in _asm_func()
		self.prochandle = None
		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()
	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Turn raw machine code bytes into a callable ctypes function.

		Allocates an executable memory region (VirtualAlloc/VirtualProtect
		on Windows, valloc + mprotect elsewhere), copies *byte_code* into
		it and wraps the address in a CFUNCTYPE with the given
		*restype*/*argtypes*.

		Returns (function, address).  The caller is responsible for
		freeing *address* afterwards (see _run_asm).

		NOTE(review): the mutable default for byte_code is harmless here
		because it is only rebound, never mutated.
		'''
		byte_code = bytes.join(b'', byte_code)
		address = None
		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000
			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")
			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")
			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")
			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")
			# Mark the memory segment as writeable only
			# (skipped under SELinux enforcing, where mprotect to
			# executable would be denied anyway)
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")
			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")
			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")
		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address
	def _run_asm(self, *byte_code):
		'''
		Assemble *byte_code* into a zero-argument function returning a
		uint32, call it once, free the executable memory, and return
		the result.
		'''
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)
		# Call the byte code like a function
		retval = func()
		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))
		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")
			ctypes.pythonapi.free(ctypes.c_void_p(address))
		return retval
	# FIXME: We should not have to use different instructions to
	# set eax to 0 or 1, on 32bit and 64bit machines.
	def _zero_eax(self):
		# Byte code that clears EAX (selects CPUID leaf 0)
		return (
			b"\x31\xC0"         # xor eax,eax
		)
	def _zero_ecx(self):
		# Byte code that clears ECX (sub-leaf selector for CPUID)
		return (
			b"\x31\xC9"         # xor ecx,ecx
		)
	def _one_eax(self):
		# Byte code that sets EAX to 1 (selects CPUID leaf 1)
		return (
			b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
		)
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		'''
		Return the 12 character vendor ID string assembled from the
		EBX, EDX and ECX registers of CPUID leaf 0.
		'''
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2"        # cpuid
			b"\x89\xD8"        # mov ax,bx
			b"\xC3"            # ret
		)
		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"        # cpuid
			b"\x89\xC8"        # mov ax,cx
			b"\xC3"            # ret
		)
		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"        # cpuid
			b"\x89\xD0"        # mov ax,dx
			b"\xC3"            # ret
		)
		# Each byte of the three registers is one ASCII letter of the name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)
		return vendor_id
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		'''
		Return stepping, model, family, processor type and extended
		model/family decoded from the EAX register of CPUID leaf 1.
		'''
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"        # cpuid
			b"\xC3"            # ret
		)
		# Get the CPU info
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits
		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		'''Return the highest supported extended CPUID leaf (EAX of leaf 0x80000000).'''
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2"             # cpuid
			b"\xC3"                 # ret
		)
		return max_extension_support
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		'''
		Return a sorted list of lowercase CPU feature flag names,
		gathered from CPUID leaf 1 (EDX/ECX), leaf 7 sub-leaf 0
		(EBX/ECX) and extended leaf 0x80000001 (EBX/ECX) when those
		leaves are supported.
		'''
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"        # cpuid
			b"\x89\xD0"        # mov ax,dx
			b"\xC3"            # ret
		)
		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"        # cpuid
			b"\x89\xC8"        # mov ax,cx
			b"\xC3"            # ret
		)
		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),
			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}
		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)
			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),
				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)
			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),
				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		flags.sort()
		return flags
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		'''
		Return the processor brand string assembled from extended CPUID
		leaves 0x80000002..0x80000004, or "" when they are unsupported.
		'''
		processor_brand = ""
		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC0"   # mov ax,ax
					b"\xC3"       # ret
				)
				# EBX
				ebx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD8"   # mov ax,bx
					b"\xC3"       # ret
				)
				# ECX
				ecx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC8"   # mov ax,cx
					b"\xC3"       # ret
				)
				# EDX
				edx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD0"   # mov ax,dx
					b"\xC3"       # ret
				)
				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)
		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()
		return processor_brand
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
	def get_cache(self, max_extension_support):
		'''
		Return L2 cache details decoded from extended CPUID leaf
		0x80000006, as a dict with 'size_kb', 'line_size_b' and
		'associativity' keys.  Returns {} when the leaf is unsupported.
		'''
		cache_info = {}
		# Just return if the cache feature is not supported
		if max_extension_support < 0x80000006:
			return cache_info
		# ECX
		ecx = self._run_asm(
			b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
			b"\x0f\xa2"              # cpuid
			b"\x89\xC8"              # mov ax,cx
			b"\xC3"                   # ret
		)
		cache_info = {
			'size_kb' : ecx & 0xFF,
			'line_size_b' : (ecx >> 12) & 0xF,
			'associativity' : (ecx >> 16) & 0xFFFF
		}
		return cache_info
	def get_ticks(self):
		'''
		Read the CPU time stamp counter (rdtsc) and return it as an
		int, using the routine matching the current pointer size.
		'''
		retval = None
		if DataSource.bits == '32bit':
			# Works on x86_32
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)
			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)
			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)
			retval = get_ticks_x86_64()
		return retval
	def get_raw_hz(self):
		'''
		Return the number of TSC ticks elapsed across a one second
		sleep, an approximation of the CPU clock rate in Hz.
		'''
		import time
		start = self.get_ticks()
		time.sleep(1)
		end = self.get_ticks()
		ticks = (end - start)
		return ticks
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue

	# Bail out early when cpuid cannot be used at all
	if not DataSource.can_cpuid:
		return {}

	# cpuid only exists on X86 family CPUs
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in ['X86_32', 'X86_64']:
		return {}

	try:
		# Run the real query in a child process, so a crash while
		# executing the generated machine code cannot take down this
		# interpreter
		result_queue = Queue()
		worker = Process(target=_actual_get_cpu_info_from_cpuid, args=(result_queue,))
		worker.start()

		# Poll until the child exits
		while worker.is_alive():
			worker.join(0)

		# A non zero exit code means the query failed
		if worker.exitcode != 0:
			return {}

		# Decode whatever the child sent back, if anything
		if not result_queue.empty():
			return _b64_to_obj(result_queue.get())
	except:
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found or cannot be parsed.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields (each _get_field call tries its key names in order)
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		# NOTE: rstrip('mhz') strips the characters m/h/z from the right,
		# not the literal suffix - fine here because the value ends in a unit
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz (parsed out of the brand string)
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
		'hardware_raw' : hardware,
		'brand_raw' : processor_brand,
		'l3_cache_size' : _to_friendly_bytes(cache_size),
		'flags' : flags,
		'vendor_id_raw' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6  # /proc/cpuinfo reports MHz
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one (actual speed always uses the MHz scale)
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Drop empty values so later sources can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found or its output cannot be parsed.
	'''
	try:
		hz_brand, scale = '0.0', 0

		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Isolate the frequency token, e.g. "2.40 GHz" or "1600 MHz"
		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		assert(i != -1)  # the frequency line always carries a Hz unit
		hz_brand = hz_brand[0 : i+2].strip().lower()

		if hz_brand.endswith('mhz'):
			scale = 6
		elif hz_brand.endswith('ghz'):
			scale = 9
		# NOTE: rstrip strips a character set, not a suffix; chained like
		# this it removes the trailing unit letters regardless of order
		hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
		hz_brand = _to_decimal_string(hz_brand)

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_advertised' : _hz_short_to_full(hz_brand, scale),
			'hz_actual' : _hz_short_to_full(hz_brand, scale),
		}

		# Drop empty values so later sources can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	Only fields actually present in the output make it into the result.
	'''
	try:
		# Just return {} if there is no lscpu
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# Prefer the max frequency; fall back to the current one
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			scale = 6  # lscpu reports MHz
			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		vendor_id = _get_field(False, output, None, None, 'Vendor ID')
		if vendor_id:
			info['vendor_id_raw'] = vendor_id

		brand = _get_field(False, output, None, None, 'Model name')
		if brand:
			info['brand_raw'] = brand

		family = _get_field(False, output, None, None, 'CPU family')
		if family and family.isdigit():
			info['family'] = int(family)

		stepping = _get_field(False, output, None, None, 'Stepping')
		if stepping and stepping.isdigit():
			info['stepping'] = int(stepping)

		model = _get_field(False, output, None, None, 'Model')
		if model and model.isdigit():
			info['model'] = int(model)

		l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
		if l1_data_cache_size:
			info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)

		l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
		if l1_instruction_cache_size:
			info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)

		l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
		if l2_cache_size:
			info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)

		l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
		if l3_cache_size:
			info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		# Drop empty values so later sources can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''

	# Just return {} if there is no dmesg
	if not DataSource.has_dmesg():
		return {}

	# If dmesg fails return {}
	# (identity comparison is the idiomatic None test; was "== None")
	returncode, output = DataSource.dmesg_a()
	if output is None or returncode != 0:
		return {}

	return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
	The bit layout follows the LoPAPR "ibm,pa-features" property definition.
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Filter out invalid characters from output, keeping only hex digits
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Get data converted to Uint32 chunks:
		# "left" holds bytes 0-3 of the property, "right" holds bytes 4-7
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags (bit 0 of each word is its most significant bit)
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),

			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),

			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		info = {k: v for k, v in info.items() if v}

		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''

	# Just return {} if there is no /var/run/dmesg.boot
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	# If dmesg.boot fails return {}
	# (identity comparison is the idiomatic None test; was "== None")
	returncode, output = DataSource.cat_var_run_dmesg_boot()
	if output is None or returncode != 0:
		return {}

	return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	'''
	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Flags come from three separate sysctl keys, merged and sorted
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# hw.cpufrequency is already in Hz, hence scale 0 below
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),
		'l2_cache_size' : _to_friendly_bytes(cache_size),
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Drop empty values so later sources can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''

	# Merge both parser versions, letting v2 results win on key clashes
	merged = {}
	for parser in (_get_cpu_info_from_sysinfo_v1, _get_cpu_info_from_sysinfo_v2):
		merged.update(parser())
	return merged
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo (older output format).
	Returns {} if sysinfo is not found or its output cannot be parsed.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields (vendor id and cache size are not exposed here)
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags: feature lines are the double-tab indented ones
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz; this format has no separate
		# actual speed, so advertised is reused
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, scale),
		'l2_cache_size' : _to_friendly_bytes(cache_size),
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Drop empty values so later sources can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo (newer output format).
	Returns {} if sysinfo is not found or its output cannot be parsed.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields (vendor id and cache size are not exposed here)
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		# stepping/model/family live inside the "Signature:" line
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags: collect the indented lines that follow each section header
		def get_subsection_flags(output):
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith(' ') and not line.startswith(' '): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)
			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz; the speed is on the first
		# non empty line, after "running at"
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, scale),
		'l2_cache_size' : _to_friendly_bytes(cache_size),
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Drop empty values so later sources can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Break the "key=value" lines into a dict, dropping empty values
		value = output.split("\n")
		value = [s.rstrip().split('=') for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz (parsed out of the brand string)
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz (CurrentClockSpeed is reported in MHz)
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes (wmic reports them as plain KB numbers)
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Get family, model, and stepping out of the Description/Caption
		# text, which looks like "... Family 6 Model 158 Stepping 10"
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Drop empty values so later sources can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz (parsed out of the brand string)
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		# (the registry reports the actual speed in MHz, hence scale 6)
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()

		def is_set(bit):
			# bit 0 tests the most significant bit of the 32 bit mask
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 6),

		'flags' : flags
		}

		# Drop empty values so later sources can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}

		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}

		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}

		# Various fields, parsed out of kstat's tab-prefixed key lines
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

		# Flags: the last line of isainfo -vb output
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# Convert from GHz/MHz string to Hz (clock_MHz is in MHz)
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)

		# Convert from GHz/MHz string to Hz (current_clock_Hz is already in
		# Hz, hence the 0 scale below)
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Drop empty values so later sources can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
try:
uname = DataSource.uname_string_raw.split(',')[0]
family, model, stepping = (None, None, None)
entries = uname.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'family' : family,
'model' : model,
'stepping' : stepping
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}

	# Every known source, in priority order; _copy_new_fields only adds
	# keys that are still missing, so earlier sources win on conflicts.
	sources = [
		_get_cpu_info_from_wmic,                   # Windows wmic
		_get_cpu_info_from_registry,               # Windows registry
		_get_cpu_info_from_proc_cpuinfo,           # /proc/cpuinfo
		_get_cpu_info_from_cpufreq_info,           # cpufreq-info
		_get_cpu_info_from_lscpu,                  # lscpu
		_get_cpu_info_from_sysctl,                 # sysctl
		_get_cpu_info_from_kstat,                  # kstat
		_get_cpu_info_from_dmesg,                  # dmesg
		_get_cpu_info_from_cat_var_run_dmesg_boot, # /var/run/dmesg.boot
		_get_cpu_info_from_ibm_pa_features,        # lsprop ibm,pa-features
		_get_cpu_info_from_sysinfo,                # sysinfo
		_get_cpu_info_from_cpuid,                  # CPU cpuid register
		_get_cpu_info_from_platform_uname,         # platform.uname
	]
	for source in sources:
		_copy_new_fields(info, source())

	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string.
	Returns the string "{}" when the worker subprocess fails.
	'''
	import json

	output = None

	# If running under pyinstaller, run normally
	if getattr(sys, 'frozen', False):
		info = _get_cpu_info_internal()
		output = json.dumps(info)
		output = "{0}".format(output)
	# if not running under pyinstaller, run in another process.
	# This is done because multiprocessing has a design flaw that
	# causes non main programs to run multiple times on Windows.
	else:
		from subprocess import Popen, PIPE
		# Re-run this very file with --json and capture its stdout
		command = [sys.executable, __file__, '--json']
		p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
		output = p1.communicate()[0]
		if p1.returncode != 0:
			return "{}"

		# Popen returns bytes on Python 3; decode back to a str
		if not IS_PY2:
			output = output.decode(encoding='UTF-8')

	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''
	import json

	# Decode the JSON produced by the worker, converting unicode strings
	# back to native str entries along the way
	raw_json = get_cpu_info_json()
	return json.loads(raw_json, object_hook = _utf_to_str)
def main():
	'''
	Command line entry point: prints the gathered CPU info either as JSON
	(--json), as just the version (--version), or as a readable report.
	Exits with status 1 on an unsupported arch or when nothing is found.
	'''
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()

	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# Human readable report; missing fields print as empty strings
		print('Python Version: {0}'.format(info.get('python_version', '')))
		print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
		print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
		print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
		print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
		print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
		print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
		print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
		print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
		print('Arch: {0}'.format(info.get('arch', '')))
		print('Bits: {0}'.format(info.get('bits', '')))
		print('Count: {0}'.format(info.get('count', '')))
		print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
		print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
		print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
		print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
		print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
		print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
		print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
		print('Stepping: {0}'.format(info.get('stepping', '')))
		print('Model: {0}'.format(info.get('model', '')))
		print('Family: {0}'.format(info.get('family', '')))
		print('Processor Type: {0}'.format(info.get('processor_type', '')))
		print('Extended Model: {0}'.format(info.get('extended_model', '')))
		print('Extended Family: {0}'.format(info.get('extended_family', '')))
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
	main()
else:
	# On import, fail fast if this architecture is not supported
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_cpuid
|
python
|
def _get_cpu_info_from_cpuid():
'''
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
Returns {} on non X86 cpus.
Returns {} if SELinux is in enforcing mode.
'''
from multiprocessing import Process, Queue
# Return {} if can't cpuid
if not DataSource.can_cpuid:
return {}
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return {} if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
return {}
try:
# Start running the function in a subprocess
queue = Queue()
p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
p.start()
# Wait for the process to end, while it is still alive
while p.is_alive():
p.join(0)
# Return {} if it failed
if p.exitcode != 0:
return {}
# Return the result, only if there is something to read
if not queue.empty():
output = queue.get()
return _b64_to_obj(output)
except:
pass
# Return {} if everything failed
return {}
|
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
Returns {} on non X86 cpus.
Returns {} if SELinux is in enforcing mode.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1358-L1399
|
[
"def _parse_arch(arch_string_raw):\n\timport re\n\n\tarch, bits = None, None\n\tarch_string_raw = arch_string_raw.lower()\n\n\t# X86\n\tif re.match('^i\\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):\n\t\tarch = 'X86_32'\n\t\tbits = 32\n\telif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):\n\t\tarch = 'X86_64'\n\t\tbits = 64\n\t# ARM\n\telif re.match('^armv8-a|aarch64$', arch_string_raw):\n\t\tarch = 'ARM_8'\n\t\tbits = 64\n\telif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):\n\t\tarch = 'ARM_7'\n\t\tbits = 32\n\telif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):\n\t\tarch = 'ARM_8'\n\t\tbits = 32\n\t# PPC\n\telif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):\n\t\tarch = 'PPC_32'\n\t\tbits = 32\n\telif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):\n\t\tarch = 'PPC_64'\n\t\tbits = 64\n\t# SPARC\n\telif re.match('^sparc32$|^sparc$', arch_string_raw):\n\t\tarch = 'SPARC_32'\n\t\tbits = 32\n\telif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):\n\t\tarch = 'SPARC_64'\n\t\tbits = 64\n\n\treturn (arch, bits)\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''
	Static facade over every external probe used to gather raw CPU data:
	files such as /proc/cpuinfo, platform command line tools, and the
	Windows registry. Keeping all of them here makes them easy to stub out.
	'''
	# Host snapshot, evaluated once at import time
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	# platform.uname()[5] is the "processor" field
	uname_string_raw = platform.uname()[5]
	can_cpuid = True
	# --- availability probes -------------------------------------------------
	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')
	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0
	@staticmethod
	def has_var_run_dmesg_boot():
		# BSD-style boot log, but only trusted on Linux here
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0
	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0
	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0
	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0
	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0
	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0
	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0
	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0
	@staticmethod
	def has_wmic():
		# wmic has no fixed install path, so probe by actually running it
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0
	# --- raw data commands ---------------------------------------------------
	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])
	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])
	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])
	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])
	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])
	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])
	@staticmethod
	def ibm_pa_features():
		import glob
		# Device-tree property exposed on IBM POWER machines; returns None
		# (implicitly) when no such property exists
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])
	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
	# --- Windows registry probes (require winreg import to have succeeded) ---
	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()
	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw
	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw
	@staticmethod
	def winreg_hz_actual():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		# Normalize the registry integer (MHz) to a decimal string
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual
	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''
	Run command (a list of args) and return (returncode, stdout).
	When pipe_command is given, command's stdout is piped into it and the
	result of the second process is returned instead. stdout is decoded to
	a UTF-8 str on Python 3. stderr is captured and discarded.
	'''
	from subprocess import Popen, PIPE
	p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
		# Close our handle so p1 gets SIGPIPE if p2 exits early
		p1.stdout.close()
		last_process = p2
	else:
		last_process = p1
	output = last_process.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last_process.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	'''Raise an Exception when the current CPU architecture is unsupported.'''
	supported = ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']
	arch, _bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''Recursively convert unicode strings to UTF-8 byte strings (Python 2
	only); lists and dicts are walked, anything else passes through.'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(entry) for entry in input]
	if isinstance(input, dict):
		converted = {}
		for key, value in input.items():
			converted[_utf_to_str(key)] = _utf_to_str(value)
		return converted
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''Look up one of field_names in raw_string via _get_field_actual,
	optionally convert the value with convert_to, and fall back to
	default_value when absent or when the conversion fails.'''
	value = _get_field_actual(cant_be_number, raw_string, field_names)
	if value and convert_to:
		try:
			value = convert_to(value)
		except:
			value = default_value
	return default_value if value is None else value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
	'''
	Convert a decimal string such as "2.8" plus a power-of-ten scale
	(9 for GHz, 6 for MHz) into an integer pair (whole_hz, fraction),
	e.g. ("2.8", 9) -> (2800000000, 0). Returns (0, 0) on any failure.
	'''
	try:
		# Make sure the number can be converted to a float
		ticks = float(ticks)
		ticks = '{0}'.format(ticks)
		# Scale the numbers: shift the decimal point right by 'scale'
		# digits, zero-padding as needed
		hz = ticks.lstrip('0')
		old_index = hz.index('.')
		hz = hz.replace('.', '')
		hz = hz.ljust(scale + old_index+1, '0')
		new_index = old_index + scale
		hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
		left, right = hz.split('.')
		left, right = int(left), int(right)
		return (left, right)
	except:
		# Best-effort by design: bad input (including scale=None) yields (0, 0)
		return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''
	Parse a human readable frequency string such as "2.8 GHz" and return
	the (whole_hz, fraction) integer pair from _hz_short_to_full.
	Returns (0, 0) on failure (including unknown units, where scale stays
	None and the conversion below throws).
	'''
	try:
		hz_string = hz_string.strip().lower()
		hz, scale = (None, None)
		if hz_string.endswith('ghz'):
			scale = 9
		elif hz_string.endswith('mhz'):
			scale = 6
		elif hz_string.endswith('hz'):
			scale = 0
		# Keep only the digits and decimal point
		hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip()
		if not '.' in hz:
			hz += '.0'
		hz, scale = _hz_short_to_full(hz, scale)
		return (hz, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''
	Format a decimal string plus power-of-ten scale into a human readable
	frequency such as "2.8000 GHz". Returns '0.0000 Hz' on any failure.
	'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)
		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')
		# Pick the unit symbol from the magnitude (digits before the dot)
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3
		# Get the Hz with the dot at the new scaled point
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
		# Format the ticks to have 4 numbers after the decimal
		result = '{0:.4f} {1}'.format(float(result), symbol)
		# NOTE(review): rstrip('0') is a no-op here because result always
		# ends with the unit symbol, so the four decimals are always kept --
		# confirm whether trimming trailing zeros was actually intended.
		result = result.rstrip('0')
		return result
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''
	Extract the frequency from a processor brand string such as
	"Intel(R) ... CPU @ 2.80GHz". Returns (hz_decimal_string, scale)
	where scale is 9 for GHz, 6 for MHz, and ('0.0', 0) when no
	frequency is present.
	'''
	# Just return 0 if the processor brand does not have the Hz
	if not 'hz' in cpu_string.lower():
		return ('0.0', 0)
	hz = cpu_string.lower()
	scale = 0
	if hz.endswith('mhz'):
		scale = 6
	elif hz.endswith('ghz'):
		scale = 9
	# The number is either right after the "@" or is the last token
	if '@' in hz:
		hz = hz.split('@')[1]
	else:
		hz = hz.rsplit(None, 1)[1]
	# rstrip strips *characters* -- that works here because both unit
	# suffixes are drawn from the letters "mhzg"
	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	hz = _to_decimal_string(hz)
	return (hz, scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''
	Parse an extended (BSD dmesg style) CPU brand string that may carry
	fields in trailing brackets, e.g.
	'... CPU @ 2.00GHz ("GenuineIntel" 686-class, stepping: 9, model: 0x3a)'.
	Returns (hz, scale, brand, vendor_id, stepping, model, family); any
	field that cannot be found is None.
	'''
	import re
	# Find all the strings inside brackets (); pairs i-th '(' with i-th ')',
	# which is sufficient for the non-nested bracket groups seen in dmesg
	starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
	ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]
	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# int() accepts an optional "0x" prefix with base 16.
					# (The old value.lstrip('0x') stripped *characters* and
					# raised on values such as "0x0".)
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)
	# Find the Processor Brand:
	# strip off the bracketed groups at the end, repeatedly
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True
	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)
	# Fall back to a Hz found inside the brackets after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break
	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Parse BSD style dmesg output for CPU details: brand, vendor, stepping,
	model, family, feature flags, and frequencies. Returns a dict holding
	only the truthy fields found, or {} when nothing could be parsed.
	Parsing is best-effort: any exception yields {}.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]
		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
		# Find the CPU string that has the most fields filled in
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string
		# If no CPU string was found, return {}
		if not best_string:
			return {}
		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
		# Origin line, e.g. ' Origin="GenuineIntel" Id=0x306a9 Family=0x6 ...'
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# int() accepts an optional "0x" prefix with base 16.
					# (The old value.lstrip('0x') stripped *characters* and
					# raised on values such as "0x0".)
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)
		# Feature flags, pulled from lines like ' Features=0x...<FPU,VME,...>'
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])
		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
		return {k: v for k, v in info.items() if v}
	except Exception:
		# Best-effort: a parse failure just means no dmesg based info
		pass
	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''
	Return True when SE Linux prevents allocating executable memory, which
	makes the CPUID machine-code path unusable. False when sestatus is
	missing, fails to run, or reports a permissive setup.
	'''
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False
	# Run the sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False
	# Figure out if explicitly in enforcing mode
	for line in output.splitlines():
		line = line.strip().lower()
		if line.startswith("current mode:"):
			if line.endswith("enforcing"):
				return True
			else:
				return False
	# Figure out if we can execute heap and execute memory
	can_selinux_exec_heap = False
	can_selinux_exec_memory = False
	for line in output.splitlines():
		line = line.strip().lower()
		if line.startswith("allow_execheap") and line.endswith("on"):
			can_selinux_exec_heap = True
		elif line.startswith("allow_execmem") and line.endswith("on"):
			can_selinux_exec_memory = True
	# Enforcing if either heap or memory execution is disallowed
	return (not can_selinux_exec_heap or not can_selinux_exec_memory)
class CPUID(object):
	def __init__(self):
		# Handle to the current process; created lazily, Windows only
		self.prochandle = None
		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()
	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Build a callable from raw machine code: allocate a memory segment,
		copy byte_code (a list of bytes objects) into it, mark it
		executable, and wrap the address in a ctypes function with the
		given restype/argtypes.
		Returns (function, address); the caller is responsible for freeing
		the address (see _run_asm).
		NOTE: the mutable default byte_code=[] is never mutated here, so
		the shared-default pitfall does not apply.
		'''
		byte_code = bytes.join(b'', byte_code)
		address = None
		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000
			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")
			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")
			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")
			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")
			# Mark the memory segment as writeable only
			# (skipped under SE Linux enforcing, where mprotect would be denied)
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")
			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")
			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")
		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address
	def _run_asm(self, *byte_code):
		'''
		Execute the given machine-code fragments as a zero-argument
		function, return its uint32 result, and free the allocated
		executable memory afterwards.
		'''
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)
		# Call the byte code like a function
		retval = func()
		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))
		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory before freeing it
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")
			ctypes.pythonapi.free(ctypes.c_void_p(address))
		return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
return (
b"\x31\xC0" # xor eax,eax
)
def _zero_ecx(self):
return (
b"\x31\xC9" # xor ecx,ecx
)
def _one_eax(self):
return (
b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		'''Return the 12 character vendor ID string (e.g. "GenuineIntel")
		read from CPUID leaf 0 registers EBX, EDX, ECX.'''
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2" # cpuid
			b"\x89\xD8" # mov ax,bx
			b"\xC3" # ret
		)
		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2" # cpuid
			b"\x89\xC8" # mov ax,cx
			b"\xC3" # ret
		)
		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2" # cpuid
			b"\x89\xD0" # mov ax,dx
			b"\xC3" # ret
		)
		# Each byte of EBX, EDX, ECX (in that order) is one ASCII character
		# of the vendor name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)
		return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		'''Return a dict with stepping/model/family/processor_type and the
		extended model/family fields decoded from EAX of CPUID leaf 1.'''
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2" # cpuid
			b"\xC3" # ret
		)
		# Get the CPU info
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits
		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		'''Return the highest supported extended CPUID function number
		(EAX of CPUID leaf 0x80000000).'''
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2" # cpuid
			b"\xC3" # ret
		)
		return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		'''
		Return a sorted list of lower-case CPU feature flag names, read
		from CPUID leaf 1 (EDX/ECX), extended-features leaf 7 (EBX/ECX)
		and extended leaf 0x80000001 (EBX/ECX) when supported.
		'''
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2" # cpuid
			b"\x89\xD0" # mov ax,dx
			b"\xC3" # ret
		)
		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2" # cpuid
			b"\x89\xC8" # mov ax,cx
			b"\xC3" # ret
		)
		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),
			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}
		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2" # cpuid
				b"\x89\xD8" # mov ax,bx
				b"\xC3" # ret
			)
			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2" # cpuid
				b"\x89\xC8" # mov ax,cx
				b"\xC3" # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),
				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2" # cpuid
				b"\x89\xD8" # mov ax,bx
				b"\xC3" # ret
			)
			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2" # cpuid
				b"\x89\xC8" # mov ax,cx
				b"\xC3" # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),
				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		flags.sort()
		return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		'''Return the 48 byte processor brand string read from CPUID leaves
		0x80000002-0x80000004, or "" when unsupported.'''
		processor_brand = ""
		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
			]
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction, # mov ax,0x8000000?
					b"\x0f\xa2" # cpuid
					b"\x89\xC0" # mov ax,ax
					b"\xC3" # ret
				)
				# EBX
				ebx = self._run_asm(
					instruction, # mov ax,0x8000000?
					b"\x0f\xa2" # cpuid
					b"\x89\xD8" # mov ax,bx
					b"\xC3" # ret
				)
				# ECX
				ecx = self._run_asm(
					instruction, # mov ax,0x8000000?
					b"\x0f\xa2" # cpuid
					b"\x89\xC8" # mov ax,cx
					b"\xC3" # ret
				)
				# EDX
				edx = self._run_asm(
					instruction, # mov ax,0x8000000?
					b"\x0f\xa2" # cpuid
					b"\x89\xD0" # mov ax,dx
					b"\xC3" # ret
				)
				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)
		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()
		return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
cache_info = {
'size_kb' : ecx & 0xFF,
'line_size_b' : (ecx >> 12) & 0xF,
'associativity' : (ecx >> 16) & 0xFFFF
}
return cache_info
	def get_ticks(self):
		'''Return the CPU time stamp counter as a 64 bit integer, read with
		RDTSC (preceded by CPUID as a serializing barrier).'''
		retval = None
		if DataSource.bits == '32bit':
			# Works on x86_32: the 64 bit counter is returned through two
			# uint32 out-parameters (high, low)
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55", # push bp
				b"\x89\xE5", # mov bp,sp
				b"\x31\xC0", # xor ax,ax
				b"\x0F\xA2", # cpuid
				b"\x0F\x31", # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13", # mov [bp+di],dx
				b"\x89\x01", # mov [bx+di],ax
				b"\x5D", # pop bp
				b"\xC3" # ret
				]
			)
			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)
			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64: EDX:EAX are combined into one uint64 return
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48", # dec ax
				b"\x31\xC0", # xor ax,ax
				b"\x0F\xA2", # cpuid
				b"\x0F\x31", # rdtsc
				b"\x48", # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48", # dec ax
				b"\x09\xD0", # or ax,dx
				b"\xC3", # ret
				]
			)
			retval = get_ticks_x86_64()
		# NOTE(review): the memory allocated by _asm_func is not freed here
		# (unlike _run_asm) -- confirm whether that leak is intentional.
		return retval
def get_raw_hz(self):
    '''Return how many time stamp counter ticks elapse in one second.'''
    import time

    # Sample the TSC twice, one second apart; the difference approximates
    # the CPU frequency in Hz.
    start = self.get_ticks()
    time.sleep(1)
    end = self.get_ticks()

    return end - start
def _actual_get_cpu_info_from_cpuid(queue):
    '''
    Warning! This function has the potential to crash the Python runtime.
    Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
    It will safely call this function in another process.

    The result is base64-encoded with _obj_to_b64 and put on *queue*;
    an empty dict is put there when CPUID cannot be used.
    '''

    # Pipe all output to nothing, so a crashing stub cannot spam the
    # parent's stdout/stderr.
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return none if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        queue.put(_obj_to_b64({}))
        return

    # Return none if SE Linux is in enforcing mode, since it would block
    # executing the generated machine code.
    cpuid = CPUID()
    if cpuid.is_selinux_enforcing:
        queue.put(_obj_to_b64({}))
        return

    # Get the cpu info from the CPUID register
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()

    processor_brand = cpuid.get_processor_brand(max_extension_support)

    # Get the actual Hz by timing the TSC (already a full Hz value, so scale 0)
    hz_actual = cpuid.get_raw_hz()
    hz_actual = _to_decimal_string(hz_actual)

    # Get the advertised Hz and scale out of the brand string
    hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

    # NOTE: 'info' (the raw CPUID fields from get_info() above) is folded
    # into the final result dict here and deliberately rebound.
    info = {
    'vendor_id_raw' : cpuid.get_vendor_id(),
    'hardware_raw' : '',
    'brand_raw' : processor_brand,

    'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
    'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
    'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
    'hz_actual' : _hz_short_to_full(hz_actual, 0),

    'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
    'l2_cache_line_size' : cache_info['line_size_b'],
    'l2_cache_associativity' : hex(cache_info['associativity']),

    'stepping' : info['stepping'],
    'model' : info['model'],
    'family' : info['family'],
    'processor_type' : info['processor_type'],
    'extended_model' : info['extended_model'],
    'extended_family' : info['extended_family'],
    'flags' : cpuid.get_flags(max_extension_support)
    }

    # Drop empty fields before handing the result back
    info = {k: v for k, v in info.items() if v}
    queue.put(_obj_to_b64(info))
def _get_cpu_info_from_proc_cpuinfo():
    '''
    Returns the CPU info gathered from /proc/cpuinfo.
    Returns {} if /proc/cpuinfo is not found.
    '''
    try:
        # Just return {} if there is no cpuinfo
        if not DataSource.has_proc_cpuinfo():
            return {}

        returncode, output = DataSource.cat_proc_cpuinfo()
        if returncode != 0:
            return {}

        # Various fields
        vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
        processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
        cache_size = _get_field(False, output, None, '', 'cache size')
        stepping = _get_field(False, output, int, 0, 'stepping')
        model = _get_field(False, output, int, 0, 'model')
        family = _get_field(False, output, int, 0, 'cpu family')
        hardware = _get_field(False, output, None, '', 'Hardware')

        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()

        # Convert from MHz string to Hz. /proc/cpuinfo reports MHz, so the
        # actual value starts with a scale of 6.
        hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
        hz_actual = hz_actual.lower().rstrip('mhz').strip()
        hz_actual = _to_decimal_string(hz_actual)
        scale_actual = 6

        # Convert from GHz/MHz string to Hz (scale comes from the brand string)
        hz_advertised, scale = (None, 0)
        try:
            hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        except Exception:
            pass

        info = {
        'hardware_raw' : hardware,
        'brand_raw' : processor_brand,

        'l3_cache_size' : _to_friendly_bytes(cache_size),
        'flags' : flags,
        'vendor_id_raw' : vendor_id,
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        }

        # Make the Hz the same for actual and advertised if missing any
        if not hz_advertised or hz_advertised == '0.0':
            hz_advertised = hz_actual
            scale = scale_actual
        elif not hz_actual or hz_actual == '0.0':
            hz_actual = hz_advertised
            # BUGFIX: the copied value carries the advertised scale (which
            # may be 9 for GHz brand strings), not the MHz scale that
            # /proc/cpuinfo normally uses. The old code always stored the
            # actual Hz with a hard-coded scale of 6, understating GHz
            # values by 1000x in this fallback.
            scale_actual = scale

        # Add the Hz if there is one
        if _hz_short_to_full(hz_advertised, scale) > (0, 0):
            info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
            info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
        if _hz_short_to_full(hz_actual, scale_actual) > (0, 0):
            info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale_actual)
            info['hz_actual'] = _hz_short_to_full(hz_actual, scale_actual)

        # Drop empty fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_cpufreq_info():
    '''
    Returns the CPU info gathered from cpufreq-info.
    Returns {} if cpufreq-info is not found.
    '''
    try:
        raw_hz, scale = '0.0', 0

        # Nothing to do without the cpufreq-info binary
        if not DataSource.has_cpufreq_info():
            return {}

        returncode, output = DataSource.cpufreq_info()
        if returncode != 0:
            return {}

        # Pull out the frequency text, e.g. "1.80 GHz" out of
        # "current CPU frequency is 1.80 GHz."
        raw_hz = output.split('current CPU frequency is')[1].split('\n')[0]
        i = raw_hz.find('Hz')
        assert(i != -1)
        raw_hz = raw_hz[0 : i+2].strip().lower()

        # Work out the scale from the unit suffix
        if raw_hz.endswith('mhz'):
            scale = 6
        elif raw_hz.endswith('ghz'):
            scale = 9
        raw_hz = raw_hz.rstrip('mhz').rstrip('ghz').strip()
        raw_hz = _to_decimal_string(raw_hz)

        # cpufreq-info gives one number, so use it for both advertised and actual
        info = {
            'hz_advertised_friendly' : _hz_short_to_friendly(raw_hz, scale),
            'hz_actual_friendly' : _hz_short_to_friendly(raw_hz, scale),
            'hz_advertised' : _hz_short_to_full(raw_hz, scale),
            'hz_actual' : _hz_short_to_full(raw_hz, scale),
        }

        # Drop empty fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_lscpu():
    '''
    Returns the CPU info gathered from lscpu.
    Returns {} if lscpu is not found.
    '''
    try:
        # Just return {} if there is no lscpu
        if not DataSource.has_lscpu():
            return {}

        returncode, output = DataSource.lscpu()
        if returncode != 0:
            return {}

        info = {}

        # Hz: lscpu reports MHz, hence the fixed scale of 6
        new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
        if new_hz:
            new_hz = _to_decimal_string(new_hz)
            scale = 6
            info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
            info['hz_actual'] = _hz_short_to_full(new_hz, scale)

        # Plain string fields, copied through as-is
        for key, label in (('vendor_id_raw', 'Vendor ID'), ('brand_raw', 'Model name')):
            field = _get_field(False, output, None, None, label)
            if field:
                info[key] = field

        # Integer fields, only taken when the value is all digits
        for key, label in (('family', 'CPU family'), ('stepping', 'Stepping'), ('model', 'Model')):
            field = _get_field(False, output, None, None, label)
            if field and field.isdigit():
                info[key] = int(field)

        # Cache sizes, normalized to a friendly byte string
        for key, label in (
                ('l1_data_cache_size', 'L1d cache'),
                ('l1_instruction_cache_size', 'L1i cache'),
                ('l2_cache_size', 'L2 cache'),
                ('l3_cache_size', 'L3 cache')):
            field = _get_field(False, output, None, None, label)
            if field:
                info[key] = _to_friendly_bytes(field)

        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()
            info['flags'] = flags

        # Drop empty fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_dmesg():
    '''
    Returns the CPU info gathered from dmesg.
    Returns {} if dmesg is not found or does not have the desired info.
    '''
    # No dmesg binary means there is nothing to parse
    if not DataSource.has_dmesg():
        return {}

    # Bail out when running dmesg fails or prints nothing
    returncode, output = DataSource.dmesg_a()
    if returncode != 0 or output is None:
        return {}

    return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
    '''
    Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
    Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.

    Only the 'flags' field is produced; the bit positions follow the
    LoPAPR table referenced above.
    '''
    try:
        # Just return {} if there is no lsprop
        if not DataSource.has_ibm_pa_features():
            return {}

        # If ibm,pa-features fails return {}
        returncode, output = DataSource.ibm_pa_features()
        if output == None or returncode != 0:
            return {}

        # Filter out invalid characters from output, keeping only hex digits
        value = output.split("ibm,pa-features")[1].lower()
        value = [s for s in value if s in list('0123456789abcfed')]
        value = ''.join(value)

        # Get data converted to Uint32 chunks (first 8 hex digits each)
        left = int(value[0 : 8], 16)
        right = int(value[8 : 16], 16)

        # Get the CPU flags
        flags = {
        # Byte 0
        'mmu' : _is_bit_set(left, 0),
        'fpu' : _is_bit_set(left, 1),
        'slb' : _is_bit_set(left, 2),
        'run' : _is_bit_set(left, 3),
        #'reserved' : _is_bit_set(left, 4),
        'dabr' : _is_bit_set(left, 5),
        'ne' : _is_bit_set(left, 6),
        'wtr' : _is_bit_set(left, 7),

        # Byte 1
        'mcr' : _is_bit_set(left, 8),
        'dsisr' : _is_bit_set(left, 9),
        'lp' : _is_bit_set(left, 10),
        'ri' : _is_bit_set(left, 11),
        'dabrx' : _is_bit_set(left, 12),
        'sprg3' : _is_bit_set(left, 13),
        'rislb' : _is_bit_set(left, 14),
        'pp' : _is_bit_set(left, 15),

        # Byte 2
        'vpm' : _is_bit_set(left, 16),
        'dss_2.05' : _is_bit_set(left, 17),
        #'reserved' : _is_bit_set(left, 18),
        'dar' : _is_bit_set(left, 19),
        #'reserved' : _is_bit_set(left, 20),
        'ppr' : _is_bit_set(left, 21),
        'dss_2.02' : _is_bit_set(left, 22),
        'dss_2.06' : _is_bit_set(left, 23),

        # Byte 3
        'lsd_in_dscr' : _is_bit_set(left, 24),
        'ugr_in_dscr' : _is_bit_set(left, 25),
        #'reserved' : _is_bit_set(left, 26),
        #'reserved' : _is_bit_set(left, 27),
        #'reserved' : _is_bit_set(left, 28),
        #'reserved' : _is_bit_set(left, 29),
        #'reserved' : _is_bit_set(left, 30),
        #'reserved' : _is_bit_set(left, 31),

        # Byte 4
        'sso_2.06' : _is_bit_set(right, 0),
        #'reserved' : _is_bit_set(right, 1),
        #'reserved' : _is_bit_set(right, 2),
        #'reserved' : _is_bit_set(right, 3),
        #'reserved' : _is_bit_set(right, 4),
        #'reserved' : _is_bit_set(right, 5),
        #'reserved' : _is_bit_set(right, 6),
        #'reserved' : _is_bit_set(right, 7),

        # Byte 5
        'le' : _is_bit_set(right, 8),
        'cfar' : _is_bit_set(right, 9),
        'eb' : _is_bit_set(right, 10),
        'lsq_2.07' : _is_bit_set(right, 11),
        #'reserved' : _is_bit_set(right, 12),
        #'reserved' : _is_bit_set(right, 13),
        #'reserved' : _is_bit_set(right, 14),
        #'reserved' : _is_bit_set(right, 15),

        # Byte 6
        'dss_2.07' : _is_bit_set(right, 16),
        #'reserved' : _is_bit_set(right, 17),
        #'reserved' : _is_bit_set(right, 18),
        #'reserved' : _is_bit_set(right, 19),
        #'reserved' : _is_bit_set(right, 20),
        #'reserved' : _is_bit_set(right, 21),
        #'reserved' : _is_bit_set(right, 22),
        #'reserved' : _is_bit_set(right, 23),

        # Byte 7
        #'reserved' : _is_bit_set(right, 24),
        #'reserved' : _is_bit_set(right, 25),
        #'reserved' : _is_bit_set(right, 26),
        #'reserved' : _is_bit_set(right, 27),
        #'reserved' : _is_bit_set(right, 28),
        #'reserved' : _is_bit_set(right, 29),
        #'reserved' : _is_bit_set(right, 30),
        #'reserved' : _is_bit_set(right, 31),
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        info = {
            'flags' : flags
        }
        # Drop empty fields
        info = {k: v for k, v in info.items() if v}

        return info
    except:
        return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
    '''
    Returns the CPU info gathered from /var/run/dmesg.boot.
    Returns {} if dmesg is not found or does not have the desired info.
    '''
    # No /var/run/dmesg.boot file means there is nothing to parse
    if not DataSource.has_var_run_dmesg_boot():
        return {}

    # Bail out when reading the file fails or yields nothing
    returncode, output = DataSource.cat_var_run_dmesg_boot()
    if returncode != 0 or output is None:
        return {}

    return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
    '''
    Returns the CPU info gathered from sysctl.
    Returns {} if sysctl is not found.
    '''
    try:
        # Just return {} if there is no sysctl
        if not DataSource.has_sysctl():
            return {}

        # If sysctl fails return {}
        returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
        if output == None or returncode != 0:
            return {}

        # Various fields
        vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
        processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
        cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
        model = _get_field(False, output, int, 0, 'machdep.cpu.model')
        family = _get_field(False, output, int, 0, 'machdep.cpu.family')

        # Gather the flags from the three sysctl feature fields
        flags = []
        for feature_field in ('machdep.cpu.features',
                'machdep.cpu.leaf7_features', 'machdep.cpu.extfeatures'):
            flags.extend(_get_field(False, output, None, '', feature_field).lower().split())
        flags.sort()

        # Advertised Hz comes from the brand string (GHz/MHz, so a scale);
        # hw.cpufrequency is already in Hz, hence scale 0 below.
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
        hz_actual = _to_decimal_string(hz_actual)

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),

        'l2_cache_size' : _to_friendly_bytes(cache_size),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop empty fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_sysinfo():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.
    '''
    # Merge both sysinfo output layouts; fields parsed by v2 win over v1
    merged = _get_cpu_info_from_sysinfo_v1()
    merged.update(_get_cpu_info_from_sysinfo_v2())
    return merged
def _get_cpu_info_from_sysinfo_v1():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.

    Parses the older (v1) sysinfo output layout, where stepping/model/family
    appear inline after ", stepping "/", model "/", family ".
    Any parse failure is swallowed and {} is returned.
    '''
    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
        model = int(output.split(', model ')[1].split(',')[0].strip())
        family = int(output.split(', family ')[1].split(',')[0].strip())

        # Flags: in this layout the flag lines are indented with two tabs
        flags = []
        for line in output.split('\n'):
            if line.startswith('\t\t'):
                for flag in line.strip().lower().split():
                    flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz; sysinfo v1 has no separate
        # actual frequency, so the advertised value is reused.
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = hz_advertised

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),

        'l2_cache_size' : _to_friendly_bytes(cache_size),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop empty fields (vendor_id and cache_size are always empty here)
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_sysinfo_v2():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.

    Parses the newer (v2) sysinfo output layout, where stepping/model/family
    live on a "Signature:" line and the flags are grouped into sub sections.
    Any parse failure is swallowed and {} is returned.
    '''
    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        signature = output.split('Signature:')[1].split('\n')[0].strip()
        #
        stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
        model = int(signature.split('model ')[1].split(',')[0].strip())
        family = int(signature.split('family ')[1].split(',')[0].strip())

        # Flags
        def get_subsection_flags(output):
            # Collect the indented flag entries that follow a section header.
            # NOTE(review): the two startswith() checks below look identical
            # here; one was probably a tab before the file's whitespace was
            # mangled -- verify against upstream py-cpuinfo.
            retval = []
            for line in output.split('\n')[1:]:
                if not line.startswith(' ') and not line.startswith(' '): break
                for entry in line.strip().lower().split(' '):
                    retval.append(entry)
            return retval

        flags = get_subsection_flags(output.split('Features: ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
        flags.sort()

        # Convert from GHz/MHz string to Hz, e.g. "... running at 2400MHz"
        lines = [n for n in output.split('\n') if n]
        raw_hz = lines[0].split('running at ')[1].strip().lower()
        hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
        hz_advertised = _to_decimal_string(hz_advertised)
        hz_actual = hz_advertised

        scale = 0
        if raw_hz.endswith('mhz'):
            scale = 6
        elif raw_hz.endswith('ghz'):
            scale = 9

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),

        'l2_cache_size' : _to_friendly_bytes(cache_size),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop empty fields (vendor_id and cache_size are always empty here)
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_wmic():
    '''
    Returns the CPU info gathered from WMI.
    Returns {} if not on Windows, or wmic is not installed.
    '''
    try:
        # Requires Windows and the wmic executable
        if not DataSource.is_windows or not DataSource.has_wmic():
            return {}

        returncode, output = DataSource.wmic_cpu()
        if output == None or returncode != 0:
            return {}

        # Turn the "key=value" lines into a dict, dropping empty values
        pairs = [line.rstrip().split('=') for line in output.split("\n") if '=' in line]
        value = {k: v for k, v in pairs if v}

        # Get the advertised MHz out of the brand string
        processor_brand = value.get('Name')
        hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

        # Get the actual MHz (CurrentClockSpeed is reported in MHz)
        hz_actual = value.get('CurrentClockSpeed')
        scale_actual = 6
        if hz_actual:
            hz_actual = _to_decimal_string(hz_actual)

        # Get cache sizes (wmic reports them in KB)
        l2_cache_size = value.get('L2CacheSize')
        if l2_cache_size:
            l2_cache_size = l2_cache_size + ' KB'

        l3_cache_size = value.get('L3CacheSize')
        if l3_cache_size:
            l3_cache_size = l3_cache_size + ' KB'

        # Get family, model, and stepping out of the description text,
        # e.g. "Intel64 Family 6 Model 158 Stepping 10". Each value is the
        # token immediately after its label.
        parsed = {'Family' : '', 'Model' : '', 'Stepping' : ''}
        description = value.get('Description') or value.get('Caption')
        entries = description.split(' ')
        for label in parsed:
            if label in entries and entries.index(label) < len(entries)-1:
                parsed[label] = int(entries[entries.index(label) + 1])
        family, model, stepping = parsed['Family'], parsed['Model'], parsed['Stepping']

        info = {
        'vendor_id_raw' : value.get('Manufacturer'),
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
        'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

        'l2_cache_size' : l2_cache_size,
        'l3_cache_size' : l3_cache_size,

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        }

        # Drop empty fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_registry():
    '''
    FIXME: Is missing many of the newer CPU flags like sse3
    Returns the CPU info gathered from the Windows Registry.
    Returns {} if not on Windows.
    '''
    try:
        # Just return {} if not on Windows
        if not DataSource.is_windows:
            return {}

        # Get the CPU name
        processor_brand = DataSource.winreg_processor_brand().strip()

        # Get the CPU vendor id
        vendor_id = DataSource.winreg_vendor_id_raw()

        # Get the CPU arch and bits
        # NOTE(review): arch and bits are computed but never used below
        arch_string_raw = DataSource.winreg_arch_string_raw()
        arch, bits = _parse_arch(arch_string_raw)

        # Get the actual CPU Hz (treated as MHz below, hence the scale of 6)
        hz_actual = DataSource.winreg_hz_actual()
        hz_actual = _to_decimal_string(hz_actual)

        # Get the advertised CPU Hz from the brand string
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

        # If advertised hz not found, use the actual hz
        if hz_advertised == '0.0':
            scale = 6
            hz_advertised = _to_decimal_string(hz_actual)

        # Get the CPU features
        feature_bits = DataSource.winreg_feature_bits()

        def is_set(bit):
            # Test one CPUID feature bit, counting from the high end
            mask = 0x80000000 >> bit
            retval = mask & feature_bits > 0
            return retval

        # http://en.wikipedia.org/wiki/CPUID
        # http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
        # http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
        flags = {
        'fpu' : is_set(0), # Floating Point Unit
        'vme' : is_set(1), # V86 Mode Extensions
        'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
        'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
        'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
        'msr' : is_set(5), # Model Specific Registers
        'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
        'mce' : is_set(7), # Machine Check Exception supported
        'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
        'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
        'sepamd' : is_set(10), # Fast system calls (AMD only)
        'sep' : is_set(11), # Fast system calls
        'mtrr' : is_set(12), # Memory Type Range Registers
        'pge' : is_set(13), # Page Global Enable
        'mca' : is_set(14), # Machine Check Architecture
        'cmov' : is_set(15), # Conditional MOVe instructions
        'pat' : is_set(16), # Page Attribute Table
        'pse36' : is_set(17), # 36 bit Page Size Extensions
        'serial' : is_set(18), # Processor Serial Number
        'clflush' : is_set(19), # Cache Flush
        #'reserved1' : is_set(20), # reserved
        'dts' : is_set(21), # Debug Trace Store
        'acpi' : is_set(22), # ACPI support
        'mmx' : is_set(23), # MultiMedia Extensions
        'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
        'sse' : is_set(25), # SSE instructions
        'sse2' : is_set(26), # SSE2 (WNI) instructions
        'ss' : is_set(27), # self snoop
        #'reserved2' : is_set(28), # reserved
        'tm' : is_set(29), # Automatic clock control
        'ia64' : is_set(30), # IA64 instructions
        '3dnow' : is_set(31) # 3DNow! instructions available
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 6),

        'flags' : flags
        }

        # Drop empty fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_kstat():
    '''
    Returns the CPU info gathered from isainfo and kstat.
    Returns {} if isainfo or kstat are not found.

    kstat output is parsed as "\\t<name> <value>" lines; any parse failure
    is swallowed and {} is returned.
    '''
    try:
        # Just return {} if there is no isainfo or kstat
        if not DataSource.has_isainfo() or not DataSource.has_kstat():
            return {}

        # If isainfo fails return {}
        returncode, flag_output = DataSource.isainfo_vb()
        if flag_output == None or returncode != 0:
            return {}

        # If kstat fails return {}
        returncode, kstat = DataSource.kstat_m_cpu_info()
        if kstat == None or returncode != 0:
            return {}

        # Various fields
        vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
        processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
        stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
        model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
        family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

        # Flags: the last line of "isainfo -vb" output lists them
        flags = flag_output.strip().split('\n')[-1].strip().lower().split()
        flags.sort()

        # clock_MHz is in MHz, so use a scale of 6
        scale = 6
        hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
        hz_advertised = _to_decimal_string(hz_advertised)

        # current_clock_Hz is already in Hz, so a scale of 0 is used below
        hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
        hz_actual = _to_decimal_string(hz_actual)

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop empty fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_platform_uname():
    '''
    Returns the family, model, and stepping parsed out of the raw uname
    string, e.g. "Intel64 Family 6 Model 158 Stepping 10, GenuineIntel".
    Returns {} on any failure.
    '''
    try:
        # Only the part before the first comma holds the fields we want
        uname = DataSource.uname_string_raw.split(',')[0]
        entries = uname.split(' ')

        def value_after(label):
            # Each value is the token right after its label, if present
            if label in entries and entries.index(label) < len(entries) - 1:
                return int(entries[entries.index(label) + 1])
            return None

        info = {
            'family' : value_after('Family'),
            'model' : value_after('Model'),
            'stepping' : value_after('Stepping')
        }

        # Drop empty fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_internal():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns {} if nothing is found.
    '''
    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Describe the interpreter, e.g. "3.8.2.final.0 (64 bit)"
    friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
    friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
    PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

    info = {
        'python_version' : PYTHON_VERSION,
        'cpuinfo_version' : CPUINFO_VERSION,
        'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
        'arch' : arch,
        'bits' : bits,
        'count' : DataSource.cpu_count,
        'arch_string_raw' : DataSource.arch_string_raw,
    }

    # Merge in the fields found by each source, keeping the original
    # query order.
    sources = (
        _get_cpu_info_from_wmic,                    # Windows wmic
        _get_cpu_info_from_registry,                # Windows registry
        _get_cpu_info_from_proc_cpuinfo,            # /proc/cpuinfo
        _get_cpu_info_from_cpufreq_info,            # cpufreq-info
        _get_cpu_info_from_lscpu,                   # lscpu
        _get_cpu_info_from_sysctl,                  # sysctl
        _get_cpu_info_from_kstat,                   # kstat
        _get_cpu_info_from_dmesg,                   # dmesg
        _get_cpu_info_from_cat_var_run_dmesg_boot,  # /var/run/dmesg.boot
        _get_cpu_info_from_ibm_pa_features,         # lsprop ibm,pa-features
        _get_cpu_info_from_sysinfo,                 # sysinfo
        _get_cpu_info_from_cpuid,                   # CPUID register
        _get_cpu_info_from_platform_uname,          # platform.uname
    )
    for source in sources:
        _copy_new_fields(info, source())

    return info
def get_cpu_info_json():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a json string
    '''
    import json

    # If running under pyinstaller there is no separate script file to
    # re-run, so gather the info directly in this process.
    if getattr(sys, 'frozen', False):
        return "{0}".format(json.dumps(_get_cpu_info_internal()))

    # Otherwise gather the info in a child process. This is done because
    # multiprocesing has a design flaw that causes non main programs to
    # run multiple times on Windows.
    from subprocess import Popen, PIPE

    proc = Popen([sys.executable, __file__, '--json'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
    output = proc.communicate()[0]
    if proc.returncode != 0:
        return "{}"

    # Popen gives back bytes on Python 3
    if not IS_PY2:
        output = output.decode(encoding='UTF-8')

    return output
def get_cpu_info():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a dict
    '''
    import json

    json_text = get_cpu_info_json()

    # Convert JSON to Python with non unicode strings
    return json.loads(json_text, object_hook = _utf_to_str)
def main():
    '''
    Command line entry point. Prints the CPU info as JSON (--json), the
    py-cpuinfo version (--version), or a human readable field listing.
    Exits with status 1 on an unsupported arch or when no info is found.
    '''
    from argparse import ArgumentParser
    import json

    # Parse args
    parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
    parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
    parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
    args = parser.parse_args()

    # BUGFIX: --version needs no CPU probing at all, so answer it before
    # the potentially slow arch check and detection below. Previously a
    # failed detection made "--version" exit(1) without printing anything.
    if args.version:
        print(CPUINFO_VERSION_STRING)
        return

    try:
        _check_arch()
    except Exception as err:
        sys.stderr.write(str(err) + "\n")
        sys.exit(1)

    info = _get_cpu_info_internal()

    if not info:
        sys.stderr.write("Failed to find cpu info\n")
        sys.exit(1)

    if args.json:
        print(json.dumps(info))
    else:
        print('Python Version: {0}'.format(info.get('python_version', '')))
        print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
        print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
        print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
        print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
        print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
        print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
        print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
        print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
        print('Arch: {0}'.format(info.get('arch', '')))
        print('Bits: {0}'.format(info.get('bits', '')))
        print('Count: {0}'.format(info.get('count', '')))
        print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
        print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
        print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
        print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
        print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
        print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
        print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
        print('Stepping: {0}'.format(info.get('stepping', '')))
        print('Model: {0}'.format(info.get('model', '')))
        print('Family: {0}'.format(info.get('family', '')))
        print('Processor Type: {0}'.format(info.get('processor_type', '')))
        print('Extended Model: {0}'.format(info.get('extended_model', '')))
        print('Extended Family: {0}'.format(info.get('extended_family', '')))
        print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
    main()
else:
    # When imported as a library, still refuse to load on unsupported arches
    _check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_proc_cpuinfo
|
python
|
def _get_cpu_info_from_proc_cpuinfo():
'''
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
'''
try:
# Just return {} if there is no cpuinfo
if not DataSource.has_proc_cpuinfo():
return {}
returncode, output = DataSource.cat_proc_cpuinfo()
if returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
cache_size = _get_field(False, output, None, '', 'cache size')
stepping = _get_field(False, output, int, 0, 'stepping')
model = _get_field(False, output, int, 0, 'model')
family = _get_field(False, output, int, 0, 'cpu family')
hardware = _get_field(False, output, None, '', 'Hardware')
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
# Convert from MHz string to Hz
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
hz_actual = hz_actual.lower().rstrip('mhz').strip()
hz_actual = _to_decimal_string(hz_actual)
# Convert from GHz/MHz string to Hz
hz_advertised, scale = (None, 0)
try:
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
except Exception:
pass
info = {
'hardware_raw' : hardware,
'brand_raw' : processor_brand,
'l3_cache_size' : _to_friendly_bytes(cache_size),
'flags' : flags,
'vendor_id_raw' : vendor_id,
'stepping' : stepping,
'model' : model,
'family' : family,
}
# Make the Hz the same for actual and advertised if missing any
if not hz_advertised or hz_advertised == '0.0':
hz_advertised = hz_actual
scale = 6
elif not hz_actual or hz_actual == '0.0':
hz_actual = hz_advertised
# Add the Hz if there is one
if _hz_short_to_full(hz_advertised, scale) > (0, 0):
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
if _hz_short_to_full(hz_actual, scale) > (0, 0):
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
info['hz_actual'] = _hz_short_to_full(hz_actual, 6)
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
|
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1401-L1472
|
[
"def has_proc_cpuinfo():\n\treturn os.path.exists('/proc/cpuinfo')\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	"""Static facade over every external data source py-cpuinfo reads.

	All members are static: host facts captured once at import time
	(``bits``, ``cpu_count``, ...) plus wrappers around external programs
	and files (/proc/cpuinfo, dmesg, sysctl, wmic, the Windows registry...).
	Each ``has_*`` method reports availability; each command wrapper
	returns the ``(returncode, stdout_text)`` tuple of
	``_run_and_get_stdout()``.  Centralizing these makes them easy to
	stub out in tests.
	"""
	# Host facts captured once at import time.
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	uname_string_raw = platform.uname()[5]
	# Flipped off elsewhere when running CPUID machine code is unsafe.
	can_cpuid = True

	# ---- availability checks ----
	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')

	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0

	@staticmethod
	def has_var_run_dmesg_boot():
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')

	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0

	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0

	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0

	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0

	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0

	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0

	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0

	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0

	@staticmethod
	def has_wmic():
		# wmic existence is probed by actually running it, not by PATH lookup.
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0

	# ---- command wrappers: each returns (returncode, stdout_text) ----
	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])

	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])

	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])

	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])

	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])

	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])

	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])

	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])

	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])

	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])

	@staticmethod
	def ibm_pa_features():
		import glob
		# Returns None implicitly when no ibm,pa-features node exists.
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])

	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

	# ---- Windows registry readers (only callable when winreg imported) ----
	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()

	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw

	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw

	@staticmethod
	def winreg_hz_actual():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual

	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	"""Run *command* and return ``(returncode, stdout_text)``.

	When *pipe_command* is given, *command*'s stdout is piped into it
	(like ``command | pipe_command``) and the pipe's result is returned.
	Output is decoded as UTF-8 on Python 3.
	"""
	from subprocess import Popen, PIPE
	first = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		last = Popen(pipe_command, stdin=first.stdout, stdout=PIPE, stderr=PIPE)
		# Let the first process receive SIGPIPE if the consumer exits early.
		first.stdout.close()
	else:
		last = first
	output = last.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	"""Raise if the host CPU architecture is not one py-cpuinfo supports."""
	supported = ('X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64')
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	"""Recursively convert unicode strings to UTF-8 byte strings on Python 2.

	On Python 3 this is effectively a deep identity transform.
	"""
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(item) for item in input]
	if isinstance(input, dict):
		return dict((_utf_to_str(key), _utf_to_str(value))
			for key, value in input.items())
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	"""Look up a ``name: value`` field in *raw_string* and optionally convert it.

	Args:
		cant_be_number (bool): skip purely numeric values when True.
		raw_string (str): text to search (e.g. /proc/cpuinfo output).
		convert_to (callable or None): applied to the raw value when found.
		default_value: returned when the field is missing or conversion fails.
		*field_names (str): candidate field names, matched case-insensitively.
	Returns:
		The converted value, the raw string value, or *default_value*.
	"""
	retval = _get_field_actual(cant_be_number, raw_string, field_names)

	# Convert the return value
	if retval and convert_to:
		try:
			retval = convert_to(retval)
		# FIX: narrowed from a bare ``except:`` -- a failed conversion falls
		# back to the default without swallowing KeyboardInterrupt/SystemExit.
		except Exception:
			retval = default_value

	# Return the default if there is no return value
	if retval is None:
		retval = default_value

	return retval
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	"""Parse a human-friendly string like '2.80GHz' into the (hz, scale)
	integer pair produced by _hz_short_to_full().

	Returns (0, 0) on any failure (including strings with no Hz suffix:
	scale stays None and _hz_short_to_full raises, which is caught here).
	"""
	try:
		hz_string = hz_string.strip().lower()
		hz, scale = (None, None)

		if hz_string.endswith('ghz'):
			scale = 9
		elif hz_string.endswith('mhz'):
			scale = 6
		elif hz_string.endswith('hz'):
			scale = 0

		hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip()
		if not '.' in hz:
			hz += '.0'

		hz, scale = _hz_short_to_full(hz, scale)

		return (hz, scale)
	# FIX: narrowed from a bare ``except:`` so real interrupts propagate.
	except Exception:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	"""Format a short Hz string plus scale as a friendly string.

	e.g. ('2.8', 9) -> '2.8000 GHz'.  Picks the largest fitting unit
	(Hz/KHz/MHz/GHz), keeps four decimals, strips superfluous zeroes.
	Returns '0.0000 Hz' on any parse failure.
	"""
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)

		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')

		# Get the Hz symbol and scale
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3

		# Get the Hz with the dot at the new scaled point
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])

		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		result = result.rstrip('0')

		return result
	# FIX: narrowed from a bare ``except:`` so real interrupts propagate.
	except Exception:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	"""Extract (decimal_hz_string, scale) from a CPU brand string.

	e.g. 'Intel(R) ... CPU @ 2.80GHz' -> ('2.8', 9).  Returns ('0.0', 0)
	when the string carries no Hz marker at all.
	"""
	lowered = cpu_string.lower()

	# Just return 0 if the processor brand does not have the Hz
	if 'hz' not in lowered:
		return ('0.0', 0)

	scale = 0
	if lowered.endswith('mhz'):
		scale = 6
	elif lowered.endswith('ghz'):
		scale = 9

	# The frequency is either the token after '@' or the last token.
	hz = lowered.split('@')[1] if '@' in lowered else lowered.rsplit(None, 1)[1]
	hz = hz.rstrip('mhz').rstrip('ghz').strip()

	return (_to_decimal_string(hz), scale)
def _parse_cpu_brand_string_dx(cpu_string):
	"""Parse a BSD dmesg-style CPU line into its component fields.

	Handles strings of the form
	``Brand Name (2.80GHz) (origin: "GenuineIntel", family: 0x6, ...)``.

	Returns:
		(hz, scale, brand, vendor_id, stepping, model, family) -- any
		absent field is None (hz is '0.0' when no frequency was found).
	"""
	import re

	# Find all the strings inside brackets ()
	# FIX: raw strings -- '\(' in a plain literal is an invalid escape
	# sequence (SyntaxWarning/DeprecationWarning on modern Python).
	starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
	ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]

	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				# FIX: int(value, 16) parses '0x..'-prefixed hex directly;
				# the old value.lstrip('0x') stripped *characters* and broke
				# values like '0x0' (became '' -> ValueError).
				elif name == 'stepping':
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)

	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True

	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)

	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break

	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	"""Parse BSD-style dmesg text into a partial CPU info dict.

	Strategy: pull candidate 'CPU:'/'CPU0:'/'CPU1:' lines for the brand
	string, an ' Origin=' line for vendor/stepping/model/family, and
	'Features='/'AMD Features=' lines for the flag list.  Returns {} when
	nothing usable is found or on any parse error (whole body is wrapped
	in a try so a malformed dmesg never crashes the caller).
	"""
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
			output.split(' CPU1:')[1:] + \
			output.split(' CPU:')[1:] + \
			output.split('\nCPU0:')[1:] + \
			output.split('\nCPU1:')[1:] + \
			output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]
		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string
		# If no CPU string was found, return {}
		if not best_string:
			return {}
		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
		# Origin line overrides anything parsed from the brand string.
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]
				if name == 'origin':
					vendor_id = value.strip('"')
				# NOTE(review): lstrip('0x') strips *characters*, not a
				# prefix -- a value of '0x0' becomes '' and raises here
				# (caught by the outer try, yielding {}).  Verify inputs.
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)
		# Features: each category line looks like 'Features=0x...<flag,flag>'.
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])
		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
		# Drop falsy entries so callers can merge with _copy_new_fields().
		return {k: v for k, v in info.items() if v}
	except:
		#raise
		pass
	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	"""Return True when SELinux prevents executing generated machine code.

	True either when sestatus reports 'current mode: ... enforcing', or
	when heap/memory execution booleans are not both allowed.  False when
	sestatus is missing or fails to run.
	"""
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False

	# Run the sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False

	lines = [line.strip().lower() for line in output.splitlines()]

	# Figure out if explicitly in enforcing mode
	for line in lines:
		if line.startswith("current mode:"):
			return line.endswith("enforcing")

	# Figure out if we can execute heap and execute memory
	can_exec_heap = any(
		line.startswith("allow_execheap") and line.endswith("on")
		for line in lines)
	can_exec_memory = any(
		line.startswith("allow_execmem") and line.endswith("on")
		for line in lines)

	# De Morgan of the original (not heap or not memory).
	return not (can_exec_heap and can_exec_memory)
class CPUID(object):
	def __init__(self):
		# Handle to the current process (Windows only); created lazily the
		# first time _asm_func() needs to flush the instruction cache.
		self.prochandle = None

		# Figure out if SE Linux is on and in enforcing mode; if so we must
		# not make memory writeable+executable for the CPUID byte code.
		self.is_selinux_enforcing = _is_selinux_enforcing()
	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		"""Map raw machine code into executable memory and wrap it as a
		ctypes function.

		Returns (func, address): *func* is a CFUNCTYPE(restype, *argtypes)
		callable, *address* the allocation to free later (see _run_asm).
		On Windows: VirtualAlloc + memmove + VirtualProtect(PAGE_EXECUTE)
		+ FlushInstructionCache.  Elsewhere: valloc + mprotect + memmove.

		NOTE(review): the mutable default ``byte_code=[]`` is never mutated
		here, so it is harmless, but callers always pass their own tuple.
		"""
		byte_code = bytes.join(b'', byte_code)
		address = None

		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000

			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")

			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")

			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")

			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")

			# Mark the memory segment as writeable only
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")

			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")

			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")

		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address
	def _run_asm(self, *byte_code):
		"""Execute raw machine code and return its result as a uint32.

		The byte-code fragments are concatenated, mapped executable via
		_asm_func(), called with no arguments, then the memory is made
		non-executable again and freed before returning.
		"""
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)

		# Call the byte code like a function
		retval = func()

		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))

		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory before freeing it.
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")

			ctypes.pythonapi.free(ctypes.c_void_p(address))

		return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
	def _zero_eax(self):
		"""Byte code that zeroes EAX (selects CPUID leaf 0)."""
		return (
			b"\x31\xC0" # xor eax,eax
		)
	def _zero_ecx(self):
		"""Byte code that zeroes ECX (sub-leaf 0 for CPUID leaf 7)."""
		return (
			b"\x31\xC9" # xor ecx,ecx
		)
	def _one_eax(self):
		"""Byte code that sets EAX to 1 (selects CPUID leaf 1)."""
		return (
			b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
		)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		"""Return the 12-character vendor ID string from CPUID leaf 0.

		e.g. 'GenuineIntel' or 'AuthenticAMD'.  One _run_asm call per
		register since the harness can only return a single uint32.
		"""
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2"		# cpuid
			b"\x89\xD8"		# mov ax,bx
			b"\xC3"			# ret
		)

		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"		# cpuid
			b"\x89\xC8"		# mov ax,cx
			b"\xC3"			# ret
		)

		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"		# cpuid
			b"\x89\xD0"		# mov ax,dx
			b"\xC3"			# ret
		)

		# Each register holds 4 ASCII characters, one per byte, in the
		# architecturally defined EBX, EDX, ECX order.
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))

		vendor_id = ''.join(vendor_id)

		return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		"""Run CPUID leaf 1 and decode the processor signature from EAX.

		Returns a dict of the raw (un-combined) signature fields:
		stepping, model, family, processor_type, extended_model,
		extended_family.
		"""
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"		# cpuid
			b"\xC3"			# ret
		)

		# Decode the bit fields of the EAX signature.
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits

		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		"""Return the highest supported extended CPUID leaf (EAX=0x80000000)."""
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2"				# cpuid
			b"\xC3"					# ret
		)

		return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : _is_bit_set(edx, 0),
'vme' : _is_bit_set(edx, 1),
'de' : _is_bit_set(edx, 2),
'pse' : _is_bit_set(edx, 3),
'tsc' : _is_bit_set(edx, 4),
'msr' : _is_bit_set(edx, 5),
'pae' : _is_bit_set(edx, 6),
'mce' : _is_bit_set(edx, 7),
'cx8' : _is_bit_set(edx, 8),
'apic' : _is_bit_set(edx, 9),
#'reserved1' : _is_bit_set(edx, 10),
'sep' : _is_bit_set(edx, 11),
'mtrr' : _is_bit_set(edx, 12),
'pge' : _is_bit_set(edx, 13),
'mca' : _is_bit_set(edx, 14),
'cmov' : _is_bit_set(edx, 15),
'pat' : _is_bit_set(edx, 16),
'pse36' : _is_bit_set(edx, 17),
'pn' : _is_bit_set(edx, 18),
'clflush' : _is_bit_set(edx, 19),
#'reserved2' : _is_bit_set(edx, 20),
'dts' : _is_bit_set(edx, 21),
'acpi' : _is_bit_set(edx, 22),
'mmx' : _is_bit_set(edx, 23),
'fxsr' : _is_bit_set(edx, 24),
'sse' : _is_bit_set(edx, 25),
'sse2' : _is_bit_set(edx, 26),
'ss' : _is_bit_set(edx, 27),
'ht' : _is_bit_set(edx, 28),
'tm' : _is_bit_set(edx, 29),
'ia64' : _is_bit_set(edx, 30),
'pbe' : _is_bit_set(edx, 31),
'pni' : _is_bit_set(ecx, 0),
'pclmulqdq' : _is_bit_set(ecx, 1),
'dtes64' : _is_bit_set(ecx, 2),
'monitor' : _is_bit_set(ecx, 3),
'ds_cpl' : _is_bit_set(ecx, 4),
'vmx' : _is_bit_set(ecx, 5),
'smx' : _is_bit_set(ecx, 6),
'est' : _is_bit_set(ecx, 7),
'tm2' : _is_bit_set(ecx, 8),
'ssse3' : _is_bit_set(ecx, 9),
'cid' : _is_bit_set(ecx, 10),
#'reserved3' : _is_bit_set(ecx, 11),
'fma' : _is_bit_set(ecx, 12),
'cx16' : _is_bit_set(ecx, 13),
'xtpr' : _is_bit_set(ecx, 14),
'pdcm' : _is_bit_set(ecx, 15),
#'reserved4' : _is_bit_set(ecx, 16),
'pcid' : _is_bit_set(ecx, 17),
'dca' : _is_bit_set(ecx, 18),
'sse4_1' : _is_bit_set(ecx, 19),
'sse4_2' : _is_bit_set(ecx, 20),
'x2apic' : _is_bit_set(ecx, 21),
'movbe' : _is_bit_set(ecx, 22),
'popcnt' : _is_bit_set(ecx, 23),
'tscdeadline' : _is_bit_set(ecx, 24),
'aes' : _is_bit_set(ecx, 25),
'xsave' : _is_bit_set(ecx, 26),
'osxsave' : _is_bit_set(ecx, 27),
'avx' : _is_bit_set(ecx, 28),
'f16c' : _is_bit_set(ecx, 29),
'rdrnd' : _is_bit_set(ecx, 30),
'hypervisor' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : _is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
'sgx' : _is_bit_set(ebx, 2),
'bmi1' : _is_bit_set(ebx, 3),
'hle' : _is_bit_set(ebx, 4),
'avx2' : _is_bit_set(ebx, 5),
#'reserved' : _is_bit_set(ebx, 6),
'smep' : _is_bit_set(ebx, 7),
'bmi2' : _is_bit_set(ebx, 8),
'erms' : _is_bit_set(ebx, 9),
'invpcid' : _is_bit_set(ebx, 10),
'rtm' : _is_bit_set(ebx, 11),
'pqm' : _is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
'mpx' : _is_bit_set(ebx, 14),
'pqe' : _is_bit_set(ebx, 15),
'avx512f' : _is_bit_set(ebx, 16),
'avx512dq' : _is_bit_set(ebx, 17),
'rdseed' : _is_bit_set(ebx, 18),
'adx' : _is_bit_set(ebx, 19),
'smap' : _is_bit_set(ebx, 20),
'avx512ifma' : _is_bit_set(ebx, 21),
'pcommit' : _is_bit_set(ebx, 22),
'clflushopt' : _is_bit_set(ebx, 23),
'clwb' : _is_bit_set(ebx, 24),
'intel_pt' : _is_bit_set(ebx, 25),
'avx512pf' : _is_bit_set(ebx, 26),
'avx512er' : _is_bit_set(ebx, 27),
'avx512cd' : _is_bit_set(ebx, 28),
'sha' : _is_bit_set(ebx, 29),
'avx512bw' : _is_bit_set(ebx, 30),
'avx512vl' : _is_bit_set(ebx, 31),
'prefetchwt1' : _is_bit_set(ecx, 0),
'avx512vbmi' : _is_bit_set(ecx, 1),
'umip' : _is_bit_set(ecx, 2),
'pku' : _is_bit_set(ecx, 3),
'ospke' : _is_bit_set(ecx, 4),
#'reserved' : _is_bit_set(ecx, 5),
'avx512vbmi2' : _is_bit_set(ecx, 6),
#'reserved' : _is_bit_set(ecx, 7),
'gfni' : _is_bit_set(ecx, 8),
'vaes' : _is_bit_set(ecx, 9),
'vpclmulqdq' : _is_bit_set(ecx, 10),
'avx512vnni' : _is_bit_set(ecx, 11),
'avx512bitalg' : _is_bit_set(ecx, 12),
#'reserved' : _is_bit_set(ecx, 13),
'avx512vpopcntdq' : _is_bit_set(ecx, 14),
#'reserved' : _is_bit_set(ecx, 15),
#'reserved' : _is_bit_set(ecx, 16),
#'mpx0' : _is_bit_set(ecx, 17),
#'mpx1' : _is_bit_set(ecx, 18),
#'mpx2' : _is_bit_set(ecx, 19),
#'mpx3' : _is_bit_set(ecx, 20),
#'mpx4' : _is_bit_set(ecx, 21),
'rdpid' : _is_bit_set(ecx, 22),
#'reserved' : _is_bit_set(ecx, 23),
#'reserved' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
#'reserved' : _is_bit_set(ecx, 26),
#'reserved' : _is_bit_set(ecx, 27),
#'reserved' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
'sgx_lc' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : _is_bit_set(ebx, 0),
'vme' : _is_bit_set(ebx, 1),
'de' : _is_bit_set(ebx, 2),
'pse' : _is_bit_set(ebx, 3),
'tsc' : _is_bit_set(ebx, 4),
'msr' : _is_bit_set(ebx, 5),
'pae' : _is_bit_set(ebx, 6),
'mce' : _is_bit_set(ebx, 7),
'cx8' : _is_bit_set(ebx, 8),
'apic' : _is_bit_set(ebx, 9),
#'reserved' : _is_bit_set(ebx, 10),
'syscall' : _is_bit_set(ebx, 11),
'mtrr' : _is_bit_set(ebx, 12),
'pge' : _is_bit_set(ebx, 13),
'mca' : _is_bit_set(ebx, 14),
'cmov' : _is_bit_set(ebx, 15),
'pat' : _is_bit_set(ebx, 16),
'pse36' : _is_bit_set(ebx, 17),
#'reserved' : _is_bit_set(ebx, 18),
'mp' : _is_bit_set(ebx, 19),
'nx' : _is_bit_set(ebx, 20),
#'reserved' : _is_bit_set(ebx, 21),
'mmxext' : _is_bit_set(ebx, 22),
'mmx' : _is_bit_set(ebx, 23),
'fxsr' : _is_bit_set(ebx, 24),
'fxsr_opt' : _is_bit_set(ebx, 25),
'pdpe1gp' : _is_bit_set(ebx, 26),
'rdtscp' : _is_bit_set(ebx, 27),
#'reserved' : _is_bit_set(ebx, 28),
'lm' : _is_bit_set(ebx, 29),
'3dnowext' : _is_bit_set(ebx, 30),
'3dnow' : _is_bit_set(ebx, 31),
'lahf_lm' : _is_bit_set(ecx, 0),
'cmp_legacy' : _is_bit_set(ecx, 1),
'svm' : _is_bit_set(ecx, 2),
'extapic' : _is_bit_set(ecx, 3),
'cr8_legacy' : _is_bit_set(ecx, 4),
'abm' : _is_bit_set(ecx, 5),
'sse4a' : _is_bit_set(ecx, 6),
'misalignsse' : _is_bit_set(ecx, 7),
'3dnowprefetch' : _is_bit_set(ecx, 8),
'osvw' : _is_bit_set(ecx, 9),
'ibs' : _is_bit_set(ecx, 10),
'xop' : _is_bit_set(ecx, 11),
'skinit' : _is_bit_set(ecx, 12),
'wdt' : _is_bit_set(ecx, 13),
#'reserved' : _is_bit_set(ecx, 14),
'lwp' : _is_bit_set(ecx, 15),
'fma4' : _is_bit_set(ecx, 16),
'tce' : _is_bit_set(ecx, 17),
#'reserved' : _is_bit_set(ecx, 18),
'nodeid_msr' : _is_bit_set(ecx, 19),
#'reserved' : _is_bit_set(ecx, 20),
'tbm' : _is_bit_set(ecx, 21),
'topoext' : _is_bit_set(ecx, 22),
'perfctr_core' : _is_bit_set(ecx, 23),
'perfctr_nb' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
'dbx' : _is_bit_set(ecx, 26),
'perftsc' : _is_bit_set(ecx, 27),
'pci_l2i' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
#'reserved' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
    '''
    Return the processor brand string (e.g. "Intel(R) Core(TM) ...")
    read from CPUID leaves 0x80000002..0x80000004.

    max_extension_support: highest supported extended CPUID leaf, as
    returned by get_max_extension_support(). Returns "" when the brand
    string leaves are not supported.
    '''
    processor_brand = ""

    # Processor brand string
    if max_extension_support >= 0x80000004:
        instructions = [
            b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
            b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
            b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
        ]
        # Each leaf returns 16 characters spread over EAX/EBX/ECX/EDX;
        # four separate stub calls are needed to read the four registers.
        for instruction in instructions:
            # EAX
            eax = self._run_asm(
                instruction,  # mov ax,0x8000000?
                b"\x0f\xa2"   # cpuid
                b"\x89\xC0"   # mov ax,ax
                b"\xC3"       # ret
            )

            # EBX
            ebx = self._run_asm(
                instruction,  # mov ax,0x8000000?
                b"\x0f\xa2"   # cpuid
                b"\x89\xD8"   # mov ax,bx
                b"\xC3"       # ret
            )

            # ECX
            ecx = self._run_asm(
                instruction,  # mov ax,0x8000000?
                b"\x0f\xa2"   # cpuid
                b"\x89\xC8"   # mov ax,cx
                b"\xC3"       # ret
            )

            # EDX
            edx = self._run_asm(
                instruction,  # mov ax,0x8000000?
                b"\x0f\xa2"   # cpuid
                b"\x89\xD0"   # mov ax,dx
                b"\xC3"       # ret
            )

            # Combine each of the 4 bytes in each register into the string
            # (little-endian: lowest byte of each register comes first)
            for reg in [eax, ebx, ecx, edx]:
                for n in [0, 8, 16, 24]:
                    processor_brand += chr((reg >> n) & 0xFF)

    # Strip off any trailing NULL terminators and white space
    processor_brand = processor_brand.strip("\0").strip()

    return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
    '''
    Return a dict describing the L2 cache read from CPUID leaf
    0x80000006: size in KB, line size in bytes, and associativity.
    Returns an empty dict when that leaf is unsupported.
    '''
    cache_info = {}

    # Just return if the cache feature is not supported
    if max_extension_support < 0x80000006:
        return cache_info

    # ECX
    ecx = self._run_asm(
        b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
        b"\x0f\xa2"              # cpuid
        b"\x89\xC8"              # mov ax,cx
        b"\xC3"                  # ret
    )

    # CPUID 0x80000006 ECX layout (AMD APM vol. 3 / Wikipedia):
    #   bits 31:16  L2 cache size in KB
    #   bits 15:12  associativity
    #   bits 11:0   cache line size in bytes
    # BUGFIX: the previous code read the wrong bit range for every
    # field (size from 7:0, line size from 15:12, assoc from 31:16).
    cache_info = {
        'size_kb' : (ecx >> 16) & 0xFFFF,
        'line_size_b' : ecx & 0xFFF,
        'associativity' : (ecx >> 12) & 0xF
    }

    return cache_info
def get_ticks(self):
    '''
    Return the CPU timestamp counter (RDTSC) as an int, using a small
    machine-code stub matching the interpreter's bitness. Returns None
    when DataSource.bits is neither '32bit' nor '64bit'.
    '''
    retval = None

    if DataSource.bits == '32bit':
        # Works on x86_32
        # The 32 bit stub writes the 64 bit counter through two
        # uint32 out-parameters (high word, low word).
        restype = None
        argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
        get_ticks_x86_32, address = self._asm_func(restype, argtypes,
            [
            b"\x55",         # push bp
            b"\x89\xE5",     # mov bp,sp
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid  (serializes before rdtsc)
            b"\x0F\x31",     # rdtsc
            b"\x8B\x5D\x08", # mov bx,[di+0x8]
            b"\x8B\x4D\x0C", # mov cx,[di+0xc]
            b"\x89\x13",     # mov [bp+di],dx
            b"\x89\x01",     # mov [bx+di],ax
            b"\x5D",         # pop bp
            b"\xC3"          # ret
            ]
        )

        high = ctypes.c_uint32(0)
        low = ctypes.c_uint32(0)

        get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
        retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
    elif DataSource.bits == '64bit':
        # Works on x86_64
        # The 64 bit stub returns the full counter in RAX.
        # NOTE(review): the lone b"\x48" bytes are commented "dec ax"
        # but in 64 bit mode they act as the REX.W prefix of the
        # following instruction — verify before editing the stub.
        restype = ctypes.c_uint64
        argtypes = ()
        get_ticks_x86_64, address = self._asm_func(restype, argtypes,
            [
            b"\x48",         # dec ax
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid
            b"\x0F\x31",     # rdtsc
            b"\x48",         # dec ax
            b"\xC1\xE2\x20", # shl dx,byte 0x20
            b"\x48",         # dec ax
            b"\x09\xD0",     # or ax,dx
            b"\xC3",         # ret
            ]
        )
        retval = get_ticks_x86_64()
    return retval
def get_raw_hz(self):
    '''
    Estimate the raw CPU frequency in Hz by counting how many
    timestamp-counter ticks elapse during a one second sleep.
    '''
    import time

    tick_count_start = self.get_ticks()
    time.sleep(1)
    tick_count_end = self.get_ticks()

    return tick_count_end - tick_count_start
def _actual_get_cpu_info_from_cpuid(queue):
    '''
    Warning! This function has the potential to crash the Python runtime.
    Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
    It will safely call this function in another process.

    Puts a base64-encoded dict (possibly empty) on *queue* and returns None.
    '''

    # Pipe all output to nothing (this runs in a throwaway subprocess)
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return none if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        queue.put(_obj_to_b64({}))
        return

    # Return none if SE Linux is in enforcing mode
    # (executing generated machine code would be blocked)
    cpuid = CPUID()
    if cpuid.is_selinux_enforcing:
        queue.put(_obj_to_b64({}))
        return

    # Get the cpu info from the CPUID register
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()

    processor_brand = cpuid.get_processor_brand(max_extension_support)

    # Get the Hz and scale
    hz_actual = cpuid.get_raw_hz()
    hz_actual = _to_decimal_string(hz_actual)

    # Get the Hz and scale
    hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

    # NOTE: the dict literal below is fully evaluated before `info` is
    # rebound, so the stepping/model/family lookups read the old dict.
    info = {
    'vendor_id_raw' : cpuid.get_vendor_id(),
    'hardware_raw' : '',
    'brand_raw' : processor_brand,

    'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
    'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
    'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
    'hz_actual' : _hz_short_to_full(hz_actual, 0),

    'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
    'l2_cache_line_size' : cache_info['line_size_b'],
    'l2_cache_associativity' : hex(cache_info['associativity']),

    'stepping' : info['stepping'],
    'model' : info['model'],
    'family' : info['family'],
    'processor_type' : info['processor_type'],
    'extended_model' : info['extended_model'],
    'extended_family' : info['extended_family'],
    'flags' : cpuid.get_flags(max_extension_support)
    }

    # Drop empty/falsy fields before serializing
    info = {k: v for k, v in info.items() if v}
    queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
    '''
    Returns the CPU info gathered by querying the X86 cpuid register in a new process.
    Returns {} on non X86 cpus.
    Returns {} if SELinux is in enforcing mode.
    '''
    from multiprocessing import Process, Queue

    # Return {} if can't cpuid
    if not DataSource.can_cpuid:
        return {}

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return {} if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        return {}

    try:
        # Run the cpuid probe in a subprocess, because executing the
        # generated machine code can crash the interpreter it runs in.
        queue = Queue()
        p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
        p.start()

        # Wait for the subprocess to finish.
        # BUGFIX: the previous `while p.is_alive(): p.join(0)` busy-wait
        # spun a CPU core for the whole ~1s measurement, which skews the
        # very Hz value being measured; a plain join blocks idly.
        p.join()

        # Return {} if it failed
        if p.exitcode != 0:
            return {}

        # Return the result, only if there is something to read
        if not queue.empty():
            output = queue.get()
            return _b64_to_obj(output)
    except Exception:
        # Best-effort probe: any failure falls through to {}
        pass

    # Return {} if everything failed
    return {}
def _get_cpu_info_from_cpufreq_info():
'''
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
'''
try:
hz_brand, scale = '0.0', 0
if not DataSource.has_cpufreq_info():
return {}
returncode, output = DataSource.cpufreq_info()
if returncode != 0:
return {}
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
i = hz_brand.find('Hz')
assert(i != -1)
hz_brand = hz_brand[0 : i+2].strip().lower()
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = _to_decimal_string(hz_brand)
info = {
'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_advertised' : _hz_short_to_full(hz_brand, scale),
'hz_actual' : _hz_short_to_full(hz_brand, scale),
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_lscpu():
    '''
    Returns the CPU info gathered from lscpu.
    Returns {} if lscpu is not found.

    All fields are optional: each one is added only when lscpu's output
    actually contains it, then falsy values are filtered out at the end.
    '''
    try:
        if not DataSource.has_lscpu():
            return {}

        returncode, output = DataSource.lscpu()
        if returncode != 0:
            return {}

        info = {}

        # Prefer the max frequency, fall back to the current one
        new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
        if new_hz:
            new_hz = _to_decimal_string(new_hz)
            scale = 6  # lscpu reports MHz
            info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
            info['hz_actual'] = _hz_short_to_full(new_hz, scale)

        vendor_id = _get_field(False, output, None, None, 'Vendor ID')
        if vendor_id:
            info['vendor_id_raw'] = vendor_id

        brand = _get_field(False, output, None, None, 'Model name')
        if brand:
            info['brand_raw'] = brand

        family = _get_field(False, output, None, None, 'CPU family')
        if family and family.isdigit():
            info['family'] = int(family)

        stepping = _get_field(False, output, None, None, 'Stepping')
        if stepping and stepping.isdigit():
            info['stepping'] = int(stepping)

        model = _get_field(False, output, None, None, 'Model')
        if model and model.isdigit():
            info['model'] = int(model)

        l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
        if l1_data_cache_size:
            info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)

        l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
        if l1_instruction_cache_size:
            info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)

        l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
        if l2_cache_size:
            info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)

        l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
        if l3_cache_size:
            info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)

        # Flags ('Features' is the label used on some ARM systems)
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()
            info['flags'] = flags

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_dmesg():
    '''
    Returns the CPU info gathered from dmesg.
    Returns {} if dmesg is not found or does not have the desired info.
    '''

    # Just return {} if there is no dmesg
    if not DataSource.has_dmesg():
        return {}

    # If dmesg fails return {}
    # (idiom fix: compare to None with `is`, not `==`)
    returncode, output = DataSource.dmesg_a()
    if output is None or returncode != 0:
        return {}

    # Shared parser, also used for /var/run/dmesg.boot
    return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
    '''
    Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
    Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.

    The property is a raw byte string; the first 8 bytes are decoded as
    two big-endian uint32 values whose bits map to POWER CPU features
    per the LoPAPR table referenced above.
    '''
    try:
        # Just return {} if there is no lsprop
        if not DataSource.has_ibm_pa_features():
            return {}

        # If ibm,pa-features fails return {}
        returncode, output = DataSource.ibm_pa_features()
        if output == None or returncode != 0:
            return {}

        # Filter out invalid characters from output
        # (keep only hex digits after the property name)
        value = output.split("ibm,pa-features")[1].lower()
        value = [s for s in value if s in list('0123456789abcfed')]
        value = ''.join(value)

        # Get data converted to Uint32 chunks
        left = int(value[0 : 8], 16)
        right = int(value[8 : 16], 16)

        # Get the CPU flags
        # (bit 0 is the most significant bit of each uint32)
        flags = {
            # Byte 0
            'mmu' : _is_bit_set(left, 0),
            'fpu' : _is_bit_set(left, 1),
            'slb' : _is_bit_set(left, 2),
            'run' : _is_bit_set(left, 3),
            #'reserved' : _is_bit_set(left, 4),
            'dabr' : _is_bit_set(left, 5),
            'ne' : _is_bit_set(left, 6),
            'wtr' : _is_bit_set(left, 7),

            # Byte 1
            'mcr' : _is_bit_set(left, 8),
            'dsisr' : _is_bit_set(left, 9),
            'lp' : _is_bit_set(left, 10),
            'ri' : _is_bit_set(left, 11),
            'dabrx' : _is_bit_set(left, 12),
            'sprg3' : _is_bit_set(left, 13),
            'rislb' : _is_bit_set(left, 14),
            'pp' : _is_bit_set(left, 15),

            # Byte 2
            'vpm' : _is_bit_set(left, 16),
            'dss_2.05' : _is_bit_set(left, 17),
            #'reserved' : _is_bit_set(left, 18),
            'dar' : _is_bit_set(left, 19),
            #'reserved' : _is_bit_set(left, 20),
            'ppr' : _is_bit_set(left, 21),
            'dss_2.02' : _is_bit_set(left, 22),
            'dss_2.06' : _is_bit_set(left, 23),

            # Byte 3
            'lsd_in_dscr' : _is_bit_set(left, 24),
            'ugr_in_dscr' : _is_bit_set(left, 25),
            #'reserved' : _is_bit_set(left, 26),
            #'reserved' : _is_bit_set(left, 27),
            #'reserved' : _is_bit_set(left, 28),
            #'reserved' : _is_bit_set(left, 29),
            #'reserved' : _is_bit_set(left, 30),
            #'reserved' : _is_bit_set(left, 31),

            # Byte 4
            'sso_2.06' : _is_bit_set(right, 0),
            #'reserved' : _is_bit_set(right, 1),
            #'reserved' : _is_bit_set(right, 2),
            #'reserved' : _is_bit_set(right, 3),
            #'reserved' : _is_bit_set(right, 4),
            #'reserved' : _is_bit_set(right, 5),
            #'reserved' : _is_bit_set(right, 6),
            #'reserved' : _is_bit_set(right, 7),

            # Byte 5
            'le' : _is_bit_set(right, 8),
            'cfar' : _is_bit_set(right, 9),
            'eb' : _is_bit_set(right, 10),
            'lsq_2.07' : _is_bit_set(right, 11),
            #'reserved' : _is_bit_set(right, 12),
            #'reserved' : _is_bit_set(right, 13),
            #'reserved' : _is_bit_set(right, 14),
            #'reserved' : _is_bit_set(right, 15),

            # Byte 6
            'dss_2.07' : _is_bit_set(right, 16),
            #'reserved' : _is_bit_set(right, 17),
            #'reserved' : _is_bit_set(right, 18),
            #'reserved' : _is_bit_set(right, 19),
            #'reserved' : _is_bit_set(right, 20),
            #'reserved' : _is_bit_set(right, 21),
            #'reserved' : _is_bit_set(right, 22),
            #'reserved' : _is_bit_set(right, 23),

            # Byte 7
            #'reserved' : _is_bit_set(right, 24),
            #'reserved' : _is_bit_set(right, 25),
            #'reserved' : _is_bit_set(right, 26),
            #'reserved' : _is_bit_set(right, 27),
            #'reserved' : _is_bit_set(right, 28),
            #'reserved' : _is_bit_set(right, 29),
            #'reserved' : _is_bit_set(right, 30),
            #'reserved' : _is_bit_set(right, 31),
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        info = {
            'flags' : flags
        }
        info = {k: v for k, v in info.items() if v}

        return info
    except:
        return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
    '''
    Returns the CPU info gathered from /var/run/dmesg.boot.
    Returns {} if dmesg is not found or does not have the desired info.
    '''

    # Just return {} if there is no /var/run/dmesg.boot
    if not DataSource.has_var_run_dmesg_boot():
        return {}

    # If dmesg.boot fails return {}
    # (idiom fix: compare to None with `is`, not `==`)
    returncode, output = DataSource.cat_var_run_dmesg_boot()
    if output is None or returncode != 0:
        return {}

    # Shared parser, also used for live dmesg output
    return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
    '''
    Returns the CPU info gathered from sysctl.
    Returns {} if sysctl is not found.

    Reads the OS X style machdep.cpu.* and hw.cpufrequency keys.
    '''
    try:
        # Just return {} if there is no sysctl
        if not DataSource.has_sysctl():
            return {}

        # If sysctl fails return {}
        returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
        if output == None or returncode != 0:
            return {}

        # Various fields
        vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
        processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
        cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
        model = _get_field(False, output, int, 0, 'machdep.cpu.model')
        family = _get_field(False, output, int, 0, 'machdep.cpu.family')

        # Flags: merge the base, leaf7, and extended feature lists
        flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
        flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
        flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
        flags.sort()

        # Convert from GHz/MHz string to Hz
        # (advertised comes from the brand string, actual from hw.cpufrequency)
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
        hz_actual = _to_decimal_string(hz_actual)

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),

        'l2_cache_size' : _to_friendly_bytes(cache_size),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_sysinfo():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.

    Merges both sysinfo output formats; v2 fields override v1 fields.
    '''
    combined = {}
    for probe in (_get_cpu_info_from_sysinfo_v1, _get_cpu_info_from_sysinfo_v2):
        combined.update(probe())
    return combined
def _get_cpu_info_from_sysinfo_v1():
    '''
    Returns the CPU info gathered from sysinfo (older output format, Haiku).
    Returns {} if sysinfo is not found.
    '''
    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields
        # (sysinfo has no vendor id or cache size fields in this format)
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
        model = int(output.split(', model ')[1].split(',')[0].strip())
        family = int(output.split(', family ')[1].split(',')[0].strip())

        # Flags (double-tab indented lines list the feature flags)
        flags = []
        for line in output.split('\n'):
            if line.startswith('\t\t'):
                for flag in line.strip().lower().split():
                    flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz
        # (no measured frequency available; reuse the advertised one)
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = hz_advertised

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),

        'l2_cache_size' : _to_friendly_bytes(cache_size),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_sysinfo_v2():
    '''
    Returns the CPU info gathered from sysinfo (newer output format, Haiku).
    Returns {} if sysinfo is not found.
    '''
    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields
        # (sysinfo has no vendor id or cache size fields in this format)
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        signature = output.split('Signature:')[1].split('\n')[0].strip()
        #
        stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
        model = int(signature.split('model ')[1].split(',')[0].strip())
        family = int(signature.split('family ')[1].split(',')[0].strip())

        # Flags
        def get_subsection_flags(output):
            # Collect flags from the indented lines following a section
            # header, stopping at the first non-indented line.
            retval = []
            for line in output.split('\n')[1:]:
                if not line.startswith('                ') and not line.startswith('        '): break
                for entry in line.strip().lower().split(' '):
                    retval.append(entry)

            return retval

        flags = get_subsection_flags(output.split('Features: ')[1]) + \
                get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
                get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
        flags.sort()

        # Convert from GHz/MHz string to Hz
        # (parsed from the "... running at NNNNmhz" banner line)
        lines = [n for n in output.split('\n') if n]
        raw_hz = lines[0].split('running at ')[1].strip().lower()
        hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
        hz_advertised = _to_decimal_string(hz_advertised)
        hz_actual = hz_advertised

        scale = 0
        if raw_hz.endswith('mhz'):
            scale = 6
        elif raw_hz.endswith('ghz'):
            scale = 9

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),

        'l2_cache_size' : _to_friendly_bytes(cache_size),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_wmic():
    '''
    Returns the CPU info gathered from WMI.
    Returns {} if not on Windows, or wmic is not installed.
    '''
    try:
        # Just return {} if not Windows or there is no wmic
        if not DataSource.is_windows or not DataSource.has_wmic():
            return {}

        returncode, output = DataSource.wmic_cpu()
        if output == None or returncode != 0:
            return {}

        # Break the list into key values pairs
        # NOTE(review): split('=') assumes one '=' per line; a value
        # containing '=' would make the dict comprehension raise (the
        # outer except then returns {}) — confirm against wmic output.
        value = output.split("\n")
        value = [s.rstrip().split('=') for s in value if '=' in s]
        value = {k: v for k, v in value if v}

        # Get the advertised MHz (from the marketing name string)
        processor_brand = value.get('Name')
        hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

        # Get the actual MHz
        hz_actual = value.get('CurrentClockSpeed')
        scale_actual = 6  # CurrentClockSpeed is reported in MHz
        if hz_actual:
            hz_actual = _to_decimal_string(hz_actual)

        # Get cache sizes (wmic reports plain KB numbers)
        l2_cache_size = value.get('L2CacheSize')
        if l2_cache_size:
            l2_cache_size = l2_cache_size + ' KB'

        l3_cache_size = value.get('L3CacheSize')
        if l3_cache_size:
            l3_cache_size = l3_cache_size + ' KB'

        # Get family, model, and stepping
        # parsed from e.g. "... Family 6 Model 42 Stepping 7"
        family, model, stepping = '', '', ''
        description = value.get('Description') or value.get('Caption')
        entries = description.split(' ')

        if 'Family' in entries and entries.index('Family') < len(entries)-1:
            i = entries.index('Family')
            family = int(entries[i + 1])

        if 'Model' in entries and entries.index('Model') < len(entries)-1:
            i = entries.index('Model')
            model = int(entries[i + 1])

        if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
            i = entries.index('Stepping')
            stepping = int(entries[i + 1])

        info = {
        'vendor_id_raw' : value.get('Manufacturer'),
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
        'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

        'l2_cache_size' : l2_cache_size,
        'l3_cache_size' : l3_cache_size,

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_registry():
    '''
    FIXME: Is missing many of the newer CPU flags like sse3
    Returns the CPU info gathered from the Windows Registry.
    Returns {} if not on Windows.
    '''
    try:
        # Just return {} if not on Windows
        if not DataSource.is_windows:
            return {}

        # Get the CPU name
        processor_brand = DataSource.winreg_processor_brand().strip()

        # Get the CPU vendor id
        vendor_id = DataSource.winreg_vendor_id_raw()

        # Get the CPU arch and bits
        arch_string_raw = DataSource.winreg_arch_string_raw()
        arch, bits = _parse_arch(arch_string_raw)

        # Get the actual CPU Hz (registry reports MHz)
        hz_actual = DataSource.winreg_hz_actual()
        hz_actual = _to_decimal_string(hz_actual)

        # Get the advertised CPU Hz (parsed from the brand string)
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

        # If advertised hz not found, use the actual hz
        if hz_advertised == '0.0':
            scale = 6
            hz_advertised = _to_decimal_string(hz_actual)

        # Get the CPU features
        # (bitmask stored MSB-first, hence the shifted 0x80000000 mask)
        feature_bits = DataSource.winreg_feature_bits()

        def is_set(bit):
            mask = 0x80000000 >> bit
            retval = mask & feature_bits > 0
            return retval

        # http://en.wikipedia.org/wiki/CPUID
        # http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
        # http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
        flags = {
            'fpu' : is_set(0), # Floating Point Unit
            'vme' : is_set(1), # V86 Mode Extensions
            'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
            'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
            'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
            'msr' : is_set(5), # Model Specific Registers
            'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
            'mce' : is_set(7), # Machine Check Exception supported
            'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
            'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
            'sepamd' : is_set(10), # Fast system calls (AMD only)
            'sep' : is_set(11), # Fast system calls
            'mtrr' : is_set(12), # Memory Type Range Registers
            'pge' : is_set(13), # Page Global Enable
            'mca' : is_set(14), # Machine Check Architecture
            'cmov' : is_set(15), # Conditional MOVe instructions
            'pat' : is_set(16), # Page Attribute Table
            'pse36' : is_set(17), # 36 bit Page Size Extensions
            'serial' : is_set(18), # Processor Serial Number
            'clflush' : is_set(19), # Cache Flush
            #'reserved1' : is_set(20), # reserved
            'dts' : is_set(21), # Debug Trace Store
            'acpi' : is_set(22), # ACPI support
            'mmx' : is_set(23), # MultiMedia Extensions
            'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
            'sse' : is_set(25), # SSE instructions
            'sse2' : is_set(26), # SSE2 (WNI) instructions
            'ss' : is_set(27), # self snoop
            #'reserved2' : is_set(28), # reserved
            'tm' : is_set(29), # Automatic clock control
            'ia64' : is_set(30), # IA64 instructions
            '3dnow' : is_set(31) # 3DNow! instructions available
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 6),

        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_kstat():
    '''
    Returns the CPU info gathered from isainfo and kstat (Solaris).
    Returns {} if isainfo or kstat are not found.
    '''
    try:
        # Just return {} if there is no isainfo or kstat
        if not DataSource.has_isainfo() or not DataSource.has_kstat():
            return {}

        # If isainfo fails return {}
        returncode, flag_output = DataSource.isainfo_vb()
        if flag_output == None or returncode != 0:
            return {}

        # If kstat fails return {}
        returncode, kstat = DataSource.kstat_m_cpu_info()
        if kstat == None or returncode != 0:
            return {}

        # Various fields (kstat output is tab-prefixed key/value lines)
        vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
        processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
        stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
        model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
        family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

        # Flags (last line of `isainfo -vb` output)
        flags = flag_output.strip().split('\n')[-1].strip().lower().split()
        flags.sort()

        # Convert from GHz/MHz string to Hz
        # (clock_MHz needs scaling by 10**6; current_clock_Hz is already Hz)
        scale = 6
        hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
        hz_advertised = _to_decimal_string(hz_advertised)

        # Convert from GHz/MHz string to Hz
        hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
        hz_actual = _to_decimal_string(hz_actual)

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_platform_uname():
try:
uname = DataSource.uname_string_raw.split(',')[0]
family, model, stepping = (None, None, None)
entries = uname.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'family' : family,
'model' : model,
'stepping' : stepping
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_internal():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns {} if nothing is found.
    '''

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
    friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
    PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

    info = {
        'python_version' : PYTHON_VERSION,
        'cpuinfo_version' : CPUINFO_VERSION,
        'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
        'arch' : arch,
        'bits' : bits,
        'count' : DataSource.cpu_count,
        'arch_string_raw' : DataSource.arch_string_raw,
    }

    # Merge in fields from each platform-specific source in order of
    # preference — _copy_new_fields only adds fields not already set,
    # so earlier sources win.
    sources = (
        _get_cpu_info_from_wmic,                    # Windows wmic
        _get_cpu_info_from_registry,                # Windows registry
        _get_cpu_info_from_proc_cpuinfo,            # /proc/cpuinfo
        _get_cpu_info_from_cpufreq_info,            # cpufreq-info
        _get_cpu_info_from_lscpu,                   # lscpu
        _get_cpu_info_from_sysctl,                  # sysctl
        _get_cpu_info_from_kstat,                   # kstat
        _get_cpu_info_from_dmesg,                   # dmesg
        _get_cpu_info_from_cat_var_run_dmesg_boot,  # /var/run/dmesg.boot
        _get_cpu_info_from_ibm_pa_features,         # lsprop ibm,pa-features
        _get_cpu_info_from_sysinfo,                 # sysinfo
        _get_cpu_info_from_cpuid,                   # cpuid register
        _get_cpu_info_from_platform_uname,          # platform.uname
    )
    for source in sources:
        _copy_new_fields(info, source())

    return info
def get_cpu_info_json():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a json string
    '''

    import json

    output = None

    # If running under pyinstaller, run normally
    # (there is no separate script file to re-invoke)
    if getattr(sys, 'frozen', False):
        info = _get_cpu_info_internal()
        output = json.dumps(info)
        output = "{0}".format(output)
    # if not running under pyinstaller, run in another process.
    # This is done because multiprocesing has a design flaw that
    # causes non main programs to run multiple times on Windows.
    else:
        from subprocess import Popen, PIPE

        command = [sys.executable, __file__, '--json']
        p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        output = p1.communicate()[0]
        if p1.returncode != 0:
            return "{}"

        # Subprocess output is bytes on Python 3
        if not IS_PY2:
            output = output.decode(encoding='UTF-8')

    return output
def get_cpu_info():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a dict
    '''

    import json

    # Decode the JSON payload, converting unicode strings back to
    # native str via the _utf_to_str hook.
    return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
    '''
    Command line entry point: prints CPU info as JSON (--json), the
    py-cpuinfo version (--version), or a human readable field listing.
    Exits with status 1 on unsupported architectures or when no CPU
    info could be found.
    '''
    from argparse import ArgumentParser
    import json

    # Parse args
    parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
    parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
    parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
    args = parser.parse_args()

    try:
        _check_arch()
    except Exception as err:
        sys.stderr.write(str(err) + "\n")
        sys.exit(1)

    info = _get_cpu_info_internal()

    if not info:
        sys.stderr.write("Failed to find cpu info\n")
        sys.exit(1)

    if args.json:
        print(json.dumps(info))
    elif args.version:
        print(CPUINFO_VERSION_STRING)
    else:
        # Human readable dump; every field is optional so default to ''
        print('Python Version: {0}'.format(info.get('python_version', '')))
        print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
        print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
        print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
        print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
        print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
        print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
        print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
        print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
        print('Arch: {0}'.format(info.get('arch', '')))
        print('Bits: {0}'.format(info.get('bits', '')))
        print('Count: {0}'.format(info.get('count', '')))
        print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
        print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
        print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
        print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
        print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
        print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
        print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
        print('Stepping: {0}'.format(info.get('stepping', '')))
        print('Model: {0}'.format(info.get('model', '')))
        print('Family: {0}'.format(info.get('family', '')))
        print('Processor Type: {0}'.format(info.get('processor_type', '')))
        print('Extended Model: {0}'.format(info.get('extended_model', '')))
        print('Extended Family: {0}'.format(info.get('extended_family', '')))
        print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
    main()
else:
    # When imported as a library, fail fast on unsupported architectures
    _check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_cpufreq_info
|
python
|
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		scale = 0
		if not DataSource.has_cpufreq_info():
			return {}
		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}
		# Isolate the "N.NN XHz" token that follows the frequency label
		frequency = output.split('current CPU frequency is')[1].split('\n')[0]
		end = frequency.find('Hz')
		assert(end != -1)
		frequency = frequency[: end + 2].strip().lower()
		# Work out the power-of-ten scale from the unit suffix
		if frequency.endswith('mhz'):
			scale = 6
		elif frequency.endswith('ghz'):
			scale = 9
		frequency = frequency.rstrip('mhz').rstrip('ghz').strip()
		frequency = _to_decimal_string(frequency)
		friendly = _hz_short_to_friendly(frequency, scale)
		full = _hz_short_to_full(frequency, scale)
		info = {
			'hz_advertised_friendly' : friendly,
			'hz_actual_friendly' : friendly,
			'hz_advertised' : full,
			'hz_actual' : full,
		}
		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
|
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1474-L1512
|
[
"def has_cpufreq_info():\n\treturn len(_program_paths('cpufreq-info')) > 0\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''
	Facade over every source of raw CPU information: values from the
	platform module, helper command line tools, /proc files, and the
	Windows registry. The has_*() probes report whether a source is
	available; the remaining methods fetch its raw output.
	'''
	# Static host facts, gathered once at import time
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	uname_string_raw = platform.uname()[5]
	can_cpuid = True

	# --- availability probes -------------------------------------------
	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')
	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0
	@staticmethod
	def has_var_run_dmesg_boot():
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0
	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0
	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0
	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0
	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0
	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0
	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0
	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0
	@staticmethod
	def has_wmic():
		# wmic is probed by actually running it, not by a PATH lookup
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0

	# --- command line fetchers; each returns (returncode, stdout) ------
	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])
	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])
	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])
	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])
	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])
	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])
	@staticmethod
	def ibm_pa_features():
		# NOTE: returns None (no tuple) when no ibm,pa-features node exists
		import glob
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])
	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

	# --- Windows registry fetchers -------------------------------------
	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()
	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw
	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw
	@staticmethod
	def winreg_hz_actual():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual
	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''
	Runs command, optionally piping its stdout into pipe_command.
	Returns (returncode, stdout_text) of the final process in the chain.
	'''
	from subprocess import Popen, PIPE
	# The first process is built the same way in both cases; the previous
	# version duplicated this Popen call and the decode logic per branch.
	p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
		# Close our handle so p1 sees a broken pipe if p2 exits early
		p1.stdout.close()
		last = p2
	else:
		last = p1
	output = last.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	'''Raises an Exception when the host CPU architecture is unsupported.'''
	supported = ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''Recursively encodes unicode values to UTF-8 byte strings on
	Python 2; anything else is returned unchanged.'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(item) for item in input]
	if isinstance(input, dict):
		return {
			_utf_to_str(k): _utf_to_str(v) for k, v in input.items()
		}
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''Looks up a field with _get_field_actual and converts it with
	convert_to; falls back to default_value on conversion failure or when
	no field was found.'''
	value = _get_field_actual(cant_be_number, raw_string, field_names)
	if value and convert_to:
		try:
			value = convert_to(value)
		except:
			value = default_value
	return default_value if value is None else value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''Parses a human-readable frequency like "2.80GHz" into the
	(whole_hz, fraction) form of _hz_short_to_full. Returns (0, 0) on
	failure.'''
	try:
		text = hz_string.strip().lower()
		# Scale from the unit suffix; ghz/mhz must be tested before hz
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0
		else:
			scale = None
		number = "".join(c for c in text if c.isdigit() or c == '.').strip()
		if '.' not in number:
			number += '.0'
		return _hz_short_to_full(number, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''Formats ticks/scale as a friendly string such as "2.8000 GHz".
	Returns '0.0000 Hz' on failure.'''
	try:
		whole, frac = _hz_short_to_full(ticks, scale)
		raw = '{0}.{1}'.format(whole, frac)
		dot_index = raw.index('.')
		digits = raw.replace('.', '')
		# Pick the largest unit the number of integer digits allows
		if dot_index > 9:
			symbol, scale = "GHz", 9
		elif dot_index > 6:
			symbol, scale = "MHz", 6
		elif dot_index > 3:
			symbol, scale = "KHz", 3
		else:
			symbol, scale = "Hz", 0
		# Re-insert the dot at the scaled position
		scaled = '{0}.{1}'.format(digits[:-scale - 1], digits[-scale - 1:])
		# Show four decimal places, then trim superfluous zeros
		result = '{0:.4f} {1}'.format(float(scaled), symbol)
		return result.rstrip('0')
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''Extracts the frequency from a brand string such as
	"Intel(R) ... CPU @ 2.80GHz". Returns (decimal_string, scale) where
	scale is the power of ten, or ('0.0', 0) when no Hz is present.'''
	lowered = cpu_string.lower()
	# Just return 0 if the processor brand does not have the Hz
	if 'hz' not in lowered:
		return ('0.0', 0)
	scale = 0
	if lowered.endswith('mhz'):
		scale = 6
	elif lowered.endswith('ghz'):
		scale = 9
	# The number follows '@' when present, otherwise it is the last token
	if '@' in lowered:
		hz = lowered.split('@')[1]
	else:
		hz = lowered.rsplit(None, 1)[1]
	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	return (_to_decimal_string(hz), scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''
	Parses an extended brand string of the form
	'Brand Name (origin: "X", stepping: 0x1, ...)' as printed by BSD
	dmesg. Returns (hz, scale, brand, vendor_id, stepping, model, family)
	with None for any field that is absent.
	'''
	import re
	# Find all the strings inside brackets ()
	starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
	ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]
	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				# FIX: int(value, 16) accepts both "0x1A" and "1A".
				# The old value.lstrip('0x') stripped ALL leading '0'
				# and 'x' characters, so "0" or "0x0" became "" and
				# int("", 16) raised ValueError.
				elif name == 'stepping':
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)
	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True
	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)
	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break
	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Parses BSD-style dmesg output for CPU details.
	Returns a dict containing whichever of vendor_id_raw, brand_raw,
	stepping, model, family, flags and the Hz fields could be found;
	{} when nothing was found or parsing failed.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]
		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string
		# If no CPU string was found, return {}
		if not best_string:
			return {}
		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
		# Origin
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]
				if name == 'origin':
					vendor_id = value.strip('"')
				# FIX: int(value, 16) accepts both "0x1A" and "1A"; the
				# old value.lstrip('0x') corrupted values such as "0"
				# (stripped to "") and raised ValueError.
				elif name == 'stepping':
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)
		# Features
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])
		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
		return {k: v for k, v in info.items() if v}
	except:
		#raise
		pass
	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''Returns True when SELinux prevents making heap/memory pages
	executable (which would break the CPUID byte-code trick).'''
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False
	# Run the sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False
	lines = [line.strip().lower() for line in output.splitlines()]
	# An explicit mode report settles the question immediately
	for line in lines:
		if line.startswith("current mode:"):
			return line.endswith("enforcing")
	# Otherwise check whether heap and memory may be made executable
	can_exec_heap = any(
		l.startswith("allow_execheap") and l.endswith("on") for l in lines
	)
	can_exec_memory = any(
		l.startswith("allow_execmem") and l.endswith("on") for l in lines
	)
	return not (can_exec_heap and can_exec_memory)
class CPUID(object):
def __init__(self):
	# Windows process handle; created lazily by _asm_func when the
	# instruction cache needs flushing
	self.prochandle = None
	# Figure out if SE Linux is on and in enforcing mode
	self.is_selinux_enforcing = _is_selinux_enforcing()
def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
	'''
	Copies raw machine code into an executable memory segment and wraps
	it in a ctypes function with the given restype/argtypes.
	Returns (function, address); the caller must free address afterwards
	(see _run_asm). NOTE(review): the mutable default byte_code=[] is
	immediately rebound, so it is never shared between calls.
	'''
	byte_code = bytes.join(b'', byte_code)
	address = None
	if DataSource.is_windows:
		# Allocate a memory segment the size of the byte code, and make it executable
		size = len(byte_code)
		# Alloc at least 1 page to ensure we own all pages that we want to change protection on
		if size < 0x1000: size = 0x1000
		MEM_COMMIT = ctypes.c_ulong(0x1000)
		PAGE_READWRITE = ctypes.c_ulong(0x4)
		pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
		pfnVirtualAlloc.restype = ctypes.c_void_p
		address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
		if not address:
			raise Exception("Failed to VirtualAlloc")
		# Copy the byte code into the memory segment
		memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
		if memmove(address, byte_code, size) < 0:
			raise Exception("Failed to memmove")
		# Enable execute permissions
		PAGE_EXECUTE = ctypes.c_ulong(0x10)
		old_protect = ctypes.c_ulong(0)
		pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
		res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
		if not res:
			raise Exception("Failed VirtualProtect")
		# Flush Instruction Cache
		# First, get process Handle
		if not self.prochandle:
			pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
			pfnGetCurrentProcess.restype = ctypes.c_void_p
			self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
		# Actually flush cache
		res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
		if not res:
			raise Exception("Failed FlushInstructionCache")
	else:
		# Allocate a memory segment the size of the byte code
		size = len(byte_code)
		pfnvalloc = ctypes.pythonapi.valloc
		pfnvalloc.restype = ctypes.c_void_p
		address = pfnvalloc(ctypes.c_size_t(size))
		if not address:
			raise Exception("Failed to valloc")
		# Mark the memory segment as writeable only
		# NOTE(review): presumably skipped under SELinux enforcing because
		# these mprotect transitions would be denied -- confirm
		if not self.is_selinux_enforcing:
			WRITE = 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
				raise Exception("Failed to mprotect")
		# Copy the byte code into the memory segment
		if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
			raise Exception("Failed to memmove")
		# Mark the memory segment as writeable and executable only
		if not self.is_selinux_enforcing:
			WRITE_EXECUTE = 0x2 | 0x4
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
				raise Exception("Failed to mprotect")
	# Cast the memory segment into a function
	functype = ctypes.CFUNCTYPE(restype, *argtypes)
	fun = functype(address)
	return fun, address
def _run_asm(self, *byte_code):
	'''
	Joins the given machine-code fragments, executes them, and returns
	the unsigned 32 bit value the code leaves in EAX. The executable
	memory segment is freed before returning.
	'''
	# Convert the byte code into a function that returns an int
	restype = ctypes.c_uint32
	argtypes = ()
	func, address = self._asm_func(restype, argtypes, byte_code)

	# Call the byte code like a function
	retval = func()

	byte_code = bytes.join(b'', byte_code)
	size = ctypes.c_size_t(len(byte_code))

	# Free the function memory segment
	if DataSource.is_windows:
		MEM_RELEASE = ctypes.c_ulong(0x8000)
		ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
	else:
		# Remove the executable tag on the memory
		READ_WRITE = 0x1 | 0x2
		if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
			raise Exception("Failed to mprotect")
		ctypes.pythonapi.free(ctypes.c_void_p(address))

	return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
	'''Byte code that zeroes EAX: xor eax, eax.'''
	return b"\x31\xC0"
def _zero_ecx(self):
	'''Byte code that zeroes ECX: xor ecx, ecx.'''
	return b"\x31\xC9"
def _one_eax(self):
	'''Byte code that loads 1 into EAX: mov eax, 0x1.'''
	return b"\xB8\x01\x00\x00\x00"
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
	'''Returns the 12 character vendor ID string from CPUID leaf 0,
	assembled from the EBX, EDX, ECX registers in that order.'''
	# Opcodes that copy each result register into EAX before returning
	movs = {
		'ebx': b"\x89\xD8",  # mov ax,bx
		'ecx': b"\x89\xC8",  # mov ax,cx
		'edx': b"\x89\xD0",  # mov ax,dx
	}
	regs = {}
	for reg_name, mov in movs.items():
		regs[reg_name] = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2" + mov + b"\xC3"  # cpuid; mov ax,reg; ret
		)
	# Each register holds four ASCII characters of the name, little endian
	letters = []
	for reg in [regs['ebx'], regs['edx'], regs['ecx']]:
		for shift in [0, 8, 16, 24]:
			letters.append(chr((reg >> shift) & 0xFF))
	return ''.join(letters)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
	'''Returns the stepping/model/family fields decoded from the EAX
	register of CPUID leaf 1.'''
	eax = self._run_asm(
		self._one_eax(),
		b"\x0f\xa2"  # cpuid
		b"\xC3"      # ret
	)
	# EAX field layout: [27:20 ext family][19:16 ext model]
	# [13:12 type][11:8 family][7:4 model][3:0 stepping]
	return {
		'stepping' : eax & 0xF,             # 4 bits
		'model' : (eax >> 4) & 0xF,         # 4 bits
		'family' : (eax >> 8) & 0xF,        # 4 bits
		'processor_type' : (eax >> 12) & 0x3,   # 2 bits
		'extended_model' : (eax >> 16) & 0xF,   # 4 bits
		'extended_family' : (eax >> 20) & 0xFF  # 8 bits
	}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
	'''Returns the highest extended CPUID function the CPU supports
	(CPUID leaf 0x80000000).'''
	code = (
		b"\xB8\x00\x00\x00\x80"  # mov ax,0x80000000
		b"\x0f\xa2"              # cpuid
		b"\xC3"                  # ret
	)
	return self._run_asm(code)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
	'''
	Returns a sorted list of lower case CPU feature flag names.

	Always reads CPUID leaf 1 (EDX/ECX); additionally reads leaf 7 and
	extended leaf 0x80000001 when max_extension_support says they exist.
	'''
	# --- CPUID leaf 1 feature bits ---
	# EDX
	edx = self._run_asm(
		self._one_eax(),
		b"\x0f\xa2"         # cpuid
		b"\x89\xD0"         # mov ax,dx
		b"\xC3"             # ret
	)
	# ECX
	ecx = self._run_asm(
		self._one_eax(),
		b"\x0f\xa2"         # cpuid
		b"\x89\xC8"         # mov ax,cx
		b"\xC3"             # ret
	)
	# Get the CPU flags
	flags = {
		'fpu' : _is_bit_set(edx, 0),
		'vme' : _is_bit_set(edx, 1),
		'de' : _is_bit_set(edx, 2),
		'pse' : _is_bit_set(edx, 3),
		'tsc' : _is_bit_set(edx, 4),
		'msr' : _is_bit_set(edx, 5),
		'pae' : _is_bit_set(edx, 6),
		'mce' : _is_bit_set(edx, 7),
		'cx8' : _is_bit_set(edx, 8),
		'apic' : _is_bit_set(edx, 9),
		#'reserved1' : _is_bit_set(edx, 10),
		'sep' : _is_bit_set(edx, 11),
		'mtrr' : _is_bit_set(edx, 12),
		'pge' : _is_bit_set(edx, 13),
		'mca' : _is_bit_set(edx, 14),
		'cmov' : _is_bit_set(edx, 15),
		'pat' : _is_bit_set(edx, 16),
		'pse36' : _is_bit_set(edx, 17),
		'pn' : _is_bit_set(edx, 18),
		'clflush' : _is_bit_set(edx, 19),
		#'reserved2' : _is_bit_set(edx, 20),
		'dts' : _is_bit_set(edx, 21),
		'acpi' : _is_bit_set(edx, 22),
		'mmx' : _is_bit_set(edx, 23),
		'fxsr' : _is_bit_set(edx, 24),
		'sse' : _is_bit_set(edx, 25),
		'sse2' : _is_bit_set(edx, 26),
		'ss' : _is_bit_set(edx, 27),
		'ht' : _is_bit_set(edx, 28),
		'tm' : _is_bit_set(edx, 29),
		'ia64' : _is_bit_set(edx, 30),
		'pbe' : _is_bit_set(edx, 31),

		'pni' : _is_bit_set(ecx, 0),
		'pclmulqdq' : _is_bit_set(ecx, 1),
		'dtes64' : _is_bit_set(ecx, 2),
		'monitor' : _is_bit_set(ecx, 3),
		'ds_cpl' : _is_bit_set(ecx, 4),
		'vmx' : _is_bit_set(ecx, 5),
		'smx' : _is_bit_set(ecx, 6),
		'est' : _is_bit_set(ecx, 7),
		'tm2' : _is_bit_set(ecx, 8),
		'ssse3' : _is_bit_set(ecx, 9),
		'cid' : _is_bit_set(ecx, 10),
		#'reserved3' : _is_bit_set(ecx, 11),
		'fma' : _is_bit_set(ecx, 12),
		'cx16' : _is_bit_set(ecx, 13),
		'xtpr' : _is_bit_set(ecx, 14),
		'pdcm' : _is_bit_set(ecx, 15),
		#'reserved4' : _is_bit_set(ecx, 16),
		'pcid' : _is_bit_set(ecx, 17),
		'dca' : _is_bit_set(ecx, 18),
		'sse4_1' : _is_bit_set(ecx, 19),
		'sse4_2' : _is_bit_set(ecx, 20),
		'x2apic' : _is_bit_set(ecx, 21),
		'movbe' : _is_bit_set(ecx, 22),
		'popcnt' : _is_bit_set(ecx, 23),
		'tscdeadline' : _is_bit_set(ecx, 24),
		'aes' : _is_bit_set(ecx, 25),
		'xsave' : _is_bit_set(ecx, 26),
		'osxsave' : _is_bit_set(ecx, 27),
		'avx' : _is_bit_set(ecx, 28),
		'f16c' : _is_bit_set(ecx, 29),
		'rdrnd' : _is_bit_set(ecx, 30),
		'hypervisor' : _is_bit_set(ecx, 31)
	}

	# Get a list of only the flags that are true
	flags = [k for k, v in flags.items() if v]

	# --- CPUID leaf 7 extended feature bits (if supported) ---
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
	if max_extension_support >= 7:
		# EBX
		ebx = self._run_asm(
			self._zero_ecx(),
			b"\xB8\x07\x00\x00\x00" # mov eax,7
			b"\x0f\xa2"         # cpuid
			b"\x89\xD8"         # mov ax,bx
			b"\xC3"             # ret
		)
		# ECX
		ecx = self._run_asm(
			self._zero_ecx(),
			b"\xB8\x07\x00\x00\x00" # mov eax,7
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)
		# Get the extended CPU flags
		extended_flags = {
			#'fsgsbase' : _is_bit_set(ebx, 0),
			#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
			'sgx' : _is_bit_set(ebx, 2),
			'bmi1' : _is_bit_set(ebx, 3),
			'hle' : _is_bit_set(ebx, 4),
			'avx2' : _is_bit_set(ebx, 5),
			#'reserved' : _is_bit_set(ebx, 6),
			'smep' : _is_bit_set(ebx, 7),
			'bmi2' : _is_bit_set(ebx, 8),
			'erms' : _is_bit_set(ebx, 9),
			'invpcid' : _is_bit_set(ebx, 10),
			'rtm' : _is_bit_set(ebx, 11),
			'pqm' : _is_bit_set(ebx, 12),
			#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
			'mpx' : _is_bit_set(ebx, 14),
			'pqe' : _is_bit_set(ebx, 15),
			'avx512f' : _is_bit_set(ebx, 16),
			'avx512dq' : _is_bit_set(ebx, 17),
			'rdseed' : _is_bit_set(ebx, 18),
			'adx' : _is_bit_set(ebx, 19),
			'smap' : _is_bit_set(ebx, 20),
			'avx512ifma' : _is_bit_set(ebx, 21),
			'pcommit' : _is_bit_set(ebx, 22),
			'clflushopt' : _is_bit_set(ebx, 23),
			'clwb' : _is_bit_set(ebx, 24),
			'intel_pt' : _is_bit_set(ebx, 25),
			'avx512pf' : _is_bit_set(ebx, 26),
			'avx512er' : _is_bit_set(ebx, 27),
			'avx512cd' : _is_bit_set(ebx, 28),
			'sha' : _is_bit_set(ebx, 29),
			'avx512bw' : _is_bit_set(ebx, 30),
			'avx512vl' : _is_bit_set(ebx, 31),

			'prefetchwt1' : _is_bit_set(ecx, 0),
			'avx512vbmi' : _is_bit_set(ecx, 1),
			'umip' : _is_bit_set(ecx, 2),
			'pku' : _is_bit_set(ecx, 3),
			'ospke' : _is_bit_set(ecx, 4),
			#'reserved' : _is_bit_set(ecx, 5),
			'avx512vbmi2' : _is_bit_set(ecx, 6),
			#'reserved' : _is_bit_set(ecx, 7),
			'gfni' : _is_bit_set(ecx, 8),
			'vaes' : _is_bit_set(ecx, 9),
			'vpclmulqdq' : _is_bit_set(ecx, 10),
			'avx512vnni' : _is_bit_set(ecx, 11),
			'avx512bitalg' : _is_bit_set(ecx, 12),
			#'reserved' : _is_bit_set(ecx, 13),
			'avx512vpopcntdq' : _is_bit_set(ecx, 14),
			#'reserved' : _is_bit_set(ecx, 15),
			#'reserved' : _is_bit_set(ecx, 16),
			#'mpx0' : _is_bit_set(ecx, 17),
			#'mpx1' : _is_bit_set(ecx, 18),
			#'mpx2' : _is_bit_set(ecx, 19),
			#'mpx3' : _is_bit_set(ecx, 20),
			#'mpx4' : _is_bit_set(ecx, 21),
			'rdpid' : _is_bit_set(ecx, 22),
			#'reserved' : _is_bit_set(ecx, 23),
			#'reserved' : _is_bit_set(ecx, 24),
			#'reserved' : _is_bit_set(ecx, 25),
			#'reserved' : _is_bit_set(ecx, 26),
			#'reserved' : _is_bit_set(ecx, 27),
			#'reserved' : _is_bit_set(ecx, 28),
			#'reserved' : _is_bit_set(ecx, 29),
			'sgx_lc' : _is_bit_set(ecx, 30),
			#'reserved' : _is_bit_set(ecx, 31)
		}

		# Get a list of only the flags that are true
		extended_flags = [k for k, v in extended_flags.items() if v]
		flags += extended_flags

	# --- CPUID extended leaf 0x80000001 feature bits (if supported) ---
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
	if max_extension_support >= 0x80000001:
		# EBX
		ebx = self._run_asm(
			b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
			b"\x0f\xa2"         # cpuid
			b"\x89\xD8"         # mov ax,bx
			b"\xC3"             # ret
		)
		# ECX
		ecx = self._run_asm(
			b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)
		# Get the extended CPU flags
		extended_flags = {
			'fpu' : _is_bit_set(ebx, 0),
			'vme' : _is_bit_set(ebx, 1),
			'de' : _is_bit_set(ebx, 2),
			'pse' : _is_bit_set(ebx, 3),
			'tsc' : _is_bit_set(ebx, 4),
			'msr' : _is_bit_set(ebx, 5),
			'pae' : _is_bit_set(ebx, 6),
			'mce' : _is_bit_set(ebx, 7),
			'cx8' : _is_bit_set(ebx, 8),
			'apic' : _is_bit_set(ebx, 9),
			#'reserved' : _is_bit_set(ebx, 10),
			'syscall' : _is_bit_set(ebx, 11),
			'mtrr' : _is_bit_set(ebx, 12),
			'pge' : _is_bit_set(ebx, 13),
			'mca' : _is_bit_set(ebx, 14),
			'cmov' : _is_bit_set(ebx, 15),
			'pat' : _is_bit_set(ebx, 16),
			'pse36' : _is_bit_set(ebx, 17),
			#'reserved' : _is_bit_set(ebx, 18),
			'mp' : _is_bit_set(ebx, 19),
			'nx' : _is_bit_set(ebx, 20),
			#'reserved' : _is_bit_set(ebx, 21),
			'mmxext' : _is_bit_set(ebx, 22),
			'mmx' : _is_bit_set(ebx, 23),
			'fxsr' : _is_bit_set(ebx, 24),
			'fxsr_opt' : _is_bit_set(ebx, 25),
			'pdpe1gp' : _is_bit_set(ebx, 26),
			'rdtscp' : _is_bit_set(ebx, 27),
			#'reserved' : _is_bit_set(ebx, 28),
			'lm' : _is_bit_set(ebx, 29),
			'3dnowext' : _is_bit_set(ebx, 30),
			'3dnow' : _is_bit_set(ebx, 31),

			'lahf_lm' : _is_bit_set(ecx, 0),
			'cmp_legacy' : _is_bit_set(ecx, 1),
			'svm' : _is_bit_set(ecx, 2),
			'extapic' : _is_bit_set(ecx, 3),
			'cr8_legacy' : _is_bit_set(ecx, 4),
			'abm' : _is_bit_set(ecx, 5),
			'sse4a' : _is_bit_set(ecx, 6),
			'misalignsse' : _is_bit_set(ecx, 7),
			'3dnowprefetch' : _is_bit_set(ecx, 8),
			'osvw' : _is_bit_set(ecx, 9),
			'ibs' : _is_bit_set(ecx, 10),
			'xop' : _is_bit_set(ecx, 11),
			'skinit' : _is_bit_set(ecx, 12),
			'wdt' : _is_bit_set(ecx, 13),
			#'reserved' : _is_bit_set(ecx, 14),
			'lwp' : _is_bit_set(ecx, 15),
			'fma4' : _is_bit_set(ecx, 16),
			'tce' : _is_bit_set(ecx, 17),
			#'reserved' : _is_bit_set(ecx, 18),
			'nodeid_msr' : _is_bit_set(ecx, 19),
			#'reserved' : _is_bit_set(ecx, 20),
			'tbm' : _is_bit_set(ecx, 21),
			'topoext' : _is_bit_set(ecx, 22),
			'perfctr_core' : _is_bit_set(ecx, 23),
			'perfctr_nb' : _is_bit_set(ecx, 24),
			#'reserved' : _is_bit_set(ecx, 25),
			'dbx' : _is_bit_set(ecx, 26),
			'perftsc' : _is_bit_set(ecx, 27),
			'pci_l2i' : _is_bit_set(ecx, 28),
			#'reserved' : _is_bit_set(ecx, 29),
			#'reserved' : _is_bit_set(ecx, 30),
			#'reserved' : _is_bit_set(ecx, 31)
		}

		# Get a list of only the flags that are true
		extended_flags = [k for k, v in extended_flags.items() if v]
		flags += extended_flags

	flags.sort()
	return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
    """Return the CPU brand string via CPUID leaves 0x80000002-0x80000004.

    Returns an empty string when extension 0x80000004 is not supported.
    """
    brand = ""

    # The brand string only exists when leaf 0x80000004 is available
    if max_extension_support < 0x80000004:
        return brand

    # One "mov ax,leaf" stub per brand-string leaf
    leaves = [
        b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
        b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
        b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
    ]

    # Each leaf spreads 16 characters across EAX, EBX, ECX, and EDX,
    # so after cpuid we copy each register into AX in turn
    reg_moves = [
        b"\x89\xC0", # mov ax,ax
        b"\x89\xD8", # mov ax,bx
        b"\x89\xC8", # mov ax,cx
        b"\x89\xD0"  # mov ax,dx
    ]

    for leaf in leaves:
        for mov in reg_moves:
            reg = self._run_asm(
                leaf,                         # mov ax,0x8000000?
                b"\x0f\xa2" + mov + b"\xC3"   # cpuid; mov ax,<reg>; ret
            )

            # Each register packs 4 ASCII bytes, low byte first
            for shift in [0, 8, 16, 24]:
                brand += chr((reg >> shift) & 0xFF)

    # Strip off any trailing NULL terminators and white space
    return brand.strip("\0").strip()
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
    """Return L2 cache details read from CPUID leaf 0x80000006.

    Returns an empty dict when the leaf is not supported.
    """
    # Cache details require extension 0x80000006
    if max_extension_support < 0x80000006:
        return {}

    # All three fields are packed into ECX of leaf 0x80000006
    ecx = self._run_asm(
        b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
        b"\x0f\xa2"              # cpuid
        b"\x89\xC8"              # mov ax,cx
        b"\xC3"                  # ret
    )

    return {
        'size_kb' : ecx & 0xFF,
        'line_size_b' : (ecx >> 12) & 0xF,
        'associativity' : (ecx >> 16) & 0xFFFF
    }
def get_ticks(self):
    """Read the CPU timestamp counter via a generated RDTSC stub.

    Returns the 64 bit tick count, or None when DataSource.bits is
    neither '32bit' nor '64bit'. The cpuid instruction is executed
    before rdtsc in both stubs — presumably to serialize execution;
    TODO confirm.
    """
    retval = None

    if DataSource.bits == '32bit':
        # Works on x86_32
        # rdtsc leaves the counter split across EDX:EAX, so the 32 bit
        # stub writes the two halves through its two pointer arguments
        restype = None
        argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
        get_ticks_x86_32, address = self._asm_func(restype, argtypes,
            [
            b"\x55",         # push bp
            b"\x89\xE5",     # mov bp,sp
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid
            b"\x0F\x31",     # rdtsc
            b"\x8B\x5D\x08", # mov bx,[di+0x8]
            b"\x8B\x4D\x0C", # mov cx,[di+0xc]
            b"\x89\x13",     # mov [bp+di],dx
            b"\x89\x01",     # mov [bx+di],ax
            b"\x5D",         # pop bp
            b"\xC3"          # ret
            ]
        )

        high = ctypes.c_uint32(0)
        low = ctypes.c_uint32(0)

        get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
        # Recombine the two 32 bit halves into a single 64 bit value
        retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
    elif DataSource.bits == '64bit':
        # Works on x86_64
        # The 64 bit stub shifts EDX up by 32 and ORs in EAX, returning
        # the whole counter in RAX
        restype = ctypes.c_uint64
        argtypes = ()
        get_ticks_x86_64, address = self._asm_func(restype, argtypes,
            [
            b"\x48",         # dec ax
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid
            b"\x0F\x31",     # rdtsc
            b"\x48",         # dec ax
            b"\xC1\xE2\x20", # shl dx,byte 0x20
            b"\x48",         # dec ax
            b"\x09\xD0",     # or ax,dx
            b"\xC3",         # ret
            ]
        )
        retval = get_ticks_x86_64()

    return retval
def get_raw_hz(self):
    """Estimate raw CPU Hz by counting timestamp ticks over one second."""
    import time

    before = self.get_ticks()
    time.sleep(1)
    after = self.get_ticks()

    return after - before
def _actual_get_cpu_info_from_cpuid(queue):
    '''
    Warning! This function has the potential to crash the Python runtime.
    Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
    It will safely call this function in another process.

    Puts a base64 encoded info dict (possibly empty) onto *queue*.
    '''

    # Pipe all output to nothing, so a crashing probe stays silent
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return none if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        queue.put(_obj_to_b64({}))
        return

    # Return none if SE Linux is in enforcing mode
    cpuid = CPUID()
    if cpuid.is_selinux_enforcing:
        queue.put(_obj_to_b64({}))
        return

    # Get the cpu info from the CPUID register
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()

    processor_brand = cpuid.get_processor_brand(max_extension_support)

    # Get the actual Hz by timing the timestamp counter
    hz_actual = cpuid.get_raw_hz()
    hz_actual = _to_decimal_string(hz_actual)

    # Get the advertised Hz and scale out of the brand string
    hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

    # NOTE: the dict literal below reads stepping/model/etc. out of the
    # old `info` before rebinding the name
    info = {
    'vendor_id_raw' : cpuid.get_vendor_id(),
    'hardware_raw' : '',
    'brand_raw' : processor_brand,

    'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
    'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
    'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
    'hz_actual' : _hz_short_to_full(hz_actual, 0),

    'stepping' : info['stepping'],
    'model' : info['model'],
    'family' : info['family'],
    'processor_type' : info['processor_type'],
    'extended_model' : info['extended_model'],
    'extended_family' : info['extended_family'],
    'flags' : cpuid.get_flags(max_extension_support)
    }

    # get_cache returns {} when CPUID leaf 0x80000006 is unsupported, so
    # only add the cache fields when present (previously this raised a
    # KeyError on such CPUs)
    if cache_info:
        info['l2_cache_size'] = _to_friendly_bytes(cache_info['size_kb'])
        info['l2_cache_line_size'] = cache_info['line_size_b']
        info['l2_cache_associativity'] = hex(cache_info['associativity'])

    info = {k: v for k, v in info.items() if v}
    queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
    '''
    Returns the CPU info gathered by querying the X86 cpuid register in a new process.
    Returns {} on non X86 cpus.
    Returns {} if SELinux is in enforcing mode.
    '''
    from multiprocessing import Process, Queue

    # Bail out right away when the cpuid instruction cannot be used
    if not DataSource.can_cpuid:
        return {}

    # cpuid only makes sense on X86 family processors
    arch, bits = _parse_arch(DataSource.arch_string_raw)
    if arch not in ['X86_32', 'X86_64']:
        return {}

    try:
        # Probe the register in a child process, because a bad probe can
        # take down the whole interpreter
        result_queue = Queue()
        worker = Process(target=_actual_get_cpu_info_from_cpuid, args=(result_queue,))
        worker.start()

        # Poll-join until the child terminates
        while worker.is_alive():
            worker.join(0)

        # A non zero exit code means the probe crashed or failed
        if worker.exitcode != 0:
            return {}

        # Decode the result, but only when the child actually sent one
        if not result_queue.empty():
            return _b64_to_obj(result_queue.get())
    except:
        pass

    # Return {} if everything failed
    return {}
def _get_cpu_info_from_proc_cpuinfo():
    '''
    Returns the CPU info gathered from /proc/cpuinfo.
    Returns {} if /proc/cpuinfo is not found.
    '''
    try:
        # Just return {} if there is no cpuinfo
        if not DataSource.has_proc_cpuinfo():
            return {}

        returncode, output = DataSource.cat_proc_cpuinfo()
        if returncode != 0:
            return {}

        # Various fields (multiple key names cover kernel/arch variations)
        vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
        processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
        cache_size = _get_field(False, output, None, '', 'cache size')
        stepping = _get_field(False, output, int, 0, 'stepping')
        model = _get_field(False, output, int, 0, 'model')
        family = _get_field(False, output, int, 0, 'cpu family')
        hardware = _get_field(False, output, None, '', 'Hardware')

        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()

        # Convert from MHz string to Hz
        hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
        hz_actual = hz_actual.lower().rstrip('mhz').strip()
        hz_actual = _to_decimal_string(hz_actual)

        # Convert from GHz/MHz string to Hz
        # Brand string parsing may fail on unusual brands; fall back to (None, 0)
        hz_advertised, scale = (None, 0)
        try:
            hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        except Exception:
            pass

        info = {
        'hardware_raw' : hardware,
        'brand_raw' : processor_brand,
        'l3_cache_size' : _to_friendly_bytes(cache_size),
        'flags' : flags,
        'vendor_id_raw' : vendor_id,
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        }

        # Make the Hz the same for actual and advertised if missing any
        if not hz_advertised or hz_advertised == '0.0':
            hz_advertised = hz_actual
            scale = 6
        elif not hz_actual or hz_actual == '0.0':
            hz_actual = hz_advertised

        # Add the Hz if there is one
        if _hz_short_to_full(hz_advertised, scale) > (0, 0):
            info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
            info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
        # NOTE(review): the guard below uses `scale`, while the stored values
        # hard-code 6 (MHz). /proc/cpuinfo reports "cpu MHz", so 6 is correct
        # for the value; confirm whether the guard should also use 6.
        if _hz_short_to_full(hz_actual, scale) > (0, 0):
            info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
            info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

        # Drop empty/zero valued fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_lscpu():
    '''
    Returns the CPU info gathered from lscpu.
    Returns {} if lscpu is not found.
    '''
    try:
        # Nothing to do without the lscpu program
        if not DataSource.has_lscpu():
            return {}

        returncode, output = DataSource.lscpu()
        if returncode != 0:
            return {}

        info = {}

        # Clock speed: lscpu reports MHz, hence a scale of 10^6
        new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
        if new_hz:
            new_hz = _to_decimal_string(new_hz)
            scale = 6
            info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
            info['hz_actual'] = _hz_short_to_full(new_hz, scale)

        # Plain string fields
        vendor_id = _get_field(False, output, None, None, 'Vendor ID')
        if vendor_id:
            info['vendor_id_raw'] = vendor_id

        brand = _get_field(False, output, None, None, 'Model name')
        if brand:
            info['brand_raw'] = brand

        # Integer fields, stored only when they parse as digits
        for lscpu_key, info_key in [
            ('CPU family', 'family'),
            ('Stepping', 'stepping'),
            ('Model', 'model')]:
            value = _get_field(False, output, None, None, lscpu_key)
            if value and value.isdigit():
                info[info_key] = int(value)

        # Cache sizes
        for lscpu_key, info_key in [
            ('L1d cache', 'l1_data_cache_size'),
            ('L1i cache', 'l1_instruction_cache_size'),
            ('L2 cache', 'l2_cache_size'),
            ('L3 cache', 'l3_cache_size')]:
            value = _get_field(False, output, None, None, lscpu_key)
            if value:
                info[info_key] = _to_friendly_bytes(value)

        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()
            info['flags'] = flags

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_dmesg():
    '''
    Returns the CPU info gathered from dmesg.
    Returns {} if dmesg is not found or does not have the desired info.
    '''
    # Nothing to parse when dmesg is unavailable
    if not DataSource.has_dmesg():
        return {}

    # Bail out when running dmesg fails
    returncode, output = DataSource.dmesg_a()
    failed = output == None or returncode != 0
    if failed:
        return {}

    # Hand the raw text off to the shared dmesg parser
    return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
    '''
    Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
    Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
    '''
    try:
        # Just return {} if there is no lsprop
        if not DataSource.has_ibm_pa_features():
            return {}

        # If ibm,pa-features fails return {}
        returncode, output = DataSource.ibm_pa_features()
        if output == None or returncode != 0:
            return {}

        # Filter out invalid characters from output
        # (keep only hex digits appearing after the property name)
        value = output.split("ibm,pa-features")[1].lower()
        value = [s for s in value if s in list('0123456789abcfed')]
        value = ''.join(value)

        # Get data converted to Uint32 chunks
        # Only the first 64 bits (two 32 bit words) of the property are used
        left = int(value[0 : 8], 16)
        right = int(value[8 : 16], 16)

        # Get the CPU flags
        # Bit positions follow the ibm,pa-features byte layout in LoPAPR
        flags = {
        # Byte 0
        'mmu' : _is_bit_set(left, 0),
        'fpu' : _is_bit_set(left, 1),
        'slb' : _is_bit_set(left, 2),
        'run' : _is_bit_set(left, 3),
        #'reserved' : _is_bit_set(left, 4),
        'dabr' : _is_bit_set(left, 5),
        'ne' : _is_bit_set(left, 6),
        'wtr' : _is_bit_set(left, 7),

        # Byte 1
        'mcr' : _is_bit_set(left, 8),
        'dsisr' : _is_bit_set(left, 9),
        'lp' : _is_bit_set(left, 10),
        'ri' : _is_bit_set(left, 11),
        'dabrx' : _is_bit_set(left, 12),
        'sprg3' : _is_bit_set(left, 13),
        'rislb' : _is_bit_set(left, 14),
        'pp' : _is_bit_set(left, 15),

        # Byte 2
        'vpm' : _is_bit_set(left, 16),
        'dss_2.05' : _is_bit_set(left, 17),
        #'reserved' : _is_bit_set(left, 18),
        'dar' : _is_bit_set(left, 19),
        #'reserved' : _is_bit_set(left, 20),
        'ppr' : _is_bit_set(left, 21),
        'dss_2.02' : _is_bit_set(left, 22),
        'dss_2.06' : _is_bit_set(left, 23),

        # Byte 3
        'lsd_in_dscr' : _is_bit_set(left, 24),
        'ugr_in_dscr' : _is_bit_set(left, 25),
        #'reserved' : _is_bit_set(left, 26),
        #'reserved' : _is_bit_set(left, 27),
        #'reserved' : _is_bit_set(left, 28),
        #'reserved' : _is_bit_set(left, 29),
        #'reserved' : _is_bit_set(left, 30),
        #'reserved' : _is_bit_set(left, 31),

        # Byte 4
        'sso_2.06' : _is_bit_set(right, 0),
        #'reserved' : _is_bit_set(right, 1),
        #'reserved' : _is_bit_set(right, 2),
        #'reserved' : _is_bit_set(right, 3),
        #'reserved' : _is_bit_set(right, 4),
        #'reserved' : _is_bit_set(right, 5),
        #'reserved' : _is_bit_set(right, 6),
        #'reserved' : _is_bit_set(right, 7),

        # Byte 5
        'le' : _is_bit_set(right, 8),
        'cfar' : _is_bit_set(right, 9),
        'eb' : _is_bit_set(right, 10),
        'lsq_2.07' : _is_bit_set(right, 11),
        #'reserved' : _is_bit_set(right, 12),
        #'reserved' : _is_bit_set(right, 13),
        #'reserved' : _is_bit_set(right, 14),
        #'reserved' : _is_bit_set(right, 15),

        # Byte 6
        'dss_2.07' : _is_bit_set(right, 16),
        #'reserved' : _is_bit_set(right, 17),
        #'reserved' : _is_bit_set(right, 18),
        #'reserved' : _is_bit_set(right, 19),
        #'reserved' : _is_bit_set(right, 20),
        #'reserved' : _is_bit_set(right, 21),
        #'reserved' : _is_bit_set(right, 22),
        #'reserved' : _is_bit_set(right, 23),

        # Byte 7
        #'reserved' : _is_bit_set(right, 24),
        #'reserved' : _is_bit_set(right, 25),
        #'reserved' : _is_bit_set(right, 26),
        #'reserved' : _is_bit_set(right, 27),
        #'reserved' : _is_bit_set(right, 28),
        #'reserved' : _is_bit_set(right, 29),
        #'reserved' : _is_bit_set(right, 30),
        #'reserved' : _is_bit_set(right, 31),
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        info = {
        'flags' : flags
        }
        info = {k: v for k, v in info.items() if v}

        return info
    except:
        return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
    '''
    Returns the CPU info gathered from /var/run/dmesg.boot.
    Returns {} if dmesg is not found or does not have the desired info.
    '''
    # Nothing to parse when the boot log file is missing
    if not DataSource.has_var_run_dmesg_boot():
        return {}

    # Bail out when reading the file fails
    returncode, output = DataSource.cat_var_run_dmesg_boot()
    failed = output == None or returncode != 0
    if failed:
        return {}

    # Hand the raw text off to the shared dmesg parser
    return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
    '''
    Returns the CPU info gathered from sysctl.
    Returns {} if sysctl is not found.
    '''
    try:
        # Nothing to do without the sysctl program
        if not DataSource.has_sysctl():
            return {}

        # Bail out when running sysctl fails
        returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
        if output == None or returncode != 0:
            return {}

        # Plain fields
        vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
        processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
        cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
        model = _get_field(False, output, int, 0, 'machdep.cpu.model')
        family = _get_field(False, output, int, 0, 'machdep.cpu.family')

        # Feature flags come from three separate sysctl entries
        flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
        flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
        flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
        flags.sort()

        # Advertised speed is parsed out of the brand string; actual speed
        # comes from hw.cpufrequency, which is already in Hz (scale 0)
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
        hz_actual = _to_decimal_string(hz_actual)

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),
        'l2_cache_size' : _to_friendly_bytes(cache_size),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_sysinfo():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.
    '''
    # Merge both sysinfo output formats, letting the v2 fields win
    combined = _get_cpu_info_from_sysinfo_v1()
    combined.update(_get_cpu_info_from_sysinfo_v2())
    return combined
def _get_cpu_info_from_sysinfo_v1():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.
    '''
    try:
        # Nothing to do without the sysinfo program
        if not DataSource.has_sysinfo():
            return {}

        # Bail out when running sysinfo fails
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')

        # Stepping, model, and family appear as comma separated "key value" pairs
        stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
        model = int(output.split(', model ')[1].split(',')[0].strip())
        family = int(output.split(', family ')[1].split(',')[0].strip())

        # Flag lines are the double tab indented ones
        flags = [
            flag
            for line in output.split('\n') if line.startswith('\t\t')
            for flag in line.strip().lower().split()
        ]
        flags.sort()

        # Advertised speed comes from the brand string; this sysinfo format
        # offers no separate actual speed, so reuse the advertised value
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = hz_advertised

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),
        'l2_cache_size' : _to_friendly_bytes(cache_size),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_sysinfo_v2():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.
    '''
    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        # Stepping/model/family are packed into one "Signature:" line
        signature = output.split('Signature:')[1].split('\n')[0].strip()
        #
        stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
        model = int(signature.split('model ')[1].split(',')[0].strip())
        family = int(signature.split('family ')[1].split(',')[0].strip())

        # Flags
        # Collects the indented entries below a section header, stopping at
        # the first non-indented line.
        # NOTE(review): the two startswith() prefixes below look identical
        # after whitespace loss; verify the intended prefixes (likely a tab
        # and a multi-space indent) against the original file.
        def get_subsection_flags(output):
            retval = []
            for line in output.split('\n')[1:]:
                if not line.startswith(' ') and not line.startswith(' '): break
                for entry in line.strip().lower().split(' '):
                    retval.append(entry)

            return retval

        flags = get_subsection_flags(output.split('Features: ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
        flags.sort()

        # Convert from GHz/MHz string to Hz
        # The first non-empty line ends with e.g. "running at NNN MHz"
        lines = [n for n in output.split('\n') if n]
        raw_hz = lines[0].split('running at ')[1].strip().lower()
        hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
        hz_advertised = _to_decimal_string(hz_advertised)
        hz_actual = hz_advertised

        # Pick the scale from the unit suffix
        scale = 0
        if raw_hz.endswith('mhz'):
            scale = 6
        elif raw_hz.endswith('ghz'):
            scale = 9

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),
        'l2_cache_size' : _to_friendly_bytes(cache_size),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_wmic():
    '''
    Returns the CPU info gathered from WMI.
    Returns {} if not on Windows, or wmic is not installed.
    '''
    try:
        # Just return {} if not Windows or there is no wmic
        if not DataSource.is_windows or not DataSource.has_wmic():
            return {}

        returncode, output = DataSource.wmic_cpu()
        if output == None or returncode != 0:
            return {}

        # Break the list into key values pairs
        value = output.split("\n")
        value = [s.rstrip().split('=') for s in value if '=' in s]
        value = {k: v for k, v in value if v}

        # Get the advertised MHz (parsed out of the CPU name string)
        processor_brand = value.get('Name')
        hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

        # Get the actual MHz
        # CurrentClockSpeed is treated as MHz here, hence the fixed scale of 6
        hz_actual = value.get('CurrentClockSpeed')
        scale_actual = 6
        if hz_actual:
            hz_actual = _to_decimal_string(hz_actual)

        # Get cache sizes
        # WMI reports cache sizes as bare kilobyte counts, so append the unit
        l2_cache_size = value.get('L2CacheSize')
        if l2_cache_size:
            l2_cache_size = l2_cache_size + ' KB'

        l3_cache_size = value.get('L3CacheSize')
        if l3_cache_size:
            l3_cache_size = l3_cache_size + ' KB'

        # Get family, model, and stepping
        # These appear as "Family N Model N Stepping N" words inside the
        # Description (or Caption) string
        family, model, stepping = '', '', ''
        description = value.get('Description') or value.get('Caption')
        entries = description.split(' ')

        if 'Family' in entries and entries.index('Family') < len(entries)-1:
            i = entries.index('Family')
            family = int(entries[i + 1])

        if 'Model' in entries and entries.index('Model') < len(entries)-1:
            i = entries.index('Model')
            model = int(entries[i + 1])

        if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
            i = entries.index('Stepping')
            stepping = int(entries[i + 1])

        info = {
        'vendor_id_raw' : value.get('Manufacturer'),
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
        'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
        'l2_cache_size' : l2_cache_size,
        'l3_cache_size' : l3_cache_size,
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        }

        # Drop empty/zero valued fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_registry():
    '''
    FIXME: Is missing many of the newer CPU flags like sse3
    Returns the CPU info gathered from the Windows Registry.
    Returns {} if not on Windows.
    '''
    try:
        # Just return {} if not on Windows
        if not DataSource.is_windows:
            return {}

        # Get the CPU name
        processor_brand = DataSource.winreg_processor_brand().strip()

        # Get the CPU vendor id
        vendor_id = DataSource.winreg_vendor_id_raw()

        # Get the CPU arch and bits
        arch_string_raw = DataSource.winreg_arch_string_raw()
        arch, bits = _parse_arch(arch_string_raw)

        # Get the actual CPU Hz
        # The code below treats this registry value as MHz (scale of 6)
        hz_actual = DataSource.winreg_hz_actual()
        hz_actual = _to_decimal_string(hz_actual)

        # Get the advertised CPU Hz (parsed out of the brand string)
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

        # If advertised hz not found, use the actual hz
        if hz_advertised == '0.0':
            scale = 6
            hz_advertised = _to_decimal_string(hz_actual)

        # Get the CPU features
        feature_bits = DataSource.winreg_feature_bits()

        # Bit 0 is the most significant bit of the 32 bit feature DWORD
        def is_set(bit):
            mask = 0x80000000 >> bit
            retval = mask & feature_bits > 0
            return retval

        # http://en.wikipedia.org/wiki/CPUID
        # http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
        # http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
        flags = {
        'fpu' : is_set(0), # Floating Point Unit
        'vme' : is_set(1), # V86 Mode Extensions
        'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
        'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
        'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
        'msr' : is_set(5), # Model Specific Registers
        'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
        'mce' : is_set(7), # Machine Check Exception supported
        'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
        'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
        'sepamd' : is_set(10), # Fast system calls (AMD only)
        'sep' : is_set(11), # Fast system calls
        'mtrr' : is_set(12), # Memory Type Range Registers
        'pge' : is_set(13), # Page Global Enable
        'mca' : is_set(14), # Machine Check Architecture
        'cmov' : is_set(15), # Conditional MOVe instructions
        'pat' : is_set(16), # Page Attribute Table
        'pse36' : is_set(17), # 36 bit Page Size Extensions
        'serial' : is_set(18), # Processor Serial Number
        'clflush' : is_set(19), # Cache Flush
        #'reserved1' : is_set(20), # reserved
        'dts' : is_set(21), # Debug Trace Store
        'acpi' : is_set(22), # ACPI support
        'mmx' : is_set(23), # MultiMedia Extensions
        'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
        'sse' : is_set(25), # SSE instructions
        'sse2' : is_set(26), # SSE2 (WNI) instructions
        'ss' : is_set(27), # self snoop
        #'reserved2' : is_set(28), # reserved
        'tm' : is_set(29), # Automatic clock control
        'ia64' : is_set(30), # IA64 instructions
        '3dnow' : is_set(31) # 3DNow! instructions available
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 6),
        'flags' : flags
        }

        # Drop empty/zero valued fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_kstat():
    '''
    Returns the CPU info gathered from isainfo and kstat.
    Returns {} if isainfo or kstat are not found.
    '''
    try:
        # Both programs are required
        if not DataSource.has_isainfo() or not DataSource.has_kstat():
            return {}

        # Bail out when isainfo fails
        returncode, flag_output = DataSource.isainfo_vb()
        if flag_output == None or returncode != 0:
            return {}

        # Bail out when kstat fails
        returncode, kstat = DataSource.kstat_m_cpu_info()
        if kstat == None or returncode != 0:
            return {}

        # Each kstat entry is a tab prefixed name followed by its value
        def kstat_field(name):
            return kstat.split('\t' + name + ' ')[1].split('\n')[0].strip()

        vendor_id = kstat_field('vendor_id')
        processor_brand = kstat_field('brand')
        stepping = int(kstat_field('stepping'))
        model = int(kstat_field('model'))
        family = int(kstat_field('family'))

        # The last line of the isainfo output holds the flag list
        flags = flag_output.strip().split('\n')[-1].strip().lower().split()
        flags.sort()

        # Advertised speed is reported in MHz (scale 6),
        # actual speed is already in Hz (scale 0)
        scale = 6
        hz_advertised = _to_decimal_string(kstat_field('clock_MHz'))
        hz_actual = _to_decimal_string(kstat_field('current_clock_Hz'))

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_platform_uname():
    '''
    Returns family, model, and stepping parsed out of the raw uname string.
    Returns {} on failure.
    '''
    try:
        # Only the first comma separated section describes the CPU
        uname = DataSource.uname_string_raw.split(',')[0]

        # Scan for "Family N", "Model N", and "Stepping N" word pairs
        entries = uname.split(' ')
        info = {}
        for label in ['Family', 'Model', 'Stepping']:
            if label in entries and entries.index(label) < len(entries)-1:
                i = entries.index(label)
                info[label.lower()] = int(entries[i + 1])

        # Drop zero valued fields, matching the other gatherers
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_internal():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns {} if nothing is found.
    '''
    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Describe the running interpreter
    friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
    friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
    PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

    info = {
    'python_version' : PYTHON_VERSION,
    'cpuinfo_version' : CPUINFO_VERSION,
    'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
    'arch' : arch,
    'bits' : bits,
    'count' : DataSource.cpu_count,
    'arch_string_raw' : DataSource.arch_string_raw,
    }

    # Query every source in priority order; earlier sources win because
    # _copy_new_fields only fills in fields that are still missing
    sources = [
        _get_cpu_info_from_wmic,                   # Windows wmic
        _get_cpu_info_from_registry,               # Windows registry
        _get_cpu_info_from_proc_cpuinfo,           # /proc/cpuinfo
        _get_cpu_info_from_cpufreq_info,           # cpufreq-info
        _get_cpu_info_from_lscpu,                  # lscpu
        _get_cpu_info_from_sysctl,                 # sysctl
        _get_cpu_info_from_kstat,                  # kstat
        _get_cpu_info_from_dmesg,                  # dmesg
        _get_cpu_info_from_cat_var_run_dmesg_boot, # /var/run/dmesg.boot
        _get_cpu_info_from_ibm_pa_features,        # lsprop ibm,pa-features
        _get_cpu_info_from_sysinfo,                # sysinfo
        _get_cpu_info_from_cpuid,                  # CPUID register
        _get_cpu_info_from_platform_uname,         # platform.uname
    ]
    for source in sources:
        _copy_new_fields(info, source())

    return info
def get_cpu_info_json():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a json string
    '''
    import json

    output = None

    # If running under pyinstaller, run normally
    if getattr(sys, 'frozen', False):
        info = _get_cpu_info_internal()
        output = json.dumps(info)
        output = "{0}".format(output)
    # if not running under pyinstaller, run in another process.
    # This is done because multiprocessing has a design flaw that
    # causes non main programs to run multiple times on Windows.
    else:
        from subprocess import Popen, PIPE

        # Re-run this very file with --json and capture its stdout
        command = [sys.executable, __file__, '--json']
        p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        output = p1.communicate()[0]
        # A failed child yields an empty JSON object string
        if p1.returncode != 0:
            return "{}"

        # Child stdout is bytes; decode to str on Python 3
        if not IS_PY2:
            output = output.decode(encoding='UTF-8')

    return output
def get_cpu_info():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a dict
    '''
    import json

    # Parse the JSON, converting unicode strings to plain strings on Python 2
    return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
    '''
    Command line entry point.

    Prints the CPU info either as JSON (--json), as the py-cpuinfo version
    (--version), or as a human readable report. Exits with status 1 when
    the architecture is unsupported or no CPU info could be found.
    '''
    from argparse import ArgumentParser
    import json

    # Parse args
    parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
    parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
    parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
    args = parser.parse_args()

    # Refuse to run on unsupported architectures
    try:
        _check_arch()
    except Exception as err:
        sys.stderr.write(str(err) + "\n")
        sys.exit(1)

    info = _get_cpu_info_internal()

    if not info:
        sys.stderr.write("Failed to find cpu info\n")
        sys.exit(1)

    if args.json:
        print(json.dumps(info))
    elif args.version:
        print(CPUINFO_VERSION_STRING)
    else:
        # Human readable report, one field per line
        print('Python Version: {0}'.format(info.get('python_version', '')))
        print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
        print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
        print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
        print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
        print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
        print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
        print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
        print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
        print('Arch: {0}'.format(info.get('arch', '')))
        print('Bits: {0}'.format(info.get('bits', '')))
        print('Count: {0}'.format(info.get('count', '')))
        print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
        print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
        print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
        print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
        print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
        print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
        print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
        print('Stepping: {0}'.format(info.get('stepping', '')))
        print('Model: {0}'.format(info.get('model', '')))
        print('Family: {0}'.format(info.get('family', '')))
        print('Processor Type: {0}'.format(info.get('processor_type', '')))
        print('Extended Model: {0}'.format(info.get('extended_model', '')))
        print('Extended Family: {0}'.format(info.get('extended_family', '')))
        print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
    main()
else:
    # Imported as a module: fail fast if the CPU architecture is unsupported
    _check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_lscpu
|
python
|
def _get_cpu_info_from_lscpu():
    '''
    Returns the CPU info gathered from lscpu.
    Returns {} if lscpu is not found or its output cannot be parsed.
    '''
    try:
        if not DataSource.has_lscpu():
            return {}

        returncode, output = DataSource.lscpu()
        if returncode != 0:
            return {}

        info = {}

        # Frequency: prefer the max MHz, fall back to the current MHz
        new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
        if new_hz:
            new_hz = _to_decimal_string(new_hz)
            scale = 6  # lscpu reports MHz
            info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
            info['hz_actual'] = _hz_short_to_full(new_hz, scale)

        vendor_id = _get_field(False, output, None, None, 'Vendor ID')
        if vendor_id:
            info['vendor_id_raw'] = vendor_id

        brand = _get_field(False, output, None, None, 'Model name')
        if brand:
            info['brand_raw'] = brand

        # Family/stepping/model are only kept when purely numeric
        family = _get_field(False, output, None, None, 'CPU family')
        if family and family.isdigit():
            info['family'] = int(family)

        stepping = _get_field(False, output, None, None, 'Stepping')
        if stepping and stepping.isdigit():
            info['stepping'] = int(stepping)

        model = _get_field(False, output, None, None, 'Model')
        if model and model.isdigit():
            info['model'] = int(model)

        # Cache sizes, expanded from short forms like "256K" to "256 KB"
        l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
        if l1_data_cache_size:
            info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)

        l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
        if l1_instruction_cache_size:
            info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)

        l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
        if l2_cache_size:
            info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)

        l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
        if l3_cache_size:
            info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)

        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()
            info['flags'] = flags

        # Drop any fields that ended up empty
        info = {k: v for k, v in info.items() if v}
        return info
    except Exception:
        # Catch Exception rather than a bare except so KeyboardInterrupt
        # and SystemExit still propagate while probing.
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
|
Returns the CPU info gathered from lscpu.
Returns {} if lscpu is not found.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1514-L1585
|
[
"def has_lscpu():\n\treturn len(_program_paths('lscpu')) > 0\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
    '''
    Central collection of probes for CPU information.

    Class attributes cache cheap platform facts at import time; the static
    methods test for, and invoke, the external tools and OS interfaces
    that the various parsers read from.
    '''

    # Cheap, always-available facts gathered once at import time
    bits = platform.architecture()[0]
    cpu_count = multiprocessing.cpu_count()
    is_windows = platform.system().lower() == 'windows'
    arch_string_raw = platform.machine()
    uname_string_raw = platform.uname()[5]
    # Whether querying the cpuid instruction directly is allowed
    can_cpuid = True

    # --- Presence checks for the individual data sources ---

    @staticmethod
    def has_proc_cpuinfo():
        return os.path.exists('/proc/cpuinfo')

    @staticmethod
    def has_dmesg():
        return len(_program_paths('dmesg')) > 0

    @staticmethod
    def has_var_run_dmesg_boot():
        uname = platform.system().strip().strip('"').strip("'").strip().lower()
        return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')

    @staticmethod
    def has_cpufreq_info():
        return len(_program_paths('cpufreq-info')) > 0

    @staticmethod
    def has_sestatus():
        return len(_program_paths('sestatus')) > 0

    @staticmethod
    def has_sysctl():
        return len(_program_paths('sysctl')) > 0

    @staticmethod
    def has_isainfo():
        return len(_program_paths('isainfo')) > 0

    @staticmethod
    def has_kstat():
        return len(_program_paths('kstat')) > 0

    @staticmethod
    def has_sysinfo():
        return len(_program_paths('sysinfo')) > 0

    @staticmethod
    def has_lscpu():
        return len(_program_paths('lscpu')) > 0

    @staticmethod
    def has_ibm_pa_features():
        return len(_program_paths('lsprop')) > 0

    @staticmethod
    def has_wmic():
        # wmic only counts as present if it actually runs and prints output
        returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
        return returncode == 0 and len(output) > 0

    # --- Tool invocations, each returning a (returncode, stdout) tuple ---

    @staticmethod
    def cat_proc_cpuinfo():
        return _run_and_get_stdout(['cat', '/proc/cpuinfo'])

    @staticmethod
    def cpufreq_info():
        return _run_and_get_stdout(['cpufreq-info'])

    @staticmethod
    def sestatus_b():
        return _run_and_get_stdout(['sestatus', '-b'])

    @staticmethod
    def dmesg_a():
        return _run_and_get_stdout(['dmesg', '-a'])

    @staticmethod
    def cat_var_run_dmesg_boot():
        return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])

    @staticmethod
    def sysctl_machdep_cpu_hw_cpufrequency():
        return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])

    @staticmethod
    def isainfo_vb():
        return _run_and_get_stdout(['isainfo', '-vb'])

    @staticmethod
    def kstat_m_cpu_info():
        return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])

    @staticmethod
    def sysinfo_cpu():
        return _run_and_get_stdout(['sysinfo', '-cpu'])

    @staticmethod
    def lscpu():
        return _run_and_get_stdout(['lscpu'])

    @staticmethod
    def ibm_pa_features():
        import glob

        ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
        if ibm_features:
            return _run_and_get_stdout(['lsprop', ibm_features[0]])
        # NOTE: implicitly returns None when no device-tree entry exists

    @staticmethod
    def wmic_cpu():
        return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

    # --- Windows registry reads (require the winreg import to have worked) ---

    @staticmethod
    def winreg_processor_brand():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
        winreg.CloseKey(key)
        return processor_brand.strip()

    @staticmethod
    def winreg_vendor_id_raw():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
        winreg.CloseKey(key)
        return vendor_id_raw

    @staticmethod
    def winreg_arch_string_raw():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
        arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
        winreg.CloseKey(key)
        return arch_string_raw

    @staticmethod
    def winreg_hz_actual():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
        winreg.CloseKey(key)
        hz_actual = _to_decimal_string(hz_actual)
        return hz_actual

    @staticmethod
    def winreg_feature_bits():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
        winreg.CloseKey(key)
        return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
    '''
    Runs *command*, optionally piping its stdout into *pipe_command*,
    and returns a (returncode, stdout) tuple for the final process.
    stdout is decoded as UTF-8 on Python 3.
    '''
    from subprocess import Popen, PIPE

    proc = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
    if pipe_command:
        # Chain the second command onto the first and report its results
        tail = Popen(pipe_command, stdin=proc.stdout, stdout=PIPE, stderr=PIPE)
        proc.stdout.close()
        proc = tail

    raw_output = proc.communicate()[0]
    if not IS_PY2:
        raw_output = raw_output.decode(encoding='UTF-8')
    return proc.returncode, raw_output
def _check_arch():
    '''Raises an Exception unless the host CPU architecture is supported.'''
    arch, bits = _parse_arch(DataSource.arch_string_raw)
    supported = ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']
    if arch not in supported:
        raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
    '''
    Recursively converts unicode strings to UTF-8 byte strings on
    Python 2. On Python 3 every value comes back unchanged.
    '''
    if IS_PY2 and isinstance(input, unicode):
        return input.encode('utf-8')
    if isinstance(input, list):
        return [_utf_to_str(entry) for entry in input]
    if isinstance(input, dict):
        return {_utf_to_str(k): _utf_to_str(v) for k, v in input.items()}
    return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
retval = _get_field_actual(cant_be_number, raw_string, field_names)
# Convert the return value
if retval and convert_to:
try:
retval = convert_to(retval)
except:
retval = default_value
# Return the default if there is no return value
if retval is None:
retval = default_value
return retval
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
    '''
    Converts a decimal-string frequency (e.g. "2.8") plus a power-of-ten
    scale (e.g. 9 for GHz) into a (whole_hz, fraction) integer tuple.
    Returns (0, 0) when the input cannot be parsed.
    '''
    try:
        # Make sure the number can be converted to a float
        ticks = float(ticks)
        ticks = '{0}'.format(ticks)
        # Scale the numbers: shift the decimal point right by *scale*
        # places, padding with zeros as needed
        hz = ticks.lstrip('0')
        old_index = hz.index('.')
        hz = hz.replace('.', '')
        hz = hz.ljust(scale + old_index+1, '0')
        new_index = old_index + scale
        hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
        left, right = hz.split('.')
        left, right = int(left), int(right)
        return (left, right)
    except:
        # NOTE: values below 1.0 (e.g. "0.5" at scale 0) also land here,
        # because lstrip('0') leaves an empty integer part
        return (0, 0)
def _hz_friendly_to_full(hz_string):
    '''
    Parses a human readable string like "2.80GHz" into the
    (whole_hz, fraction) tuple produced by _hz_short_to_full.
    Returns (0, 0) on failure.
    '''
    try:
        hz_string = hz_string.strip().lower()
        hz, scale = (None, None)

        # Pick the scale from the unit suffix (ghz must be tested first)
        if hz_string.endswith('ghz'):
            scale = 9
        elif hz_string.endswith('mhz'):
            scale = 6
        elif hz_string.endswith('hz'):
            scale = 0
        hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip()
        if not '.' in hz:
            hz += '.0'
        # NOTE: despite the names, this rebinds (hz, scale) to the
        # (whole_hz, fraction) pair returned by _hz_short_to_full
        hz, scale = _hz_short_to_full(hz, scale)

        return (hz, scale)
    except:
        return (0, 0)
def _hz_short_to_friendly(ticks, scale):
    '''
    Converts a decimal-string frequency plus a power-of-ten scale into a
    human readable string such as "2.8000 GHz". Returns '0.0000 Hz' when
    the input cannot be parsed.
    '''
    try:
        # Get the raw Hz as a string
        left, right = _hz_short_to_full(ticks, scale)
        result = '{0}.{1}'.format(left, right)

        # Get the location of the dot, and remove said dot
        dot_index = result.index('.')
        result = result.replace('.', '')

        # Get the Hz symbol and scale from the number of integer digits
        symbol = "Hz"
        scale = 0
        if dot_index > 9:
            symbol = "GHz"
            scale = 9
        elif dot_index > 6:
            symbol = "MHz"
            scale = 6
        elif dot_index > 3:
            symbol = "KHz"
            scale = 3

        # Get the Hz with the dot at the new scaled point
        result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])

        # Format the ticks to have 4 numbers after the decimal
        # and remove any superfluous zeroes.
        result = '{0:.4f} {1}'.format(float(result), symbol)
        result = result.rstrip('0')
        return result
    except:
        return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
    '''
    Pulls the frequency out of a processor brand string such as
    "Intel(R) CPU @ 2.80GHz". Returns a (decimal_string, scale) pair, or
    ('0.0', 0) when the string carries no Hz at all.
    '''
    lowered = cpu_string.lower()

    # Just return 0 if the processor brand does not have the Hz
    if 'hz' not in lowered:
        return ('0.0', 0)

    scale = 0
    if lowered.endswith('mhz'):
        scale = 6
    elif lowered.endswith('ghz'):
        scale = 9

    # The Hz is either after an "@", or is the last word
    if '@' in lowered:
        hz = lowered.split('@')[1]
    else:
        hz = lowered.rsplit(None, 1)[1]

    hz = hz.rstrip('mhz').rstrip('ghz').strip()
    return (_to_decimal_string(hz), scale)
def _parse_cpu_brand_string_dx(cpu_string):
    '''
    Parses an extended CPU line, as printed by BSD style dmesg, into a
    (hz, scale, brand, vendor_id, stepping, model, family) tuple. Fields
    that are not present come back as None (and hz as '0.0').
    '''
    import re

    # Find all the strings inside brackets ()
    starts = [m.start() for m in re.finditer('\(', cpu_string)]
    ends = [m.start() for m in re.finditer('\)', cpu_string)]
    insides = {k: v for k, v in zip(starts, ends)}
    insides = [cpu_string[start+1 : end] for start, end in insides.items()]

    # Find all the fields
    vendor_id, stepping, model, family = (None, None, None, None)
    for inside in insides:
        for pair in inside.split(','):
            pair = [n.strip() for n in pair.split(':')]
            if len(pair) > 1:
                name, value = pair[0], pair[1]
                if name == 'origin':
                    vendor_id = value.strip('"')
                elif name == 'stepping':
                    stepping = int(value.lstrip('0x'), 16)
                elif name == 'model':
                    model = int(value.lstrip('0x'), 16)
                elif name in ['fam', 'family']:
                    family = int(value.lstrip('0x'), 16)

    # Find the Processor Brand
    # Strip off extra strings in brackets at end
    brand = cpu_string.strip()
    is_working = True
    while is_working:
        is_working = False
        for inside in insides:
            full = "({0})".format(inside)
            if brand.endswith(full):
                brand = brand[ :-len(full)].strip()
                is_working = True

    # Find the Hz in the brand string
    hz_brand, scale = _parse_cpu_brand_string(brand)

    # Find Hz inside brackets () after the brand string
    if hz_brand == '0.0':
        for inside in insides:
            hz = inside
            for entry in ['GHz', 'MHz', 'Hz']:
                if entry in hz:
                    # Fake a "CPU @ <freq>" brand so the parser above works
                    hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
                    hz_brand, scale = _parse_cpu_brand_string(hz)
                    break

    return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
    '''
    Extracts CPU fields from raw dmesg text. Picks the CPU line with the
    most parseable fields, then layers in the " Origin=..." and
    " Features=..." lines. Returns a dict of the non-empty fields, or {}
    when nothing usable is found or parsing throws.
    '''
    try:
        # Get all the dmesg lines that might contain a CPU string
        lines = output.split(' CPU0:')[1:] + \
                output.split(' CPU1:')[1:] + \
                output.split(' CPU:')[1:] + \
                output.split('\nCPU0:')[1:] + \
                output.split('\nCPU1:')[1:] + \
                output.split('\nCPU:')[1:]
        lines = [l.split('\n')[0].strip() for l in lines]

        # Convert the lines to CPU strings
        cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

        # Find the CPU string that has the most fields
        best_string = None
        highest_count = 0
        for cpu_string in cpu_strings:
            count = sum([n is not None for n in cpu_string])
            if count > highest_count:
                highest_count = count
                best_string = cpu_string

        # If no CPU string was found, return {}
        if not best_string:
            return {}

        hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

        # Origin line overrides the fields found inline, when present
        if ' Origin=' in output:
            fields = output[output.find(' Origin=') : ].split('\n')[0]
            fields = fields.strip().split()
            fields = [n.strip().split('=') for n in fields]
            fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

            for field in fields:
                name = list(field.keys())[0]
                value = list(field.values())[0]
                if name == 'origin':
                    vendor_id = value.strip('"')
                elif name == 'stepping':
                    stepping = int(value.lstrip('0x'), 16)
                elif name == 'model':
                    model = int(value.lstrip('0x'), 16)
                elif name in ['fam', 'family']:
                    family = int(value.lstrip('0x'), 16)

        # Features: gather the flag names from the "<...>" lists
        flag_lines = []
        for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
            if category in output:
                flag_lines.append(output.split(category)[1].split('\n')[0])

        flags = []
        for line in flag_lines:
            line = line.split('<')[1].split('>')[0].lower()
            for flag in line.split(','):
                flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

        # If advertised hz not found, use the actual hz
        if hz_advertised == '0.0':
            scale = 6
            hz_advertised = _to_decimal_string(hz_actual)

        info = {
            'vendor_id_raw' : vendor_id,
            'brand_raw' : processor_brand,
            'stepping' : stepping,
            'model' : model,
            'family' : family,
            'flags' : flags
        }

        if hz_advertised and hz_advertised != '0.0':
            info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
            info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

        if hz_advertised and hz_advertised != '0.0':
            info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
            info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

        return {k: v for k, v in info.items() if v}
    except:
        #raise
        pass

    return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
    '''
    Returns True when SE Linux is enforcing and blocks executable
    heap/memory, which would prevent the ctypes based CPUID path.
    Returns False when sestatus is missing or fails to run.
    '''
    # Just return if the SE Linux Status Tool is not installed
    if not DataSource.has_sestatus():
        return False

    # Run sestatus, and just return if it failed to run
    returncode, output = DataSource.sestatus_b()
    if returncode != 0:
        return False

    lines = [line.strip().lower() for line in output.splitlines()]

    # An explicit mode line settles it immediately
    for line in lines:
        if line.startswith("current mode:"):
            return line.endswith("enforcing")

    # Otherwise, check whether we may execute heap and memory
    can_exec_heap = any(l.startswith("allow_execheap") and l.endswith("on") for l in lines)
    can_exec_memory = any(l.startswith("allow_execmem") and l.endswith("on") for l in lines)

    return not (can_exec_heap and can_exec_memory)
class CPUID(object):
def __init__(self):
self.prochandle = None
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = _is_selinux_enforcing()
def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
byte_code = bytes.join(b'', byte_code)
address = None
if DataSource.is_windows:
# Allocate a memory segment the size of the byte code, and make it executable
size = len(byte_code)
# Alloc at least 1 page to ensure we own all pages that we want to change protection on
if size < 0x1000: size = 0x1000
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
if not address:
raise Exception("Failed to VirtualAlloc")
# Copy the byte code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(address, byte_code, size) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
old_protect = ctypes.c_ulong(0)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
if not res:
raise Exception("Failed VirtualProtect")
# Flush Instruction Cache
# First, get process Handle
if not self.prochandle:
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
# Actually flush cache
res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
if not res:
raise Exception("Failed FlushInstructionCache")
else:
# Allocate a memory segment the size of the byte code
size = len(byte_code)
pfnvalloc = ctypes.pythonapi.valloc
pfnvalloc.restype = ctypes.c_void_p
address = pfnvalloc(ctypes.c_size_t(size))
if not address:
raise Exception("Failed to valloc")
# Mark the memory segment as writeable only
if not self.is_selinux_enforcing:
WRITE = 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
raise Exception("Failed to mprotect")
# Copy the byte code into the memory segment
if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
raise Exception("Failed to memmove")
# Mark the memory segment as writeable and executable only
if not self.is_selinux_enforcing:
WRITE_EXECUTE = 0x2 | 0x4
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
raise Exception("Failed to mprotect")
# Cast the memory segment into a function
functype = ctypes.CFUNCTYPE(restype, *argtypes)
fun = functype(address)
return fun, address
def _run_asm(self, *byte_code):
# Convert the byte code into a function that returns an int
restype = ctypes.c_uint32
argtypes = ()
func, address = self._asm_func(restype, argtypes, byte_code)
# Call the byte code like a function
retval = func()
byte_code = bytes.join(b'', byte_code)
size = ctypes.c_size_t(len(byte_code))
# Free the function memory segment
if DataSource.is_windows:
MEM_RELEASE = ctypes.c_ulong(0x8000)
ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
else:
# Remove the executable tag on the memory
READ_WRITE = 0x1 | 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
raise Exception("Failed to mprotect")
ctypes.pythonapi.free(ctypes.c_void_p(address))
return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
return (
b"\x31\xC0" # xor eax,eax
)
def _zero_ecx(self):
return (
b"\x31\xC9" # xor ecx,ecx
)
def _one_eax(self):
return (
b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
self._zero_eax(),
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Each 4bits is a ascii letter in the name
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model = (eax >> 16) & 0xF # 4 bits
extended_family = (eax >> 20) & 0xFF # 8 bits
return {
'stepping' : stepping,
'model' : model,
'family' : family,
'processor_type' : processor_type,
'extended_model' : extended_model,
'extended_family' : extended_family
}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : _is_bit_set(edx, 0),
'vme' : _is_bit_set(edx, 1),
'de' : _is_bit_set(edx, 2),
'pse' : _is_bit_set(edx, 3),
'tsc' : _is_bit_set(edx, 4),
'msr' : _is_bit_set(edx, 5),
'pae' : _is_bit_set(edx, 6),
'mce' : _is_bit_set(edx, 7),
'cx8' : _is_bit_set(edx, 8),
'apic' : _is_bit_set(edx, 9),
#'reserved1' : _is_bit_set(edx, 10),
'sep' : _is_bit_set(edx, 11),
'mtrr' : _is_bit_set(edx, 12),
'pge' : _is_bit_set(edx, 13),
'mca' : _is_bit_set(edx, 14),
'cmov' : _is_bit_set(edx, 15),
'pat' : _is_bit_set(edx, 16),
'pse36' : _is_bit_set(edx, 17),
'pn' : _is_bit_set(edx, 18),
'clflush' : _is_bit_set(edx, 19),
#'reserved2' : _is_bit_set(edx, 20),
'dts' : _is_bit_set(edx, 21),
'acpi' : _is_bit_set(edx, 22),
'mmx' : _is_bit_set(edx, 23),
'fxsr' : _is_bit_set(edx, 24),
'sse' : _is_bit_set(edx, 25),
'sse2' : _is_bit_set(edx, 26),
'ss' : _is_bit_set(edx, 27),
'ht' : _is_bit_set(edx, 28),
'tm' : _is_bit_set(edx, 29),
'ia64' : _is_bit_set(edx, 30),
'pbe' : _is_bit_set(edx, 31),
'pni' : _is_bit_set(ecx, 0),
'pclmulqdq' : _is_bit_set(ecx, 1),
'dtes64' : _is_bit_set(ecx, 2),
'monitor' : _is_bit_set(ecx, 3),
'ds_cpl' : _is_bit_set(ecx, 4),
'vmx' : _is_bit_set(ecx, 5),
'smx' : _is_bit_set(ecx, 6),
'est' : _is_bit_set(ecx, 7),
'tm2' : _is_bit_set(ecx, 8),
'ssse3' : _is_bit_set(ecx, 9),
'cid' : _is_bit_set(ecx, 10),
#'reserved3' : _is_bit_set(ecx, 11),
'fma' : _is_bit_set(ecx, 12),
'cx16' : _is_bit_set(ecx, 13),
'xtpr' : _is_bit_set(ecx, 14),
'pdcm' : _is_bit_set(ecx, 15),
#'reserved4' : _is_bit_set(ecx, 16),
'pcid' : _is_bit_set(ecx, 17),
'dca' : _is_bit_set(ecx, 18),
'sse4_1' : _is_bit_set(ecx, 19),
'sse4_2' : _is_bit_set(ecx, 20),
'x2apic' : _is_bit_set(ecx, 21),
'movbe' : _is_bit_set(ecx, 22),
'popcnt' : _is_bit_set(ecx, 23),
'tscdeadline' : _is_bit_set(ecx, 24),
'aes' : _is_bit_set(ecx, 25),
'xsave' : _is_bit_set(ecx, 26),
'osxsave' : _is_bit_set(ecx, 27),
'avx' : _is_bit_set(ecx, 28),
'f16c' : _is_bit_set(ecx, 29),
'rdrnd' : _is_bit_set(ecx, 30),
'hypervisor' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : _is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
'sgx' : _is_bit_set(ebx, 2),
'bmi1' : _is_bit_set(ebx, 3),
'hle' : _is_bit_set(ebx, 4),
'avx2' : _is_bit_set(ebx, 5),
#'reserved' : _is_bit_set(ebx, 6),
'smep' : _is_bit_set(ebx, 7),
'bmi2' : _is_bit_set(ebx, 8),
'erms' : _is_bit_set(ebx, 9),
'invpcid' : _is_bit_set(ebx, 10),
'rtm' : _is_bit_set(ebx, 11),
'pqm' : _is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
'mpx' : _is_bit_set(ebx, 14),
'pqe' : _is_bit_set(ebx, 15),
'avx512f' : _is_bit_set(ebx, 16),
'avx512dq' : _is_bit_set(ebx, 17),
'rdseed' : _is_bit_set(ebx, 18),
'adx' : _is_bit_set(ebx, 19),
'smap' : _is_bit_set(ebx, 20),
'avx512ifma' : _is_bit_set(ebx, 21),
'pcommit' : _is_bit_set(ebx, 22),
'clflushopt' : _is_bit_set(ebx, 23),
'clwb' : _is_bit_set(ebx, 24),
'intel_pt' : _is_bit_set(ebx, 25),
'avx512pf' : _is_bit_set(ebx, 26),
'avx512er' : _is_bit_set(ebx, 27),
'avx512cd' : _is_bit_set(ebx, 28),
'sha' : _is_bit_set(ebx, 29),
'avx512bw' : _is_bit_set(ebx, 30),
'avx512vl' : _is_bit_set(ebx, 31),
'prefetchwt1' : _is_bit_set(ecx, 0),
'avx512vbmi' : _is_bit_set(ecx, 1),
'umip' : _is_bit_set(ecx, 2),
'pku' : _is_bit_set(ecx, 3),
'ospke' : _is_bit_set(ecx, 4),
#'reserved' : _is_bit_set(ecx, 5),
'avx512vbmi2' : _is_bit_set(ecx, 6),
#'reserved' : _is_bit_set(ecx, 7),
'gfni' : _is_bit_set(ecx, 8),
'vaes' : _is_bit_set(ecx, 9),
'vpclmulqdq' : _is_bit_set(ecx, 10),
'avx512vnni' : _is_bit_set(ecx, 11),
'avx512bitalg' : _is_bit_set(ecx, 12),
#'reserved' : _is_bit_set(ecx, 13),
'avx512vpopcntdq' : _is_bit_set(ecx, 14),
#'reserved' : _is_bit_set(ecx, 15),
#'reserved' : _is_bit_set(ecx, 16),
#'mpx0' : _is_bit_set(ecx, 17),
#'mpx1' : _is_bit_set(ecx, 18),
#'mpx2' : _is_bit_set(ecx, 19),
#'mpx3' : _is_bit_set(ecx, 20),
#'mpx4' : _is_bit_set(ecx, 21),
'rdpid' : _is_bit_set(ecx, 22),
#'reserved' : _is_bit_set(ecx, 23),
#'reserved' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
#'reserved' : _is_bit_set(ecx, 26),
#'reserved' : _is_bit_set(ecx, 27),
#'reserved' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
'sgx_lc' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : _is_bit_set(ebx, 0),
'vme' : _is_bit_set(ebx, 1),
'de' : _is_bit_set(ebx, 2),
'pse' : _is_bit_set(ebx, 3),
'tsc' : _is_bit_set(ebx, 4),
'msr' : _is_bit_set(ebx, 5),
'pae' : _is_bit_set(ebx, 6),
'mce' : _is_bit_set(ebx, 7),
'cx8' : _is_bit_set(ebx, 8),
'apic' : _is_bit_set(ebx, 9),
#'reserved' : _is_bit_set(ebx, 10),
'syscall' : _is_bit_set(ebx, 11),
'mtrr' : _is_bit_set(ebx, 12),
'pge' : _is_bit_set(ebx, 13),
'mca' : _is_bit_set(ebx, 14),
'cmov' : _is_bit_set(ebx, 15),
'pat' : _is_bit_set(ebx, 16),
'pse36' : _is_bit_set(ebx, 17),
#'reserved' : _is_bit_set(ebx, 18),
'mp' : _is_bit_set(ebx, 19),
'nx' : _is_bit_set(ebx, 20),
#'reserved' : _is_bit_set(ebx, 21),
'mmxext' : _is_bit_set(ebx, 22),
'mmx' : _is_bit_set(ebx, 23),
'fxsr' : _is_bit_set(ebx, 24),
'fxsr_opt' : _is_bit_set(ebx, 25),
'pdpe1gp' : _is_bit_set(ebx, 26),
'rdtscp' : _is_bit_set(ebx, 27),
#'reserved' : _is_bit_set(ebx, 28),
'lm' : _is_bit_set(ebx, 29),
'3dnowext' : _is_bit_set(ebx, 30),
'3dnow' : _is_bit_set(ebx, 31),
'lahf_lm' : _is_bit_set(ecx, 0),
'cmp_legacy' : _is_bit_set(ecx, 1),
'svm' : _is_bit_set(ecx, 2),
'extapic' : _is_bit_set(ecx, 3),
'cr8_legacy' : _is_bit_set(ecx, 4),
'abm' : _is_bit_set(ecx, 5),
'sse4a' : _is_bit_set(ecx, 6),
'misalignsse' : _is_bit_set(ecx, 7),
'3dnowprefetch' : _is_bit_set(ecx, 8),
'osvw' : _is_bit_set(ecx, 9),
'ibs' : _is_bit_set(ecx, 10),
'xop' : _is_bit_set(ecx, 11),
'skinit' : _is_bit_set(ecx, 12),
'wdt' : _is_bit_set(ecx, 13),
#'reserved' : _is_bit_set(ecx, 14),
'lwp' : _is_bit_set(ecx, 15),
'fma4' : _is_bit_set(ecx, 16),
'tce' : _is_bit_set(ecx, 17),
#'reserved' : _is_bit_set(ecx, 18),
'nodeid_msr' : _is_bit_set(ecx, 19),
#'reserved' : _is_bit_set(ecx, 20),
'tbm' : _is_bit_set(ecx, 21),
'topoext' : _is_bit_set(ecx, 22),
'perfctr_core' : _is_bit_set(ecx, 23),
'perfctr_nb' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
'dbx' : _is_bit_set(ecx, 26),
'perftsc' : _is_bit_set(ecx, 27),
'pci_l2i' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
#'reserved' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		'''Return the processor brand string (e.g. "Intel(R) Core(TM) ...")
		read from CPUID leaves 0x80000002..0x80000004, or "" when the CPU
		does not support those leaves. Each leaf yields 16 ASCII characters
		packed into EAX, EBX, ECX and EDX.'''
		processor_brand = ""

		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			for instruction in instructions:
				# Each register is fetched with its own CPUID call because
				# _run_asm can only return one register per invocation.
				# EAX
				eax = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC0"   # mov ax,ax
					b"\xC3"       # ret
				)

				# EBX
				ebx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD8"   # mov ax,bx
					b"\xC3"       # ret
				)

				# ECX
				ecx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC8"   # mov ax,cx
					b"\xC3"       # ret
				)

				# EDX
				edx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD0"   # mov ax,dx
					b"\xC3"       # ret
				)

				# Combine each of the 4 bytes in each register into the string
				# (low byte first: the characters are stored little-endian)
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)

		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()

		return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
cache_info = {
'size_kb' : ecx & 0xFF,
'line_size_b' : (ecx >> 12) & 0xF,
'associativity' : (ecx >> 16) & 0xFFFF
}
return cache_info
	def get_ticks(self):
		'''Read the CPU time stamp counter via RDTSC and return it as an
		int, or None when the pointer width is neither 32 nor 64 bit.'''
		retval = None

		if DataSource.bits == '32bit':
			# Works on x86_32
			# The 64 bit counter is returned through two 32-bit
			# out-parameters because a 32-bit return register cannot
			# hold it all.
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)

			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)

			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			# Combine the two 32 bit halves into one 64 bit value
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)
			retval = get_ticks_x86_64()

		return retval
def get_raw_hz(self):
import time
start = self.get_ticks()
time.sleep(1)
end = self.get_ticks()
ticks = (end - start)
return ticks
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.

	The result is pushed onto *queue* as a base64-encoded object (it is
	serialized because this runs in a subprocess).
	'''

	# Pipe all output to nothing (this subprocess must stay silent)
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return

	# Return none if SE Linux is in enforcing mode
	# (it would block executing the generated machine code)
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return

	# Get the cpu info from the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()

	processor_brand = cpuid.get_processor_brand(max_extension_support)

	# Get the Hz and scale
	hz_actual = cpuid.get_raw_hz()
	hz_actual = _to_decimal_string(hz_actual)

	# Get the Hz and scale
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

	# NOTE: the old 'info' dict is still read on the right-hand side below;
	# the rebinding only happens after the literal is fully evaluated.
	info = {
		'vendor_id_raw' : cpuid.get_vendor_id(),
		'hardware_raw' : '',
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),

		'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
		'l2_cache_line_size' : cache_info['line_size_b'],
		'l2_cache_associativity' : hex(cache_info['associativity']),

		'stepping' : info['stepping'],
		'model' : info['model'],
		'family' : info['family'],
		'processor_type' : info['processor_type'],
		'extended_model' : info['extended_model'],
		'extended_family' : info['extended_family'],
		'flags' : cpuid.get_flags(max_extension_support)
	}

	# Drop empty/zero fields, then hand the result back to the parent process
	info = {k: v for k, v in info.items() if v}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue

	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		return {}

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		return {}

	try:
		# Start running the function in a subprocess
		# (a subprocess is used because executing raw machine code via
		# ctypes may crash the interpreter)
		queue = Queue()
		p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		p.start()

		# Wait for the process to end, while it is still alive
		while p.is_alive():
			p.join(0)

		# Return {} if it failed
		if p.exitcode != 0:
			return {}

		# Return the result, only if there is something to read
		if not queue.empty():
			output = queue.get()
			return _b64_to_obj(output)
	except:
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields
		# NOTE(review): _get_field is defined elsewhere in this file; args
		# appear to be (cannot_be_number, output, converter, default, *names).
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		# (brand parsing can fail on exotic strings, hence the inner try)
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
			'hardware_raw' : hardware,
			'brand_raw' : processor_brand,
			'l3_cache_size' : _to_friendly_bytes(cache_size),
			'flags' : flags,
			'vendor_id_raw' : vendor_id,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		# (actual Hz always uses scale 6: /proc/cpuinfo reports MHz)
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		hz_brand, scale = '0.0', 0

		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Isolate the frequency token, e.g. "2.40 GHz", from the
		# "current CPU frequency is ..." line
		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		assert(i != -1)
		hz_brand = hz_brand[0 : i+2].strip().lower()

		# Pick the scale from the unit suffix, then strip the suffix off
		if hz_brand.endswith('mhz'):
			scale = 6
		elif hz_brand.endswith('ghz'):
			scale = 9
		hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
		hz_brand = _to_decimal_string(hz_brand)

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_advertised' : _hz_short_to_full(hz_brand, scale),
			'hz_actual' : _hz_short_to_full(hz_brand, scale),
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Bail out when dmesg is unavailable
	if not DataSource.has_dmesg():
		return {}

	# Bail out when running dmesg fails or produces nothing
	returncode, output = DataSource.dmesg_a()
	if returncode != 0 or output is None:
		return {}

	return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Filter out invalid characters from output
		# (keep only hex digits; everything else is lsprop formatting)
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Get data converted to Uint32 chunks
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags
		# (bit positions follow the LoPAPR attribute table referenced above)
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),

			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),

			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		# Drop empty fields
		info = {k: v for k, v in info.items() if v}

		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Bail out when the boot log file is missing
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	# Bail out when reading the boot log fails or yields nothing
	returncode, boot_log = DataSource.cat_var_run_dmesg_boot()
	if returncode != 0 or boot_log is None:
		return {}

	return _parse_dmesg_output(boot_log)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	'''
	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Various fields (the machdep.cpu.* keys come from sysctl)
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Flags: merged from the three machdep feature lists
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# hw.cpufrequency is used as-is (scale 0); the key name indicates
		# it is already in Hz
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	# Merge both sysinfo parsers; fields found by v2 win on conflict.
	merged = _get_cpu_info_from_sysinfo_v1()
	merged.update(_get_cpu_info_from_sysinfo_v2())
	return merged
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo (older output format).
	Returns {} if sysinfo is not found.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields, scraped from the 'CPU #0: "..."' line
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags: feature names appear on the double-tab-indented lines
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# (no separate actual Hz in this format, so reuse the advertised one)
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo (newer output format).
	Returns {} if sysinfo is not found.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags
		# Collect flag tokens from a section's indented continuation lines
		def get_subsection_flags(output):
			retval = []
			for line in output.split('\n')[1:]:
				# A non-indented line ends the subsection
				if not line.startswith('    ') and not line.startswith('\t'): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)
			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# (the first line reads "... running at NNNNMHz"; no separate
		# actual Hz in this format, so reuse the advertised one)
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Break the list into key values pairs
		# (wmic prints one "Key=Value" pair per line)
		value = output.split("\n")
		value = [s.rstrip().split('=') for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz (CurrentClockSpeed is reported in MHz, so scale 6)
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes (wmic reports these as plain KB numbers)
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Get family, model, and stepping by scanning the description for
		# "Family N", "Model N" and "Stepping N" tokens
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz (the registry stores MHz, hence scale 6 below)
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()

		# The registry feature word is tested MSB-first
		def is_set(bit):
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 6),

			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}

		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}

		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}

		# Various fields, scraped from kstat's tab-indented "name value" lines
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

		# Flags: the last line of isainfo -vb output holds the feature names
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# Convert from GHz/MHz string to Hz (clock_MHz -> scale 6)
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)

		# current_clock_Hz is already in Hz, so it uses scale 0 below
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''Scrape family, model and stepping from the platform uname string
	(e.g. "Intel64 Family 6 Model 158 Stepping 10, GenuineIntel").
	Returns {} when the tokens are absent or anything goes wrong.'''
	try:
		# Only the part before the first comma carries the signature tokens
		signature = DataSource.uname_string_raw.split(',')[0]
		tokens = signature.split(' ')

		# Each value follows its label token ("Family 6", "Model 158", ...)
		info = {}
		for key, label in [('family', 'Family'), ('model', 'Model'), ('stepping', 'Stepping')]:
			if label in tokens and tokens.index(label) < len(tokens) - 1:
				info[key] = int(tokens[tokens.index(label) + 1])

		# Drop empty/zero fields, matching the other parsers in this file
		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.

	Sources are queried in priority order; _copy_new_fields only fills in
	fields that earlier (higher priority) sources did not provide.
	'''
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}

	# Try the Windows wmic
	_copy_new_fields(info, _get_cpu_info_from_wmic())

	# Try the Windows registry
	_copy_new_fields(info, _get_cpu_info_from_registry())

	# Try /proc/cpuinfo
	_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())

	# Try cpufreq-info
	_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())

	# Try LSCPU
	_copy_new_fields(info, _get_cpu_info_from_lscpu())

	# Try sysctl
	_copy_new_fields(info, _get_cpu_info_from_sysctl())

	# Try kstat
	_copy_new_fields(info, _get_cpu_info_from_kstat())

	# Try dmesg
	_copy_new_fields(info, _get_cpu_info_from_dmesg())

	# Try /var/run/dmesg.boot
	_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())

	# Try lsprop ibm,pa-features
	_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())

	# Try sysinfo
	_copy_new_fields(info, _get_cpu_info_from_sysinfo())

	# Try querying the CPU cpuid register
	_copy_new_fields(info, _get_cpu_info_from_cpuid())

	# Try platform.uname
	_copy_new_fields(info, _get_cpu_info_from_platform_uname())

	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''
	import json

	output = None

	# If running under pyinstaller, run normally
	if getattr(sys, 'frozen', False):
		info = _get_cpu_info_internal()
		output = json.dumps(info)
		output = "{0}".format(output)
	# if not running under pyinstaller, run in another process.
	# This is done because multiprocesing has a design flaw that
	# causes non main programs to run multiple times on Windows.
	else:
		from subprocess import Popen, PIPE

		# Re-run this very file with --json and capture its stdout
		command = [sys.executable, __file__, '--json']
		p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
		output = p1.communicate()[0]

		if p1.returncode != 0:
			return "{}"

		# Popen returns bytes on Python 3; decode to str
		if not IS_PY2:
			output = output.decode(encoding='UTF-8')

	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''
	import json

	raw_json = get_cpu_info_json()

	# Decode the JSON, converting any unicode strings to plain str
	return json.loads(raw_json, object_hook = _utf_to_str)
def main():
	'''Command line entry point: print CPU info as JSON (--json), the
	py-cpuinfo version (--version), or a human readable field listing.'''
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()

	# Exit early with a message if the arch is unsupported
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# Human readable report; missing fields print as blanks
		print('Python Version: {0}'.format(info.get('python_version', '')))
		print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
		print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
		print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
		print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
		print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
		print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
		print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
		print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
		print('Arch: {0}'.format(info.get('arch', '')))
		print('Bits: {0}'.format(info.get('bits', '')))
		print('Count: {0}'.format(info.get('count', '')))
		print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
		print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
		print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
		print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
		print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
		print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
		print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
		print('Stepping: {0}'.format(info.get('stepping', '')))
		print('Model: {0}'.format(info.get('model', '')))
		print('Family: {0}'.format(info.get('family', '')))
		print('Processor Type: {0}'.format(info.get('processor_type', '')))
		print('Extended Model: {0}'.format(info.get('extended_model', '')))
		print('Extended Family: {0}'.format(info.get('extended_family', '')))
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
	# Run as a script: print the CPU info report
	main()
else:
	# Imported as a module: just verify the architecture is supported
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_dmesg
|
python
|
def _get_cpu_info_from_dmesg():
'''
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no dmesg
if not DataSource.has_dmesg():
return {}
# If dmesg fails return {}
returncode, output = DataSource.dmesg_a()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
|
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1587-L1601
|
[
"def has_dmesg():\n\treturn len(_program_paths('dmesg')) > 0\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# py-cpuinfo's own version, exposed as both a tuple and a dotted string
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
# The registry module is Windows-only: _winreg on Python 2, winreg on
# Python 3. On other platforms neither import succeeds, which is fine
# because the registry helpers are only called when is_windows is True.
try:
	import _winreg as winreg
except ImportError as err:
	try:
		import winreg
	except ImportError as err:
		pass
# True when running under Python 2 (used to skip bytes->str decoding, etc.)
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''
	Central collection of every probe py-cpuinfo uses to gather raw data:
	platform facts read at import time, has_*() checks for the presence
	of external tools/files, wrappers that run those tools, and Windows
	registry readers. Everything is a class/static attribute, so the
	class is never instantiated and is easy to stub out in tests.
	'''
	# Facts that can be read without running any external program
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	uname_string_raw = platform.uname()[5]
	can_cpuid = True
	# --- availability checks -------------------------------------------
	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')
	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0
	@staticmethod
	def has_var_run_dmesg_boot():
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0
	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0
	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0
	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0
	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0
	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0
	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0
	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0
	@staticmethod
	def has_wmic():
		# wmic availability is detected by actually running it
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0
	# --- external tool wrappers: each returns (returncode, stdout) -----
	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])
	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])
	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])
	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])
	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])
	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])
	@staticmethod
	def ibm_pa_features():
		# Reads the first ibm,pa-features device-tree node (PowerPC only);
		# returns None implicitly when no node exists.
		import glob
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])
	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
	# --- Windows registry readers --------------------------------------
	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()
	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw
	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw
	@staticmethod
	def winreg_hz_actual():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual
	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''
	Run command and return (returncode, stdout_text).

	When pipe_command is given, command's stdout is piped into it and
	the second process's return code and output are reported instead.
	stdout is decoded as UTF-8 on Python 3.
	'''
	from subprocess import Popen, PIPE
	first = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		last = Popen(pipe_command, stdin=first.stdout, stdout=PIPE, stderr=PIPE)
		# Let the first process receive SIGPIPE if the consumer exits early
		first.stdout.close()
	else:
		last = first
	output = last.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	'''Raise an Exception when the host CPU architecture is unsupported.'''
	supported = ('X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64')
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''
	Recursively convert Python 2 unicode values to UTF-8 byte strings,
	descending into lists and dicts. On Python 3 (IS_PY2 False) values
	pass through unchanged, since str is already unicode there.
	'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(entry) for entry in input]
	if isinstance(input, dict):
		return dict(
			(_utf_to_str(k), _utf_to_str(v)) for k, v in input.items()
		)
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''
	Look up a field via _get_field_actual, optionally convert it with
	convert_to, and fall back to default_value on a miss or a failed
	conversion.
	'''
	value = _get_field_actual(cant_be_number, raw_string, field_names)
	if value and convert_to:
		try:
			value = convert_to(value)
		except:
			value = default_value
	if value is None:
		value = default_value
	return value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''
	Parse a human-friendly frequency string like "2.8 GHz" into an
	integer (hz, fraction) pair via _hz_short_to_full.
	Returns (0, 0) on any failure.
	'''
	try:
		lowered = hz_string.strip().lower()
		scale = None
		if lowered.endswith('ghz'):
			scale = 9
		elif lowered.endswith('mhz'):
			scale = 6
		elif lowered.endswith('hz'):
			scale = 0
		number = "".join(ch for ch in hz_string if ch.isdigit() or ch == '.').strip()
		if '.' not in number:
			number += '.0'
		return _hz_short_to_full(number, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''
	Format a decimal-string Hz value plus power-of-ten scale as a
	human-friendly string, e.g. ('2.8', 9) -> '2.8 GHz'. The unit is
	chosen from the digit count of the full Hz value and the result is
	rounded to at most 4 decimal places with trailing zeros removed.
	Returns '0.0000 Hz' on any failure.
	'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)
		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')
		# Get the Hz symbol and scale
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3
		# Get the Hz with the dot at the new scaled point
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		result = result.rstrip('0')
		return result
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''
	Pull the clock speed out of a CPU brand string such as
	"Intel(R) ... CPU @ 2.80GHz". Returns (hz_string, scale), where
	scale is the power of ten implied by the unit (9 for GHz, 6 for
	MHz), or ('0.0', 0) when the string carries no Hz at all.
	'''
	lowered = cpu_string.lower()
	# Bail out early when no frequency unit appears at all
	if 'hz' not in lowered:
		return ('0.0', 0)
	scale = 0
	if lowered.endswith('mhz'):
		scale = 6
	elif lowered.endswith('ghz'):
		scale = 9
	# The speed is either after an '@' or is the last whitespace token
	if '@' in lowered:
		speed = lowered.split('@')[1]
	else:
		speed = lowered.rsplit(None, 1)[1]
	speed = speed.rstrip('mhz').rstrip('ghz').strip()
	return (_to_decimal_string(speed), scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''
	Parse an extended CPU brand string as printed by BSD dmesg, e.g.
	"Intel(R) ... CPU @ 2.80GHz (2793.59-MHz K8-class CPU)".

	Returns (hz_brand, scale, brand, vendor_id, stepping, model, family)
	where any field that cannot be found is None ('0.0'/0 for the Hz
	pair).
	'''
	import re
	# Find all the strings inside brackets ().
	# FIX: the patterns are now raw strings -- '\(' in a plain string is
	# an invalid escape sequence (DeprecationWarning, an error in future
	# Python versions).
	starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
	ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]
	# Find all the "name: value" fields inside the brackets
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)
	# Find the Processor Brand by stripping bracketed suffixes off the end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True
	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)
	# Fall back to Hz found inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break
	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Extract CPU info from BSD-style dmesg text.

	Picks the most complete "CPU:" brand line, then augments it with the
	" Origin=" line (vendor/stepping/model/family) and the " Features="
	lines (flags). Returns a dict containing only the truthy fields
	found; returns {} on any parse failure (best-effort by design).
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
			output.split(' CPU1:')[1:] + \
			output.split(' CPU:')[1:] + \
			output.split('\nCPU0:')[1:] + \
			output.split('\nCPU1:')[1:] + \
			output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]
		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string
		# If no CPU string was found, return {}
		if not best_string:
			return {}
		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
		# Origin line, e.g. 'Origin="GenuineIntel" Id=0x306a9 Family=0x6 ...'
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)
		# Features: flag names appear between <...> after each category
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])
		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
		# Drop falsy fields so callers can merge with _copy_new_fields
		return {k: v for k, v in info.items() if v}
	except:
		#raise
		pass
	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''
	Return True when SELinux status (via sestatus -b) indicates that the
	executable-memory tricks CPUID needs would be blocked. False when
	sestatus is missing, fails, or says the mode is not enforcing.
	'''
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False
	# Run sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False
	# When the output names the current mode explicitly, trust it
	for raw_line in output.splitlines():
		mode_line = raw_line.strip().lower()
		if mode_line.startswith("current mode:"):
			return mode_line.endswith("enforcing")
	# Otherwise infer from whether exec heap AND exec memory are allowed
	exec_heap_allowed = False
	exec_mem_allowed = False
	for raw_line in output.splitlines():
		flag_line = raw_line.strip().lower()
		if flag_line.startswith("allow_execheap") and flag_line.endswith("on"):
			exec_heap_allowed = True
		elif flag_line.startswith("allow_execmem") and flag_line.endswith("on"):
			exec_mem_allowed = True
	return not (exec_heap_allowed and exec_mem_allowed)
class CPUID(object):
	def __init__(self):
		# Cached Windows process handle; set lazily in _asm_func when the
		# instruction cache first needs flushing
		self.prochandle = None
		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()
	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Turn raw machine-code bytes into a callable ctypes function.

		Allocates a memory block (VirtualAlloc on Windows, valloc
		elsewhere), copies byte_code into it, marks it executable, and
		returns (function, address) so the caller can free the block
		after use. On Windows the instruction cache is flushed as well.
		NOTE(review): byte_code=[] is a mutable default, but it is only
		read (joined) here, never mutated, so it is safe as written.
		'''
		byte_code = bytes.join(b'', byte_code)
		address = None
		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000
			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")
			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")
			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")
			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")
			# Mark the memory segment as writeable only
			# (skipped under SELinux enforcing, where mprotect would be denied)
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")
			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")
			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")
		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address
	def _run_asm(self, *byte_code):
		'''
		Execute the given machine-code byte strings as a function
		returning a 32-bit unsigned int, then free the executable
		memory segment and return the result.
		'''
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)
		# Call the byte code like a function
		retval = func()
		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))
		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory before freeing it
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")
			ctypes.pythonapi.free(ctypes.c_void_p(address))
		return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
return (
b"\x31\xC0" # xor eax,eax
)
def _zero_ecx(self):
return (
b"\x31\xC9" # xor ecx,ecx
)
def _one_eax(self):
return (
b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		'''
		Return the 12-character vendor ID string (e.g. "GenuineIntel")
		from cpuid leaf 0: EBX, EDX, ECX each carry 4 ASCII bytes,
		little-endian within each register.
		'''
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2" # cpuid
			b"\x89\xD8" # mov ax,bx
			b"\xC3" # ret
		)
		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2" # cpuid
			b"\x89\xC8" # mov ax,cx
			b"\xC3" # ret
		)
		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2" # cpuid
			b"\x89\xD0" # mov ax,dx
			b"\xC3" # ret
		)
		# Each 4bits is a ascii letter in the name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)
		return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		'''
		Decode cpuid leaf 1's EAX into its bit fields and return them as
		a dict: stepping, model, family, processor_type, extended_model,
		extended_family.
		'''
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2" # cpuid
			b"\xC3" # ret
		)
		# Get the CPU info
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits
		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		'''
		Return the highest extended cpuid leaf this CPU supports
		(cpuid with EAX = 0x80000000).
		'''
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2" # cpuid
			b"\xC3" # ret
		)
		return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		'''
		Return the sorted list of CPU feature-flag names that are set.

		Reads cpuid leaf 1 (EDX/ECX), then adds leaf 7 subleaf 0
		(EBX/ECX) and extended leaf 0x80000001 (EBX/ECX) when
		max_extension_support shows those leaves exist. Bit positions
		follow the public cpuid feature-bit tables.
		'''
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2" # cpuid
			b"\x89\xD0" # mov ax,dx
			b"\xC3" # ret
		)
		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2" # cpuid
			b"\x89\xC8" # mov ax,cx
			b"\xC3" # ret
		)
		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),
			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}
		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2" # cpuid
				b"\x89\xD8" # mov ax,bx
				b"\xC3" # ret
			)
			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2" # cpuid
				b"\x89\xC8" # mov ax,cx
				b"\xC3" # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),
				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2" # cpuid
				b"\x89\xD8" # mov ax,bx
				b"\xC3" # ret
			)
			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2" # cpuid
				b"\x89\xC8" # mov ax,cx
				b"\xC3" # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),
				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		flags.sort()
		return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
processor_brand = ""
# Processor brand string
if max_extension_support >= 0x80000004:
instructions = [
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
]
for instruction in instructions:
# EAX
eax = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC0" # mov ax,ax
b"\xC3" # ret
)
# EBX
ebx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Combine each of the 4 bytes in each register into the string
for reg in [eax, ebx, ecx, edx]:
for n in [0, 8, 16, 24]:
processor_brand += chr((reg >> n) & 0xFF)
# Strip off any trailing NULL terminators and white space
processor_brand = processor_brand.strip("\0").strip()
return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
cache_info = {
'size_kb' : ecx & 0xFF,
'line_size_b' : (ecx >> 12) & 0xF,
'associativity' : (ecx >> 16) & 0xFFFF
}
return cache_info
	def get_ticks(self):
		'''
		Returns the CPU's current time stamp counter (RDTSC) as an int, or
		None when running on something other than 32/64 bit x86.
		The leading cpuid acts as a serializing instruction so earlier
		instructions retire before the counter is read.
		'''
		retval = None

		if DataSource.bits == '32bit':
			# Works on x86_32
			# 32 bit mode cannot return a 64 bit value directly, so the stub
			# writes EDX:EAX through two pointer out-parameters instead.
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)

			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)

			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			# Glue EDX:EAX back together into a single 64 bit tick count
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64
			# NOTE: the 0x48 bytes below are REX.W prefixes in 64 bit mode
			# (they only disassemble as "dec ax" in 16/32 bit mode).
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)
			retval = get_ticks_x86_64()
		return retval
def get_raw_hz(self):
import time
start = self.get_ticks()
time.sleep(1)
end = self.get_ticks()
ticks = (end - start)
return ticks
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.

	Puts the resulting info dict (possibly empty), encoded via _obj_to_b64,
	on *queue*.
	'''

	# Pipe all output to nothing
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return

	# Return none if SE Linux is in enforcing mode
	# (it blocks the executable-memory tricks CPUID needs)
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return

	# Get the cpu info from the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()

	processor_brand = cpuid.get_processor_brand(max_extension_support)

	# Get the Hz and scale
	# get_raw_hz measures real Hz, so scale 0 is used for hz_actual below
	hz_actual = cpuid.get_raw_hz()
	hz_actual = _to_decimal_string(hz_actual)

	# Get the Hz and scale
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

	# NOTE(review): cache_info is {} when leaf 0x80000006 is unsupported, so
	# the cache_info[...] lookups below would raise KeyError and make this
	# child exit non-zero (the caller then reports {}) -- confirm intended.
	info = {
		'vendor_id_raw' : cpuid.get_vendor_id(),
		'hardware_raw' : '',
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),

		'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
		'l2_cache_line_size' : cache_info['line_size_b'],
		'l2_cache_associativity' : hex(cache_info['associativity']),

		'stepping' : info['stepping'],
		'model' : info['model'],
		'family' : info['family'],
		'processor_type' : info['processor_type'],
		'extended_model' : info['extended_model'],
		'extended_family' : info['extended_family'],
		'flags' : cpuid.get_flags(max_extension_support)
	}

	# Drop empty/zero fields before handing the dict back
	info = {k: v for k, v in info.items() if v}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue

	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		return {}

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		return {}

	try:
		# Run the CPUID poke in a subprocess: a bad opcode there cannot
		# crash this interpreter, it just makes the child exit non-zero.
		queue = Queue()
		p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		p.start()

		# Wait for the process to end, while it is still alive
		while p.is_alive():
			p.join(0)

		# Return {} if it failed
		if p.exitcode != 0:
			return {}

		# Return the result, only if there is something to read
		if not queue.empty():
			output = queue.get()
			return _b64_to_obj(output)
	except Exception:
		# Best effort only -- but do not swallow SystemExit/KeyboardInterrupt
		# the way the old bare "except:" did.
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields (each with the alternate key names seen on
		# different kernels/architectures)
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags ('flags' on x86, 'Features' on ARM)
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		# (best effort: the brand string may not contain a frequency at all)
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
		'hardware_raw' : hardware,
		'brand_raw' : processor_brand,

		'l3_cache_size' : _to_friendly_bytes(cache_size),
		'flags' : flags,
		'vendor_id_raw' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			# NOTE(review): the guard above uses *scale* (which may be 9 from
			# the brand string) while the stored values hardcode 6, since
			# "cpu MHz" is always megahertz -- confirm the guard should not
			# also use 6.
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		hz_brand, scale = '0.0', 0

		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Example: "current CPU frequency is 800 MHz."
		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		# Use an explicit check instead of assert: asserts are stripped
		# under "python -O", which would let garbage through below.
		if i == -1:
			return {}
		hz_brand = hz_brand[0 : i+2].strip().lower()

		if hz_brand.endswith('mhz'):
			scale = 6
		elif hz_brand.endswith('ghz'):
			scale = 9

		# Remove the 3-character unit suffix by slicing. The previous
		# rstrip('mhz').rstrip('ghz') stripped *character sets*, not a
		# suffix, and only produced the right answer by accident.
		if scale:
			hz_brand = hz_brand[:-3]
		hz_brand = hz_brand.strip()
		hz_brand = _to_decimal_string(hz_brand)

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_advertised' : _hz_short_to_full(hz_brand, scale),
			'hz_actual' : _hz_short_to_full(hz_brand, scale),
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except Exception:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''
	try:
		# Just return {} if there is no lscpu
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# Hz: prefer the max frequency, fall back to the current one;
		# lscpu reports MHz, hence scale 6
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			scale = 6
			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		vendor_id = _get_field(False, output, None, None, 'Vendor ID')
		if vendor_id:
			info['vendor_id_raw'] = vendor_id

		brand = _get_field(False, output, None, None, 'Model name')
		if brand:
			info['brand_raw'] = brand

		# family/stepping/model are only stored when they are pure digits
		family = _get_field(False, output, None, None, 'CPU family')
		if family and family.isdigit():
			info['family'] = int(family)

		stepping = _get_field(False, output, None, None, 'Stepping')
		if stepping and stepping.isdigit():
			info['stepping'] = int(stepping)

		model = _get_field(False, output, None, None, 'Model')
		if model and model.isdigit():
			info['model'] = int(model)

		# Cache sizes come through as strings like "32K"; normalize them
		l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
		if l1_data_cache_size:
			info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)

		l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
		if l1_instruction_cache_size:
			info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)

		l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
		if l2_cache_size:
			info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)

		l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
		if l3_cache_size:
			info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.

	Bit meanings follow LoPAPR v11 draft, page 767:
	https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Filter out invalid characters from output
		# (keep only hex digits -- the membership list is an unordered set,
		# so its odd 'abcfed' spelling is harmless)
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Get data converted to Uint32 chunks
		# left = the property's first 4 bytes, right = the next 4
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),

			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),

			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Bail out when the boot log is missing or unreadable
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	returncode, output = DataSource.cat_var_run_dmesg_boot()
	if returncode != 0 or output is None:
		return {}

	# Let the shared dmesg parser do the actual extraction
	return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl (the machdep.cpu.* and
	hw.cpufrequency keys -- presumably Darwin/macOS; confirm).
	Returns {} if sysctl is not found.
	'''
	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Flags: merge the base, leaf-7 and extended feature lists
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# hw.cpufrequency is already in Hz, hence scale 0 below
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	# Merge both sysinfo output layouts; the v2 parser wins on overlapping keys
	merged = {}
	for parser in (_get_cpu_info_from_sysinfo_v1, _get_cpu_info_from_sysinfo_v2):
		merged.update(parser())
	return merged
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo (older output layout).
	Returns {} if sysinfo is not found.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields (sysinfo has no separate vendor/cache fields here)
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags: the double-tab-indented lines of the output hold them
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz (actual Hz is not reported
		# separately in this layout, so advertised is reused)
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo (newer output layout with a
	"Signature:" line and sectioned feature lists).
	Returns {} if sysinfo is not found.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags
		def get_subsection_flags(output):
			# Collect the indented continuation lines that follow a section
			# header; stop at the first non-indented line.
			# NOTE(review): the two startswith checks below appear identical
			# here -- the original whitespace may have been mangled; confirm
			# against upstream before relying on this.
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith(' ') and not line.startswith(' '): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)
			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# First non-empty line looks like "... running at 2400MHz ..."
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI (via "wmic cpu" KEY=VALUE output).
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Break the list into key values pairs.
		# Split on the *first* '=' only: wmic values can themselves contain
		# '=', which previously made the 2-tuple unpack below raise and the
		# whole function silently return {}.
		value = output.split("\n")
		value = [s.rstrip().split('=', 1) for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz (parsed out of the brand string)
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz (CurrentClockSpeed is in MHz, hence scale 6)
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes (reported as plain KB numbers)
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Get family, model, and stepping from the description string,
		# e.g. "Intel64 Family 6 Model 158 Stepping 10"
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except Exception:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz (the code treats the registry value as MHz:
		# scale 6 is used everywhere below)
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()

		def is_set(bit):
			# Bit 0 is the most significant bit of the 32 bit feature mask
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 6),

			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}

		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}

		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}

		# Various fields: kstat output is "\t<name> <value>\n" pairs
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

		# Flags: taken from the last line of the isainfo output
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# clock_MHz is megahertz, hence scale 6
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)

		# Convert from GHz/MHz string to Hz
		# current_clock_Hz is already in Hz, hence scale 0 below
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns the CPU family, model, and stepping parsed from the raw uname
	processor string (e.g. "Intel64 Family 6 Model 158 Stepping 10, ...").
	Returns {} if the string is missing or not in that format.
	'''
	try:
		uname = DataSource.uname_string_raw.split(',')[0]

		entries = uname.split(' ')

		# Each keyword ("Family", "Model", "Stepping") is followed by its
		# integer value; this replaces three copies of the same lookup code.
		def _value_after(keyword):
			# Returns the int after keyword, or None when absent/last token
			if keyword in entries and entries.index(keyword) < len(entries) - 1:
				return int(entries[entries.index(keyword) + 1])
			return None

		info = {
			'family' : _value_after('Family'),
			'model' : _value_after('Model'),
			'stepping' : _value_after('Stepping')
		}

		# Drop missing fields (None is falsy, matching the old behavior)
		info = {k: v for k, v in info.items() if v}
		return info
	except Exception:
		# Best effort -- but no longer a bare "except:" that would swallow
		# SystemExit/KeyboardInterrupt.
		return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}

	# Each source is tried in order; fields found by an earlier source are
	# never overwritten by a later one (_copy_new_fields only adds new keys).
	sources = (
		_get_cpu_info_from_wmic,                    # Windows wmic
		_get_cpu_info_from_registry,                # Windows registry
		_get_cpu_info_from_proc_cpuinfo,            # /proc/cpuinfo
		_get_cpu_info_from_cpufreq_info,            # cpufreq-info
		_get_cpu_info_from_lscpu,                   # lscpu
		_get_cpu_info_from_sysctl,                  # sysctl
		_get_cpu_info_from_kstat,                   # kstat
		_get_cpu_info_from_dmesg,                   # dmesg
		_get_cpu_info_from_cat_var_run_dmesg_boot,  # /var/run/dmesg.boot
		_get_cpu_info_from_ibm_pa_features,         # lsprop ibm,pa-features
		_get_cpu_info_from_sysinfo,                 # sysinfo
		_get_cpu_info_from_cpuid,                   # CPUID register
		_get_cpu_info_from_platform_uname,          # platform.uname
	)
	for source in sources:
		_copy_new_fields(info, source())

	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''
	import json

	output = None

	# If running under pyinstaller, run normally
	if getattr(sys, 'frozen', False):
		info = _get_cpu_info_internal()
		output = json.dumps(info)
		output = "{0}".format(output)
	# if not running under pyinstaller, run in another process.
	# This is done because multiprocesing has a design flaw that
	# causes non main programs to run multiple times on Windows.
	else:
		from subprocess import Popen, PIPE

		# Re-run this very module with --json and capture its stdout
		command = [sys.executable, __file__, '--json']
		p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
		output = p1.communicate()[0]

		if p1.returncode != 0:
			return "{}"

		# Popen returns bytes on Python 3; decode them to str
		if not IS_PY2:
			output = output.decode(encoding='UTF-8')

	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''
	import json

	# Parse the JSON result, converting unicode strings back to plain str
	# on Python 2 via the _utf_to_str hook.
	return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
	'''
	Command line entry point: prints the CPU info as JSON, as the
	py-cpuinfo version string, or as a human readable field list.
	'''
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()

	# Bail out early on unsupported architectures
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# (label, field) pairs printed in a fixed, human friendly order
		fields = [
			('Python Version', 'python_version'),
			('Cpuinfo Version', 'cpuinfo_version_string'),
			('Vendor ID Raw', 'vendor_id_raw'),
			('Hardware Raw', 'hardware_raw'),
			('Brand Raw', 'brand_raw'),
			('Hz Advertised Friendly', 'hz_advertised_friendly'),
			('Hz Actual Friendly', 'hz_actual_friendly'),
			('Hz Advertised', 'hz_advertised'),
			('Hz Actual', 'hz_actual'),
			('Arch', 'arch'),
			('Bits', 'bits'),
			('Count', 'count'),
			('Arch String Raw', 'arch_string_raw'),
			('L1 Data Cache Size', 'l1_data_cache_size'),
			('L1 Instruction Cache Size', 'l1_instruction_cache_size'),
			('L2 Cache Size', 'l2_cache_size'),
			('L2 Cache Line Size', 'l2_cache_line_size'),
			('L2 Cache Associativity', 'l2_cache_associativity'),
			('L3 Cache Size', 'l3_cache_size'),
			('Stepping', 'stepping'),
			('Model', 'model'),
			('Family', 'family'),
			('Processor Type', 'processor_type'),
			('Extended Model', 'extended_model'),
			('Extended Family', 'extended_family'),
		]
		for label, key in fields:
			print('{0}: {1}'.format(label, info.get(key, '')))
		# Flags are a list and get joined into a single comma separated line
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# When run as a script, print the CPU info. When imported as a module, just
# verify that this CPU architecture is supported (raises otherwise).
if __name__ == '__main__':
	main()
else:
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_ibm_pa_features
|
python
|
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Keep only the hex digits that follow the "ibm,pa-features" label
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Interpret the first 16 hex digits as two unsigned 32 bit words
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Map each feature flag name to whether its bit is set.
		# NOTE(review): bit index 0 is the *least* significant bit of each
		# 32 bit word (see _is_bit_set); confirm this matches the
		# ibm,pa-features byte layout described by the comments below.
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			# bit 4 reserved
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2 (bits 18 and 20 reserved)
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			'dar' : _is_bit_set(left, 19),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3 (bits 26-31 reserved)
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),

			# Byte 4 (bits 1-7 reserved)
			'sso_2.06' : _is_bit_set(right, 0),

			# Byte 5 (bits 12-15 reserved)
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),

			# Byte 6 (bits 17-23 reserved)
			'dss_2.07' : _is_bit_set(right, 16),

			# Byte 7 (bits 24-31) is entirely reserved
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		# Drop any empty/falsy entries so callers only see real data
		info = {k: v for k, v in info.items() if v}

		return info
	except:
		# Best effort source: any parse failure simply yields no info
		return {}
|
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1606-L1724
|
[
"def has_ibm_pa_features():\n\treturn len(_program_paths('lsprop')) > 0\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''
	Single gathering point for all external sources of CPU information.

	The class attributes are snapshots taken once at import time. Each
	has_*() static method reports whether a given tool or file is
	available; the matching command wrapper actually runs the tool and
	returns a (returncode, stdout) tuple via _run_and_get_stdout.
	'''
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	uname_string_raw = platform.uname()[5]
	can_cpuid = True

	# ---- availability probes ----

	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')

	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0

	@staticmethod
	def has_var_run_dmesg_boot():
		# Only meaningful on Linux systems that keep a boot log copy
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')

	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0

	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0

	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0

	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0

	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0

	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0

	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0

	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0

	@staticmethod
	def has_wmic():
		# No PATH probe for wmic; just try running it
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0

	# ---- command wrappers, each returns (returncode, stdout) ----

	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])

	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])

	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])

	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])

	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])

	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])

	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])

	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])

	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])

	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])

	@staticmethod
	def ibm_pa_features():
		import glob

		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])
		# NOTE(review): falls through to an implicit None when the glob is
		# empty; the caller unpacks the tuple inside a try/except that
		# absorbs the resulting TypeError.

	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

	# ---- Windows registry lookups ----

	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()

	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw

	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw

	@staticmethod
	def winreg_hz_actual():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		# Normalize to a decimal string like "2800.0"
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual

	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''
	Runs command, optionally piping its stdout through pipe_command, and
	returns a (returncode, stdout) tuple. stdout is decoded from UTF-8 on
	Python 3 and left as bytes on Python 2.
	'''
	from subprocess import Popen, PIPE

	first = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		# Feed the first process's stdout into the second; closing our
		# handle lets the first process receive SIGPIPE if the second exits.
		last = Popen(pipe_command, stdin=first.stdout, stdout=PIPE, stderr=PIPE)
		first.stdout.close()
	else:
		last = first

	output = last.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	'''
	Raises an Exception if this machine's architecture is one that
	py-cpuinfo cannot handle.
	'''
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	supported = ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''
	Recursively converts unicode strings inside input to UTF-8 byte
	strings on Python 2. On Python 3 every value is returned unchanged.
	'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(entry) for entry in input]
	if isinstance(input, dict):
		return dict((_utf_to_str(k), _utf_to_str(v)) for k, v in input.items())
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''
	Looks up the first matching field_names entry in raw_string and
	optionally converts it with convert_to. Returns default_value when the
	field is missing or the conversion fails.
	'''
	found = _get_field_actual(cant_be_number, raw_string, field_names)

	# Convert the value, falling back to the default on any failure
	if found and convert_to:
		try:
			found = convert_to(found)
		except:
			found = default_value

	return default_value if found is None else found
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''
	Converts a friendly frequency string like "2.80GHz" into a
	(whole_hz, fraction) tuple via _hz_short_to_full.
	Returns (0, 0) on any failure.
	'''
	try:
		text = hz_string.strip().lower()

		# Work out the scale from the unit suffix
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0
		else:
			# Unknown suffix: _hz_short_to_full will fail and give (0, 0)
			scale = None

		number = ''.join(ch for ch in text if ch.isdigit() or ch == '.').strip()
		if '.' not in number:
			number += '.0'

		return _hz_short_to_full(number, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''
	Converts a decimal string like "2.8" and a scale like 9 into a human
	friendly frequency string like "2.8000 GHz".
	Returns "0.0000 Hz" on any failure.
	'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)

		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')

		# Pick the unit symbol and scale from the number of integer digits
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3

		# Get the Hz with the dot at the new scaled point
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])

		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		result = result.rstrip('0')

		return result
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''
	Pulls the frequency out of a processor brand string like
	"Intel(R) Core(TM) i7 CPU @ 2.80GHz".
	Returns a (decimal_string, scale) tuple, or ('0.0', 0) when the string
	contains no frequency at all.
	'''
	lowered = cpu_string.lower()

	# Nothing to do if the brand string has no frequency in it
	if 'hz' not in lowered:
		return ('0.0', 0)

	# Work out the unit scale from the suffix
	scale = 0
	if lowered.endswith('mhz'):
		scale = 6
	elif lowered.endswith('ghz'):
		scale = 9

	# The frequency is either after an "@" or is the last token
	if '@' in lowered:
		number = lowered.split('@')[1]
	else:
		number = lowered.rsplit(None, 1)[1]

	number = number.rstrip('mhz').rstrip('ghz').strip()
	return (_to_decimal_string(number), scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''
	Parses an extended CPU brand string of the form
	"AMD Ryzen 7 (fam: 17, model: 08, stepping: 02)".
	Returns a (hz, scale, brand, vendor_id, stepping, model, family)
	tuple; fields that are not present are None (or '0.0'/0 for the Hz
	parts).
	'''
	import re

	# Find all the strings inside brackets () (raw strings so "\(" is a
	# regex escape, not an invalid string escape)
	starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
	ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]

	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				# int(value, 16) accepts an optional "0x" prefix. The
				# previous value.lstrip('0x') stripped a character *set*,
				# which turned values like "0x0" into "" and raised.
				elif name == 'stepping':
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)

	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True

	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)

	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break

	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Parses dmesg style boot log output for CPU information.
	Returns a dict with any of vendor_id_raw, brand_raw, stepping, model,
	family, flags and hz fields that could be found, or {} on any failure.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]

		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string

		# If no CPU string was found, return {}
		if not best_string:
			return {}

		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

		# Origin line, e.g. '  Origin="GenuineIntel"  Id=0x306a9  Family=0x6 ...'
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]

				if name == 'origin':
					vendor_id = value.strip('"')
				# int(value, 16) accepts an optional "0x" prefix. The
				# previous value.lstrip('0x') stripped a character *set*,
				# turning values like "0x0" into "" and raising ValueError,
				# which made the outer except discard the entire parse.
				elif name == 'stepping':
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)

		# Features, e.g. '  Features=0x1783fbff<FPU,VME,...>'
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])

		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

		return {k: v for k, v in info.items() if v}
	except:
		# Best effort parser: any malformed log just yields no info
		pass

	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''
	Returns True when SE Linux is present and configured such that memory
	cannot be made both writeable and executable.
	'''
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False

	# Run sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False

	lowered = [line.strip().lower() for line in output.splitlines()]

	# An explicit "current mode:" line decides directly
	for line in lowered:
		if line.startswith("current mode:"):
			return line.endswith("enforcing")

	# Otherwise check whether we may execute heap and memory
	can_exec_heap = any(line.startswith("allow_execheap") and line.endswith("on") for line in lowered)
	can_exec_memory = any(line.startswith("allow_execmem") and line.endswith("on") for line in lowered)

	return not (can_exec_heap and can_exec_memory)
class CPUID(object):
	def __init__(self):
		# Windows process handle, created lazily by _asm_func for
		# FlushInstructionCache.
		self.prochandle = None

		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()
	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Copies byte_code into an executable memory segment and returns
		(function, address): a ctypes callable over the machine code plus
		the raw segment address so _run_asm can free it afterwards.
		NOTE: the mutable [] default is safe here because byte_code is
		immediately rebound to a joined bytes object and never mutated.
		'''
		byte_code = bytes.join(b'', byte_code)
		address = None

		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000

			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")

			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")

			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")

			# Flush Instruction Cache
			# First, get process Handle (cached on self for reuse)
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())

			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")

			# Mark the memory segment as writeable only
			# (mprotect calls are skipped when SE Linux is enforcing)
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")

			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")

			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")

		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address
	def _run_asm(self, *byte_code):
		'''
		Executes the given machine code chunks as a function returning a
		uint32, frees the executable memory segment, and returns the
		result.
		'''
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)

		# Call the byte code like a function
		retval = func()

		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))

		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory before freeing it
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")

			ctypes.pythonapi.free(ctypes.c_void_p(address))

		return retval
	# FIXME: We should not have to use different instructions to
	# set eax to 0 or 1, on 32bit and 64bit machines.
	def _zero_eax(self):
		'''Returns the byte code that zeroes the EAX register.'''
		return (
			b"\x31\xC0"         # xor eax,eax
		)

	def _zero_ecx(self):
		'''Returns the byte code that zeroes the ECX register.'''
		return (
			b"\x31\xC9"         # xor ecx,ecx
		)

	def _one_eax(self):
		'''Returns the byte code that sets the EAX register to 1.'''
		return (
			b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
		)
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		'''
		Runs CPUID leaf 0 and returns the 12 character vendor ID string
		(e.g. "GenuineIntel") assembled from the EBX, EDX, ECX registers.
		'''
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2"	# cpuid
			b"\x89\xD8"	# mov ax,bx
			b"\xC3"		# ret
		)

		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"	# cpuid
			b"\x89\xC8"	# mov ax,cx
			b"\xC3"		# ret
		)

		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"	# cpuid
			b"\x89\xD0"	# mov ax,dx
			b"\xC3"		# ret
		)

		# Each register holds 4 ASCII characters of the name, least
		# significant byte first; the vendor string order is EBX, EDX, ECX.
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))

		vendor_id = ''.join(vendor_id)

		return vendor_id
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		'''
		Runs CPUID leaf 1 and returns a dict with the stepping, model,
		family, processor type, and extended model/family bit fields
		unpacked from the EAX register.
		'''
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"	# cpuid
			b"\xC3"		# ret
		)

		# Unpack the bit fields of EAX
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits

		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		'''
		Returns the highest extended CPUID function supported by this CPU,
		obtained by calling CPUID with EAX = 0x80000000.
		'''
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2" # cpuid
			b"\xC3" # ret
		)

		return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
	"""Return a sorted list of CPU feature flag names.

	Flags are gathered from CPUID leaf 1 (EDX/ECX), leaf 7 (EBX/ECX),
	and, when supported, extended leaf 0x80000001 (EBX/ECX). Each table
	below names the flag carried by bit *i* of a register; ``None`` marks
	bits that are reserved or deliberately unreported, mirroring the
	commented-out entries in the original per-leaf dicts. As before, a
	flag set in both the base and extended leaves appears twice before
	the final sort.

	Args:
		max_extension_support (int): result of get_max_extension_support()
	Returns:
		list: sorted feature flag name strings
	"""
	MOV_EBX = b"\x89\xD8"  # mov ax,bx
	MOV_ECX = b"\x89\xC8"  # mov ax,cx
	MOV_EDX = b"\x89\xD0"  # mov ax,dx

	def read_reg(select_leaf, mov_to_eax):
		# Run: <select leaf>; cpuid; mov eax,<reg>; ret
		return self._run_asm(
			select_leaf,
			b"\x0f\xa2" + mov_to_eax + b"\xC3"
		)

	def named_bits(reg, names):
		# Names of all set, non-reserved bits in reg.
		return [name for bit, name in enumerate(names) if name and _is_bit_set(reg, bit)]

	# Leaf 1, EDX (bits 0..31)
	leaf1_edx = [
		'fpu', 'vme', 'de', 'pse', 'tsc', 'msr', 'pae', 'mce',
		'cx8', 'apic', None, 'sep', 'mtrr', 'pge', 'mca', 'cmov',
		'pat', 'pse36', 'pn', 'clflush', None, 'dts', 'acpi', 'mmx',
		'fxsr', 'sse', 'sse2', 'ss', 'ht', 'tm', 'ia64', 'pbe'
	]
	# Leaf 1, ECX (bits 0..31)
	leaf1_ecx = [
		'pni', 'pclmulqdq', 'dtes64', 'monitor', 'ds_cpl', 'vmx', 'smx', 'est',
		'tm2', 'ssse3', 'cid', None, 'fma', 'cx16', 'xtpr', 'pdcm',
		None, 'pcid', 'dca', 'sse4_1', 'sse4_2', 'x2apic', 'movbe', 'popcnt',
		'tscdeadline', 'aes', 'xsave', 'osxsave', 'avx', 'f16c', 'rdrnd', 'hypervisor'
	]
	# Leaf 7 sub-leaf 0, EBX (bits 0..31)
	leaf7_ebx = [
		None, None, 'sgx', 'bmi1', 'hle', 'avx2', None, 'smep',
		'bmi2', 'erms', 'invpcid', 'rtm', 'pqm', None, 'mpx', 'pqe',
		'avx512f', 'avx512dq', 'rdseed', 'adx', 'smap', 'avx512ifma', 'pcommit', 'clflushopt',
		'clwb', 'intel_pt', 'avx512pf', 'avx512er', 'avx512cd', 'sha', 'avx512bw', 'avx512vl'
	]
	# Leaf 7 sub-leaf 0, ECX (bits 0..31)
	leaf7_ecx = [
		'prefetchwt1', 'avx512vbmi', 'umip', 'pku', 'ospke', None, 'avx512vbmi2', None,
		'gfni', 'vaes', 'vpclmulqdq', 'avx512vnni', 'avx512bitalg', None, 'avx512vpopcntdq', None,
		None, None, None, None, None, None, 'rdpid', None,
		None, None, None, None, None, None, 'sgx_lc', None
	]
	# Extended leaf 0x80000001, EBX (bits 0..31)
	ext_ebx = [
		'fpu', 'vme', 'de', 'pse', 'tsc', 'msr', 'pae', 'mce',
		'cx8', 'apic', None, 'syscall', 'mtrr', 'pge', 'mca', 'cmov',
		'pat', 'pse36', None, 'mp', 'nx', None, 'mmxext', 'mmx',
		'fxsr', 'fxsr_opt', 'pdpe1gp', 'rdtscp', None, 'lm', '3dnowext', '3dnow'
	]
	# Extended leaf 0x80000001, ECX (bits 0..31)
	ext_ecx = [
		'lahf_lm', 'cmp_legacy', 'svm', 'extapic', 'cr8_legacy', 'abm', 'sse4a', 'misalignsse',
		'3dnowprefetch', 'osvw', 'ibs', 'xop', 'skinit', 'wdt', None, 'lwp',
		'fma4', 'tce', None, 'nodeid_msr', None, 'tbm', 'topoext', 'perfctr_core',
		'perfctr_nb', None, 'dbx', 'perftsc', 'pci_l2i', None, None, None
	]

	flags = named_bits(read_reg(self._one_eax(), MOV_EDX), leaf1_edx)
	flags += named_bits(read_reg(self._one_eax(), MOV_ECX), leaf1_ecx)

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
	if max_extension_support >= 7:
		select = self._zero_ecx() + b"\xB8\x07\x00\x00\x00"  # mov eax,7
		flags += named_bits(read_reg(select, MOV_EBX), leaf7_ebx)
		flags += named_bits(read_reg(select, MOV_ECX), leaf7_ecx)

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
	if max_extension_support >= 0x80000001:
		select = b"\xB8\x01\x00\x00\x80"  # mov ax,0x80000001
		flags += named_bits(read_reg(select, MOV_EBX), ext_ebx)
		flags += named_bits(read_reg(select, MOV_ECX), ext_ecx)

	flags.sort()
	return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
	"""Return the processor brand string from CPUID leaves 0x80000002-0x80000004.

	Each leaf contributes 16 ASCII characters, four per register in
	EAX, EBX, ECX, EDX order, low byte first. Returns an empty string
	when the CPU does not support those leaves.
	"""
	if max_extension_support < 0x80000004:
		return ""

	leaf_selectors = [
		b"\xB8\x02\x00\x00\x80",  # mov ax,0x80000002
		b"\xB8\x03\x00\x00\x80",  # mov ax,0x80000003
		b"\xB8\x04\x00\x00\x80"   # mov ax,0x80000004
	]
	reg_movs = [
		b"\x89\xC0",  # mov ax,ax
		b"\x89\xD8",  # mov ax,bx
		b"\x89\xC8",  # mov ax,cx
		b"\x89\xD0",  # mov ax,dx
	]

	chars = []
	for selector in leaf_selectors:
		for mov_to_eax in reg_movs:
			reg = self._run_asm(
				selector,                              # mov ax,0x8000000?
				b"\x0f\xa2" + mov_to_eax + b"\xC3"     # cpuid; mov ax,<reg>; ret
			)
			for shift in (0, 8, 16, 24):
				chars.append(chr((reg >> shift) & 0xFF))

	# Strip off any trailing NULL terminators and white space
	return ''.join(chars).strip("\0").strip()
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
	"""Return L2 cache details from CPUID leaf 0x80000006, or {} if unsupported."""
	if max_extension_support < 0x80000006:
		return {}

	ecx = self._run_asm(
		b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
		b"\x0f\xa2"              # cpuid
		b"\x89\xC8"              # mov ax,cx
		b"\xC3"                  # ret
	)

	# NOTE(review): the bit positions below are kept exactly as in the
	# original implementation; per the CPUID spec the L2 size is usually
	# ECX[31:16] and line size ECX[7:0] — confirm before relying on them.
	return {
		'size_kb' : ecx & 0xFF,
		'line_size_b' : (ecx >> 12) & 0xF,
		'associativity' : (ecx >> 16) & 0xFFFF
	}
def get_ticks(self):
	"""Read the CPU time stamp counter (TSC) via RDTSC.

	Returns the 64 bit tick count, using a 32 bit or 64 bit machine-code
	stub depending on the interpreter's pointer size. The leading CPUID
	presumably serializes the pipeline before RDTSC — standard idiom,
	confirm against the stub's intent.
	"""
	retval = None

	if DataSource.bits == '32bit':
		# Works on x86_32
		# The stub stores the low/high TSC halves through the two
		# pointer arguments.
		restype = None
		argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
		get_ticks_x86_32, address = self._asm_func(restype, argtypes,
			[
			b"\x55",          # push bp
			b"\x89\xE5",      # mov bp,sp
			b"\x31\xC0",      # xor ax,ax
			b"\x0F\xA2",      # cpuid
			b"\x0F\x31",      # rdtsc
			b"\x8B\x5D\x08",  # mov bx,[di+0x8]
			b"\x8B\x4D\x0C",  # mov cx,[di+0xc]
			b"\x89\x13",      # mov [bp+di],dx
			b"\x89\x01",      # mov [bx+di],ax
			b"\x5D",          # pop bp
			b"\xC3"           # ret
			]
		)

		high = ctypes.c_uint32(0)
		low = ctypes.c_uint32(0)

		get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
		# Merge the two 32 bit halves into one 64 bit value
		retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
	elif DataSource.bits == '64bit':
		# Works on x86_64
		# The stub shifts EDX into the top half of RAX and returns the
		# full 64 bit count directly.
		restype = ctypes.c_uint64
		argtypes = ()
		get_ticks_x86_64, address = self._asm_func(restype, argtypes,
			[
			b"\x48",          # dec ax
			b"\x31\xC0",      # xor ax,ax
			b"\x0F\xA2",      # cpuid
			b"\x0F\x31",      # rdtsc
			b"\x48",          # dec ax
			b"\xC1\xE2\x20",  # shl dx,byte 0x20
			b"\x48",          # dec ax
			b"\x09\xD0",      # or ax,dx
			b"\xC3",          # ret
			]
		)
		retval = get_ticks_x86_64()
	return retval
def get_raw_hz(self):
	"""Estimate raw ticks-per-second by sampling the TSC one second apart."""
	import time

	before = self.get_ticks()
	time.sleep(1)
	after = self.get_ticks()

	return after - before
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.

	Puts a base64 encoded result dict (see _obj_to_b64) on *queue*; an
	empty dict is sent when CPUID cannot be used on this machine.
	'''
	# Pipe all output to nothing, so a crashing child process stays silent
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return

	# Return none if SE Linux is in enforcing mode
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return

	# Get the cpu info from the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()

	processor_brand = cpuid.get_processor_brand(max_extension_support)

	# Get the Hz and scale
	hz_actual = cpuid.get_raw_hz()
	hz_actual = _to_decimal_string(hz_actual)

	# Get the Hz and scale
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

	# NOTE: the dict literal below reads the leaf-1 `info` fields before
	# the assignment rebinds `info` to the final result, so this is safe.
	info = {
	'vendor_id_raw' : cpuid.get_vendor_id(),
	'hardware_raw' : '',
	'brand_raw' : processor_brand,

	'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
	'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
	'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
	'hz_actual' : _hz_short_to_full(hz_actual, 0),

	'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
	'l2_cache_line_size' : cache_info['line_size_b'],
	'l2_cache_associativity' : hex(cache_info['associativity']),

	'stepping' : info['stepping'],
	'model' : info['model'],
	'family' : info['family'],
	'processor_type' : info['processor_type'],
	'extended_model' : info['extended_model'],
	'extended_family' : info['extended_family'],
	'flags' : cpuid.get_flags(max_extension_support)
	}

	# Drop empty/zero fields before sending the result back
	info = {k: v for k, v in info.items() if v}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue

	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		return {}

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		return {}

	try:
		# Run the CPUID queries in a subprocess, because executing raw
		# machine code can crash the Python runtime.
		queue = Queue()
		p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		p.start()

		# Block until the child exits. (The previous implementation spun
		# on `while p.is_alive(): p.join(0)`, which burned a full CPU
		# core for the duration of the child's ~1 second TSC sample.)
		p.join()

		# Return {} if it failed
		if p.exitcode != 0:
			return {}

		# Return the result, only if there is something to read
		if not queue.empty():
			output = queue.get()
			return _b64_to_obj(output)
	except Exception:
		# Best effort: any failure just means this data source has nothing.
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields (alternate key names cover different kernels/arches)
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
		'hardware_raw' : hardware,
		'brand_raw' : processor_brand,

		'l3_cache_size' : _to_friendly_bytes(cache_size),
		'flags' : flags,
		'vendor_id_raw' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		# NOTE(review): the guards below test with the brand-string
		# `scale`, but the actual-Hz entries are always stored with
		# scale 6 (MHz) — confirm this asymmetry is intentional.
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Pull out the text after "current CPU frequency is", up to and
		# including the "Hz" unit suffix.
		frequency = output.split('current CPU frequency is')[1].split('\n')[0]
		unit_end = frequency.find('Hz')
		assert(unit_end != -1)
		frequency = frequency[0 : unit_end + 2].strip().lower()

		# Work out the scale from the unit, then drop the unit text
		scale = 0
		if frequency.endswith('mhz'):
			scale = 6
		elif frequency.endswith('ghz'):
			scale = 9
		frequency = frequency.rstrip('mhz').rstrip('ghz').strip()
		frequency = _to_decimal_string(frequency)

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(frequency, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(frequency, scale),
			'hz_advertised' : _hz_short_to_full(frequency, scale),
			'hz_actual' : _hz_short_to_full(frequency, scale),
		}

		# Drop empty/zero fields
		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''
	try:
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# Hz: lscpu reports MHz, so the scale is always 6
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			scale = 6

			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		# Plain string fields
		for info_key, label in (('vendor_id_raw', 'Vendor ID'), ('brand_raw', 'Model name')):
			value = _get_field(False, output, None, None, label)
			if value:
				info[info_key] = value

		# Numeric fields
		for info_key, label in (('family', 'CPU family'), ('stepping', 'Stepping'), ('model', 'Model')):
			value = _get_field(False, output, None, None, label)
			if value and value.isdigit():
				info[info_key] = int(value)

		# Cache sizes
		for info_key, label in (
			('l1_data_cache_size', 'L1d cache'),
			('l1_instruction_cache_size', 'L1i cache'),
			('l2_cache_size', 'L2 cache'),
			('l3_cache_size', 'L3 cache'),
		):
			value = _get_field(False, output, None, None, label)
			if value:
				info[info_key] = _to_friendly_bytes(value)

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		# Drop empty/zero fields
		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Nothing to do when dmesg is unavailable
	if not DataSource.has_dmesg():
		return {}

	# Bail out when running dmesg fails
	returncode, output = DataSource.dmesg_a()
	if output == None or returncode != 0:
		return {}

	return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Nothing to do when the boot log file is unavailable
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	# Bail out when reading the boot log fails
	returncode, output = DataSource.cat_var_run_dmesg_boot()
	if output == None or returncode != 0:
		return {}

	return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	'''
	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Simple key/value fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Feature flags are spread across three sysctl keys
		flags = []
		for flag_key in ('machdep.cpu.features', 'machdep.cpu.leaf7_features', 'machdep.cpu.extfeatures'):
			flags.extend(_get_field(False, output, None, '', flag_key).lower().split())
		flags.sort()

		# Advertised Hz comes from the brand string, actual Hz from hw.cpufrequency
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = _to_decimal_string(_get_field(False, output, None, None, 'hw.cpufrequency'))

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/zero fields
		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	# Merge both parser generations; v2 results win on key collisions.
	merged = _get_cpu_info_from_sysinfo_v1()
	merged.update(_get_cpu_info_from_sysinfo_v2())
	return merged
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Parses the older sysinfo output layout (presumably the BeOS/Haiku
	`sysinfo` tool — confirm); any parse failure returns {} via the
	blanket except below.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields, parsed positionally from the free-form text
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags: lines indented with two tabs list feature names
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz; no separate actual reading here
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, scale),

		'l2_cache_size' : _to_friendly_bytes(cache_size),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Parses the newer sysinfo output layout (with a "Signature:" line and
	per-leaf "Features" sections); any parse failure returns {} via the
	blanket except below.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields, parsed positionally from the free-form text
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags: each feature section lists names on indented lines
		def get_subsection_flags(output):
			# Collect entries from the indented lines that follow a
			# section header, stopping at the first non-indented line.
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith('    ') and not line.startswith('	'): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)

			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz: the first non-empty line
		# ends with "running at <freq><unit>"
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, scale),

		'l2_cache_size' : _to_friendly_bytes(cache_size),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Break the output into key/value pairs. Split on the first '='
		# only, so values that themselves contain '=' do not produce a
		# 3-element list and blow up the dict comprehension below.
		value = output.split("\n")
		value = [s.rstrip().split('=', 1) for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz from the brand string
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz (WMI reports it in MHz, hence scale 6)
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes (reported as plain KB counts)
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Get family, model, and stepping from the description text,
		# e.g. "Intel64 Family 6 Model 158 Stepping 10". Fall back to
		# an empty string so a missing description only skips these
		# fields instead of discarding the whole record.
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption') or ''
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Drop empty/zero fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}

		# Basic identification straight from the registry
		processor_brand = DataSource.winreg_processor_brand().strip()
		vendor_id = DataSource.winreg_vendor_id_raw()
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Actual Hz is stored directly; advertised Hz is parsed from the
		# brand string, falling back to the actual value in MHz.
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Feature bits are numbered MSB first: bit N tests mask
		# 0x80000000 >> N. The table names bit 0..31 in order; None
		# marks reserved bits.
		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		feature_bits = DataSource.winreg_feature_bits()
		flag_names = [
			'fpu', 'vme', 'de', 'pse', 'tsc', 'msr', 'pae', 'mce',
			'cx8', 'apic', 'sepamd', 'sep', 'mtrr', 'pge', 'mca', 'cmov',
			'pat', 'pse36', 'serial', 'clflush', None, 'dts', 'acpi', 'mmx',
			'fxsr', 'sse', 'sse2', 'ss', None, 'tm', 'ia64', '3dnow'
		]
		flags = [
			name for bit, name in enumerate(flag_names)
			if name and ((0x80000000 >> bit) & feature_bits) > 0
		]
		flags.sort()

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 6),

			'flags' : flags
		}

		# Drop empty/zero fields
		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_from_kstat():
    '''
    Returns the CPU info gathered from isainfo and kstat (Solaris tools).
    Returns {} if isainfo or kstat are not found, fail, or print output
    that does not contain the expected tab-prefixed field names.
    '''
    try:
        # Just return {} if there is no isainfo or kstat
        if not DataSource.has_isainfo() or not DataSource.has_kstat():
            return {}

        # If isainfo fails return {}
        returncode, flag_output = DataSource.isainfo_vb()
        if flag_output == None or returncode != 0:
            return {}

        # If kstat fails return {}
        returncode, kstat = DataSource.kstat_m_cpu_info()
        if kstat == None or returncode != 0:
            return {}

        # Various fields, each pulled from a "\t<name> <value>" kstat line.
        # A missing field raises IndexError, caught by the bare except below.
        vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
        processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
        stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
        model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
        family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

        # Flags: isainfo -vb prints the instruction set list on its last line
        flags = flag_output.strip().split('\n')[-1].strip().lower().split()
        flags.sort()

        # Advertised speed comes from clock_MHz, so scale by 10^6
        scale = 6
        hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
        hz_advertised = _to_decimal_string(hz_advertised)

        # Actual speed is current_clock_Hz, already in Hz, so scale 0 below
        hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
        hz_actual = _to_decimal_string(hz_actual)

        info = {
            'vendor_id_raw' : vendor_id,
            'brand_raw' : processor_brand,
            'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
            'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
            'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
            'hz_actual' : _hz_short_to_full(hz_actual, 0),
            'stepping' : stepping,
            'model' : model,
            'family' : family,
            'flags' : flags
        }

        # Drop empty/falsy fields so _copy_new_fields does not copy them
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_platform_uname():
    '''
    Returns the family/model/stepping parsed out of platform.uname()'s
    processor string (e.g. "... Family 6 Model 142 Stepping 10, ...").
    Returns {} when the string cannot be parsed.
    '''
    try:
        tokens = DataSource.uname_string_raw.split(',')[0].split(' ')

        def value_after(label):
            # Return the int following *label* in the token list, or None.
            if label in tokens:
                idx = tokens.index(label)
                if idx < len(tokens) - 1:
                    return int(tokens[idx + 1])
            return None

        info = {
            'family' : value_after('Family'),
            'model' : value_after('Model'),
            'stepping' : value_after('Stepping'),
        }
        return {k: v for k, v in info.items() if v}
    except:
        return {}
def _get_cpu_info_internal():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns {} if nothing is found.
    '''
    # Fields that Python itself can always determine
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
    friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
    PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

    info = {
        'python_version' : PYTHON_VERSION,
        'cpuinfo_version' : CPUINFO_VERSION,
        'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
        'arch' : arch,
        'bits' : bits,
        'count' : DataSource.cpu_count,
        'arch_string_raw' : DataSource.arch_string_raw,
    }

    # Try every OS specific source in priority order; for each field the
    # first source that provides a value wins (see _copy_new_fields).
    sources = (
        _get_cpu_info_from_wmic,                    # Windows wmic
        _get_cpu_info_from_registry,                # Windows registry
        _get_cpu_info_from_proc_cpuinfo,            # /proc/cpuinfo
        _get_cpu_info_from_cpufreq_info,            # cpufreq-info
        _get_cpu_info_from_lscpu,                   # lscpu
        _get_cpu_info_from_sysctl,                  # sysctl
        _get_cpu_info_from_kstat,                   # kstat
        _get_cpu_info_from_dmesg,                   # dmesg
        _get_cpu_info_from_cat_var_run_dmesg_boot,  # /var/run/dmesg.boot
        _get_cpu_info_from_ibm_pa_features,         # lsprop ibm,pa-features
        _get_cpu_info_from_sysinfo,                 # sysinfo
        _get_cpu_info_from_cpuid,                   # raw CPUID register
        _get_cpu_info_from_platform_uname,          # platform.uname fallback
    )
    for source in sources:
        _copy_new_fields(info, source())

    return info
def get_cpu_info_json():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a json string
    '''
    import json

    output = None

    # If running under pyinstaller, run normally
    if getattr(sys, 'frozen', False):
        info = _get_cpu_info_internal()
        output = json.dumps(info)
        output = "{0}".format(output)
    # if not running under pyinstaller, run in another process.
    # This is done because multiprocesing has a design flaw that
    # causes non main programs to run multiple times on Windows.
    else:
        from subprocess import Popen, PIPE

        # Re-run this very module as a script with --json
        # NOTE(review): relies on __file__ pointing at an importable script
        # on disk — verify this holds for zipped/frozen installs.
        command = [sys.executable, __file__, '--json']
        p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        output = p1.communicate()[0]

        # On any child failure, report an empty JSON object
        if p1.returncode != 0:
            return "{}"

        if not IS_PY2:
            output = output.decode(encoding='UTF-8')

    return output
def get_cpu_info():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a dict
    '''
    import json

    # Parse the JSON string, converting unicode to str on Python 2
    return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
    '''
    Command line entry point: print CPU info as text, JSON (--json), or
    just the py-cpuinfo version (--version).
    '''
    from argparse import ArgumentParser
    import json

    # Parse args
    parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
    parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
    parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
    args = parser.parse_args()

    # Bail out early on unsupported architectures
    try:
        _check_arch()
    except Exception as err:
        sys.stderr.write(str(err) + "\n")
        sys.exit(1)

    info = _get_cpu_info_internal()
    if not info:
        sys.stderr.write("Failed to find cpu info\n")
        sys.exit(1)

    if args.json:
        print(json.dumps(info))
    elif args.version:
        print(CPUINFO_VERSION_STRING)
    else:
        # Human readable report: one "Label: value" line per field
        labeled_fields = (
            ('Python Version', 'python_version'),
            ('Cpuinfo Version', 'cpuinfo_version_string'),
            ('Vendor ID Raw', 'vendor_id_raw'),
            ('Hardware Raw', 'hardware_raw'),
            ('Brand Raw', 'brand_raw'),
            ('Hz Advertised Friendly', 'hz_advertised_friendly'),
            ('Hz Actual Friendly', 'hz_actual_friendly'),
            ('Hz Advertised', 'hz_advertised'),
            ('Hz Actual', 'hz_actual'),
            ('Arch', 'arch'),
            ('Bits', 'bits'),
            ('Count', 'count'),
            ('Arch String Raw', 'arch_string_raw'),
            ('L1 Data Cache Size', 'l1_data_cache_size'),
            ('L1 Instruction Cache Size', 'l1_instruction_cache_size'),
            ('L2 Cache Size', 'l2_cache_size'),
            ('L2 Cache Line Size', 'l2_cache_line_size'),
            ('L2 Cache Associativity', 'l2_cache_associativity'),
            ('L3 Cache Size', 'l3_cache_size'),
            ('Stepping', 'stepping'),
            ('Model', 'model'),
            ('Family', 'family'),
            ('Processor Type', 'processor_type'),
            ('Extended Model', 'extended_model'),
            ('Extended Family', 'extended_family'),
        )
        for label, key in labeled_fields:
            print('{0}: {1}'.format(label, info.get(key, '')))
        print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# When run as a script, print the CPU info; when imported as a module, just
# verify the current architecture is supported (raises on unsupported CPUs).
if __name__ == '__main__':
    main()
else:
    _check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_cat_var_run_dmesg_boot
|
python
|
def _get_cpu_info_from_cat_var_run_dmesg_boot():
'''
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no /var/run/dmesg.boot
if not DataSource.has_var_run_dmesg_boot():
return {}
# If dmesg.boot fails return {}
returncode, output = DataSource.cat_var_run_dmesg_boot()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
|
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1727-L1741
|
[
"def has_var_run_dmesg_boot():\n\tuname = platform.system().strip().strip('\"').strip(\"'\").strip().lower()\n\treturn 'linux' in uname and os.path.exists('/var/run/dmesg.boot')\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# py-cpuinfo's own version, as a tuple and a dotted string
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])

import os, sys
import platform
import multiprocessing
import ctypes

# The registry module is only available on Windows, and is named _winreg on
# Python 2 and winreg on Python 3; on other platforms it simply stays unset.
try:
    import _winreg as winreg
except ImportError as err:
    try:
        import winreg
    except ImportError as err:
        pass

# True when running under Python 2.x (drives bytes/unicode handling)
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
    '''
    Central collection of every raw data source py-cpuinfo reads:
    facts from the platform module, external commands (wmic, sysctl,
    kstat, lscpu, ...), and the Windows registry.
    '''

    # Facts Python itself can determine at import time
    bits = platform.architecture()[0]
    cpu_count = multiprocessing.cpu_count()
    is_windows = platform.system().lower() == 'windows'
    arch_string_raw = platform.machine()
    uname_string_raw = platform.uname()[5]
    can_cpuid = True

    # --- availability checks: is a given tool/file present on this OS? ---

    @staticmethod
    def has_proc_cpuinfo():
        return os.path.exists('/proc/cpuinfo')

    @staticmethod
    def has_dmesg():
        return len(_program_paths('dmesg')) > 0

    @staticmethod
    def has_var_run_dmesg_boot():
        uname = platform.system().strip().strip('"').strip("'").strip().lower()
        return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')

    @staticmethod
    def has_cpufreq_info():
        return len(_program_paths('cpufreq-info')) > 0

    @staticmethod
    def has_sestatus():
        return len(_program_paths('sestatus')) > 0

    @staticmethod
    def has_sysctl():
        return len(_program_paths('sysctl')) > 0

    @staticmethod
    def has_isainfo():
        return len(_program_paths('isainfo')) > 0

    @staticmethod
    def has_kstat():
        return len(_program_paths('kstat')) > 0

    @staticmethod
    def has_sysinfo():
        return len(_program_paths('sysinfo')) > 0

    @staticmethod
    def has_lscpu():
        return len(_program_paths('lscpu')) > 0

    @staticmethod
    def has_ibm_pa_features():
        return len(_program_paths('lsprop')) > 0

    @staticmethod
    def has_wmic():
        # wmic has no reliable install path, so probe it by running it
        returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
        return returncode == 0 and len(output) > 0

    # --- command runners: each returns (returncode, stdout_text) ---

    @staticmethod
    def cat_proc_cpuinfo():
        return _run_and_get_stdout(['cat', '/proc/cpuinfo'])

    @staticmethod
    def cpufreq_info():
        return _run_and_get_stdout(['cpufreq-info'])

    @staticmethod
    def sestatus_b():
        return _run_and_get_stdout(['sestatus', '-b'])

    @staticmethod
    def dmesg_a():
        return _run_and_get_stdout(['dmesg', '-a'])

    @staticmethod
    def cat_var_run_dmesg_boot():
        return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])

    @staticmethod
    def sysctl_machdep_cpu_hw_cpufrequency():
        return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])

    @staticmethod
    def isainfo_vb():
        return _run_and_get_stdout(['isainfo', '-vb'])

    @staticmethod
    def kstat_m_cpu_info():
        return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])

    @staticmethod
    def sysinfo_cpu():
        return _run_and_get_stdout(['sysinfo', '-cpu'])

    @staticmethod
    def lscpu():
        return _run_and_get_stdout(['lscpu'])

    @staticmethod
    def ibm_pa_features():
        import glob

        # Only present on PowerPC device trees; returns None when absent
        ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
        if ibm_features:
            return _run_and_get_stdout(['lsprop', ibm_features[0]])

    @staticmethod
    def wmic_cpu():
        return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

    # --- Windows registry readers (winreg import is Windows only) ---

    @staticmethod
    def winreg_processor_brand():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
        winreg.CloseKey(key)
        return processor_brand.strip()

    @staticmethod
    def winreg_vendor_id_raw():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
        winreg.CloseKey(key)
        return vendor_id_raw

    @staticmethod
    def winreg_arch_string_raw():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
        arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
        winreg.CloseKey(key)
        return arch_string_raw

    @staticmethod
    def winreg_hz_actual():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
        winreg.CloseKey(key)
        hz_actual = _to_decimal_string(hz_actual)
        return hz_actual

    @staticmethod
    def winreg_feature_bits():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
        winreg.CloseKey(key)
        return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
    '''
    Run *command*, optionally piping its stdout into *pipe_command* like a
    shell "command | pipe_command", and return (returncode, stdout_text).
    '''
    from subprocess import Popen, PIPE

    first = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
    if pipe_command:
        # Chain the two processes and let the first one's stdout flow into
        # the second; closing our handle lets SIGPIPE propagate correctly.
        proc = Popen(pipe_command, stdin=first.stdout, stdout=PIPE, stderr=PIPE)
        first.stdout.close()
    else:
        proc = first

    output = proc.communicate()[0]
    if not IS_PY2:
        output = output.decode(encoding='UTF-8')
    return proc.returncode, output
# Make sure we are running on a supported system
def _check_arch():
    '''Raise an Exception when running on an unsupported CPU architecture.'''
    supported = ('X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64')
    arch, bits = _parse_arch(DataSource.arch_string_raw)
    if arch not in supported:
        raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
    '''
    Recursively convert unicode strings to utf-8 byte strings on Python 2.
    On Python 3 the input comes back unchanged.
    '''
    if IS_PY2 and isinstance(input, unicode):
        return input.encode('utf-8')
    if isinstance(input, list):
        return [_utf_to_str(item) for item in input]
    if isinstance(input, dict):
        return dict(
            (_utf_to_str(key), _utf_to_str(value))
            for key, value in input.items()
        )
    return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
    '''
    Like _get_field_actual, but optionally converts the found value with
    *convert_to*, falling back to *default_value* when the field is missing
    or the conversion fails.
    '''
    value = _get_field_actual(cant_be_number, raw_string, field_names)

    # Convert the value only when one was found and a converter was given
    if value and convert_to:
        try:
            value = convert_to(value)
        except:
            value = default_value

    # Fall back to the default when nothing usable was found
    if value is None:
        value = default_value

    return value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
    '''
    Convert a friendly string such as "2.80GHz" into a (hz, remainder)
    integer pair. Returns (0, 0) on bad input.
    '''
    try:
        text = hz_string.strip().lower()

        # Work out the power-of-ten scale from the unit suffix
        if text.endswith('ghz'):
            scale = 9
        elif text.endswith('mhz'):
            scale = 6
        elif text.endswith('hz'):
            scale = 0
        else:
            scale = None

        # Keep just the numeric part and guarantee a decimal point
        number = "".join(ch for ch in text if ch.isdigit() or ch == '.').strip()
        if '.' not in number:
            number += '.0'

        return _hz_short_to_full(number, scale)
    except:
        return (0, 0)
def _hz_short_to_friendly(ticks, scale):
    '''
    Convert a decimal-string Hz value plus a power-of-ten *scale* into a
    human friendly string such as '2.8000 GHz' (4 decimals, trailing zeros
    trimmed). Returns '0.0000 Hz' on any failure.
    '''
    try:
        # Get the raw Hz as a string
        left, right = _hz_short_to_full(ticks, scale)
        result = '{0}.{1}'.format(left, right)

        # Get the location of the dot, and remove said dot
        # (the dot position is the count of whole-Hz digits)
        dot_index = result.index('.')
        result = result.replace('.', '')

        # Get the Hz symbol and scale from the digit count
        symbol = "Hz"
        scale = 0
        if dot_index > 9:
            symbol = "GHz"
            scale = 9
        elif dot_index > 6:
            symbol = "MHz"
            scale = 6
        elif dot_index > 3:
            symbol = "KHz"
            scale = 3

        # Get the Hz with the dot at the new scaled point
        result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])

        # Format the ticks to have 4 numbers after the decimal
        # and remove any superfluous zeroes.
        result = '{0:.4f} {1}'.format(float(result), symbol)
        result = result.rstrip('0')
        return result
    except:
        return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
    '''
    Pull the clock speed out of a CPU brand string such as
    "Intel(R) Core(TM) i7 CPU @ 2.00GHz".

    Returns (hz, scale) where hz is a decimal string and scale is the
    power-of-ten unit (9 for GHz, 6 for MHz, 0 otherwise). Returns
    ('0.0', 0) when the string contains no Hz figure at all.
    '''
    # Just return 0 if the processor brand does not have the Hz
    if not 'hz' in cpu_string.lower():
        return ('0.0', 0)

    hz = cpu_string.lower()
    scale = 0

    if hz.endswith('mhz'):
        scale = 6
    elif hz.endswith('ghz'):
        scale = 9
    # The number either follows an '@' or is the last whitespace token
    if '@' in hz:
        hz = hz.split('@')[1]
    else:
        hz = hz.rsplit(None, 1)[1]

    # NOTE(review): rstrip strips a character *set*, not a suffix — the
    # 'mhz'/'ghz' letters are removed char-by-char from the right. Fine for
    # the expected "2.60ghz"-style inputs; verify against unusual brands.
    hz = hz.rstrip('mhz').rstrip('ghz').strip()
    hz = _to_decimal_string(hz)

    return (hz, scale)
def _parse_cpu_brand_string_dx(cpu_string):
    '''
    Parse a BSD/dmesg style CPU string, e.g.
    'Intel(R) ... ("GenuineIntel" 686-class, stepping: 9, model: 7, fam: 6)'.

    Returns a (hz, scale, brand, vendor_id, stepping, model, family) tuple;
    fields that cannot be found are None (or '0.0'/0 for the Hz pair).
    '''
    import re

    # Find all the strings inside brackets ()
    # NOTE(review): pairing the Nth '(' with the Nth ')' assumes brackets
    # are never nested; nested brackets would be mis-paired.
    starts = [m.start() for m in re.finditer('\(', cpu_string)]
    ends = [m.start() for m in re.finditer('\)', cpu_string)]
    insides = {k: v for k, v in zip(starts, ends)}
    insides = [cpu_string[start+1 : end] for start, end in insides.items()]

    # Find all the fields from "name: value" pairs inside the brackets
    vendor_id, stepping, model, family = (None, None, None, None)
    for inside in insides:
        for pair in inside.split(','):
            pair = [n.strip() for n in pair.split(':')]
            if len(pair) > 1:
                name, value = pair[0], pair[1]
                if name == 'origin':
                    vendor_id = value.strip('"')
                # NOTE(review): lstrip('0x') strips the characters '0'/'x',
                # not the prefix — a value of '0x0' becomes '' and int()
                # raises ValueError here (uncaught in this function).
                elif name == 'stepping':
                    stepping = int(value.lstrip('0x'), 16)
                elif name == 'model':
                    model = int(value.lstrip('0x'), 16)
                elif name in ['fam', 'family']:
                    family = int(value.lstrip('0x'), 16)

    # Find the Processor Brand
    # Strip off extra strings in brackets at end
    brand = cpu_string.strip()
    is_working = True
    while is_working:
        is_working = False
        for inside in insides:
            full = "({0})".format(inside)

            if brand.endswith(full):
                brand = brand[ :-len(full)].strip()
                is_working = True

    # Find the Hz in the brand string
    hz_brand, scale = _parse_cpu_brand_string(brand)

    # Find Hz inside brackets () after the brand string
    if hz_brand == '0.0':
        for inside in insides:
            hz = inside
            for entry in ['GHz', 'MHz', 'Hz']:
                if entry in hz:
                    # Reshape into "CPU @ <num><unit>" so the brand parser
                    # can handle it
                    hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
                    hz_brand, scale = _parse_cpu_brand_string(hz)
                    break

    return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
    '''
    Extract CPU info (brand, Hz, vendor, stepping/model/family, flags) from
    raw dmesg text. Returns {} when nothing parseable is found or any step
    raises (the whole body is wrapped in a bare try).
    '''
    try:
        # Get all the dmesg lines that might contain a CPU string
        lines = output.split(' CPU0:')[1:] + \
                output.split(' CPU1:')[1:] + \
                output.split(' CPU:')[1:] + \
                output.split('\nCPU0:')[1:] + \
                output.split('\nCPU1:')[1:] + \
                output.split('\nCPU:')[1:]
        lines = [l.split('\n')[0].strip() for l in lines]

        # Convert the lines to CPU strings
        cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

        # Find the CPU string that has the most fields
        best_string = None
        highest_count = 0
        for cpu_string in cpu_strings:
            count = sum([n is not None for n in cpu_string])
            if count > highest_count:
                highest_count = count
                best_string = cpu_string

        # If no CPU string was found, return {}
        if not best_string:
            return {}

        hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

        # Origin line, e.g. 'Origin="GenuineIntel" Id=0x306a9 Family=0x6 ...'
        if ' Origin=' in output:
            fields = output[output.find(' Origin=') : ].split('\n')[0]
            fields = fields.strip().split()
            fields = [n.strip().split('=') for n in fields]
            fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

            for field in fields:
                name = list(field.keys())[0]
                value = list(field.values())[0]

                # NOTE(review): lstrip('0x') strips a char set, not the
                # prefix — '0x0' becomes '' and int() raises, which the
                # outer bare except turns into a {} result.
                if name == 'origin':
                    vendor_id = value.strip('"')
                elif name == 'stepping':
                    stepping = int(value.lstrip('0x'), 16)
                elif name == 'model':
                    model = int(value.lstrip('0x'), 16)
                elif name in ['fam', 'family']:
                    family = int(value.lstrip('0x'), 16)

        # Features: flags are listed inside <...> after these headers
        flag_lines = []
        for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
            if category in output:
                flag_lines.append(output.split(category)[1].split('\n')[0])

        flags = []
        for line in flag_lines:
            line = line.split('<')[1].split('>')[0].lower()
            for flag in line.split(','):
                flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

        # If advertised hz not found, use the actual hz
        if hz_advertised == '0.0':
            scale = 6
            hz_advertised = _to_decimal_string(hz_actual)

        info = {
            'vendor_id_raw' : vendor_id,
            'brand_raw' : processor_brand,
            'stepping' : stepping,
            'model' : model,
            'family' : family,
            'flags' : flags
        }

        # Only report Hz fields when an advertised value actually exists
        if hz_advertised and hz_advertised != '0.0':
            info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
            info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

        if hz_advertised and hz_advertised != '0.0':
            info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
            info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

        return {k: v for k, v in info.items() if v}
    except:
        #raise
        pass

    return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
    '''
    Return True when SE Linux is present and effectively enforcing: either
    an explicit "current mode: enforcing" line, or a policy that denies
    executable heap or executable memory.
    '''
    # No sestatus tool means no SE Linux to worry about
    if not DataSource.has_sestatus():
        return False

    # Run sestatus; a failure also means "not enforcing"
    returncode, output = DataSource.sestatus_b()
    if returncode != 0:
        return False

    exec_heap = False
    exec_memory = False
    for raw_line in output.splitlines():
        line = raw_line.strip().lower()

        # An explicit mode line decides the answer outright
        if line.startswith("current mode:"):
            return line.endswith("enforcing")

        # Otherwise track what the policy booleans allow
        if line.startswith("allow_execheap") and line.endswith("on"):
            exec_heap = True
        elif line.startswith("allow_execmem") and line.endswith("on"):
            exec_memory = True

    # Enforcing unless BOTH executable heap and executable memory are allowed
    return not (exec_heap and exec_memory)
class CPUID(object):
def __init__(self):
    # Windows process handle; created lazily by _asm_func the first time
    # FlushInstructionCache is needed
    self.prochandle = None

    # Figure out if SE Linux is on and in enforcing mode
    # (enforcing mode forbids making memory both writable and executable)
    self.is_selinux_enforcing = _is_selinux_enforcing()
def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
    '''
    Copy raw machine code into an executable memory segment and wrap it in
    a ctypes function pointer.

    Returns (fun, address): the callable and the segment's base address so
    the caller can free it (see _run_asm). Raises Exception when any OS
    allocation/protection call fails.

    NOTE(review): byte_code=[] is a mutable default argument — harmless
    here since it is only joined, never mutated, but worth confirming.
    '''
    byte_code = bytes.join(b'', byte_code)
    address = None

    if DataSource.is_windows:
        # Allocate a memory segment the size of the byte code, and make it executable
        size = len(byte_code)
        # Alloc at least 1 page to ensure we own all pages that we want to change protection on
        if size < 0x1000: size = 0x1000

        MEM_COMMIT = ctypes.c_ulong(0x1000)
        PAGE_READWRITE = ctypes.c_ulong(0x4)
        pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
        pfnVirtualAlloc.restype = ctypes.c_void_p
        address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
        if not address:
            raise Exception("Failed to VirtualAlloc")

        # Copy the byte code into the memory segment
        memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
        if memmove(address, byte_code, size) < 0:
            raise Exception("Failed to memmove")

        # Enable execute permissions
        PAGE_EXECUTE = ctypes.c_ulong(0x10)
        old_protect = ctypes.c_ulong(0)
        pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
        res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
        if not res:
            raise Exception("Failed VirtualProtect")

        # Flush Instruction Cache
        # First, get process Handle
        if not self.prochandle:
            pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
            pfnGetCurrentProcess.restype = ctypes.c_void_p
            self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())

        # Actually flush cache
        res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
        if not res:
            raise Exception("Failed FlushInstructionCache")
    else:
        # Allocate a memory segment the size of the byte code
        size = len(byte_code)
        pfnvalloc = ctypes.pythonapi.valloc
        pfnvalloc.restype = ctypes.c_void_p
        address = pfnvalloc(ctypes.c_size_t(size))
        if not address:
            raise Exception("Failed to valloc")

        # Mark the memory segment as writeable only
        # (SE Linux enforcing mode forbids write+execute pages, so the
        # mprotect steps are skipped entirely in that case)
        if not self.is_selinux_enforcing:
            WRITE = 0x2
            if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
                raise Exception("Failed to mprotect")

        # Copy the byte code into the memory segment
        if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
            raise Exception("Failed to memmove")

        # Mark the memory segment as writeable and executable only
        if not self.is_selinux_enforcing:
            WRITE_EXECUTE = 0x2 | 0x4
            if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
                raise Exception("Failed to mprotect")

    # Cast the memory segment into a function
    functype = ctypes.CFUNCTYPE(restype, *argtypes)
    fun = functype(address)
    return fun, address
def _run_asm(self, *byte_code):
    '''
    Assemble *byte_code* into an executable function, call it once, free
    the memory segment, and return the function's 32 bit unsigned result.
    '''
    # Convert the byte code into a function that returns an int
    restype = ctypes.c_uint32
    argtypes = ()
    func, address = self._asm_func(restype, argtypes, byte_code)

    # Call the byte code like a function
    retval = func()

    byte_code = bytes.join(b'', byte_code)
    size = ctypes.c_size_t(len(byte_code))

    # Free the function memory segment
    if DataSource.is_windows:
        MEM_RELEASE = ctypes.c_ulong(0x8000)
        ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
    else:
        # Remove the executable tag on the memory before freeing it
        READ_WRITE = 0x1 | 0x2
        if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
            raise Exception("Failed to mprotect")

        ctypes.pythonapi.free(ctypes.c_void_p(address))

    return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
    # Machine code that clears EAX (selects CPUID leaf 0)
    return (
        b"\x31\xC0"         # xor eax,eax
    )
def _zero_ecx(self):
    # Machine code that clears ECX (sub-leaf 0 for leaves that use it)
    return (
        b"\x31\xC9"         # xor ecx,ecx
    )
def _one_eax(self):
    # Machine code that sets EAX to 1 (selects CPUID leaf 1)
    return (
        b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
    )
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
    '''
    Return the 12-character CPU vendor string (e.g. "GenuineIntel") from
    CPUID leaf 0, which packs it into EBX, EDX, ECX in that order.
    '''
    # EBX
    ebx = self._run_asm(
        self._zero_eax(),
        b"\x0F\xA2"         # cpuid
        b"\x89\xD8"         # mov ax,bx
        b"\xC3"             # ret
    )

    # ECX
    ecx = self._run_asm(
        self._zero_eax(),
        b"\x0f\xa2"         # cpuid
        b"\x89\xC8"         # mov ax,cx
        b"\xC3"             # ret
    )

    # EDX
    edx = self._run_asm(
        self._zero_eax(),
        b"\x0f\xa2"         # cpuid
        b"\x89\xD0"         # mov ax,dx
        b"\xC3"             # ret
    )

    # Each byte of EBX, EDX, ECX (little endian) is one ASCII letter
    vendor_id = []
    for reg in [ebx, edx, ecx]:
        for n in [0, 8, 16, 24]:
            vendor_id.append(chr((reg >> n) & 0xFF))

    vendor_id = ''.join(vendor_id)

    return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
    '''
    Return the bit fields packed into CPUID leaf 1's EAX register:
    stepping, model, family, processor type, extended model/family.
    '''
    # EAX
    eax = self._run_asm(
        self._one_eax(),
        b"\x0f\xa2"         # cpuid
        b"\xC3"             # ret
    )

    # Get the CPU info by slicing EAX's documented bit fields
    stepping = (eax >> 0) & 0xF # 4 bits
    model = (eax >> 4) & 0xF # 4 bits
    family = (eax >> 8) & 0xF # 4 bits
    processor_type = (eax >> 12) & 0x3 # 2 bits
    extended_model = (eax >> 16) & 0xF # 4 bits
    extended_family = (eax >> 20) & 0xFF # 8 bits

    return {
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'processor_type' : processor_type,
        'extended_model' : extended_model,
        'extended_family' : extended_family
    }
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
    '''
    Return the highest supported extended CPUID leaf, queried via
    leaf 0x80000000.
    '''
    # Check for extension support
    max_extension_support = self._run_asm(
        b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
        b"\x0f\xa2"             # cpuid
        b"\xC3"                 # ret
    )

    return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
    """Return a sorted list of the CPU feature-flag names that are set.

    Decodes CPUID leaf 1 (EDX/ECX), leaf 7 with ECX=0 (EBX/ECX) and the
    extended leaf 0x80000001 (EBX/ECX) into Linux-style flag names, and
    keeps only the names whose bits are set.

    Args:
        max_extension_support (int): Highest extended CPUID function
            supported, from get_max_extension_support().

    Returns:
        list: Sorted feature-flag name strings.
    """
    # EDX
    edx = self._run_asm(
        self._one_eax(),
        b"\x0f\xa2"  # cpuid
        b"\x89\xD0"  # mov ax,dx
        b"\xC3"      # ret
    )

    # ECX
    ecx = self._run_asm(
        self._one_eax(),
        b"\x0f\xa2"  # cpuid
        b"\x89\xC8"  # mov ax,cx
        b"\xC3"      # ret
    )

    # Get the CPU flags
    flags = {
        'fpu' : _is_bit_set(edx, 0),
        'vme' : _is_bit_set(edx, 1),
        'de' : _is_bit_set(edx, 2),
        'pse' : _is_bit_set(edx, 3),
        'tsc' : _is_bit_set(edx, 4),
        'msr' : _is_bit_set(edx, 5),
        'pae' : _is_bit_set(edx, 6),
        'mce' : _is_bit_set(edx, 7),
        'cx8' : _is_bit_set(edx, 8),
        'apic' : _is_bit_set(edx, 9),
        #'reserved1' : _is_bit_set(edx, 10),
        'sep' : _is_bit_set(edx, 11),
        'mtrr' : _is_bit_set(edx, 12),
        'pge' : _is_bit_set(edx, 13),
        'mca' : _is_bit_set(edx, 14),
        'cmov' : _is_bit_set(edx, 15),
        'pat' : _is_bit_set(edx, 16),
        'pse36' : _is_bit_set(edx, 17),
        'pn' : _is_bit_set(edx, 18),
        'clflush' : _is_bit_set(edx, 19),
        #'reserved2' : _is_bit_set(edx, 20),
        'dts' : _is_bit_set(edx, 21),
        'acpi' : _is_bit_set(edx, 22),
        'mmx' : _is_bit_set(edx, 23),
        'fxsr' : _is_bit_set(edx, 24),
        'sse' : _is_bit_set(edx, 25),
        'sse2' : _is_bit_set(edx, 26),
        'ss' : _is_bit_set(edx, 27),
        'ht' : _is_bit_set(edx, 28),
        'tm' : _is_bit_set(edx, 29),
        'ia64' : _is_bit_set(edx, 30),
        'pbe' : _is_bit_set(edx, 31),

        'pni' : _is_bit_set(ecx, 0),
        'pclmulqdq' : _is_bit_set(ecx, 1),
        'dtes64' : _is_bit_set(ecx, 2),
        'monitor' : _is_bit_set(ecx, 3),
        'ds_cpl' : _is_bit_set(ecx, 4),
        'vmx' : _is_bit_set(ecx, 5),
        'smx' : _is_bit_set(ecx, 6),
        'est' : _is_bit_set(ecx, 7),
        'tm2' : _is_bit_set(ecx, 8),
        'ssse3' : _is_bit_set(ecx, 9),
        'cid' : _is_bit_set(ecx, 10),
        #'reserved3' : _is_bit_set(ecx, 11),
        'fma' : _is_bit_set(ecx, 12),
        'cx16' : _is_bit_set(ecx, 13),
        'xtpr' : _is_bit_set(ecx, 14),
        'pdcm' : _is_bit_set(ecx, 15),
        #'reserved4' : _is_bit_set(ecx, 16),
        'pcid' : _is_bit_set(ecx, 17),
        'dca' : _is_bit_set(ecx, 18),
        'sse4_1' : _is_bit_set(ecx, 19),
        'sse4_2' : _is_bit_set(ecx, 20),
        'x2apic' : _is_bit_set(ecx, 21),
        'movbe' : _is_bit_set(ecx, 22),
        'popcnt' : _is_bit_set(ecx, 23),
        'tscdeadline' : _is_bit_set(ecx, 24),
        'aes' : _is_bit_set(ecx, 25),
        'xsave' : _is_bit_set(ecx, 26),
        'osxsave' : _is_bit_set(ecx, 27),
        'avx' : _is_bit_set(ecx, 28),
        'f16c' : _is_bit_set(ecx, 29),
        'rdrnd' : _is_bit_set(ecx, 30),
        'hypervisor' : _is_bit_set(ecx, 31)
    }

    # Get a list of only the flags that are true
    flags = [k for k, v in flags.items() if v]

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
    # NOTE(review): leaf 7 is a *standard* CPUID leaf; comparing against the
    # extended-function maximum (which is >= 0x80000000 whenever extensions
    # exist) makes this condition effectively always true on modern CPUs --
    # confirm whether a standard-leaf maximum was intended here.
    if max_extension_support >= 7:
        # EBX
        ebx = self._run_asm(
            self._zero_ecx(),
            b"\xB8\x07\x00\x00\x00" # mov eax,7
            b"\x0f\xa2"         # cpuid
            b"\x89\xD8"         # mov ax,bx
            b"\xC3"             # ret
        )

        # ECX
        ecx = self._run_asm(
            self._zero_ecx(),
            b"\xB8\x07\x00\x00\x00" # mov eax,7
            b"\x0f\xa2"         # cpuid
            b"\x89\xC8"         # mov ax,cx
            b"\xC3"             # ret
        )

        # Get the extended CPU flags
        extended_flags = {
            #'fsgsbase' : _is_bit_set(ebx, 0),
            #'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
            'sgx' : _is_bit_set(ebx, 2),
            'bmi1' : _is_bit_set(ebx, 3),
            'hle' : _is_bit_set(ebx, 4),
            'avx2' : _is_bit_set(ebx, 5),
            #'reserved' : _is_bit_set(ebx, 6),
            'smep' : _is_bit_set(ebx, 7),
            'bmi2' : _is_bit_set(ebx, 8),
            'erms' : _is_bit_set(ebx, 9),
            'invpcid' : _is_bit_set(ebx, 10),
            'rtm' : _is_bit_set(ebx, 11),
            'pqm' : _is_bit_set(ebx, 12),
            #'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
            'mpx' : _is_bit_set(ebx, 14),
            'pqe' : _is_bit_set(ebx, 15),
            'avx512f' : _is_bit_set(ebx, 16),
            'avx512dq' : _is_bit_set(ebx, 17),
            'rdseed' : _is_bit_set(ebx, 18),
            'adx' : _is_bit_set(ebx, 19),
            'smap' : _is_bit_set(ebx, 20),
            'avx512ifma' : _is_bit_set(ebx, 21),
            'pcommit' : _is_bit_set(ebx, 22),
            'clflushopt' : _is_bit_set(ebx, 23),
            'clwb' : _is_bit_set(ebx, 24),
            'intel_pt' : _is_bit_set(ebx, 25),
            'avx512pf' : _is_bit_set(ebx, 26),
            'avx512er' : _is_bit_set(ebx, 27),
            'avx512cd' : _is_bit_set(ebx, 28),
            'sha' : _is_bit_set(ebx, 29),
            'avx512bw' : _is_bit_set(ebx, 30),
            'avx512vl' : _is_bit_set(ebx, 31),

            'prefetchwt1' : _is_bit_set(ecx, 0),
            'avx512vbmi' : _is_bit_set(ecx, 1),
            'umip' : _is_bit_set(ecx, 2),
            'pku' : _is_bit_set(ecx, 3),
            'ospke' : _is_bit_set(ecx, 4),
            #'reserved' : _is_bit_set(ecx, 5),
            'avx512vbmi2' : _is_bit_set(ecx, 6),
            #'reserved' : _is_bit_set(ecx, 7),
            'gfni' : _is_bit_set(ecx, 8),
            'vaes' : _is_bit_set(ecx, 9),
            'vpclmulqdq' : _is_bit_set(ecx, 10),
            'avx512vnni' : _is_bit_set(ecx, 11),
            'avx512bitalg' : _is_bit_set(ecx, 12),
            #'reserved' : _is_bit_set(ecx, 13),
            'avx512vpopcntdq' : _is_bit_set(ecx, 14),
            #'reserved' : _is_bit_set(ecx, 15),
            #'reserved' : _is_bit_set(ecx, 16),
            #'mpx0' : _is_bit_set(ecx, 17),
            #'mpx1' : _is_bit_set(ecx, 18),
            #'mpx2' : _is_bit_set(ecx, 19),
            #'mpx3' : _is_bit_set(ecx, 20),
            #'mpx4' : _is_bit_set(ecx, 21),
            'rdpid' : _is_bit_set(ecx, 22),
            #'reserved' : _is_bit_set(ecx, 23),
            #'reserved' : _is_bit_set(ecx, 24),
            #'reserved' : _is_bit_set(ecx, 25),
            #'reserved' : _is_bit_set(ecx, 26),
            #'reserved' : _is_bit_set(ecx, 27),
            #'reserved' : _is_bit_set(ecx, 28),
            #'reserved' : _is_bit_set(ecx, 29),
            'sgx_lc' : _is_bit_set(ecx, 30),
            #'reserved' : _is_bit_set(ecx, 31)
        }

        # Get a list of only the flags that are true
        extended_flags = [k for k, v in extended_flags.items() if v]
        flags += extended_flags

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
    if max_extension_support >= 0x80000001:
        # EBX
        ebx = self._run_asm(
            b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
            b"\x0f\xa2"         # cpuid
            b"\x89\xD8"         # mov ax,bx
            b"\xC3"             # ret
        )

        # ECX
        ecx = self._run_asm(
            b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
            b"\x0f\xa2"         # cpuid
            b"\x89\xC8"         # mov ax,cx
            b"\xC3"             # ret
        )

        # Get the extended CPU flags
        extended_flags = {
            'fpu' : _is_bit_set(ebx, 0),
            'vme' : _is_bit_set(ebx, 1),
            'de' : _is_bit_set(ebx, 2),
            'pse' : _is_bit_set(ebx, 3),
            'tsc' : _is_bit_set(ebx, 4),
            'msr' : _is_bit_set(ebx, 5),
            'pae' : _is_bit_set(ebx, 6),
            'mce' : _is_bit_set(ebx, 7),
            'cx8' : _is_bit_set(ebx, 8),
            'apic' : _is_bit_set(ebx, 9),
            #'reserved' : _is_bit_set(ebx, 10),
            'syscall' : _is_bit_set(ebx, 11),
            'mtrr' : _is_bit_set(ebx, 12),
            'pge' : _is_bit_set(ebx, 13),
            'mca' : _is_bit_set(ebx, 14),
            'cmov' : _is_bit_set(ebx, 15),
            'pat' : _is_bit_set(ebx, 16),
            'pse36' : _is_bit_set(ebx, 17),
            #'reserved' : _is_bit_set(ebx, 18),
            'mp' : _is_bit_set(ebx, 19),
            'nx' : _is_bit_set(ebx, 20),
            #'reserved' : _is_bit_set(ebx, 21),
            'mmxext' : _is_bit_set(ebx, 22),
            'mmx' : _is_bit_set(ebx, 23),
            'fxsr' : _is_bit_set(ebx, 24),
            'fxsr_opt' : _is_bit_set(ebx, 25),
            'pdpe1gp' : _is_bit_set(ebx, 26),
            'rdtscp' : _is_bit_set(ebx, 27),
            #'reserved' : _is_bit_set(ebx, 28),
            'lm' : _is_bit_set(ebx, 29),
            '3dnowext' : _is_bit_set(ebx, 30),
            '3dnow' : _is_bit_set(ebx, 31),

            'lahf_lm' : _is_bit_set(ecx, 0),
            'cmp_legacy' : _is_bit_set(ecx, 1),
            'svm' : _is_bit_set(ecx, 2),
            'extapic' : _is_bit_set(ecx, 3),
            'cr8_legacy' : _is_bit_set(ecx, 4),
            'abm' : _is_bit_set(ecx, 5),
            'sse4a' : _is_bit_set(ecx, 6),
            'misalignsse' : _is_bit_set(ecx, 7),
            '3dnowprefetch' : _is_bit_set(ecx, 8),
            'osvw' : _is_bit_set(ecx, 9),
            'ibs' : _is_bit_set(ecx, 10),
            'xop' : _is_bit_set(ecx, 11),
            'skinit' : _is_bit_set(ecx, 12),
            'wdt' : _is_bit_set(ecx, 13),
            #'reserved' : _is_bit_set(ecx, 14),
            'lwp' : _is_bit_set(ecx, 15),
            'fma4' : _is_bit_set(ecx, 16),
            'tce' : _is_bit_set(ecx, 17),
            #'reserved' : _is_bit_set(ecx, 18),
            'nodeid_msr' : _is_bit_set(ecx, 19),
            #'reserved' : _is_bit_set(ecx, 20),
            'tbm' : _is_bit_set(ecx, 21),
            'topoext' : _is_bit_set(ecx, 22),
            'perfctr_core' : _is_bit_set(ecx, 23),
            'perfctr_nb' : _is_bit_set(ecx, 24),
            #'reserved' : _is_bit_set(ecx, 25),
            'dbx' : _is_bit_set(ecx, 26),
            'perftsc' : _is_bit_set(ecx, 27),
            'pci_l2i' : _is_bit_set(ecx, 28),
            #'reserved' : _is_bit_set(ecx, 29),
            #'reserved' : _is_bit_set(ecx, 30),
            #'reserved' : _is_bit_set(ecx, 31)
        }

        # Get a list of only the flags that are true
        extended_flags = [k for k, v in extended_flags.items() if v]
        flags += extended_flags

    flags.sort()
    return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
processor_brand = ""
# Processor brand string
if max_extension_support >= 0x80000004:
instructions = [
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
]
for instruction in instructions:
# EAX
eax = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC0" # mov ax,ax
b"\xC3" # ret
)
# EBX
ebx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Combine each of the 4 bytes in each register into the string
for reg in [eax, ebx, ecx, edx]:
for n in [0, 8, 16, 24]:
processor_brand += chr((reg >> n) & 0xFF)
# Strip off any trailing NULL terminators and white space
processor_brand = processor_brand.strip("\0").strip()
return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
cache_info = {
'size_kb' : ecx & 0xFF,
'line_size_b' : (ecx >> 12) & 0xF,
'associativity' : (ecx >> 16) & 0xFFFF
}
return cache_info
def get_ticks(self):
    """Read the CPU's time-stamp counter (RDTSC) as a 64-bit integer.

    Builds a small machine-code routine appropriate for the current
    pointer width and calls it via ctypes. Returns None when
    DataSource.bits is neither '32bit' nor '64bit'.

    NOTE(review): _asm_func also returns the buffer address, which is
    never freed here (unlike _run_asm, which releases its machine
    code) -- confirm whether this one-off allocation is intentional.
    """
    retval = None

    if DataSource.bits == '32bit':
        # Works on x86_32
        restype = None
        # The routine returns EDX:EAX of RDTSC through two out-pointers.
        argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
        # NOTE(review): the byte comments use 16-bit mnemonics but the
        # encoded opcodes are 32-bit forms -- comments only, no effect.
        get_ticks_x86_32, address = self._asm_func(restype, argtypes,
            [
            b"\x55",         # push bp
            b"\x89\xE5",     # mov bp,sp
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid
            b"\x0F\x31",     # rdtsc
            b"\x8B\x5D\x08", # mov bx,[di+0x8]
            b"\x8B\x4D\x0C", # mov cx,[di+0xc]
            b"\x89\x13",     # mov [bp+di],dx
            b"\x89\x01",     # mov [bx+di],ax
            b"\x5D",         # pop bp
            b"\xC3"          # ret
            ]
        )

        high = ctypes.c_uint32(0)
        low = ctypes.c_uint32(0)

        get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
        # Recombine the two 32-bit halves into one 64-bit tick count.
        retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
    elif DataSource.bits == '64bit':
        # Works on x86_64
        restype = ctypes.c_uint64
        argtypes = ()
        # CPUID serializes, then RDTSC's EDX:EAX is folded into RAX.
        get_ticks_x86_64, address = self._asm_func(restype, argtypes,
            [
            b"\x48",         # dec ax
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid
            b"\x0F\x31",     # rdtsc
            b"\x48",         # dec ax
            b"\xC1\xE2\x20", # shl dx,byte 0x20
            b"\x48",         # dec ax
            b"\x09\xD0",     # or ax,dx
            b"\xC3",         # ret
            ]
        )
        retval = get_ticks_x86_64()
    return retval
def get_raw_hz(self):
    """Estimate the CPU clock rate by sampling the TSC over one second.

    Returns:
        int: Tick count elapsed across a one-second sleep
        (approximately Hz, assuming an invariant TSC).
    """
    import time

    # The statement order matters: sample, sleep exactly 1s, sample again.
    start = self.get_ticks()

    time.sleep(1)

    end = self.get_ticks()

    ticks = (end - start)

    return ticks
def _actual_get_cpu_info_from_cpuid(queue):
    '''
    Warning! This function has the potential to crash the Python runtime.
    Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
    It will safely call this function in another process.
    The result (a base64-encoded dict, possibly empty) is put on `queue`.
    '''

    # Pipe all output to nothing
    # (the handles are never closed; the process exits right after)
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return none if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        queue.put(_obj_to_b64({}))
        return

    # Return none if SE Linux is in enforcing mode
    cpuid = CPUID()
    if cpuid.is_selinux_enforcing:
        queue.put(_obj_to_b64({}))
        return

    # Get the cpu info from the CPUID register
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()

    processor_brand = cpuid.get_processor_brand(max_extension_support)

    # Get the Hz and scale
    hz_actual = cpuid.get_raw_hz()
    hz_actual = _to_decimal_string(hz_actual)

    # Get the Hz and scale
    hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

    # NOTE: `info` (the leaf-1 signature fields) is consumed while building
    # this new result dict, then rebound to it.
    info = {
    'vendor_id_raw' : cpuid.get_vendor_id(),
    'hardware_raw' : '',
    'brand_raw' : processor_brand,

    'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
    'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
    'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
    'hz_actual' : _hz_short_to_full(hz_actual, 0),

    'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
    'l2_cache_line_size' : cache_info['line_size_b'],
    'l2_cache_associativity' : hex(cache_info['associativity']),

    'stepping' : info['stepping'],
    'model' : info['model'],
    'family' : info['family'],
    'processor_type' : info['processor_type'],
    'extended_model' : info['extended_model'],
    'extended_family' : info['extended_family'],
    'flags' : cpuid.get_flags(max_extension_support)
    }

    # Drop empty/falsy fields before handing the result back.
    info = {k: v for k, v in info.items() if v}
    queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
    '''
    Returns the CPU info gathered by querying the X86 cpuid register in a new process.
    Returns {} on non X86 cpus.
    Returns {} if SELinux is in enforcing mode.
    '''
    from multiprocessing import Process, Queue

    # Return {} if can't cpuid
    if not DataSource.can_cpuid:
        return {}

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return {} if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        return {}

    try:
        # Run the CPUID probe in a child process: executing generated
        # machine code can crash the interpreter, so keep it isolated.
        queue = Queue()
        p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
        p.start()

        # Wait for the child to finish. FIX: a plain join() blocks until
        # exit; the previous `while p.is_alive(): p.join(0)` loop
        # busy-waited, spinning a CPU core for the whole probe.
        p.join()

        # Return {} if it failed
        if p.exitcode != 0:
            return {}

        # Return the result, only if there is something to read
        if not queue.empty():
            output = queue.get()
            return _b64_to_obj(output)
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only ordinary errors should fall through.
        pass

    # Return {} if everything failed
    return {}
def _get_cpu_info_from_proc_cpuinfo():
    '''
    Returns the CPU info gathered from /proc/cpuinfo.
    Returns {} if /proc/cpuinfo is not found, unreadable, or unparsable.
    '''
    try:
        # Just return {} if there is no cpuinfo
        if not DataSource.has_proc_cpuinfo():
            return {}

        returncode, output = DataSource.cat_proc_cpuinfo()
        if returncode != 0:
            return {}

        # Various fields (field names differ across kernels/architectures,
        # hence the multiple candidate keys per call)
        vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
        processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
        cache_size = _get_field(False, output, None, '', 'cache size')
        stepping = _get_field(False, output, int, 0, 'stepping')
        model = _get_field(False, output, int, 0, 'model')
        family = _get_field(False, output, int, 0, 'cpu family')
        hardware = _get_field(False, output, None, '', 'Hardware')

        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()

        # Convert from MHz string to Hz
        hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
        hz_actual = hz_actual.lower().rstrip('mhz').strip()
        hz_actual = _to_decimal_string(hz_actual)

        # Convert from GHz/MHz string to Hz
        hz_advertised, scale = (None, 0)
        try:
            hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        except Exception:
            pass

        info = {
        'hardware_raw' : hardware,
        'brand_raw' : processor_brand,

        'l3_cache_size' : _to_friendly_bytes(cache_size),
        'flags' : flags,
        'vendor_id_raw' : vendor_id,
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        }

        # Make the Hz the same for actual and advertised if missing any
        if not hz_advertised or hz_advertised == '0.0':
            hz_advertised = hz_actual
            scale = 6
        elif not hz_actual or hz_actual == '0.0':
            hz_actual = hz_advertised

        # Add the Hz if there is one
        if _hz_short_to_full(hz_advertised, scale) > (0, 0):
            info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
            info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
        # NOTE(review): this guard uses `scale` (from the brand string) but
        # stores hz_actual with a hard-coded scale of 6 (cpu MHz is always
        # MHz). The guard only tests non-zero, so behavior matches, but the
        # mixed scales look suspicious -- confirm upstream intent.
        if _hz_short_to_full(hz_actual, scale) > (0, 0):
            info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
            info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_cpufreq_info():
    '''
    Returns the CPU info gathered from cpufreq-info.
    Returns {} if cpufreq-info is not found.
    '''
    try:
        if not DataSource.has_cpufreq_info():
            return {}

        returncode, output = DataSource.cpufreq_info()
        if returncode != 0:
            return {}

        # Isolate the frequency text, e.g. "2.40 GHz", from the report line.
        freq_text = output.split('current CPU frequency is')[1].split('\n')[0]
        hz_pos = freq_text.find('Hz')
        assert(hz_pos != -1)
        freq_text = freq_text[0 : hz_pos + 2].strip().lower()

        # Map the unit suffix onto a power-of-ten scale.
        scale = 0
        if freq_text.endswith('mhz'):
            scale = 6
        elif freq_text.endswith('ghz'):
            scale = 9
        freq_text = freq_text.rstrip('mhz').rstrip('ghz').strip()
        freq_text = _to_decimal_string(freq_text)

        info = {
            'hz_advertised_friendly' : _hz_short_to_friendly(freq_text, scale),
            'hz_actual_friendly' : _hz_short_to_friendly(freq_text, scale),
            'hz_advertised' : _hz_short_to_full(freq_text, scale),
            'hz_actual' : _hz_short_to_full(freq_text, scale),
        }

        return {k: v for k, v in info.items() if v}
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_lscpu():
    '''
    Returns the CPU info gathered from lscpu.
    Returns {} if lscpu is not found or its output cannot be parsed.
    Each field is added to the result only when lscpu reported it.
    '''
    try:
        if not DataSource.has_lscpu():
            return {}

        returncode, output = DataSource.lscpu()
        if returncode != 0:
            return {}

        info = {}

        # Hz: prefer "CPU max MHz", falling back to "CPU MHz" (both MHz)
        new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
        if new_hz:
            new_hz = _to_decimal_string(new_hz)
            scale = 6
            info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
            info['hz_actual'] = _hz_short_to_full(new_hz, scale)

        vendor_id = _get_field(False, output, None, None, 'Vendor ID')
        if vendor_id:
            info['vendor_id_raw'] = vendor_id

        brand = _get_field(False, output, None, None, 'Model name')
        if brand:
            info['brand_raw'] = brand

        # Numeric signature fields: only keep values that are pure digits
        family = _get_field(False, output, None, None, 'CPU family')
        if family and family.isdigit():
            info['family'] = int(family)

        stepping = _get_field(False, output, None, None, 'Stepping')
        if stepping and stepping.isdigit():
            info['stepping'] = int(stepping)

        model = _get_field(False, output, None, None, 'Model')
        if model and model.isdigit():
            info['model'] = int(model)

        # Cache sizes, converted to human friendly byte strings
        l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
        if l1_data_cache_size:
            info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)

        l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
        if l1_instruction_cache_size:
            info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)

        l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
        if l2_cache_size:
            info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)

        l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
        if l3_cache_size:
            info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)

        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()
            info['flags'] = flags

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_dmesg():
    '''
    Returns the CPU info gathered from dmesg.
    Returns {} if dmesg is not found or does not have the desired info.
    '''
    # Only attempt the parse when dmesg is present and ran cleanly.
    if DataSource.has_dmesg():
        returncode, output = DataSource.dmesg_a()
        if returncode == 0 and output is not None:
            return _parse_dmesg_output(output)
    return {}
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
    '''
    Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
    Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.

    The ibm,pa-features property is decoded per LoPAPR (see the URL in
    the comment above this function); only the flag names are returned.
    '''
    try:
        # Just return {} if there is no lsprop
        if not DataSource.has_ibm_pa_features():
            return {}

        # If ibm,pa-features fails return {}
        returncode, output = DataSource.ibm_pa_features()
        if output == None or returncode != 0:
            return {}

        # Filter out invalid characters from output
        # (keeps only hex digits from the property dump)
        value = output.split("ibm,pa-features")[1].lower()
        value = [s for s in value if s in list('0123456789abcfed')]
        value = ''.join(value)

        # Get data converted to Uint32 chunks
        left = int(value[0 : 8], 16)
        right = int(value[8 : 16], 16)

        # Get the CPU flags
        # NOTE(review): bit positions here follow the big-endian numbering
        # used by the LoPAPR table -- confirm against _is_bit_set's
        # convention if this ever reports inverted flags.
        flags = {
            # Byte 0
            'mmu' : _is_bit_set(left, 0),
            'fpu' : _is_bit_set(left, 1),
            'slb' : _is_bit_set(left, 2),
            'run' : _is_bit_set(left, 3),
            #'reserved' : _is_bit_set(left, 4),
            'dabr' : _is_bit_set(left, 5),
            'ne' : _is_bit_set(left, 6),
            'wtr' : _is_bit_set(left, 7),

            # Byte 1
            'mcr' : _is_bit_set(left, 8),
            'dsisr' : _is_bit_set(left, 9),
            'lp' : _is_bit_set(left, 10),
            'ri' : _is_bit_set(left, 11),
            'dabrx' : _is_bit_set(left, 12),
            'sprg3' : _is_bit_set(left, 13),
            'rislb' : _is_bit_set(left, 14),
            'pp' : _is_bit_set(left, 15),

            # Byte 2
            'vpm' : _is_bit_set(left, 16),
            'dss_2.05' : _is_bit_set(left, 17),
            #'reserved' : _is_bit_set(left, 18),
            'dar' : _is_bit_set(left, 19),
            #'reserved' : _is_bit_set(left, 20),
            'ppr' : _is_bit_set(left, 21),
            'dss_2.02' : _is_bit_set(left, 22),
            'dss_2.06' : _is_bit_set(left, 23),

            # Byte 3
            'lsd_in_dscr' : _is_bit_set(left, 24),
            'ugr_in_dscr' : _is_bit_set(left, 25),
            #'reserved' : _is_bit_set(left, 26),
            #'reserved' : _is_bit_set(left, 27),
            #'reserved' : _is_bit_set(left, 28),
            #'reserved' : _is_bit_set(left, 29),
            #'reserved' : _is_bit_set(left, 30),
            #'reserved' : _is_bit_set(left, 31),

            # Byte 4
            'sso_2.06' : _is_bit_set(right, 0),
            #'reserved' : _is_bit_set(right, 1),
            #'reserved' : _is_bit_set(right, 2),
            #'reserved' : _is_bit_set(right, 3),
            #'reserved' : _is_bit_set(right, 4),
            #'reserved' : _is_bit_set(right, 5),
            #'reserved' : _is_bit_set(right, 6),
            #'reserved' : _is_bit_set(right, 7),

            # Byte 5
            'le' : _is_bit_set(right, 8),
            'cfar' : _is_bit_set(right, 9),
            'eb' : _is_bit_set(right, 10),
            'lsq_2.07' : _is_bit_set(right, 11),
            #'reserved' : _is_bit_set(right, 12),
            #'reserved' : _is_bit_set(right, 13),
            #'reserved' : _is_bit_set(right, 14),
            #'reserved' : _is_bit_set(right, 15),

            # Byte 6
            'dss_2.07' : _is_bit_set(right, 16),
            #'reserved' : _is_bit_set(right, 17),
            #'reserved' : _is_bit_set(right, 18),
            #'reserved' : _is_bit_set(right, 19),
            #'reserved' : _is_bit_set(right, 20),
            #'reserved' : _is_bit_set(right, 21),
            #'reserved' : _is_bit_set(right, 22),
            #'reserved' : _is_bit_set(right, 23),

            # Byte 7
            #'reserved' : _is_bit_set(right, 24),
            #'reserved' : _is_bit_set(right, 25),
            #'reserved' : _is_bit_set(right, 26),
            #'reserved' : _is_bit_set(right, 27),
            #'reserved' : _is_bit_set(right, 28),
            #'reserved' : _is_bit_set(right, 29),
            #'reserved' : _is_bit_set(right, 30),
            #'reserved' : _is_bit_set(right, 31),
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        info = {
            'flags' : flags
        }
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_sysctl():
    '''
    Returns the CPU info gathered from sysctl.
    Returns {} if sysctl is not found or its output cannot be parsed.
    (macOS/BSD: reads the machdep.cpu.* and hw.cpufrequency keys.)
    '''
    try:
        # Just return {} if there is no sysctl
        if not DataSource.has_sysctl():
            return {}

        # If sysctl fails return {}
        returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
        if output == None or returncode != 0:
            return {}

        # Various fields
        vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
        processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
        cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
        model = _get_field(False, output, int, 0, 'machdep.cpu.model')
        family = _get_field(False, output, int, 0, 'machdep.cpu.family')

        # Flags: base features plus leaf-7 and extended features, merged
        flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
        flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
        flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
        flags.sort()

        # Convert from GHz/MHz string to Hz
        # (hw.cpufrequency is already in Hz, hence the scale of 0 below)
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
        hz_actual = _to_decimal_string(hz_actual)

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),

        'l2_cache_size' : _to_friendly_bytes(cache_size),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_sysinfo():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.
    Merges both sysinfo output formats; v2 fields win on conflict.
    '''
    merged = dict(_get_cpu_info_from_sysinfo_v1())
    merged.update(_get_cpu_info_from_sysinfo_v2())
    return merged
def _get_cpu_info_from_sysinfo_v1():
    '''
    Returns the CPU info gathered from sysinfo (older output format).
    Returns {} if sysinfo is not found or the output does not parse.
    '''
    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields
        # (vendor and cache are not reported in this format; any slicing
        # failure below lands in the except and yields {})
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
        model = int(output.split(', model ')[1].split(',')[0].strip())
        family = int(output.split(', family ')[1].split(',')[0].strip())

        # Flags (flag lines are the double-indented ones)
        flags = []
        for line in output.split('\n'):
            if line.startswith('\t\t'):
                for flag in line.strip().lower().split():
                    flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = hz_advertised

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),

        'l2_cache_size' : _to_friendly_bytes(cache_size),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_sysinfo_v2():
    '''
    Returns the CPU info gathered from sysinfo (newer output format).
    Returns {} if sysinfo is not found or the output does not parse.
    '''
    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields
        # (vendor and cache are not reported in this format; slicing
        # failures land in the except and yield {})
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        signature = output.split('Signature:')[1].split('\n')[0].strip()
        #
        stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
        model = int(signature.split('model ')[1].split(',')[0].strip())
        family = int(signature.split('family ')[1].split(',')[0].strip())

        # Flags
        # Collects flag names from the indented lines under a section header.
        def get_subsection_flags(output):
            retval = []
            for line in output.split('\n')[1:]:
                if not line.startswith(' ') and not line.startswith(' '): break
                for entry in line.strip().lower().split(' '):
                    retval.append(entry)
            return retval

        flags = get_subsection_flags(output.split('Features: ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
        flags.sort()

        # Convert from GHz/MHz string to Hz
        lines = [n for n in output.split('\n') if n]
        raw_hz = lines[0].split('running at ')[1].strip().lower()
        hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
        hz_advertised = _to_decimal_string(hz_advertised)
        hz_actual = hz_advertised

        scale = 0
        if raw_hz.endswith('mhz'):
            scale = 6
        elif raw_hz.endswith('ghz'):
            scale = 9

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),

        'l2_cache_size' : _to_friendly_bytes(cache_size),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_wmic():
    '''
    Returns the CPU info gathered from WMI.
    Returns {} if not on Windows, or wmic is not installed.
    Any parse failure also yields {}.
    '''
    try:
        # Just return {} if not Windows or there is no wmic
        if not DataSource.is_windows or not DataSource.has_wmic():
            return {}

        returncode, output = DataSource.wmic_cpu()
        if output == None or returncode != 0:
            return {}

        # Break the list into key values pairs
        # (lines without '=' are dropped; a line with multiple '=' would
        # raise on unpacking and fall through to the except below)
        value = output.split("\n")
        value = [s.rstrip().split('=') for s in value if '=' in s]
        value = {k: v for k, v in value if v}

        # Get the advertised MHz
        processor_brand = value.get('Name')
        hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

        # Get the actual MHz (CurrentClockSpeed is reported in MHz)
        hz_actual = value.get('CurrentClockSpeed')
        scale_actual = 6
        if hz_actual:
            hz_actual = _to_decimal_string(hz_actual)

        # Get cache sizes (WMI reports them in KB)
        l2_cache_size = value.get('L2CacheSize')
        if l2_cache_size:
            l2_cache_size = l2_cache_size + ' KB'

        l3_cache_size = value.get('L3CacheSize')
        if l3_cache_size:
            l3_cache_size = l3_cache_size + ' KB'

        # Get family, model, and stepping
        # (parsed out of the "... Family N Model N Stepping N" description)
        family, model, stepping = '', '', ''
        description = value.get('Description') or value.get('Caption')
        entries = description.split(' ')

        if 'Family' in entries and entries.index('Family') < len(entries)-1:
            i = entries.index('Family')
            family = int(entries[i + 1])

        if 'Model' in entries and entries.index('Model') < len(entries)-1:
            i = entries.index('Model')
            model = int(entries[i + 1])

        if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
            i = entries.index('Stepping')
            stepping = int(entries[i + 1])

        info = {
        'vendor_id_raw' : value.get('Manufacturer'),
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
        'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

        'l2_cache_size' : l2_cache_size,
        'l3_cache_size' : l3_cache_size,

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        }

        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows, or on any registry/parse failure.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}
		# Get the CPU name (brand string, e.g. "... CPU @ 2.80GHz")
		processor_brand = DataSource.winreg_processor_brand().strip()
		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()
		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)
		# Get the actual CPU Hz (registry "~Mhz" value, so the unit is MHz)
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)
		# Get the advertised CPU Hz out of the brand string
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz (MHz -> scale 6)
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		# Get the CPU features bitmask from the registry FeatureSet value
		feature_bits = DataSource.winreg_feature_bits()
		def is_set(bit):
			# NOTE(review): bit 0 here maps to the MOST significant bit of
			# the 32-bit FeatureSet value -- confirm against the
			# CentralProcessor FeatureSet layout.
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval
		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}
		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()
		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			# hz_actual came from the "~Mhz" registry value, so scale is 6
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 6),
			'flags' : flags
		}
		# Drop empty/zero fields so a later info source can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat (Solaris tools).
	Returns {} if isainfo or kstat are not found, or on any parse failure.
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}
		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}
		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}
		# Various fields: kstat output is "\t<name> <value>\n" records,
		# so split on the exact tab-prefixed field name.
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
		# Flags: the last line of "isainfo -vb" lists the feature names
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()
		# Advertised speed comes from clock_MHz, hence the fixed scale of 6
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		# Actual speed is already in Hz (current_clock_Hz), so scale 0 below
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)
		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}
		# Drop empty/zero fields so a later info source can fill them in
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns CPU family/model/stepping parsed out of the processor field
	of platform.uname() (e.g. "... Family 6 Model 58 Stepping 9, ...").
	Returns {} when those labels are absent or on any failure.
	'''
	try:
		# Only the chunk before the first comma carries the labeled numbers.
		tokens = DataSource.uname_string_raw.split(',')[0].split(' ')

		def value_after(label):
			# Integer token following `label`, or None if label is missing
			# or is the last token.
			if label in tokens and tokens.index(label) < len(tokens) - 1:
				return int(tokens[tokens.index(label) + 1])
			return None

		info = {
			'family' : value_after('Family'),
			'model' : value_after('Model'),
			'stepping' : value_after('Stepping')
		}
		# Drop fields that were not found.
		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}

	# Merge in fields from each source in priority order; a field already
	# filled by an earlier (more trusted) source is never overwritten.
	sources = [
		_get_cpu_info_from_wmic,                    # Windows wmic
		_get_cpu_info_from_registry,                # Windows registry
		_get_cpu_info_from_proc_cpuinfo,            # /proc/cpuinfo
		_get_cpu_info_from_cpufreq_info,            # cpufreq-info
		_get_cpu_info_from_lscpu,                   # lscpu
		_get_cpu_info_from_sysctl,                  # sysctl
		_get_cpu_info_from_kstat,                   # kstat
		_get_cpu_info_from_dmesg,                   # dmesg
		_get_cpu_info_from_cat_var_run_dmesg_boot,  # /var/run/dmesg.boot
		_get_cpu_info_from_ibm_pa_features,         # lsprop ibm,pa-features
		_get_cpu_info_from_sysinfo,                 # sysinfo
		_get_cpu_info_from_cpuid,                   # raw CPUID register
		_get_cpu_info_from_platform_uname,          # platform.uname
	]
	for source in sources:
		_copy_new_fields(info, source())

	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''
	import json

	# Under pyinstaller there is no re-exec hazard, so gather in-process.
	if getattr(sys, 'frozen', False):
		return "{0}".format(json.dumps(_get_cpu_info_internal()))

	# Otherwise gather in a child process. This works around a
	# multiprocessing design flaw that makes non-main programs run
	# multiple times on Windows.
	from subprocess import Popen, PIPE
	child = Popen([sys.executable, __file__, '--json'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
	output = child.communicate()[0]
	if child.returncode != 0:
		return "{}"
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''
	import json

	# Decode the JSON; the hook converts unicode back to str on Python 2.
	return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
	'''
	Command-line entry point: prints CPU info as text (default), JSON
	(--json), or the library version (--version). Exits non-zero when the
	architecture is unsupported or no info could be found.
	'''
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()

	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()
	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# (label, info key) pairs, printed in exactly this order.
		fields = [
			('Python Version', 'python_version'),
			('Cpuinfo Version', 'cpuinfo_version_string'),
			('Vendor ID Raw', 'vendor_id_raw'),
			('Hardware Raw', 'hardware_raw'),
			('Brand Raw', 'brand_raw'),
			('Hz Advertised Friendly', 'hz_advertised_friendly'),
			('Hz Actual Friendly', 'hz_actual_friendly'),
			('Hz Advertised', 'hz_advertised'),
			('Hz Actual', 'hz_actual'),
			('Arch', 'arch'),
			('Bits', 'bits'),
			('Count', 'count'),
			('Arch String Raw', 'arch_string_raw'),
			('L1 Data Cache Size', 'l1_data_cache_size'),
			('L1 Instruction Cache Size', 'l1_instruction_cache_size'),
			('L2 Cache Size', 'l2_cache_size'),
			('L2 Cache Line Size', 'l2_cache_line_size'),
			('L2 Cache Associativity', 'l2_cache_associativity'),
			('L3 Cache Size', 'l3_cache_size'),
			('Stepping', 'stepping'),
			('Model', 'model'),
			('Family', 'family'),
			('Processor Type', 'processor_type'),
			('Extended Model', 'extended_model'),
			('Extended Family', 'extended_family'),
		]
		for label, key in fields:
			print('{0}: {1}'.format(label, info.get(key, '')))
		# Flags are joined into one comma-separated line.
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# When run as a script, print the CPU info; when imported as a library,
# just verify this architecture is supported (raises Exception if not).
if __name__ == '__main__':
	main()
else:
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_sysctl
|
python
|
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
|
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1744-L1798
|
[
"def has_sysctl():\n\treturn len(_program_paths('sysctl')) > 0\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''
	Static facade over every platform probe py-cpuinfo uses: files,
	external commands, and the Windows registry. Centralizing these makes
	the per-OS gatherer functions easy to stub out in tests.
	'''

	# Cheap host facts computed once at import time.
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	# platform.uname()[5] is the "processor" field — may be empty on some OSes.
	uname_string_raw = platform.uname()[5]
	can_cpuid = True

	# ---- availability probes: is this file/tool present? ----

	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')

	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0

	@staticmethod
	def has_var_run_dmesg_boot():
		# Only meaningful on Linux hosts that keep a boot-time dmesg snapshot.
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')

	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0

	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0

	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0

	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0

	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0

	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0

	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0

	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0

	@staticmethod
	def has_wmic():
		# wmic availability is probed by actually running it.
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0

	# ---- command runners: each returns (returncode, stdout) ----

	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])

	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])

	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])

	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])

	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])

	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])

	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])

	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])

	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])

	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])

	@staticmethod
	def ibm_pa_features():
		import glob

		# NOTE: implicitly returns None when no ibm,pa-features node exists.
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])

	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

	# ---- Windows registry readers (HKLM CentralProcessor\0) ----

	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()

	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw

	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw

	@staticmethod
	def winreg_hz_actual():
		# The "~Mhz" value is in MHz; callers account for the unit.
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual

	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
from subprocess import Popen, PIPE
if not pipe_command:
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return p1.returncode, output
else:
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
p1.stdout.close()
output = p2.communicate()[0]
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return p2.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	'''
	Raises an Exception unless the current machine's architecture is one
	py-cpuinfo knows how to query.
	'''
	supported = ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''
	Recursively convert unicode strings to UTF-8 encoded str on Python 2
	(lists and dicts are rebuilt with converted members). On Python 3,
	and for other scalar types, values pass through unchanged.
	'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(item) for item in input]
	if isinstance(input, dict):
		return dict((_utf_to_str(k), _utf_to_str(v)) for k, v in input.items())
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''
	Like _get_field_actual, but optionally converts the found value with
	convert_to, and falls back to default_value when the field is absent
	or the conversion fails.
	'''
	raw_value = _get_field_actual(cant_be_number, raw_string, field_names)

	# Absent field -> default.
	if raw_value is None:
		return default_value

	# Conversion failure -> default.
	if convert_to:
		try:
			return convert_to(raw_value)
		except:
			return default_value

	return raw_value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''
	Parse a human-readable string like "2.8 GHz" into the (hz, fraction)
	pair produced by _hz_short_to_full. Returns (0, 0) on any failure
	(including an unrecognized unit suffix).
	'''
	try:
		text = hz_string.strip().lower()

		# Map the unit suffix to a power-of-ten scale.
		scale = None
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0

		digits = "".join(ch for ch in text if ch.isdigit() or ch == '.').strip()
		if '.' not in digits:
			digits += '.0'

		return _hz_short_to_full(digits, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''
	Format a short decimal string plus power-of-ten scale as a friendly
	speed string, e.g. ("2.8", 9) -> "2.8 GHz". Returns "0.0000 Hz" on
	any failure.
	'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)

		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')

		# Pick the unit symbol and scale from the magnitude of the whole
		# part (digit count left of the removed dot).
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3

		# Re-insert the dot at the new scaled position (counting from the
		# right end of the digit string).
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])

		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		result = result.rstrip('0')
		return result
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''
	Pull the advertised (hz_string, scale) out of a CPU brand string such
	as "Intel(R) Core(TM) i5 CPU @ 2.80GHz". Returns ('0.0', 0) when the
	string carries no Hz figure at all.
	'''
	# Just return 0 if the processor brand does not have the Hz
	if not 'hz' in cpu_string.lower():
		return ('0.0', 0)

	hz = cpu_string.lower()

	# The unit suffix fixes the power-of-ten scale.
	scale = 0
	if hz.endswith('mhz'):
		scale = 6
	elif hz.endswith('ghz'):
		scale = 9

	# The figure is either after an '@' or is the final whitespace token.
	hz = hz.split('@')[1] if '@' in hz else hz.rsplit(None, 1)[1]

	# NOTE: rstrip removes a set of characters, not a suffix; this is
	# safe here because the token ends with the unit letters.
	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	hz = _to_decimal_string(hz)

	return (hz, scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''
	Parse a dmesg-style CPU line into the tuple
	(hz, scale, brand, vendor_id, stepping, model, family),
	where any field not present in the string is None (hz is '0.0').
	'''
	import re

	# Find all the strings inside brackets ()
	starts = [m.start() for m in re.finditer('\(', cpu_string)]
	ends = [m.start() for m in re.finditer('\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]

	# Find all the fields: "name:value" pairs inside the brackets.
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# NOTE(review): lstrip('0x') strips a character set, not
					# the '0x' prefix — a value of '0x0' becomes '' and
					# raises; callers guard with try/except.
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)

	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True

	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)

	# Find Hz inside brackets () after the brand string, as a fallback.
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break

	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Extract CPU fields from raw dmesg text (BSD-style "CPU:" / "CPU0:"
	lines plus optional "Origin=" and "Features=" lines). Returns a dict
	of the fields that were found, or {} on any failure.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
			output.split(' CPU1:')[1:] + \
			output.split(' CPU:')[1:] + \
			output.split('\nCPU0:')[1:] + \
			output.split('\nCPU1:')[1:] + \
			output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]

		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

		# Find the CPU string that has the most non-None fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string

		# If no CPU string was found, return {}
		if not best_string:
			return {}

		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

		# Origin line overrides vendor/stepping/model/family when present
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)

		# Features: flag names live between < and > on these lines
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])

		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Only add speed fields when a real advertised figure was found
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

		# Drop empty fields so later sources can fill them in
		return {k: v for k, v in info.items() if v}
	except:
		#raise
		pass

	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''
	Best-effort check for SE Linux blocking executable heap/memory, via
	the sestatus tool. Returns False whenever sestatus is missing or
	fails to run.
	'''
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False

	# Run the sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False

	lines = [line.strip().lower() for line in output.splitlines()]

	# An explicit "current mode" line settles the question immediately.
	for line in lines:
		if line.startswith("current mode:"):
			return line.endswith("enforcing")

	# Otherwise treat it as enforcing unless BOTH exec-heap and exec-mem
	# are explicitly allowed.
	can_exec_heap = any(l.startswith("allow_execheap") and l.endswith("on") for l in lines)
	can_exec_memory = any(l.startswith("allow_execmem") and l.endswith("on") for l in lines)

	return not (can_exec_heap and can_exec_memory)
class CPUID(object):
	def __init__(self):
		# Windows process handle for FlushInstructionCache; created lazily
		# the first time _asm_func runs on Windows.
		self.prochandle = None

		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()
	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Copy raw machine code into an executable memory segment and wrap
		it as a ctypes function. Returns (fun, address); the caller must
		free `address` afterwards (see _run_asm).
		NOTE(review): the mutable default `byte_code=[]` is safe here only
		because it is never mutated, merely joined.
		'''
		byte_code = bytes.join(b'', byte_code)
		address = None

		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000

			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")

			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")

			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")

			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")

			# Mark the memory segment as writeable only
			# (skipped under SE Linux enforcing, where mprotect would be denied)
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")

			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")

			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")

		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address
def _run_asm(self, *byte_code):
    # Execute a small machine-code stub and return its result as a uint32.
    #
    # The variadic byte strings are concatenated into one code buffer by
    # _asm_func, mapped executable, called with no arguments, and then the
    # mapping is torn down in the platform-appropriate way. The stubs used
    # by the callers all leave their result in EAX, which is what the
    # c_uint32 return type captures.
    restype = ctypes.c_uint32
    argtypes = ()
    func, address = self._asm_func(restype, argtypes, byte_code)

    # Call the byte code like a function
    retval = func()

    # Recompute the total code size so the right amount can be unmapped
    byte_code = bytes.join(b'', byte_code)
    size = ctypes.c_size_t(len(byte_code))

    # Free the function memory segment
    if DataSource.is_windows:
        MEM_RELEASE = ctypes.c_ulong(0x8000)
        ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
    else:
        # Remove the executable tag on the memory before handing it back
        # to the allocator (buffer came from valloc in _asm_func)
        READ_WRITE = 0x1 | 0x2
        if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
            raise Exception("Failed to mprotect")
        ctypes.pythonapi.free(ctypes.c_void_p(address))

    return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
return (
b"\x31\xC0" # xor eax,eax
)
def _zero_ecx(self):
return (
b"\x31\xC9" # xor ecx,ecx
)
def _one_eax(self):
return (
b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
    """Return the 12-character vendor ID string from CPUID leaf 0.

    The vendor string is packed across EBX, EDX, ECX (in that order),
    four ASCII characters per register, lowest byte first. Since
    _run_asm only captures EAX, each register is fetched with its own
    stub that moves the wanted register into EAX before returning.
    """
    cpuid = b"\x0f\xa2"
    ret = b"\xC3"

    # (mov eax,<reg>) opcodes for ebx, edx, ecx respectively
    registers = []
    for mov_reg_to_eax in (b"\x89\xD8", b"\x89\xD0", b"\x89\xC8"):
        registers.append(self._run_asm(
            self._zero_eax(),
            cpuid + mov_reg_to_eax + ret
        ))

    # Unpack each register into four ASCII characters, low byte first
    chars = [chr((reg >> shift) & 0xFF)
             for reg in registers
             for shift in (0, 8, 16, 24)]
    return ''.join(chars)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
    """Decode the version-information fields of CPUID leaf 1.

    Returns a dict with the stepping, model, family, processor type,
    extended model and extended family bit fields extracted from EAX.
    """
    eax = self._run_asm(
        self._one_eax(),
        b"\x0f\xa2"  # cpuid
        b"\xC3"      # ret
    )

    # EAX field layout for leaf 1: (name, bit shift, mask)
    layout = (
        ('stepping',        0,  0xF),   # 4 bits
        ('model',           4,  0xF),   # 4 bits
        ('family',          8,  0xF),   # 4 bits
        ('processor_type',  12, 0x3),   # 2 bits
        ('extended_model',  16, 0xF),   # 4 bits
        ('extended_family', 20, 0xFF),  # 8 bits
    )
    return {name: (eax >> shift) & mask for name, shift, mask in layout}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
    # Returns the highest extended-function CPUID leaf supported:
    # EAX after executing CPUID with EAX=0x80000000. Callers compare
    # this against 0x800000xx leaf numbers before querying them.
    max_extension_support = self._run_asm(
        b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
        b"\x0f\xa2"             # cpuid
        b"\xC3"                 # ret
    )

    return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
    """Return a sorted list of lowercase CPU feature-flag names.

    Reads the feature bits of CPUID leaf 1 (EDX and ECX), then — when
    max_extension_support allows — the leaf 7 extended features
    (EBX/ECX) and the leaf 0x80000001 AMD extended features (EBX/ECX).
    Commented-out entries are reserved or deliberately ignored bits.

    NOTE(review): leaf 7 is gated on ``max_extension_support >= 7``,
    but max_extension_support is the highest *extended* (0x800000xx)
    leaf, so that check is effectively always true — presumably the
    standard-leaf maximum was intended; confirm before changing.
    """
    # EDX
    edx = self._run_asm(
        self._one_eax(),
        b"\x0f\xa2"         # cpuid
        b"\x89\xD0"         # mov ax,dx
        b"\xC3"             # ret
    )

    # ECX
    ecx = self._run_asm(
        self._one_eax(),
        b"\x0f\xa2"         # cpuid
        b"\x89\xC8"         # mov ax,cx
        b"\xC3"             # ret
    )

    # Get the CPU flags
    flags = {
        'fpu' : _is_bit_set(edx, 0),
        'vme' : _is_bit_set(edx, 1),
        'de' : _is_bit_set(edx, 2),
        'pse' : _is_bit_set(edx, 3),
        'tsc' : _is_bit_set(edx, 4),
        'msr' : _is_bit_set(edx, 5),
        'pae' : _is_bit_set(edx, 6),
        'mce' : _is_bit_set(edx, 7),
        'cx8' : _is_bit_set(edx, 8),
        'apic' : _is_bit_set(edx, 9),
        #'reserved1' : _is_bit_set(edx, 10),
        'sep' : _is_bit_set(edx, 11),
        'mtrr' : _is_bit_set(edx, 12),
        'pge' : _is_bit_set(edx, 13),
        'mca' : _is_bit_set(edx, 14),
        'cmov' : _is_bit_set(edx, 15),
        'pat' : _is_bit_set(edx, 16),
        'pse36' : _is_bit_set(edx, 17),
        'pn' : _is_bit_set(edx, 18),
        'clflush' : _is_bit_set(edx, 19),
        #'reserved2' : _is_bit_set(edx, 20),
        'dts' : _is_bit_set(edx, 21),
        'acpi' : _is_bit_set(edx, 22),
        'mmx' : _is_bit_set(edx, 23),
        'fxsr' : _is_bit_set(edx, 24),
        'sse' : _is_bit_set(edx, 25),
        'sse2' : _is_bit_set(edx, 26),
        'ss' : _is_bit_set(edx, 27),
        'ht' : _is_bit_set(edx, 28),
        'tm' : _is_bit_set(edx, 29),
        'ia64' : _is_bit_set(edx, 30),
        'pbe' : _is_bit_set(edx, 31),

        'pni' : _is_bit_set(ecx, 0),
        'pclmulqdq' : _is_bit_set(ecx, 1),
        'dtes64' : _is_bit_set(ecx, 2),
        'monitor' : _is_bit_set(ecx, 3),
        'ds_cpl' : _is_bit_set(ecx, 4),
        'vmx' : _is_bit_set(ecx, 5),
        'smx' : _is_bit_set(ecx, 6),
        'est' : _is_bit_set(ecx, 7),
        'tm2' : _is_bit_set(ecx, 8),
        'ssse3' : _is_bit_set(ecx, 9),
        'cid' : _is_bit_set(ecx, 10),
        #'reserved3' : _is_bit_set(ecx, 11),
        'fma' : _is_bit_set(ecx, 12),
        'cx16' : _is_bit_set(ecx, 13),
        'xtpr' : _is_bit_set(ecx, 14),
        'pdcm' : _is_bit_set(ecx, 15),
        #'reserved4' : _is_bit_set(ecx, 16),
        'pcid' : _is_bit_set(ecx, 17),
        'dca' : _is_bit_set(ecx, 18),
        'sse4_1' : _is_bit_set(ecx, 19),
        'sse4_2' : _is_bit_set(ecx, 20),
        'x2apic' : _is_bit_set(ecx, 21),
        'movbe' : _is_bit_set(ecx, 22),
        'popcnt' : _is_bit_set(ecx, 23),
        'tscdeadline' : _is_bit_set(ecx, 24),
        'aes' : _is_bit_set(ecx, 25),
        'xsave' : _is_bit_set(ecx, 26),
        'osxsave' : _is_bit_set(ecx, 27),
        'avx' : _is_bit_set(ecx, 28),
        'f16c' : _is_bit_set(ecx, 29),
        'rdrnd' : _is_bit_set(ecx, 30),
        'hypervisor' : _is_bit_set(ecx, 31)
    }

    # Get a list of only the flags that are true
    flags = [k for k, v in flags.items() if v]

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
    if max_extension_support >= 7:
        # EBX
        ebx = self._run_asm(
            self._zero_ecx(),
            b"\xB8\x07\x00\x00\x00" # mov eax,7
            b"\x0f\xa2"         # cpuid
            b"\x89\xD8"         # mov ax,bx
            b"\xC3"             # ret
        )

        # ECX
        ecx = self._run_asm(
            self._zero_ecx(),
            b"\xB8\x07\x00\x00\x00" # mov eax,7
            b"\x0f\xa2"         # cpuid
            b"\x89\xC8"         # mov ax,cx
            b"\xC3"             # ret
        )

        # Get the extended CPU flags
        extended_flags = {
            #'fsgsbase' : _is_bit_set(ebx, 0),
            #'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
            'sgx' : _is_bit_set(ebx, 2),
            'bmi1' : _is_bit_set(ebx, 3),
            'hle' : _is_bit_set(ebx, 4),
            'avx2' : _is_bit_set(ebx, 5),
            #'reserved' : _is_bit_set(ebx, 6),
            'smep' : _is_bit_set(ebx, 7),
            'bmi2' : _is_bit_set(ebx, 8),
            'erms' : _is_bit_set(ebx, 9),
            'invpcid' : _is_bit_set(ebx, 10),
            'rtm' : _is_bit_set(ebx, 11),
            'pqm' : _is_bit_set(ebx, 12),
            #'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
            'mpx' : _is_bit_set(ebx, 14),
            'pqe' : _is_bit_set(ebx, 15),
            'avx512f' : _is_bit_set(ebx, 16),
            'avx512dq' : _is_bit_set(ebx, 17),
            'rdseed' : _is_bit_set(ebx, 18),
            'adx' : _is_bit_set(ebx, 19),
            'smap' : _is_bit_set(ebx, 20),
            'avx512ifma' : _is_bit_set(ebx, 21),
            'pcommit' : _is_bit_set(ebx, 22),
            'clflushopt' : _is_bit_set(ebx, 23),
            'clwb' : _is_bit_set(ebx, 24),
            'intel_pt' : _is_bit_set(ebx, 25),
            'avx512pf' : _is_bit_set(ebx, 26),
            'avx512er' : _is_bit_set(ebx, 27),
            'avx512cd' : _is_bit_set(ebx, 28),
            'sha' : _is_bit_set(ebx, 29),
            'avx512bw' : _is_bit_set(ebx, 30),
            'avx512vl' : _is_bit_set(ebx, 31),

            'prefetchwt1' : _is_bit_set(ecx, 0),
            'avx512vbmi' : _is_bit_set(ecx, 1),
            'umip' : _is_bit_set(ecx, 2),
            'pku' : _is_bit_set(ecx, 3),
            'ospke' : _is_bit_set(ecx, 4),
            #'reserved' : _is_bit_set(ecx, 5),
            'avx512vbmi2' : _is_bit_set(ecx, 6),
            #'reserved' : _is_bit_set(ecx, 7),
            'gfni' : _is_bit_set(ecx, 8),
            'vaes' : _is_bit_set(ecx, 9),
            'vpclmulqdq' : _is_bit_set(ecx, 10),
            'avx512vnni' : _is_bit_set(ecx, 11),
            'avx512bitalg' : _is_bit_set(ecx, 12),
            #'reserved' : _is_bit_set(ecx, 13),
            'avx512vpopcntdq' : _is_bit_set(ecx, 14),
            #'reserved' : _is_bit_set(ecx, 15),
            #'reserved' : _is_bit_set(ecx, 16),
            #'mpx0' : _is_bit_set(ecx, 17),
            #'mpx1' : _is_bit_set(ecx, 18),
            #'mpx2' : _is_bit_set(ecx, 19),
            #'mpx3' : _is_bit_set(ecx, 20),
            #'mpx4' : _is_bit_set(ecx, 21),
            'rdpid' : _is_bit_set(ecx, 22),
            #'reserved' : _is_bit_set(ecx, 23),
            #'reserved' : _is_bit_set(ecx, 24),
            #'reserved' : _is_bit_set(ecx, 25),
            #'reserved' : _is_bit_set(ecx, 26),
            #'reserved' : _is_bit_set(ecx, 27),
            #'reserved' : _is_bit_set(ecx, 28),
            #'reserved' : _is_bit_set(ecx, 29),
            'sgx_lc' : _is_bit_set(ecx, 30),
            #'reserved' : _is_bit_set(ecx, 31)
        }

        # Get a list of only the flags that are true
        extended_flags = [k for k, v in extended_flags.items() if v]
        flags += extended_flags

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
    if max_extension_support >= 0x80000001:
        # EBX
        ebx = self._run_asm(
            b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
            b"\x0f\xa2"         # cpuid
            b"\x89\xD8"         # mov ax,bx
            b"\xC3"             # ret
        )

        # ECX
        ecx = self._run_asm(
            b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
            b"\x0f\xa2"         # cpuid
            b"\x89\xC8"         # mov ax,cx
            b"\xC3"             # ret
        )

        # Get the extended CPU flags. Note the low bits of EBX repeat
        # the leaf-1 names, so duplicates can appear in the final list.
        extended_flags = {
            'fpu' : _is_bit_set(ebx, 0),
            'vme' : _is_bit_set(ebx, 1),
            'de' : _is_bit_set(ebx, 2),
            'pse' : _is_bit_set(ebx, 3),
            'tsc' : _is_bit_set(ebx, 4),
            'msr' : _is_bit_set(ebx, 5),
            'pae' : _is_bit_set(ebx, 6),
            'mce' : _is_bit_set(ebx, 7),
            'cx8' : _is_bit_set(ebx, 8),
            'apic' : _is_bit_set(ebx, 9),
            #'reserved' : _is_bit_set(ebx, 10),
            'syscall' : _is_bit_set(ebx, 11),
            'mtrr' : _is_bit_set(ebx, 12),
            'pge' : _is_bit_set(ebx, 13),
            'mca' : _is_bit_set(ebx, 14),
            'cmov' : _is_bit_set(ebx, 15),
            'pat' : _is_bit_set(ebx, 16),
            'pse36' : _is_bit_set(ebx, 17),
            #'reserved' : _is_bit_set(ebx, 18),
            'mp' : _is_bit_set(ebx, 19),
            'nx' : _is_bit_set(ebx, 20),
            #'reserved' : _is_bit_set(ebx, 21),
            'mmxext' : _is_bit_set(ebx, 22),
            'mmx' : _is_bit_set(ebx, 23),
            'fxsr' : _is_bit_set(ebx, 24),
            'fxsr_opt' : _is_bit_set(ebx, 25),
            'pdpe1gp' : _is_bit_set(ebx, 26),
            'rdtscp' : _is_bit_set(ebx, 27),
            #'reserved' : _is_bit_set(ebx, 28),
            'lm' : _is_bit_set(ebx, 29),
            '3dnowext' : _is_bit_set(ebx, 30),
            '3dnow' : _is_bit_set(ebx, 31),

            'lahf_lm' : _is_bit_set(ecx, 0),
            'cmp_legacy' : _is_bit_set(ecx, 1),
            'svm' : _is_bit_set(ecx, 2),
            'extapic' : _is_bit_set(ecx, 3),
            'cr8_legacy' : _is_bit_set(ecx, 4),
            'abm' : _is_bit_set(ecx, 5),
            'sse4a' : _is_bit_set(ecx, 6),
            'misalignsse' : _is_bit_set(ecx, 7),
            '3dnowprefetch' : _is_bit_set(ecx, 8),
            'osvw' : _is_bit_set(ecx, 9),
            'ibs' : _is_bit_set(ecx, 10),
            'xop' : _is_bit_set(ecx, 11),
            'skinit' : _is_bit_set(ecx, 12),
            'wdt' : _is_bit_set(ecx, 13),
            #'reserved' : _is_bit_set(ecx, 14),
            'lwp' : _is_bit_set(ecx, 15),
            'fma4' : _is_bit_set(ecx, 16),
            'tce' : _is_bit_set(ecx, 17),
            #'reserved' : _is_bit_set(ecx, 18),
            'nodeid_msr' : _is_bit_set(ecx, 19),
            #'reserved' : _is_bit_set(ecx, 20),
            'tbm' : _is_bit_set(ecx, 21),
            'topoext' : _is_bit_set(ecx, 22),
            'perfctr_core' : _is_bit_set(ecx, 23),
            'perfctr_nb' : _is_bit_set(ecx, 24),
            #'reserved' : _is_bit_set(ecx, 25),
            'dbx' : _is_bit_set(ecx, 26),
            'perftsc' : _is_bit_set(ecx, 27),
            'pci_l2i' : _is_bit_set(ecx, 28),
            #'reserved' : _is_bit_set(ecx, 29),
            #'reserved' : _is_bit_set(ecx, 30),
            #'reserved' : _is_bit_set(ecx, 31)
        }

        # Get a list of only the flags that are true
        extended_flags = [k for k, v in extended_flags.items() if v]
        flags += extended_flags

    flags.sort()
    return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
    """Return the 48-character processor brand string, or '' when
    CPUID leaves 0x80000002..0x80000004 are unsupported.

    Each of the three leaves yields 16 characters packed across
    EAX, EBX, ECX, EDX, lowest byte first. _run_asm only captures
    EAX, so every register needs its own stub.
    """
    if max_extension_support < 0x80000004:
        return ""

    leaf_loaders = [
        b"\xB8\x02\x00\x00\x80",  # mov ax,0x80000002
        b"\xB8\x03\x00\x00\x80",  # mov ax,0x80000003
        b"\xB8\x04\x00\x00\x80"   # mov ax,0x80000004
    ]
    # mov eax,<reg> opcodes for eax, ebx, ecx, edx respectively
    reg_movs = [b"\x89\xC0", b"\x89\xD8", b"\x89\xC8", b"\x89\xD0"]

    chars = []
    for loader in leaf_loaders:
        for reg_mov in reg_movs:
            reg = self._run_asm(
                loader,              # mov ax,0x8000000?
                b"\x0f\xa2"          # cpuid
                + reg_mov            # mov ax,<reg>
                + b"\xC3"            # ret
            )
            # Unpack the 4 ASCII bytes of this register, low byte first
            chars.extend(chr((reg >> shift) & 0xFF) for shift in (0, 8, 16, 24))

    # Strip any trailing NUL terminators and surrounding white space
    return ''.join(chars).strip("\0").strip()
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
    """Return L2 cache details decoded from CPUID leaf 0x80000006.

    Args:
        max_extension_support: highest supported extended leaf, as
            returned by ``get_max_extension_support()``.

    Returns:
        dict with ``size_kb``, ``line_size_b`` and ``associativity``
        keys, or ``{}`` when leaf 0x80000006 is unsupported.
    """
    cache_info = {}

    # Just return if the cache feature is not supported
    if max_extension_support < 0x80000006:
        return cache_info

    # ECX
    ecx = self._run_asm(
        b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
        b"\x0f\xa2"              # cpuid
        b"\x89\xC8"              # mov ax,cx
        b"\xC3"                  # ret
    )

    # ECX layout for leaf 0x80000006 (AMD APM Fn8000_0006 / Intel SDM):
    #   bits 31:16  L2 cache size in KB
    #   bits 15:12  associativity (encoded)
    #   bits  7:0   cache line size in bytes
    # BUGFIX: the previous decode had the fields rotated — it reported
    # the line size as 'size_kb', the associativity nibble as
    # 'line_size_b', and the size as 'associativity'.
    cache_info = {
        'size_kb'       : (ecx >> 16) & 0xFFFF,
        'line_size_b'   : ecx & 0xFF,
        'associativity' : (ecx >> 12) & 0xF
    }

    return cache_info
def get_ticks(self):
    # Read the CPU timestamp counter (RDTSC) and return it as an int,
    # or None when the machine word size is unrecognized. The CPUID
    # instruction before RDTSC serializes execution so earlier
    # instructions cannot be reordered past the read.
    # NOTE: the per-byte mnemonic comments below come from a 16-bit
    # disassembly of the opcodes; in 32/64-bit mode they operate on the
    # full-width registers.
    retval = None

    if DataSource.bits == '32bit':
        # Works on x86_32: the counter's high and low 32-bit halves are
        # written through the two pointer arguments.
        restype = None
        argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
        get_ticks_x86_32, address = self._asm_func(restype, argtypes,
            [
            b"\x55",         # push bp
            b"\x89\xE5",     # mov bp,sp
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid
            b"\x0F\x31",     # rdtsc
            b"\x8B\x5D\x08", # mov bx,[di+0x8]
            b"\x8B\x4D\x0C", # mov cx,[di+0xc]
            b"\x89\x13",     # mov [bp+di],dx
            b"\x89\x01",     # mov [bx+di],ax
            b"\x5D",         # pop bp
            b"\xC3"          # ret
            ]
        )

        high = ctypes.c_uint32(0)
        low = ctypes.c_uint32(0)

        get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
        # Recombine the two 32-bit halves into one 64-bit count
        retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
    elif DataSource.bits == '64bit':
        # Works on x86_64: EDX:EAX are merged into RAX and returned.
        restype = ctypes.c_uint64
        argtypes = ()
        get_ticks_x86_64, address = self._asm_func(restype, argtypes,
            [
            b"\x48",         # dec ax
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid
            b"\x0F\x31",     # rdtsc
            b"\x48",         # dec ax
            b"\xC1\xE2\x20", # shl dx,byte 0x20
            b"\x48",         # dec ax
            b"\x09\xD0",     # or ax,dx
            b"\xC3",         # ret
            ]
        )
        retval = get_ticks_x86_64()

    return retval
def get_raw_hz(self):
    """Estimate the CPU frequency in Hz by counting how many
    timestamp-counter ticks elapse during a one second sleep."""
    import time

    before = self.get_ticks()
    time.sleep(1)
    after = self.get_ticks()

    return after - before
def _actual_get_cpu_info_from_cpuid(queue):
    '''
    Warning! This function has the potential to crash the Python runtime.
    Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
    It will safely call this function in another process.

    The result (a dict, possibly empty) is base64-encoded and put on
    *queue* rather than returned, since this runs as a child process.
    '''

    # Pipe all output to nothing, so a crashing probe cannot corrupt
    # the parent's stdout/stderr
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return none if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        queue.put(_obj_to_b64({}))
        return

    # Return none if SE Linux is in enforcing mode
    cpuid = CPUID()
    if cpuid.is_selinux_enforcing:
        queue.put(_obj_to_b64({}))
        return

    # Get the cpu info from the CPUID register
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()

    processor_brand = cpuid.get_processor_brand(max_extension_support)

    # Get the Hz and scale (measured: takes ~1 second, see get_raw_hz)
    hz_actual = cpuid.get_raw_hz()
    hz_actual = _to_decimal_string(hz_actual)

    # Get the Hz and scale (advertised: parsed out of the brand string)
    hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

    # NOTE: rebinds `info`, folding the leaf-1 fields gathered above
    # into the final result dict
    info = {
    'vendor_id_raw' : cpuid.get_vendor_id(),
    'hardware_raw' : '',
    'brand_raw' : processor_brand,

    'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
    'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
    'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
    'hz_actual' : _hz_short_to_full(hz_actual, 0),

    'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
    'l2_cache_line_size' : cache_info['line_size_b'],
    'l2_cache_associativity' : hex(cache_info['associativity']),

    'stepping' : info['stepping'],
    'model' : info['model'],
    'family' : info['family'],
    'processor_type' : info['processor_type'],
    'extended_model' : info['extended_model'],
    'extended_family' : info['extended_family'],
    'flags' : cpuid.get_flags(max_extension_support)
    }

    # Drop empty/falsy fields before shipping the result back
    info = {k: v for k, v in info.items() if v}
    queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
    '''
    Returns the CPU info gathered by querying the X86 cpuid register in a new process.
    Returns {} on non X86 cpus.
    Returns {} if SELinux is in enforcing mode.
    '''

    from multiprocessing import Process, Queue

    # Return {} if can't cpuid
    if not DataSource.can_cpuid:
        return {}

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return {} if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        return {}

    try:
        # Run the CPUID probe in a subprocess, so that a crash while
        # executing raw machine code cannot take down this interpreter
        queue = Queue()
        p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
        p.start()

        # Block until the child exits. The previous implementation spun
        # in a `while p.is_alive(): p.join(0)` loop, which busy-waited
        # at 100% CPU for the child's whole runtime (>= 1 second, since
        # the probe sleeps to measure ticks in get_raw_hz).
        p.join()

        # Return {} if it failed
        if p.exitcode != 0:
            return {}

        # Return the result, only if there is something to read
        if not queue.empty():
            output = queue.get()
            return _b64_to_obj(output)
    except Exception:
        # Best effort: any failure (fork issues, queue errors, ...)
        # just means no cpuid info. Narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit are not swallowed.
        pass

    # Return {} if everything failed
    return {}
def _get_cpu_info_from_proc_cpuinfo():
    '''
    Returns the CPU info gathered from /proc/cpuinfo.
    Returns {} if /proc/cpuinfo is not found.
    '''
    try:
        # Just return {} if there is no cpuinfo
        if not DataSource.has_proc_cpuinfo():
            return {}

        returncode, output = DataSource.cat_proc_cpuinfo()
        if returncode != 0:
            return {}

        # Various fields; the alternate field names cover non-x86
        # /proc/cpuinfo layouts (ARM, PowerPC, ...)
        vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
        processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
        cache_size = _get_field(False, output, None, '', 'cache size')
        stepping = _get_field(False, output, int, 0, 'stepping')
        model = _get_field(False, output, int, 0, 'model')
        family = _get_field(False, output, int, 0, 'cpu family')
        hardware = _get_field(False, output, None, '', 'Hardware')

        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()

        # Convert from MHz string to Hz
        # NOTE(review): rstrip('mhz') strips a *character set*, not the
        # suffix; harmless here since the digits before the unit are
        # never in {m,h,z}, but be careful when editing.
        hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
        hz_actual = hz_actual.lower().rstrip('mhz').strip()
        hz_actual = _to_decimal_string(hz_actual)

        # Convert from GHz/MHz string to Hz (parsed out of the brand
        # string, e.g. "... @ 2.40GHz"); best effort only
        hz_advertised, scale = (None, 0)
        try:
            hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        except Exception:
            pass

        info = {
        'hardware_raw' : hardware,
        'brand_raw' : processor_brand,

        'l3_cache_size' : _to_friendly_bytes(cache_size),
        'flags' : flags,
        'vendor_id_raw' : vendor_id,
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        }

        # Make the Hz the same for actual and advertised if missing any
        if not hz_advertised or hz_advertised == '0.0':
            hz_advertised = hz_actual
            scale = 6
        elif not hz_actual or hz_actual == '0.0':
            hz_actual = hz_advertised

        # Add the Hz if there is one.
        # hz_actual came from 'cpu MHz' so its scale is hardcoded to 6.
        # NOTE(review): the guard uses `scale` (the advertised scale)
        # while the stored value uses 6 — looks asymmetric; confirm
        # whether the guard should also be 6.
        if _hz_short_to_full(hz_advertised, scale) > (0, 0):
            info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
            info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)

        if _hz_short_to_full(hz_actual, scale) > (0, 0):
            info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
            info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

        # Drop empty/falsy fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_cpufreq_info():
    '''
    Returns the CPU info gathered from cpufreq-info.
    Returns {} if cpufreq-info is not found.
    '''
    try:
        hz_brand, scale = '0.0', 0

        # Just return {} if the cpufreq-info tool is unavailable
        if not DataSource.has_cpufreq_info():
            return {}

        returncode, output = DataSource.cpufreq_info()
        if returncode != 0:
            return {}

        # Pull out e.g. " 2.40 GHz" from the "current CPU frequency is"
        # line; the AssertionError from a missing "Hz" is caught below
        hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
        i = hz_brand.find('Hz')
        assert(i != -1)
        hz_brand = hz_brand[0 : i+2].strip().lower()

        if hz_brand.endswith('mhz'):
            scale = 6
        elif hz_brand.endswith('ghz'):
            scale = 9
        # NOTE(review): rstrip strips a *character set*; the chained
        # rstrip('mhz').rstrip('ghz') happens to remove either suffix
        # correctly, but only because of how the letters overlap —
        # preserve this exact form when editing.
        hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
        hz_brand = _to_decimal_string(hz_brand)

        # cpufreq-info only gives the current frequency, so the same
        # value is used for both advertised and actual
        info = {
            'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
            'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
            'hz_advertised' : _hz_short_to_full(hz_brand, scale),
            'hz_actual' : _hz_short_to_full(hz_brand, scale),
        }

        # Drop empty/falsy fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_lscpu():
    '''
    Returns the CPU info gathered from lscpu.
    Returns {} if lscpu is not found.
    '''
    try:
        # Bail out when the lscpu binary is unavailable or fails
        if not DataSource.has_lscpu():
            return {}

        returncode, output = DataSource.lscpu()
        if returncode != 0:
            return {}

        info = {}

        # Clock speed: lscpu reports MHz
        new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
        if new_hz:
            new_hz = _to_decimal_string(new_hz)
            scale = 6
            info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
            info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
            info['hz_actual'] = _hz_short_to_full(new_hz, scale)

        # Plain string fields, copied through verbatim
        for label, key in (('Vendor ID', 'vendor_id_raw'),
                           ('Model name', 'brand_raw')):
            raw = _get_field(False, output, None, None, label)
            if raw:
                info[key] = raw

        # Numeric fields, kept only when they parse as integers
        for label, key in (('CPU family', 'family'),
                           ('Stepping', 'stepping'),
                           ('Model', 'model')):
            raw = _get_field(False, output, None, None, label)
            if raw and raw.isdigit():
                info[key] = int(raw)

        # Cache sizes, normalized to friendly byte strings
        for label, key in (('L1d cache', 'l1_data_cache_size'),
                           ('L1i cache', 'l1_instruction_cache_size'),
                           ('L2 cache', 'l2_cache_size'),
                           ('L3 cache', 'l3_cache_size')):
            raw = _get_field(False, output, None, None, label)
            if raw:
                info[key] = _to_friendly_bytes(raw)

        # Flags
        flags = _get_field(False, output, None, None, 'flags', 'Features')
        if flags:
            flags = flags.split()
            flags.sort()
            info['flags'] = flags

        # Drop empty/falsy fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_dmesg():
    '''
    Returns the CPU info gathered from dmesg.
    Returns {} if dmesg is not found or does not have the desired info.
    '''

    # No dmesg binary means nothing to parse
    if not DataSource.has_dmesg():
        return {}

    # A failed run, or one with no output, yields no info either
    returncode, output = DataSource.dmesg_a()
    if returncode != 0 or output is None:
        return {}

    return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
    '''
    Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
    Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.

    PowerPC only: decodes the first 8 bytes of the ibm,pa-features
    property into a sorted list of flag names (see the LoPAPR spec
    linked above for the bit assignments).
    '''
    try:
        # Just return {} if there is no lsprop
        if not DataSource.has_ibm_pa_features():
            return {}

        # If ibm,pa-features fails return {}
        returncode, output = DataSource.ibm_pa_features()
        if output == None or returncode != 0:
            return {}

        # Filter out invalid characters from output, keeping only the
        # hex digits of the property dump
        value = output.split("ibm,pa-features")[1].lower()
        value = [s for s in value if s in list('0123456789abcfed')]
        value = ''.join(value)

        # Get data converted to Uint32 chunks (bytes 0-3 and 4-7)
        left = int(value[0 : 8], 16)
        right = int(value[8 : 16], 16)

        # Get the CPU flags; bit numbering follows the spec's
        # big-endian byte/bit layout
        flags = {
            # Byte 0
            'mmu' : _is_bit_set(left, 0),
            'fpu' : _is_bit_set(left, 1),
            'slb' : _is_bit_set(left, 2),
            'run' : _is_bit_set(left, 3),
            #'reserved' : _is_bit_set(left, 4),
            'dabr' : _is_bit_set(left, 5),
            'ne' : _is_bit_set(left, 6),
            'wtr' : _is_bit_set(left, 7),

            # Byte 1
            'mcr' : _is_bit_set(left, 8),
            'dsisr' : _is_bit_set(left, 9),
            'lp' : _is_bit_set(left, 10),
            'ri' : _is_bit_set(left, 11),
            'dabrx' : _is_bit_set(left, 12),
            'sprg3' : _is_bit_set(left, 13),
            'rislb' : _is_bit_set(left, 14),
            'pp' : _is_bit_set(left, 15),

            # Byte 2
            'vpm' : _is_bit_set(left, 16),
            'dss_2.05' : _is_bit_set(left, 17),
            #'reserved' : _is_bit_set(left, 18),
            'dar' : _is_bit_set(left, 19),
            #'reserved' : _is_bit_set(left, 20),
            'ppr' : _is_bit_set(left, 21),
            'dss_2.02' : _is_bit_set(left, 22),
            'dss_2.06' : _is_bit_set(left, 23),

            # Byte 3
            'lsd_in_dscr' : _is_bit_set(left, 24),
            'ugr_in_dscr' : _is_bit_set(left, 25),
            #'reserved' : _is_bit_set(left, 26),
            #'reserved' : _is_bit_set(left, 27),
            #'reserved' : _is_bit_set(left, 28),
            #'reserved' : _is_bit_set(left, 29),
            #'reserved' : _is_bit_set(left, 30),
            #'reserved' : _is_bit_set(left, 31),

            # Byte 4
            'sso_2.06' : _is_bit_set(right, 0),
            #'reserved' : _is_bit_set(right, 1),
            #'reserved' : _is_bit_set(right, 2),
            #'reserved' : _is_bit_set(right, 3),
            #'reserved' : _is_bit_set(right, 4),
            #'reserved' : _is_bit_set(right, 5),
            #'reserved' : _is_bit_set(right, 6),
            #'reserved' : _is_bit_set(right, 7),

            # Byte 5
            'le' : _is_bit_set(right, 8),
            'cfar' : _is_bit_set(right, 9),
            'eb' : _is_bit_set(right, 10),
            'lsq_2.07' : _is_bit_set(right, 11),
            #'reserved' : _is_bit_set(right, 12),
            #'reserved' : _is_bit_set(right, 13),
            #'reserved' : _is_bit_set(right, 14),
            #'reserved' : _is_bit_set(right, 15),

            # Byte 6
            'dss_2.07' : _is_bit_set(right, 16),
            #'reserved' : _is_bit_set(right, 17),
            #'reserved' : _is_bit_set(right, 18),
            #'reserved' : _is_bit_set(right, 19),
            #'reserved' : _is_bit_set(right, 20),
            #'reserved' : _is_bit_set(right, 21),
            #'reserved' : _is_bit_set(right, 22),
            #'reserved' : _is_bit_set(right, 23),

            # Byte 7
            #'reserved' : _is_bit_set(right, 24),
            #'reserved' : _is_bit_set(right, 25),
            #'reserved' : _is_bit_set(right, 26),
            #'reserved' : _is_bit_set(right, 27),
            #'reserved' : _is_bit_set(right, 28),
            #'reserved' : _is_bit_set(right, 29),
            #'reserved' : _is_bit_set(right, 30),
            #'reserved' : _is_bit_set(right, 31),
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        info = {
            'flags' : flags
        }
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
    '''
    Returns the CPU info gathered from /var/run/dmesg.boot.
    Returns {} if dmesg is not found or does not have the desired info.
    '''

    # Nothing to do when the boot log file is absent
    if not DataSource.has_var_run_dmesg_boot():
        return {}

    # A failed read, or one with no output, yields no info either
    returncode, output = DataSource.cat_var_run_dmesg_boot()
    if returncode != 0 or output is None:
        return {}

    return _parse_dmesg_output(output)
def _get_cpu_info_from_sysinfo():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.
    '''
    # Merge both parser versions; v2 values win on key collisions
    merged = {}
    for probe in (_get_cpu_info_from_sysinfo_v1, _get_cpu_info_from_sysinfo_v2):
        merged.update(probe())
    return merged
def _get_cpu_info_from_sysinfo_v1():
    '''
    Returns the CPU info gathered from sysinfo (Haiku/BeOS tool,
    older output format).
    Returns {} if sysinfo is not found.

    Parsing relies on literal string splits against the sysinfo
    output; any layout mismatch raises and falls through to {}.
    '''

    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields (vendor/cache not available in this format)
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
        model = int(output.split(', model ')[1].split(',')[0].strip())
        family = int(output.split(', family ')[1].split(',')[0].strip())

        # Flags: double-tab indented lines hold the feature names
        flags = []
        for line in output.split('\n'):
            if line.startswith('\t\t'):
                for flag in line.strip().lower().split():
                    flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz (from the brand string);
        # no measured frequency is available, so actual == advertised
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = hz_advertised

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),

        'l2_cache_size' : _to_friendly_bytes(cache_size),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop empty/falsy fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_sysinfo_v2():
    '''
    Returns the CPU info gathered from sysinfo (Haiku/BeOS tool,
    newer output format with a "Signature:" line and feature sections).
    Returns {} if sysinfo is not found.

    Parsing relies on literal string splits against the sysinfo
    output; any layout mismatch raises and falls through to {}.
    '''

    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields (vendor/cache not available in this format)
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        signature = output.split('Signature:')[1].split('\n')[0].strip()
        #
        stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
        model = int(signature.split('model ')[1].split(',')[0].strip())
        family = int(signature.split('family ')[1].split(',')[0].strip())

        # Flags
        def get_subsection_flags(output):
            # Collect the indented flag names that follow a section
            # header until the first non-indented line
            retval = []
            for line in output.split('\n')[1:]:
                if not line.startswith('    ') and not line.startswith('  '): break
                for entry in line.strip().lower().split(' '):
                    retval.append(entry)

            return retval

        flags = get_subsection_flags(output.split('Features: ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
        flags.sort()

        # Convert from GHz/MHz string to Hz, taken from the
        # "... running at NNNmhz" header line; actual == advertised
        lines = [n for n in output.split('\n') if n]
        raw_hz = lines[0].split('running at ')[1].strip().lower()
        hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
        hz_advertised = _to_decimal_string(hz_advertised)
        hz_actual = hz_advertised

        scale = 0
        if raw_hz.endswith('mhz'):
            scale = 6
        elif raw_hz.endswith('ghz'):
            scale = 9

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),

        'l2_cache_size' : _to_friendly_bytes(cache_size),

        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop empty/falsy fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_wmic():
    '''
    Returns the CPU info gathered from WMI.
    Returns {} if not on Windows, or wmic is not installed.
    '''
    try:
        # Just return {} if not Windows or there is no wmic
        if not DataSource.is_windows or not DataSource.has_wmic():
            return {}

        returncode, output = DataSource.wmic_cpu()
        if output == None or returncode != 0:
            return {}

        # Break the list into key values pairs.
        # NOTE(review): a line containing more than one '=' makes the
        # dict comprehension's 2-tuple unpack raise, which the outer
        # except silently turns into {} — confirm wmic never emits such
        # lines for the fields used here.
        value = output.split("\n")
        value = [s.rstrip().split('=') for s in value if '=' in s]
        value = {k: v for k, v in value if v}

        # Get the advertised MHz (parsed out of the brand string)
        processor_brand = value.get('Name')
        hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

        # Get the actual MHz (WMI reports CurrentClockSpeed in MHz)
        hz_actual = value.get('CurrentClockSpeed')
        scale_actual = 6
        if hz_actual:
            hz_actual = _to_decimal_string(hz_actual)

        # Get cache sizes (WMI reports them as KB counts)
        l2_cache_size = value.get('L2CacheSize')
        if l2_cache_size:
            l2_cache_size = l2_cache_size + ' KB'

        l3_cache_size = value.get('L3CacheSize')
        if l3_cache_size:
            l3_cache_size = l3_cache_size + ' KB'

        # Get family, model, and stepping by scanning the Description
        # (or Caption) string for the "Family N Model N Stepping N" words
        family, model, stepping = '', '', ''
        description = value.get('Description') or value.get('Caption')
        entries = description.split(' ')

        if 'Family' in entries and entries.index('Family') < len(entries)-1:
            i = entries.index('Family')
            family = int(entries[i + 1])

        if 'Model' in entries and entries.index('Model') < len(entries)-1:
            i = entries.index('Model')
            model = int(entries[i + 1])

        if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
            i = entries.index('Stepping')
            stepping = int(entries[i + 1])

        info = {
            'vendor_id_raw' : value.get('Manufacturer'),
            'brand_raw' : processor_brand,

            'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
            'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
            'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
            'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

            'l2_cache_size' : l2_cache_size,
            'l3_cache_size' : l3_cache_size,

            'stepping' : stepping,
            'model' : model,
            'family' : family,
        }

        # Drop empty/falsy fields
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_registry():
	'''
	Returns the CPU info gathered from the Windows Registry
	(HKLM\\Hardware\\Description\\System\\CentralProcessor\\0).
	Returns {} if not on Windows or on any parsing failure.
	FIXME: Is missing many of the newer CPU flags like sse3
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}
		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()
		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()
		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)
		# Get the actual CPU Hz (the "~Mhz" registry value, so MHz units)
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)
		# Get the advertised CPU Hz out of the brand string
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		# (scale 6 because the registry value is in MHz)
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()
		def is_set(bit):
			# FeatureSet bits are numbered from the most-significant end:
			# bit 0 corresponds to mask 0x80000000.
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval
		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}
		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 6),
		'flags' : flags
		}
		# Drop empty/zero entries so downstream merging only sees real values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat (Solaris tools).
	isainfo supplies the flag list; "kstat -m cpu_info" supplies the
	vendor/brand/stepping/model/family and clock fields.
	Returns {} if isainfo or kstat are not found or fail.
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}
		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}
		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}
		# Various fields, pulled out of the tab-prefixed kstat key lines
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
		# Flags: the last line of "isainfo -vb" output
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()
		# Advertised speed comes from clock_MHz, so scale by 10**6
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		# current_clock_Hz is already in Hz, hence scale 0 below
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}
		# Drop empty/zero entries so downstream merging only sees real values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns family/model/stepping parsed out of platform.uname()'s
	processor string (e.g. "... Family 6 Model 58 Stepping 9").
	Returns {} when nothing can be parsed.
	'''
	try:
		processor = DataSource.uname_string_raw.split(',')[0]
		tokens = processor.split(' ')

		def grab_int_after(label):
			# Return the int following `label` in the token list, else None.
			if label in tokens and tokens.index(label) < len(tokens) - 1:
				return int(tokens[tokens.index(label) + 1])
			return None

		info = {
		'family' : grab_int_after('Family'),
		'model' : grab_int_after('Model'),
		'stepping' : grab_int_after('Stepping')
		}
		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Each source below is tried in order and merged via _copy_new_fields, so
	for any given field the first source that supplies a value wins
	(flags from all sources are unioned).
	Returns {} if nothing is found.
	'''
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	# Describe the interpreter (e.g. "3.7.2.final.0 (64 bit)")
	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
	info = {
	'python_version' : PYTHON_VERSION,
	'cpuinfo_version' : CPUINFO_VERSION,
	'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
	'arch' : arch,
	'bits' : bits,
	'count' : DataSource.cpu_count,
	'arch_string_raw' : DataSource.arch_string_raw,
	}
	# Try the Windows wmic
	_copy_new_fields(info, _get_cpu_info_from_wmic())
	# Try the Windows registry
	_copy_new_fields(info, _get_cpu_info_from_registry())
	# Try /proc/cpuinfo
	_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())
	# Try cpufreq-info
	_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())
	# Try LSCPU
	_copy_new_fields(info, _get_cpu_info_from_lscpu())
	# Try sysctl
	_copy_new_fields(info, _get_cpu_info_from_sysctl())
	# Try kstat
	_copy_new_fields(info, _get_cpu_info_from_kstat())
	# Try dmesg
	_copy_new_fields(info, _get_cpu_info_from_dmesg())
	# Try /var/run/dmesg.boot
	_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
	# Try lsprop ibm,pa-features
	_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())
	# Try sysinfo
	_copy_new_fields(info, _get_cpu_info_from_sysinfo())
	# Try querying the CPU cpuid register
	_copy_new_fields(info, _get_cpu_info_from_cpuid())
	# Try platform.uname
	_copy_new_fields(info, _get_cpu_info_from_platform_uname())
	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result as a JSON string, or "{}" when the worker
	process fails.
	'''
	import json
	output = None
	# If running under pyinstaller, run normally
	if getattr(sys, 'frozen', False):
		info = _get_cpu_info_internal()
		output = json.dumps(info)
		output = "{0}".format(output)
	# if not running under pyinstaller, run in another process.
	# This is done because multiprocessing has a design flaw that
	# causes non main programs to run multiple times on Windows.
	else:
		from subprocess import Popen, PIPE
		# Re-run this very file with --json and capture its stdout
		command = [sys.executable, __file__, '--json']
		p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
		output = p1.communicate()[0]
		if p1.returncode != 0:
			return "{}"
		if not IS_PY2:
			output = output.decode(encoding='UTF-8')
	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict.
	'''
	import json
	# Parse the JSON back into a dict; _utf_to_str converts unicode
	# strings to byte strings when running under Python 2.
	return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
	'''
	Command line entry point: prints the gathered CPU info either as
	JSON (--json), as the library version (--version), or as a
	human-readable field listing.  Exits non-zero on an unsupported
	architecture or when no info could be gathered.
	'''
	from argparse import ArgumentParser
	import json
	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()
	# Bail out early on unsupported architectures
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)
	info = _get_cpu_info_internal()
	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)
	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		print('Python Version: {0}'.format(info.get('python_version', '')))
		print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
		print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
		print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
		print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
		print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
		print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
		print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
		print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
		print('Arch: {0}'.format(info.get('arch', '')))
		print('Bits: {0}'.format(info.get('bits', '')))
		print('Count: {0}'.format(info.get('count', '')))
		print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
		print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
		print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
		print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
		print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
		print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
		print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
		print('Stepping: {0}'.format(info.get('stepping', '')))
		print('Model: {0}'.format(info.get('model', '')))
		print('Family: {0}'.format(info.get('family', '')))
		print('Processor Type: {0}'.format(info.get('processor_type', '')))
		print('Extended Model: {0}'.format(info.get('extended_model', '')))
		print('Extended Family: {0}'.format(info.get('extended_family', '')))
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# When run as a script, emit the CPU info; when imported as a module,
# just verify the host architecture is supported (raises if it is not).
if __name__ == '__main__':
	main()
else:
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_sysinfo_v1
|
python
|
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo (Haiku/BeOS style output).
	Returns {} if sysinfo is not found or its output cannot be parsed.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}
		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}
		# Various fields (vendor/cache not available in this output format)
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())
		# Flags: double-tab-indented lines hold the feature names
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz; the brand string is the only
		# frequency source here, so actual == advertised
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, scale),
		'l2_cache_size' : _to_friendly_bytes(cache_size),
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}
		# Drop empty/zero entries so downstream merging only sees real values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
|
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1810-L1866
| null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# py-cpuinfo's own version, as a tuple plus a dotted display string.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
# The Windows registry module was renamed _winreg -> winreg in Python 3;
# try both spellings.  On non-Windows platforms neither import succeeds
# and the name winreg is simply left undefined (it is only used on Windows).
try:
	import _winreg as winreg
except ImportError as err:
	try:
		import winreg
	except ImportError as err:
		pass
IS_PY2 = sys.version_info[0] == 2  # True when running under Python 2
class DataSource(object):
	'''
	Central access point for everything read from the host: static
	platform facts (class attributes) plus thin wrappers around the
	external commands and Windows registry keys the parsers consume.
	The has_*() probes report whether a given tool/file is available;
	the remaining static methods run the tool and return
	(returncode, stdout) via _run_and_get_stdout.
	'''
	# Static host facts, captured once at import time
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	uname_string_raw = platform.uname()[5]
	can_cpuid = True
	# --- availability probes ---
	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')
	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0
	@staticmethod
	def has_var_run_dmesg_boot():
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0
	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0
	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0
	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0
	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0
	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0
	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0
	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0
	@staticmethod
	def has_wmic():
		# wmic is probed by actually running it, not by PATH lookup
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0
	# --- command runners: each returns (returncode, stdout) ---
	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])
	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])
	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])
	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])
	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])
	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])
	@staticmethod
	def ibm_pa_features():
		import glob
		# Returns None implicitly when no device-tree entry exists
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])
	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
	# --- Windows registry readers (HKLM\...\CentralProcessor\0) ---
	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()
	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw
	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw
	@staticmethod
	def winreg_hz_actual():
		# The "~Mhz" value is the clock speed in MHz
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual
	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''
	Runs `command` and returns (returncode, stdout).  When pipe_command
	is given, command's stdout is fed into pipe_command and the second
	process's returncode/stdout are returned instead.
	On Python 3 stdout is decoded from UTF-8 to str.
	'''
	from subprocess import Popen, PIPE
	proc = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		piped = Popen(pipe_command, stdin=proc.stdout, stdout=PIPE, stderr=PIPE)
		# Close our handle so the first process sees SIGPIPE if the
		# second one exits early
		proc.stdout.close()
		proc = piped
	raw_output = proc.communicate()[0]
	if not IS_PY2:
		raw_output = raw_output.decode(encoding='UTF-8')
	return proc.returncode, raw_output
# Make sure we are running on a supported system
def _check_arch():
	'''Raises an Exception unless the host CPU architecture is one
	py-cpuinfo can handle (X86, plus some ARM and PPC variants).'''
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	supported = ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''Recursively convert unicode strings inside `input` to byte
	strings when running under Python 2.  On Python 3 everything is
	returned unchanged (str is already unicode there).'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(item) for item in input]
	if isinstance(input, dict):
		return {_utf_to_str(key): _utf_to_str(value) for key, value in input.items()}
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''Like _get_field_actual, but optionally converts the found value
	with `convert_to` and falls back to default_value on a miss or a
	failed conversion.'''
	found = _get_field_actual(cant_be_number, raw_string, field_names)
	# No matching field at all
	if found is None:
		return default_value
	# Apply the optional converter, falling back on failure
	if convert_to:
		try:
			return convert_to(found)
		except:
			return default_value
	return found
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''Parse a human-readable string like "2.80GHz" into the
	(hz, scale) pair used by _hz_short_to_full.
	Returns (0, 0) on bad input.'''
	try:
		text = hz_string.strip().lower()
		# Pick the power-of-ten scale from the unit suffix
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0
		else:
			scale = None
		number = "".join(ch for ch in text if ch.isdigit() or ch == '.').strip()
		if not '.' in number:
			number += '.0'
		# _hz_short_to_full returns (0, 0) itself on a bad number/scale
		return _hz_short_to_full(number, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''
	Render a decimal-string frequency plus power-of-ten scale as a
	human-friendly string, e.g. ("1.5", 9) -> "1.5000 GHz".
	Returns "0.0000 Hz" on bad input.
	'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)
		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')
		# Pick the unit from the number of integer digits
		# (>9 digits -> GHz, >6 -> MHz, >3 -> KHz, else Hz)
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3
		# Get the Hz with the dot at the new scaled point
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		result = result.rstrip('0')
		return result
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''
	Extract the advertised clock speed from a CPU brand string such as
	"Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz".
	Returns (hz_string, scale) where scale is the power of ten implied
	by the MHz/GHz suffix, or ('0.0', 0) when no frequency is present.
	'''
	# Just return 0 if the processor brand does not have the Hz
	if not 'hz' in cpu_string.lower():
		return ('0.0', 0)

	hz = cpu_string.lower()
	scale = 0

	if hz.endswith('mhz'):
		scale = 6
	elif hz.endswith('ghz'):
		scale = 9
	if '@' in hz:
		hz = hz.split('@')[1]
	else:
		# The frequency is the last whitespace-separated token.  Guard the
		# single-token case (e.g. "2800MHz"), where rsplit returns only one
		# element and the old unconditional [1] raised IndexError.
		parts = hz.rsplit(None, 1)
		hz = parts[1] if len(parts) > 1 else parts[0]

	# NOTE: rstrip removes a *set* of characters (m/h/z then g/h/z), which
	# is safe here because the token ends in a mhz/ghz suffix.
	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	hz = _to_decimal_string(hz)

	return (hz, scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''
	Deluxe brand-string parser for dmesg-style CPU lines that carry extra
	"(key: value, ...)" groups, e.g.
	'Intel(R) CPU @ 2.4GHz (2400.01-MHz K8-class CPU) (origin="GenuineIntel", family: 0x6)'.
	Returns (hz, scale, brand, vendor_id, stepping, model, family),
	with None for any field that is absent.
	'''
	import re
	# Find all the strings inside brackets ()
	starts = [m.start() for m in re.finditer('\(', cpu_string)]
	ends = [m.start() for m in re.finditer('\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]
	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# Values are hex strings like "0x9"
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)
	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True
	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)
	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					# Re-shape into "CPU @ <freq>" so the plain parser accepts it
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break
	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Extract CPU info from raw dmesg/dmesg.boot text (BSD-style boot logs).
	Picks the richest "CPU:"/"CPU0:"/"CPU1:" line, then supplements it
	with the "Origin=" and "Features=" lines when present.
	Returns {} when nothing parseable is found or on any error.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
			output.split(' CPU1:')[1:] + \
			output.split(' CPU:')[1:] + \
			output.split('\nCPU0:')[1:] + \
			output.split('\nCPU1:')[1:] + \
			output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]
		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string
		# If no CPU string was found, return {}
		if not best_string:
			return {}
		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
		# Origin line overrides the bracket-parsed vendor/stepping/model/family
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)
		# Features: flags come from the <...> list after each Features key
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])
		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}
		# Only report frequencies when a real advertised value was found
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
		return {k: v for k, v in info.items() if v}
	except:
		#raise
		pass
	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''
	Returns True when SELinux restrictions would block the CPUID
	byte-code trick: either the policy is explicitly enforcing, or
	executable heap/memory is not allowed.  Returns False when the
	sestatus tool is missing or fails.
	'''
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False
	# Run the sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False
	# Figure out if explicitly in enforcing mode
	for line in output.splitlines():
		line = line.strip().lower()
		if line.startswith("current mode:"):
			if line.endswith("enforcing"):
				return True
			else:
				return False
	# Figure out if we can execute heap and execute memory
	# (only reached when no "current mode:" line was present)
	can_selinux_exec_heap = False
	can_selinux_exec_memory = False
	for line in output.splitlines():
		line = line.strip().lower()
		if line.startswith("allow_execheap") and line.endswith("on"):
			can_selinux_exec_heap = True
		elif line.startswith("allow_execmem") and line.endswith("on"):
			can_selinux_exec_memory = True
	# Enforcing (for our purposes) when either capability is missing
	return (not can_selinux_exec_heap or not can_selinux_exec_memory)
class CPUID(object):
	'''
	Queries the X86 CPUID instruction by generating raw machine code at
	runtime, marking it executable, and calling it through ctypes.

	NOTE(review): this only makes sense on X86/X86_64; callers are expected
	to check the architecture before instantiating (see
	_actual_get_cpu_info_from_cpuid).
	'''
	def __init__(self):
		# Cached Windows process handle, lazily created by _asm_func for
		# FlushInstructionCache
		self.prochandle = None

		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()

	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Copies *byte_code* (a list of bytes objects) into an executable
		memory segment and returns (callable, address). The caller is
		responsible for freeing *address* (see _run_asm).

		NOTE: the mutable default byte_code=[] is harmless here because the
		parameter is immediately rebound, never mutated.
		'''
		byte_code = bytes.join(b'', byte_code)
		address = None

		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000

			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")

			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")

			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")

			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())

			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")

			# Mark the memory segment as writeable only
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")

			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")

			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")

		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address

	def _run_asm(self, *byte_code):
		'''
		Assembles *byte_code* into a no-argument function returning a
		uint32, calls it once, frees the executable memory, and returns
		the result.
		'''
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)

		# Call the byte code like a function
		retval = func()

		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))

		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")

			ctypes.pythonapi.free(ctypes.c_void_p(address))

		return retval

	# FIXME: We should not have to use different instructions to
	# set eax to 0 or 1, on 32bit and 64bit machines.
	def _zero_eax(self):
		# Machine code that clears EAX (selects CPUID leaf 0)
		return (
			b"\x31\xC0"         # xor eax,eax
		)

	def _zero_ecx(self):
		# Machine code that clears ECX (selects CPUID sub-leaf 0)
		return (
			b"\x31\xC9"         # xor ecx,ecx
		)

	def _one_eax(self):
		# Machine code that sets EAX to 1 (selects CPUID leaf 1)
		return (
			b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
		)

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		'''
		Returns the 12-character vendor ID string from CPUID leaf 0
		(e.g. "GenuineIntel"), assembled from EBX, EDX, ECX in that order.
		'''
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2"         # cpuid
			b"\x89\xD8"         # mov ax,bx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# Each 4bits is a ascii letter in the name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)

		return vendor_id

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		'''
		Returns a dict with stepping/model/family/processor_type and the
		extended model/family fields, unpacked from EAX of CPUID leaf 1.
		'''
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\xC3"             # ret
		)

		# Get the CPU info
		stepping        = (eax >> 0) & 0xF # 4 bits
		model           = (eax >> 4) & 0xF # 4 bits
		family          = (eax >> 8) & 0xF # 4 bits
		processor_type  = (eax >> 12) & 0x3 # 2 bits
		extended_model  = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits

		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		'''
		Returns the highest supported extended CPUID leaf
		(EAX of CPUID leaf 0x80000000).
		'''
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2"             # cpuid
			b"\xC3"                 # ret
		)

		return max_extension_support

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		'''
		Returns a sorted list of CPU feature flag names, decoded from the
		feature bits of CPUID leaf 1, and (when supported) leaf 7 and
		extended leaf 0x80000001.
		'''
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),

			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),

				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),

				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		flags.sort()
		return flags

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		'''
		Returns the 48-byte processor brand string from extended CPUID
		leaves 0x80000002..0x80000004, or "" when unsupported.
		'''
		processor_brand = ""

		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC0"   # mov ax,ax
					b"\xC3"       # ret
				)

				# EBX
				ebx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD8"   # mov ax,bx
					b"\xC3"       # ret
				)

				# ECX
				ecx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC8"   # mov ax,cx
					b"\xC3"       # ret
				)

				# EDX
				edx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD0"   # mov ax,dx
					b"\xC3"       # ret
				)

				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)

		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()

		return processor_brand

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
	def get_cache(self, max_extension_support):
		'''
		Returns the L2 cache description from extended CPUID leaf
		0x80000006 as a dict with keys size_kb/line_size_b/associativity,
		or an EMPTY dict when the leaf is unsupported. Callers must not
		assume the keys are present.
		'''
		cache_info = {}

		# Just return if the cache feature is not supported
		if max_extension_support < 0x80000006:
			return cache_info

		# ECX
		ecx = self._run_asm(
			b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
			b"\x0f\xa2"              # cpuid
			b"\x89\xC8"              # mov ax,cx
			b"\xC3"                   # ret
		)

		cache_info = {
			'size_kb' : ecx & 0xFF,
			'line_size_b' : (ecx >> 12) & 0xF,
			'associativity' : (ecx >> 16) & 0xFFFF
		}

		return cache_info

	def get_ticks(self):
		'''
		Returns the CPU time stamp counter via RDTSC (serialized with a
		preceding CPUID), as a 64-bit integer, or None on unknown bitness.
		'''
		retval = None

		if DataSource.bits == '32bit':
			# Works on x86_32
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)

			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)

			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)
			retval = get_ticks_x86_64()

		return retval

	def get_raw_hz(self):
		'''
		Estimates the CPU frequency in Hz by counting time stamp counter
		ticks over a one second sleep.

		NOTE(review): on CPUs with an invariant/throttled TSC this measures
		the TSC rate, which may differ from the actual core clock.
		'''
		import time

		start = self.get_ticks()

		time.sleep(1)

		end = self.get_ticks()

		ticks = (end - start)

		return ticks
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.

	Puts a base64-encoded info dict (possibly empty) on *queue*.
	'''

	# Pipe all output to nothing
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return

	# Return none if SE Linux is in enforcing mode
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return

	# Get the cpu info from the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()

	processor_brand = cpuid.get_processor_brand(max_extension_support)

	# Get the Hz and scale
	hz_actual = cpuid.get_raw_hz()
	hz_actual = _to_decimal_string(hz_actual)

	# Get the Hz and scale
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
	info = {
	'vendor_id_raw' : cpuid.get_vendor_id(),
	'hardware_raw' : '',
	'brand_raw' : processor_brand,

	'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
	'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
	'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
	'hz_actual' : _hz_short_to_full(hz_actual, 0),

	'stepping' : info['stepping'],
	'model' : info['model'],
	'family' : info['family'],
	'processor_type' : info['processor_type'],
	'extended_model' : info['extended_model'],
	'extended_family' : info['extended_family'],
	'flags' : cpuid.get_flags(max_extension_support)
	}

	# FIX: get_cache returns {} when CPUID leaf 0x80000006 is unsupported.
	# Indexing the missing keys raised a KeyError, crashed this subprocess,
	# and silently discarded ALL of the CPUID info. Only add the L2 cache
	# fields when they were actually reported.
	if cache_info:
		info['l2_cache_size'] = _to_friendly_bytes(cache_info['size_kb'])
		info['l2_cache_line_size'] = cache_info['line_size_b']
		info['l2_cache_associativity'] = hex(cache_info['associativity'])

	info = {k: v for k, v in info.items() if v}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue

	# Bail out when cpuid querying is not available on this platform
	if not DataSource.can_cpuid:
		return {}

	# Only X86 CPUs expose the cpuid instruction
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in ['X86_32', 'X86_64']:
		return {}

	try:
		# Run the actual query in a throwaway subprocess, because executing
		# generated machine code can crash the Python runtime
		result_queue = Queue()
		worker = Process(target=_actual_get_cpu_info_from_cpuid, args=(result_queue,))
		worker.start()

		# Wait for the subprocess to finish
		while worker.is_alive():
			worker.join(0)

		# A non-zero exit code means the query crashed
		if worker.exitcode != 0:
			return {}

		# Decode the result, if the subprocess produced one
		if not result_queue.empty():
			return _b64_to_obj(result_queue.get())
	except:
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields (alternate field names cover different kernels/arches)
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		# (parsed out of the brand string; may legitimately fail)
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
		'hardware_raw' : hardware,
		'brand_raw' : processor_brand,

		'l3_cache_size' : _to_friendly_bytes(cache_size),
		'flags' : flags,
		'vendor_id_raw' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		# NOTE(review): hz_actual comes from 'cpu MHz' so the hard-coded
		# scale 6 below is correct for it, but the guard re-uses `scale`,
		# which can be 9 when the brand string advertised GHz — confirm
		# this mismatch is intended.
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Drop empty/falsy entries
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Pull the frequency text out of the report, e.g. "2.80 GHz"
		freq_text = output.split('current CPU frequency is')[1].split('\n')[0]
		end = freq_text.find('Hz')
		assert(end != -1)
		freq_text = freq_text[0 : end+2].strip().lower()

		# Work out the unit scale from the suffix
		scale = 0
		if freq_text.endswith('mhz'):
			scale = 6
		elif freq_text.endswith('ghz'):
			scale = 9
		freq_text = freq_text.rstrip('mhz').rstrip('ghz').strip()
		freq_text = _to_decimal_string(freq_text)

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(freq_text, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(freq_text, scale),
			'hz_advertised' : _hz_short_to_full(freq_text, scale),
			'hz_actual' : _hz_short_to_full(freq_text, scale),
		}

		# Drop empty/falsy entries
		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''
	try:
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# Frequency: prefer the max MHz, fall back to the current MHz
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			scale = 6
			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		# Plain string fields, kept as-is
		for key, label in [('vendor_id_raw', 'Vendor ID'), ('brand_raw', 'Model name')]:
			value = _get_field(False, output, None, None, label)
			if value:
				info[key] = value

		# Integer fields, only accepted when they parse cleanly
		for key, label in [('family', 'CPU family'), ('stepping', 'Stepping'), ('model', 'Model')]:
			value = _get_field(False, output, None, None, label)
			if value and value.isdigit():
				info[key] = int(value)

		# Cache sizes, converted to friendly byte strings
		for key, label in [
				('l1_data_cache_size', 'L1d cache'),
				('l1_instruction_cache_size', 'L1i cache'),
				('l2_cache_size', 'L2 cache'),
				('l3_cache_size', 'L3 cache')]:
			value = _get_field(False, output, None, None, label)
			if value:
				info[key] = _to_friendly_bytes(value)

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		# Drop empty/falsy entries
		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Without a dmesg binary there is nothing to parse
	if not DataSource.has_dmesg():
		return {}

	returncode, output = DataSource.dmesg_a()
	if returncode != 0 or output == None:
		return {}

	return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.

	Bit positions follow the LoPAPR spec (see the URL comment above this
	function, page 767).
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Filter out invalid characters from output
		# (keep only hex digits; 'abcfed' is the same set as 'abcdef')
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Get data converted to Uint32 chunks
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),

			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),

			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		# Drop empty/falsy entries
		info = {k: v for k, v in info.items() if v}

		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Without the boot log file there is nothing to parse
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	returncode, output = DataSource.cat_var_run_dmesg_boot()
	if returncode != 0 or output == None:
		return {}

	return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	'''
	try:
		if not DataSource.has_sysctl():
			return {}

		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Scalar fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Combine every feature list into one sorted flag list
		flags = []
		for feature_field in ['machdep.cpu.features', 'machdep.cpu.leaf7_features', 'machdep.cpu.extfeatures']:
			flags.extend(_get_field(False, output, None, '', feature_field).lower().split())
		flags.sort()

		# The brand string advertises the frequency; hw.cpufrequency is actual
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/falsy entries
		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	# Merge both sysinfo formats, with v2 values winning on conflicts
	combined = _get_cpu_info_from_sysinfo_v1()
	combined.update(_get_cpu_info_from_sysinfo_v2())
	return combined
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Parses the newer (v2) sysinfo output format by exact-string slicing;
	any format mismatch raises and is swallowed into {}.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		# Stepping/model/family come from the Signature line,
		# e.g. "... family 6, model 158, stepping 10 ..."
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags
		def get_subsection_flags(output):
			# Collects the space-separated flag names from an indented
			# continuation block (stops at the first unindented line)
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith('                ') and not line.startswith('        '): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)

			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# (frequency is taken from the first non-empty line: "... running at NNNMHz")
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty/falsy entries
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Break the list into key values pairs
		# wmic /format:list prints KEY=VALUE lines; a value containing a
		# second '=' would make the dict unpack raise, which the blanket
		# except below maps to {}.
		value = output.split("\n")
		value = [s.rstrip().split('=') for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz
		# CurrentClockSpeed is reported in MHz, hence the fixed scale of 6
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Get family, model, and stepping
		# Parsed out of a description like "x86 Family 6 Model 42 Stepping 7"
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Drop empty/zero fields so callers only merge discovered values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		# (the registry "~Mhz" value is in MHz, hence the fixed scale of 6)
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()

		def is_set(bit):
			# The FeatureSet bitmask is tested MSB-first
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 6),

		'flags' : flags
		}

		# Drop empty/zero fields so callers only merge discovered values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}

		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}

		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}

		# Various fields, parsed from kstat's tab-prefixed "name value" lines
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

		# Flags: isainfo -vb prints the instruction set list on its last line
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# clock_MHz is in MHz (scale 6)
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)

		# Convert from GHz/MHz string to Hz
		# current_clock_Hz is already in Hz (scale 0 below)
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Drop empty/zero fields so callers only merge discovered values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns family, model, and stepping parsed from platform.uname().
	Returns {} when the fields are absent or parsing fails.
	'''
	try:
		# Only the text before the first comma holds the CPU description,
		# e.g. "Intel64 Family 6 Model 42 Stepping 7, GenuineIntel"
		description = DataSource.uname_string_raw.split(',')[0]
		tokens = description.split(' ')

		def value_after(label):
			# Return the int following the label token, or None if absent
			if label in tokens and tokens.index(label) < len(tokens) - 1:
				return int(tokens[tokens.index(label) + 1])
			return None

		info = {
			'family' : value_after('Family'),
			'model' : value_after('Model'),
			'stepping' : value_after('Stepping')
		}
		# Drop missing fields so callers only merge discovered values
		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Describe the running interpreter, e.g. "3.8.0.final.0 (64 bit)"
	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	# Seed the result with fields every platform can provide; each probe
	# below only fills in fields that are still missing (see _copy_new_fields)
	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}

	# Try the Windows wmic
	_copy_new_fields(info, _get_cpu_info_from_wmic())

	# Try the Windows registry
	_copy_new_fields(info, _get_cpu_info_from_registry())

	# Try /proc/cpuinfo
	_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())

	# Try cpufreq-info
	_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())

	# Try LSCPU
	_copy_new_fields(info, _get_cpu_info_from_lscpu())

	# Try sysctl
	_copy_new_fields(info, _get_cpu_info_from_sysctl())

	# Try kstat
	_copy_new_fields(info, _get_cpu_info_from_kstat())

	# Try dmesg
	_copy_new_fields(info, _get_cpu_info_from_dmesg())

	# Try /var/run/dmesg.boot
	_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())

	# Try lsprop ibm,pa-features
	_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())

	# Try sysinfo
	_copy_new_fields(info, _get_cpu_info_from_sysinfo())

	# Try querying the CPU cpuid register
	_copy_new_fields(info, _get_cpu_info_from_cpuid())

	# Try platform.uname
	_copy_new_fields(info, _get_cpu_info_from_platform_uname())

	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''

	import json

	output = None

	# If running under pyinstaller, run normally
	if getattr(sys, 'frozen', False):
		info = _get_cpu_info_internal()
		output = json.dumps(info)
		output = "{0}".format(output)
	# if not running under pyinstaller, run in another process.
	# This is done because multiprocesing has a design flaw that
	# causes non main programs to run multiple times on Windows.
	else:
		from subprocess import Popen, PIPE

		# Re-run this module as a script with --json and capture its stdout
		command = [sys.executable, __file__, '--json']
		p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
		output = p1.communicate()[0]

		# An empty JSON object signals failure to the caller
		if p1.returncode != 0:
			return "{}"

		if not IS_PY2:
			output = output.decode(encoding='UTF-8')

	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''

	import json

	# Decode the JSON produced by the worker process; the object_hook
	# converts unicode back to byte strings when running on Python 2.
	return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
	'''Command-line entry point: print the CPU info as text or JSON.'''
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()

	# Bail out early on unsupported host architectures
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# Human-readable report, one field per line; missing fields print blank
		print('Python Version: {0}'.format(info.get('python_version', '')))
		print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
		print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
		print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
		print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
		print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
		print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
		print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
		print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
		print('Arch: {0}'.format(info.get('arch', '')))
		print('Bits: {0}'.format(info.get('bits', '')))
		print('Count: {0}'.format(info.get('count', '')))
		print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
		print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
		print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
		print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
		print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
		print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
		print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
		print('Stepping: {0}'.format(info.get('stepping', '')))
		print('Model: {0}'.format(info.get('model', '')))
		print('Family: {0}'.format(info.get('family', '')))
		print('Processor Type: {0}'.format(info.get('processor_type', '')))
		print('Extended Model: {0}'.format(info.get('extended_model', '')))
		print('Extended Family: {0}'.format(info.get('extended_family', '')))
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# Run as a script: print the CPU report. When imported as a module, just
# verify that the host architecture is one py-cpuinfo supports.
if __name__ == '__main__':
	main()
else:
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_sysinfo_v2
|
python
|
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith(' ') and not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
lines = [n for n in output.split('\n') if n]
raw_hz = lines[0].split('running at ')[1].strip().lower()
hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
hz_advertised = _to_decimal_string(hz_advertised)
hz_actual = hz_advertised
scale = 0
if raw_hz.endswith('mhz'):
scale = 6
elif raw_hz.endswith('ghz'):
scale = 9
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
|
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1868-L1941
| null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Version of py-cpuinfo, exported both as a tuple and a dotted string
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])

import os, sys
import platform
import multiprocessing
import ctypes

# winreg is Windows-only and is named _winreg on Python 2. On other
# platforms neither import succeeds and the name stays undefined, which is
# fine because the registry code paths only run on Windows.
try:
	import _winreg as winreg
except ImportError as err:
	try:
		import winreg
	except ImportError as err:
		pass

# True when running under Python 2.x
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''Central access point for every external source of CPU information.

	Class attributes hold cheap platform facts gathered at import time;
	the static methods probe for OS tools and shell out to them. Keeping
	all I/O here lets the parsers above it stay pure and testable.
	'''

	# Platform facts captured once at import time
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	uname_string_raw = platform.uname()[5]
	can_cpuid = True

	# --- Tool availability probes -------------------------------------
	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')

	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0

	@staticmethod
	def has_var_run_dmesg_boot():
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')

	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0

	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0

	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0

	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0

	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0

	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0

	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0

	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0

	@staticmethod
	def has_wmic():
		# wmic is detected by actually running it, not by PATH lookup
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0

	# --- Tool invocations; each returns (returncode, stdout) ----------
	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])

	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])

	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])

	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])

	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])

	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])

	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])

	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])

	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])

	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])

	@staticmethod
	def ibm_pa_features():
		# Returns None (no tuple) when no ibm,pa-features node exists
		import glob

		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])

	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

	# --- Windows registry reads ---------------------------------------
	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()

	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw

	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw

	@staticmethod
	def winreg_hz_actual():
		# The "~Mhz" registry value is the CPU speed in MHz
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual

	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''
	Run *command*, optionally piping its stdout into *pipe_command*, and
	return a (returncode, stdout) tuple. stdout is decoded as UTF-8 on
	Python 3. The returncode/output come from the last process in the pipe.
	'''
	from subprocess import Popen, PIPE

	p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
		# Close our handle so p1 receives SIGPIPE if p2 exits early
		p1.stdout.close()
		final = p2
	else:
		final = p1

	output = final.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return final.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	'''Raise an Exception when the host CPU architecture is unsupported.'''
	supported = ('X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64')
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''Recursively convert unicode strings to UTF-8 byte strings on Python 2.

	Lists and dicts are walked; on Python 3 values pass through unchanged.
	'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(item) for item in input]
	if isinstance(input, dict):
		return dict((_utf_to_str(key), _utf_to_str(value))
					for key, value in input.items())
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''Look up a field and optionally coerce it with *convert_to*.

	Falls back to *default_value* when the field is missing or the
	conversion fails.
	'''
	raw_value = _get_field_actual(cant_be_number, raw_string, field_names)

	# No match at all: hand back the default
	if raw_value is None:
		return default_value

	if convert_to:
		try:
			return convert_to(raw_value)
		except:
			return default_value

	return raw_value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''Parse a human string like "2.80GHz" into full Hz parts.

	Returns the (whole, fraction) pair from _hz_short_to_full, or (0, 0)
	on failure.
	'''
	try:
		text = hz_string.strip().lower()

		# Pick the scale from the unit suffix; unknown units leave it
		# as None, which makes _hz_short_to_full fail over to (0, 0).
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0
		else:
			scale = None

		number = ''.join(ch for ch in text if ch.isdigit() or ch == '.').strip()
		if '.' not in number:
			number += '.0'

		return _hz_short_to_full(number, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''Format a short decimal Hz string plus scale as e.g. "2.8 GHz".

	Returns '0.0000 Hz' when the input cannot be parsed.
	'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)

		# Get the location of the dot, and remove said dot
		# (the dot index equals the number of integer digits, which
		# determines the friendly unit below)
		dot_index = result.index('.')
		result = result.replace('.', '')

		# Get the Hz symbol and scale
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3

		# Get the Hz with the dot at the new scaled point
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])

		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		result = result.rstrip('0')
		return result
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''Extract the advertised speed from a CPU brand string.

	Returns a (decimal_string, scale) pair where scale is the power of
	ten implied by the unit (6 for MHz, 9 for GHz). Returns ('0.0', 0)
	when no Hz figure is present.
	'''
	# Just return 0 if the processor brand does not have the Hz
	if not 'hz' in cpu_string.lower():
		return ('0.0', 0)

	hz = cpu_string.lower()

	# Scale comes from the unit suffix of the whole lowered string
	scale = 0
	if hz.endswith('mhz'):
		scale = 6
	elif hz.endswith('ghz'):
		scale = 9

	# The speed is either after an '@' ("... CPU @ 2.80GHz") or is the
	# last whitespace-separated token.
	if '@' in hz:
		hz = hz.split('@')[1]
	else:
		# NOTE(review): this raises IndexError for a single-token string
		# such as "2.8ghz"; callers appear to wrap this in try/except,
		# but confirm before relying on it.
		hz = hz.rsplit(None, 1)[1]

	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	hz = _to_decimal_string(hz)

	return (hz, scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''Parse a dmesg-style CPU line with key fields in trailing brackets.

	Returns a (hz, scale, brand, vendor_id, stepping, model, family)
	tuple; fields that cannot be found come back as None (hz as '0.0').
	'''
	import re

	# Find all the strings inside brackets ()
	starts = [m.start() for m in re.finditer('\(', cpu_string)]
	ends = [m.start() for m in re.finditer('\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]

	# Find all the fields, e.g. ("origin "GenuineIntel", family 6, model 42")
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# NOTE(review): lstrip('0x') strips the character set
					# {'0','x'}, not the "0x" prefix -- extra leading zeros
					# are also removed, which does not change the hex value,
					# but a bare "0" becomes '' and int('', 16) raises.
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)

	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True

	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)

	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					# Re-shape as "CPU @ <speed>" so the brand parser accepts it
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break

	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''Extract CPU facts from dmesg/boot-log text.

	Scans for "CPU:", "CPU0:" and "CPU1:" lines, picks the one yielding
	the most parsed fields, then supplements it with the " Origin=" and
	" Features=" lines when present. Returns {} on any failure.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]

		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string

		# If no CPU string was found, return {}
		if not best_string:
			return {}

		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

		# Origin
		# An " Origin=" line carries key=value fields that override what
		# the brand-string parse found
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]

				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)

		# Features
		# Flag names are printed comma-separated between < and >
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])

		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

		# Drop empty/zero fields so callers only merge discovered values
		return {k: v for k, v in info.items() if v}
	except:
		#raise
		pass

	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''
	Returns True if SE Linux is in enforcing mode and does not allow
	executable heap or memory, based on the output of sestatus.
	'''
	# Bail out if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False

	# Bail out if running sestatus failed
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False

	lines = [n.strip().lower() for n in output.splitlines()]

	# An explicit mode line settles the question immediately
	for line in lines:
		if line.startswith("current mode:"):
			return line.endswith("enforcing")

	# Otherwise figure out if we are allowed to execute heap and memory
	exec_heap_allowed = any(
		line.startswith("allow_execheap") and line.endswith("on") for line in lines)
	exec_memory_allowed = any(
		line.startswith("allow_execmem") and line.endswith("on") for line in lines)

	return (not exec_heap_allowed or not exec_memory_allowed)
class CPUID(object):
	'''
	Queries the x86 CPUID instruction (and RDTSC) by generating raw machine
	code at runtime, marking it executable, and calling it through ctypes.

	Warning: running this on a non-x86 CPU, or while SE Linux is enforcing,
	can crash the process. Callers are expected to invoke it from a separate
	process (see _actual_get_cpu_info_from_cpuid).
	'''
	def __init__(self):
		# Cached Windows process handle, filled in lazily by _asm_func
		self.prochandle = None

		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()

	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Copies *byte_code* (a list of bytes objects) into a newly allocated
		executable memory segment and returns (callable, address).
		The caller is responsible for freeing the segment (see _run_asm).
		'''
		# NOTE(review): the mutable default [] is never mutated here, so it is
		# harmless, but callers should still pass byte_code explicitly.
		byte_code = bytes.join(b'', byte_code)
		address = None

		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000

			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")

			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")

			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")

			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())

			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")

			# Mark the memory segment as writeable only
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")

			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")

			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")

		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address

	def _run_asm(self, *byte_code):
		'''
		Runs *byte_code* as a no-argument function returning a uint32,
		then frees the executable memory segment it was loaded into.
		'''
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)

		# Call the byte code like a function
		retval = func()

		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))

		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")

			ctypes.pythonapi.free(ctypes.c_void_p(address))

		return retval

	# FIXME: We should not have to use different instructions to
	# set eax to 0 or 1, on 32bit and 64bit machines.
	def _zero_eax(self):
		# Machine code that clears EAX before a CPUID call
		return (
			b"\x31\xC0"         # xor eax,eax
		)

	def _zero_ecx(self):
		# Machine code that clears ECX (the CPUID sub-leaf selector)
		return (
			b"\x31\xC9"         # xor ecx,ecx
		)

	def _one_eax(self):
		# Machine code that sets EAX to 1 (CPUID leaf 1)
		return (
			b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
		)

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		'''
		Returns the 12-character vendor ID string from CPUID leaf 0
		(e.g. "GenuineIntel" or "AuthenticAMD").
		'''
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2"         # cpuid
			b"\x89\xD8"         # mov ax,bx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# Each 4bits is a ascii letter in the name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)

		return vendor_id

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		'''
		Returns a dict of stepping/model/family fields decoded from the
		EAX register of CPUID leaf 1.
		'''
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\xC3"             # ret
		)

		# Get the CPU info
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits

		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		'''
		Returns the highest extended CPUID function supported
		(CPUID leaf 0x80000000).
		'''
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2"             # cpuid
			b"\xC3"                 # ret
		)

		return max_extension_support

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		'''
		Returns a sorted list of CPU feature flag names, gathered from
		CPUID leaf 1, leaf 7 (if supported), and extended leaf 0x80000001
		(if supported).
		'''
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),

			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),

				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),

				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		flags.sort()
		return flags

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		'''
		Returns the human-readable processor brand string from CPUID leaves
		0x80000002-0x80000004, or "" if not supported.
		'''
		processor_brand = ""

		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC0"   # mov ax,ax
					b"\xC3"       # ret
				)

				# EBX
				ebx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD8"   # mov ax,bx
					b"\xC3"       # ret
				)

				# ECX
				ecx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC8"   # mov ax,cx
					b"\xC3"       # ret
				)

				# EDX
				edx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD0"   # mov ax,dx
					b"\xC3"       # ret
				)

				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)

		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()

		return processor_brand

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
	def get_cache(self, max_extension_support):
		'''
		Returns a dict of L2 cache size/line-size/associativity from CPUID
		leaf 0x80000006, or {} if not supported.
		'''
		cache_info = {}

		# Just return if the cache feature is not supported
		if max_extension_support < 0x80000006:
			return cache_info

		# ECX
		ecx = self._run_asm(
			b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
			b"\x0f\xa2"              # cpuid
			b"\x89\xC8"              # mov ax,cx
			b"\xC3"                   # ret
		)

		cache_info = {
			'size_kb' : ecx & 0xFF,
			'line_size_b' : (ecx >> 12) & 0xF,
			'associativity' : (ecx >> 16) & 0xFFFF
		}

		return cache_info

	def get_ticks(self):
		'''
		Returns the current 64-bit timestamp counter value (RDTSC),
		or None if the machine word size is unrecognized.
		'''
		retval = None

		if DataSource.bits == '32bit':
			# Works on x86_32
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)

			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)

			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)

			retval = get_ticks_x86_64()

		return retval

	def get_raw_hz(self):
		'''
		Estimates the CPU frequency in Hz by counting timestamp ticks
		over a one second sleep.
		'''
		import time

		start = self.get_ticks()

		time.sleep(1)

		end = self.get_ticks()

		ticks = (end - start)

		return ticks
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.
	'''

	# Pipe all output to nothing, so a crash in the child stays silent
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return

	# Return none if SE Linux is in enforcing mode
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return

	# Get the cpu info from the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()

	processor_brand = cpuid.get_processor_brand(max_extension_support)

	# Get the Hz and scale
	hz_actual = cpuid.get_raw_hz()
	hz_actual = _to_decimal_string(hz_actual)

	# Get the Hz and scale
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

	# NOTE: the dict literal below is evaluated before `info` is rebound,
	# so the stepping/model/family lookups still read cpuid.get_info()'s dict.
	info = {
		'vendor_id_raw' : cpuid.get_vendor_id(),
		'hardware_raw' : '',
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),

		'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
		'l2_cache_line_size' : cache_info['line_size_b'],
		'l2_cache_associativity' : hex(cache_info['associativity']),

		'stepping' : info['stepping'],
		'model' : info['model'],
		'family' : info['family'],
		'processor_type' : info['processor_type'],
		'extended_model' : info['extended_model'],
		'extended_family' : info['extended_family'],
		'flags' : cpuid.get_flags(max_extension_support)
	}

	# Drop empty fields, then hand the result back base64-encoded
	info = {k: v for k, v in info.items() if v}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''

	from multiprocessing import Process, Queue

	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		return {}

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		return {}

	try:
		# Start running the function in a subprocess so a CPUID crash
		# cannot take down this interpreter
		queue = Queue()
		p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		p.start()

		# Wait for the process to end, while it is still alive
		# NOTE(review): join(0) in a loop polls rather than blocks — presumably
		# deliberate to avoid hanging forever; confirm before changing.
		while p.is_alive():
			p.join(0)

		# Return {} if it failed
		if p.exitcode != 0:
			return {}

		# Return the result, only if there is something to read
		if not queue.empty():
			output = queue.get()
			return _b64_to_obj(output)
	except:
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
		'hardware_raw' : hardware,
		'brand_raw' : processor_brand,

		'l3_cache_size' : _to_friendly_bytes(cache_size),
		'flags' : flags,
		'vendor_id_raw' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6  # /proc/cpuinfo reports MHz
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		# NOTE(review): the guard uses `scale` while the stored value uses 6;
		# presumably both refer to MHz from /proc/cpuinfo — confirm before changing.
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		hz_brand, scale = '0.0', 0

		# Just return {} if there is no cpufreq-info
		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Isolate the frequency token, e.g. "2.40 GHz" or "1600 MHz"
		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		assert(i != -1)
		hz_brand = hz_brand[0 : i+2].strip().lower()

		if hz_brand.endswith('mhz'):
			scale = 6
		elif hz_brand.endswith('ghz'):
			scale = 9
		# NOTE: rstrip strips *character sets*, not suffixes; this works here
		# because the numeric part never ends in the letters m/h/z/g.
		hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
		hz_brand = _to_decimal_string(hz_brand)

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_advertised' : _hz_short_to_full(hz_brand, scale),
			'hz_actual' : _hz_short_to_full(hz_brand, scale),
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''
	try:
		# Just return {} if there is no lscpu
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# lscpu reports MHz; prefer the max frequency when present
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			scale = 6
			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		vendor_id = _get_field(False, output, None, None, 'Vendor ID')
		if vendor_id:
			info['vendor_id_raw'] = vendor_id

		brand = _get_field(False, output, None, None, 'Model name')
		if brand:
			info['brand_raw'] = brand

		family = _get_field(False, output, None, None, 'CPU family')
		if family and family.isdigit():
			info['family'] = int(family)

		stepping = _get_field(False, output, None, None, 'Stepping')
		if stepping and stepping.isdigit():
			info['stepping'] = int(stepping)

		model = _get_field(False, output, None, None, 'Model')
		if model and model.isdigit():
			info['model'] = int(model)

		l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
		if l1_data_cache_size:
			info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)

		l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
		if l1_instruction_cache_size:
			info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)

		l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
		if l2_cache_size:
			info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)

		l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
		if l3_cache_size:
			info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''

	# Nothing to do if the dmesg command is unavailable
	if not DataSource.has_dmesg():
		return {}

	# Run dmesg and bail out on failure
	returncode, output = DataSource.dmesg_a()
	failed = output == None or returncode != 0

	return {} if failed else _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Filter out invalid characters from output, keeping only hex digits
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Get data converted to Uint32 chunks
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),

			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),

			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		# Drop empty fields
		info = {k: v for k, v in info.items() if v}

		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''

	# Nothing to do if /var/run/dmesg.boot does not exist
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	# Read the boot log and bail out on failure
	returncode, output = DataSource.cat_var_run_dmesg_boot()
	failed = output == None or returncode != 0

	return {} if failed else _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	'''
	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Flags are spread over three sysctl keys; merge them all
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# hw.cpufrequency is already in Hz, hence scale 0 below
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''

	# Merge both sysinfo output formats; v2 fields overwrite v1 fields
	info = {}
	for getter in (_get_cpu_info_from_sysinfo_v1, _get_cpu_info_from_sysinfo_v2):
		info.update(getter())
	return info
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo (Haiku, v1 output format).
	Returns {} if sysinfo is not found or its output cannot be parsed.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		# vendor id and cache size are not present in this output format,
		# so they stay empty and are dropped from the result below.
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		# A missing marker makes split() raise IndexError, which the outer
		# try turns into an empty result.
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags: lines indented with two tabs list the feature names
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# sysinfo does not report the running frequency, so the advertised
		# value from the brand string is reused for both.
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop any fields that came back empty
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Break the list into key values pairs
		# wmic /format:list emits one "Key=Value" pair per line
		value = output.split("\n")
		value = [s.rstrip().split('=') for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz from the brand string ("... @ 2.80GHz")
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz
		# CurrentClockSpeed is reported in MHz, hence the fixed scale of 6
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes
		# wmic reports cache sizes as bare numbers of kilobytes
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Get family, model, and stepping
		# Parsed from a string like "Intel64 Family 6 Model 42 Stepping 7"
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Drop any fields that came back empty
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows or if any registry read fails.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz
		# The "~Mhz" registry value is in MHz, hence the scale of 6 below
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz from the brand string
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()

		def is_set(bit):
			# Test one bit of the 32 bit FeatureSet mask; bit 0 is the
			# most significant bit.
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 6),

			'flags' : flags
		}

		# Drop any fields that came back empty
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat (Solaris).
	Returns {} if isainfo or kstat are not found.
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}

		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}

		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}

		# Various fields
		# kstat output is tab indented "name value" pairs; a missing marker
		# makes split() raise IndexError, caught by the outer try.
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

		# Flags: the last line of `isainfo -vb` lists the instruction sets
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# clock_MHz is in MHz, hence scale 6
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)

		# Convert from GHz/MHz string to Hz
		# current_clock_Hz is already in Hz, hence scale 0 below
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop any fields that came back empty
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns family, model, and stepping parsed out of platform.uname(),
	which Windows formats like "Intel64 Family 6 Model 42 Stepping 7".
	Returns {} when nothing can be parsed.
	'''
	try:
		# Only the part before the first comma holds the CPU description
		tokens = DataSource.uname_string_raw.split(',')[0].split(' ')

		def value_after(label):
			# The number, if any, directly follows its label token
			if label in tokens and tokens.index(label) < len(tokens) - 1:
				return int(tokens[tokens.index(label) + 1])
			return None

		info = {
			'family' : value_after('Family'),
			'model' : value_after('Model'),
			'stepping' : value_after('Stepping')
		}
		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}

	# Query each data source in order of preference. Earlier sources win,
	# because _copy_new_fields only fills in fields that are still missing.
	sources = [
		_get_cpu_info_from_wmic,                    # Windows wmic
		_get_cpu_info_from_registry,                # Windows registry
		_get_cpu_info_from_proc_cpuinfo,            # /proc/cpuinfo
		_get_cpu_info_from_cpufreq_info,            # cpufreq-info
		_get_cpu_info_from_lscpu,                   # lscpu
		_get_cpu_info_from_sysctl,                  # sysctl
		_get_cpu_info_from_kstat,                   # kstat
		_get_cpu_info_from_dmesg,                   # dmesg
		_get_cpu_info_from_cat_var_run_dmesg_boot,  # /var/run/dmesg.boot
		_get_cpu_info_from_ibm_pa_features,         # lsprop ibm,pa-features
		_get_cpu_info_from_sysinfo,                 # sysinfo
		_get_cpu_info_from_cpuid,                   # the CPU cpuid register
		_get_cpu_info_from_platform_uname,          # platform.uname
	]
	for source in sources:
		_copy_new_fields(info, source())

	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''

	import json

	# When frozen by pyinstaller, gather the info in this process
	if getattr(sys, 'frozen', False):
		return "{0}".format(json.dumps(_get_cpu_info_internal()))

	# Otherwise gather it in a child process. This is done because
	# multiprocessing has a design flaw that causes non main programs to
	# run multiple times on Windows, so a plain subprocess is used instead.
	from subprocess import Popen, PIPE
	child = Popen([sys.executable, __file__, '--json'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
	output = child.communicate()[0]
	if child.returncode != 0:
		return "{}"

	if not IS_PY2:
		output = output.decode(encoding='UTF-8')

	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''

	import json

	# Parse the JSON, converting unicode strings to str on Python 2
	return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
	'''Command line entry point: print the CPU info as text or JSON.'''
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()

	# Bail out early on unsupported architectures
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# Print every field as "Label: value" in a fixed order
		fields = [
			('Python Version', 'python_version'),
			('Cpuinfo Version', 'cpuinfo_version_string'),
			('Vendor ID Raw', 'vendor_id_raw'),
			('Hardware Raw', 'hardware_raw'),
			('Brand Raw', 'brand_raw'),
			('Hz Advertised Friendly', 'hz_advertised_friendly'),
			('Hz Actual Friendly', 'hz_actual_friendly'),
			('Hz Advertised', 'hz_advertised'),
			('Hz Actual', 'hz_actual'),
			('Arch', 'arch'),
			('Bits', 'bits'),
			('Count', 'count'),
			('Arch String Raw', 'arch_string_raw'),
			('L1 Data Cache Size', 'l1_data_cache_size'),
			('L1 Instruction Cache Size', 'l1_instruction_cache_size'),
			('L2 Cache Size', 'l2_cache_size'),
			('L2 Cache Line Size', 'l2_cache_line_size'),
			('L2 Cache Associativity', 'l2_cache_associativity'),
			('L3 Cache Size', 'l3_cache_size'),
			('Stepping', 'stepping'),
			('Model', 'model'),
			('Family', 'family'),
			('Processor Type', 'processor_type'),
			('Extended Model', 'extended_model'),
			('Extended Family', 'extended_family'),
		]
		for label, key in fields:
			print('{0}: {1}'.format(label, info.get(key, '')))
		# Flags are a list, so join them for display
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# Run as a script: print the CPU info. When imported as a module, only
# verify that this architecture is supported.
if __name__ == '__main__':
	main()
else:
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_wmic
|
python
|
def _get_cpu_info_from_wmic():
'''
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
'''
try:
# Just return {} if not Windows or there is no wmic
if not DataSource.is_windows or not DataSource.has_wmic():
return {}
returncode, output = DataSource.wmic_cpu()
if output == None or returncode != 0:
return {}
# Break the list into key values pairs
value = output.split("\n")
value = [s.rstrip().split('=') for s in value if '=' in s]
value = {k: v for k, v in value if v}
# Get the advertised MHz
processor_brand = value.get('Name')
hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)
# Get the actual MHz
hz_actual = value.get('CurrentClockSpeed')
scale_actual = 6
if hz_actual:
hz_actual = _to_decimal_string(hz_actual)
# Get cache sizes
l2_cache_size = value.get('L2CacheSize')
if l2_cache_size:
l2_cache_size = l2_cache_size + ' KB'
l3_cache_size = value.get('L3CacheSize')
if l3_cache_size:
l3_cache_size = l3_cache_size + ' KB'
# Get family, model, and stepping
family, model, stepping = '', '', ''
description = value.get('Description') or value.get('Caption')
entries = description.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'vendor_id_raw' : value.get('Manufacturer'),
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
'l2_cache_size' : l2_cache_size,
'l3_cache_size' : l3_cache_size,
'stepping' : stepping,
'model' : model,
'family' : family,
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
|
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1943-L2020
|
[
"def has_wmic():\n\treturncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])\n\treturn returncode == 0 and len(output) > 0\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Version of py-cpuinfo itself, as a tuple and as a dotted string
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])

import os, sys
import platform
import multiprocessing
import ctypes

# winreg is only available on Windows; it was named _winreg on Python 2
try:
	import _winreg as winreg
except ImportError as err:
	try:
		import winreg
	except ImportError as err:
		pass

# True when running under Python 2.x
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''
	Single access point for all platform specific data: static facts
	sampled once at import time, plus static methods that wrap external
	programs and the Windows registry so they can be stubbed out in tests.
	'''

	# Sampled once when the module is imported
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	uname_string_raw = platform.uname()[5]
	can_cpuid = True

	# --- Availability checks for each data source ---
	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')
	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0
	@staticmethod
	def has_var_run_dmesg_boot():
		# Only meaningful on Linux systems that keep a boot-time dmesg dump
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0
	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0
	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0
	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0
	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0
	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0
	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0
	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0
	@staticmethod
	def has_wmic():
		# wmic availability is probed by actually running it
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0

	# --- Wrappers around the external programs; each returns
	# --- (returncode, stdout) via _run_and_get_stdout ---
	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])
	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])
	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])
	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])
	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])
	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])
	@staticmethod
	def ibm_pa_features():
		import glob

		# Returns None when no device-tree entry exists
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])
	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

	# --- Windows registry readers (winreg import is guarded at module top) ---
	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()
	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw
	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw
	@staticmethod
	def winreg_hz_actual():
		# "~Mhz" holds the clock speed in MHz; normalized to a decimal string
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual
	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''Run command and return (returncode, stdout).

	When pipe_command is given, command's stdout is piped into it and the
	result of pipe_command is returned instead. stdout is decoded as UTF-8
	on Python 3.
	'''
	from subprocess import Popen, PIPE

	p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		# Chain the second process onto the first, shell-pipe style
		p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
		# Allow p1 to receive SIGPIPE if p2 exits early
		p1.stdout.close()
		last = p2
	else:
		last = p1

	output = last.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	'''Raise an Exception when running on an unsupported CPU architecture.'''
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	supported = ('X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64')
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''Recursively convert unicode strings to UTF-8 byte strings on Python 2.

	Lists and dicts are rebuilt with converted contents; every other value
	is returned unchanged. On Python 3 this only rebuilds containers.
	'''
	if isinstance(input, list):
		return [_utf_to_str(entry) for entry in input]
	if isinstance(input, dict):
		return {_utf_to_str(key): _utf_to_str(value)
			for key, value in input.items()}
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''Like _get_field_actual, but optionally converts the matched value
	with convert_to, falling back to default_value when the conversion
	fails or when nothing matched.
	'''
	value = _get_field_actual(cant_be_number, raw_string, field_names)

	# Apply the conversion, falling back to the default when it fails
	if value and convert_to:
		try:
			value = convert_to(value)
		except:
			value = default_value

	# No match at all means the default
	return default_value if value is None else value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''Convert a friendly string like "2.8 GHz" into a full
	(whole, fraction) Hz pair. Returns (0, 0) on failure.
	'''
	try:
		text = hz_string.strip().lower()

		# Work out the power-of-ten multiplier from the unit suffix
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0
		else:
			scale = None

		# Strip everything but the number itself
		number = "".join(ch for ch in text if ch.isdigit() or ch == '.').strip()
		if '.' not in number:
			number += '.0'

		# An unknown suffix (scale None) makes this return (0, 0)
		return _hz_short_to_full(number, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''Convert a short Hz value such as ('2.8', 9) into a human readable
	string like "2.8000 GHz". Returns '0.0000 Hz' on any failure.
	'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)

		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')

		# Get the Hz symbol and scale
		# Chosen by how many digits sit left of the decimal point
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3

		# Get the Hz with the dot at the new scaled point
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])

		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		result = result.rstrip('0')
		return result
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''Extract (hz, scale) from a brand string such as
	"Intel(R) Core(TM) i5 CPU @ 2.80GHz".

	hz is a decimal string and scale is the power-of-ten multiplier
	(9 for GHz, 6 for MHz). Returns ('0.0', 0) when no frequency appears.
	'''
	# Just return 0 if the processor brand does not have the Hz
	if not 'hz' in cpu_string.lower():
		return ('0.0', 0)

	hz = cpu_string.lower()
	scale = 0

	if hz.endswith('mhz'):
		scale = 6
	elif hz.endswith('ghz'):
		scale = 9
	# The number is either after an '@' or is the last whitespace
	# separated token of the string.
	if '@' in hz:
		hz = hz.split('@')[1]
	else:
		hz = hz.rsplit(None, 1)[1]

	# NOTE: rstrip removes a *set of characters*, not a literal suffix;
	# together the two calls strip any trailing mix of m/g/h/z.
	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	hz = _to_decimal_string(hz)

	return (hz, scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''Parse a dmesg style CPU line into its component fields.

	Returns (hz, scale, brand, vendor_id, stepping, model, family), where
	unfound fields are None (hz/scale default to '0.0'/0).
	'''
	import re

	# Find all the strings inside brackets ()
	# NOTE: raw strings; '\(' in a normal string is a deprecated escape
	starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
	ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]

	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# int(x, 16) accepts an optional "0x" prefix. The old
					# value.lstrip('0x') stripped *characters*, so "0x0"
					# became '' and raised ValueError.
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)

	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True

	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)

	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break

	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Parse CPU details (brand, Hz, vendor, stepping/model/family, flags)
	out of dmesg style kernel log output. Returns a dict containing only
	the fields that were found, or {} when nothing usable was parsed or
	any error occurred.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]

		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string

		# If no CPU string was found, return {}
		if not best_string:
			return {}

		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

		# Origin line, e.g. '  Origin="GenuineIntel"  Id=0x306a9  Family=0x6 ...'
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]

				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# int() accepts an optional "0x" prefix itself when the
					# base is 16. The previous value.lstrip('0x') stripped a
					# *character set*, which turned "0x0" into "" and made
					# int() raise, discarding the whole parse via the outer
					# try/except.
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)

		# Features lines, e.g. '  Features=0x...<FPU,VME,DE,...>'
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])

		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

		# Drop empty fields
		return {k: v for k, v in info.items() if v}
	except Exception:
		#raise
		pass

	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''
	Return True when SE Linux appears to be enforcing restrictions that
	would prevent making heap/memory executable (which breaks the CPUID
	byte-code trick). Returns False when sestatus is unavailable or fails.
	'''
	# Without the sestatus tool, assume SE Linux is not enforcing
	if not DataSource.has_sestatus():
		return False

	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False

	normalized = [l.strip().lower() for l in output.splitlines()]

	# An explicit "current mode:" line settles the question outright
	for line in normalized:
		if line.startswith("current mode:"):
			return line.endswith("enforcing")

	# Otherwise, we only care whether exec-heap and exec-memory are allowed
	can_exec_heap = any(l.startswith("allow_execheap") and l.endswith("on") for l in normalized)
	can_exec_memory = any(l.startswith("allow_execmem") and l.endswith("on") for l in normalized)
	return not (can_exec_heap and can_exec_memory)
class CPUID(object):
	'''
	Queries the x86 CPUID instruction by generating small machine-code
	stubs at runtime, copying them into executable memory via ctypes, and
	calling them as C functions.

	NOTE(review): only meaningful on X86_32/X86_64 — callers are expected
	to check the architecture first (see _actual_get_cpu_info_from_cpuid,
	which also runs this in a throwaway process since bad byte code can
	crash the interpreter).
	'''

	def __init__(self):
		# Lazily-created Windows process handle, cached by _asm_func for
		# FlushInstructionCache calls
		self.prochandle = None

		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()

	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Copy the machine code chunks in byte_code into a newly allocated
		executable memory segment and return (callable, address), where the
		callable is a ctypes function with the given restype/argtypes.
		The caller is responsible for freeing the memory at address
		(see _run_asm).

		NOTE: the mutable default byte_code=[] is safe here because the
		list is only joined, never mutated.
		'''
		byte_code = bytes.join(b'', byte_code)
		address = None

		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000

			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")

			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")

			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")

			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())

			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")

			# Mark the memory segment as writeable only
			# (skipped under SE Linux enforcing, where mprotect would fail)
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")

			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")

			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")

		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address

	def _run_asm(self, *byte_code):
		'''
		Execute the given machine code chunks as a no-argument function
		returning a uint32, free the memory segment, and return the value.
		'''
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)

		# Call the byte code like a function
		retval = func()

		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))

		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")

			ctypes.pythonapi.free(ctypes.c_void_p(address))

		return retval

	# FIXME: We should not have to use different instructions to
	# set eax to 0 or 1, on 32bit and 64bit machines.
	def _zero_eax(self):
		'''Byte code that zeroes EAX (CPUID leaf selector).'''
		return (
			b"\x31\xC0"         # xor eax,eax
		)

	def _zero_ecx(self):
		'''Byte code that zeroes ECX (CPUID sub-leaf selector).'''
		return (
			b"\x31\xC9"         # xor ecx,ecx
		)

	def _one_eax(self):
		'''Byte code that sets EAX to 1 (CPUID leaf 1).'''
		return (
			b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
		)

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		'''Return the 12-character vendor ID string from CPUID leaf 0.'''
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2"         # cpuid
			b"\x89\xD8"         # mov ax,bx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# Each 4bits is a ascii letter in the name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)

		return vendor_id

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		'''Return a dict of stepping/model/family fields from CPUID leaf 1.'''
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\xC3"             # ret
		)

		# Get the CPU info
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits

		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		'''Return the highest supported extended CPUID leaf (EAX=0x80000000).'''
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2"             # cpuid
			b"\xC3"                 # ret
		)

		return max_extension_support

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		'''
		Return a sorted list of CPU feature-flag names from CPUID leaf 1,
		plus leaf 7 and the extended leaf 0x80000001 when supported.
		'''
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),

			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),

				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),

				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		flags.sort()
		return flags

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		'''
		Return the 48-byte processor brand string from extended CPUID
		leaves 0x80000002..0x80000004, or "" when they are unsupported.
		'''
		processor_brand = ""

		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC0"   # mov ax,ax
					b"\xC3"       # ret
				)

				# EBX
				ebx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD8"   # mov ax,bx
					b"\xC3"       # ret
				)

				# ECX
				ecx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC8"   # mov ax,cx
					b"\xC3"       # ret
				)

				# EDX
				edx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD0"   # mov ax,dx
					b"\xC3"       # ret
				)

				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)

		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()

		return processor_brand

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
	def get_cache(self, max_extension_support):
		'''
		Return a dict of L2 cache size/line-size/associativity from
		extended CPUID leaf 0x80000006, or {} when it is unsupported.
		'''
		cache_info = {}

		# Just return if the cache feature is not supported
		if max_extension_support < 0x80000006:
			return cache_info

		# ECX
		ecx = self._run_asm(
			b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
			b"\x0f\xa2"              # cpuid
			b"\x89\xC8"              # mov ax,cx
			b"\xC3"                  # ret
		)

		cache_info = {
			'size_kb' : ecx & 0xFF,
			'line_size_b' : (ecx >> 12) & 0xF,
			'associativity' : (ecx >> 16) & 0xFFFF
		}

		return cache_info

	def get_ticks(self):
		'''
		Return the current 64-bit time-stamp counter (RDTSC) value, or
		None when the bitness is neither '32bit' nor '64bit'.
		'''
		retval = None

		if DataSource.bits == '32bit':
			# Works on x86_32
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)

			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)

			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)
			retval = get_ticks_x86_64()

		return retval

	def get_raw_hz(self):
		'''
		Return the number of TSC ticks elapsed over a one second sleep —
		an approximation of the actual clock speed in Hz.
		NOTE: this call blocks for one second.
		'''
		import time

		start = self.get_ticks()

		time.sleep(1)

		end = self.get_ticks()

		ticks = (end - start)

		return ticks
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.

	Puts a base64-encoded result dict onto *queue* (an empty dict when the
	CPU is not X86 or SE Linux is enforcing).
	'''
	# Pipe all output to nothing, since the CPUID byte code may print noise
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return

	# Return none if SE Linux is in enforcing mode
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return

	# Get the cpu info from the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()

	processor_brand = cpuid.get_processor_brand(max_extension_support)

	# Get the Hz and scale
	# (raw tick count over ~1 second, so the scale for it is 0)
	hz_actual = cpuid.get_raw_hz()
	hz_actual = _to_decimal_string(hz_actual)

	# Get the Hz and scale
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

	# NOTE: `info` from get_info() is folded into a new dict here
	info = {
	'vendor_id_raw' : cpuid.get_vendor_id(),
	'hardware_raw' : '',
	'brand_raw' : processor_brand,

	'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
	'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
	'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
	'hz_actual' : _hz_short_to_full(hz_actual, 0),

	'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
	'l2_cache_line_size' : cache_info['line_size_b'],
	'l2_cache_associativity' : hex(cache_info['associativity']),

	'stepping' : info['stepping'],
	'model' : info['model'],
	'family' : info['family'],
	'processor_type' : info['processor_type'],
	'extended_model' : info['extended_model'],
	'extended_family' : info['extended_family'],
	'flags' : cpuid.get_flags(max_extension_support)
	}

	# Drop empty fields, then ship the result back base64 encoded
	# (presumably so it survives the multiprocessing pickle boundary
	# intact — TODO confirm against _obj_to_b64)
	info = {k: v for k, v in info.items() if v}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue

	# Bail out when running CPUID byte code is not possible at all
	if not DataSource.can_cpuid:
		return {}

	# CPUID only exists on X86 CPUs
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in ['X86_32', 'X86_64']:
		return {}

	try:
		# Run the query in a throwaway process, since bad byte code
		# can crash the interpreter outright
		queue = Queue()
		worker = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		worker.start()

		# Busy-join until the worker finishes
		while worker.is_alive():
			worker.join(0)

		# A non-zero exit code means the worker crashed or failed
		if worker.exitcode != 0:
			return {}

		# Decode whatever the worker managed to report
		if not queue.empty():
			return _b64_to_obj(queue.get())
	except:
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
		'hardware_raw' : hardware,
		'brand_raw' : processor_brand,

		'l3_cache_size' : _to_friendly_bytes(cache_size),
		'flags' : flags,
		'vendor_id_raw' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		# NOTE(review): the guard below checks hz_actual with `scale`, but
		# the stored values hard-code scale 6 (MHz, matching the /proc
		# "cpu MHz" field). When the brand string reports GHz (scale 9)
		# these two disagree — confirm whether this asymmetry is intended.
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		hz_brand, scale = '0.0', 0

		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Pull out the "current CPU frequency is NNN MHz" figure
		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		assert(i != -1)
		hz_brand = hz_brand[0 : i+2].strip().lower()

		# Work out the unit scale from the suffix
		if hz_brand.endswith('ghz'):
			scale = 9
		elif hz_brand.endswith('mhz'):
			scale = 6

		# Drop the suffix and normalize to a decimal string
		hz_brand = _to_decimal_string(hz_brand.rstrip('mhz').rstrip('ghz').strip())

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_advertised' : _hz_short_to_full(hz_brand, scale),
			'hz_actual' : _hz_short_to_full(hz_brand, scale),
		}

		# Drop empty fields
		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''
	try:
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# Clock speed, reported by lscpu in MHz
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			scale = 6
			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		# Plain string fields
		for key, field_name in [
			('vendor_id_raw', 'Vendor ID'),
			('brand_raw', 'Model name'),
		]:
			value = _get_field(False, output, None, None, field_name)
			if value:
				info[key] = value

		# Integer fields (only accepted when purely numeric)
		for key, field_name in [
			('family', 'CPU family'),
			('stepping', 'Stepping'),
			('model', 'Model'),
		]:
			value = _get_field(False, output, None, None, field_name)
			if value and value.isdigit():
				info[key] = int(value)

		# Cache sizes
		for key, field_name in [
			('l1_data_cache_size', 'L1d cache'),
			('l1_instruction_cache_size', 'L1i cache'),
			('l2_cache_size', 'L2 cache'),
			('l3_cache_size', 'L3 cache'),
		]:
			value = _get_field(False, output, None, None, field_name)
			if value:
				info[key] = _to_friendly_bytes(value)

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		# Drop empty fields
		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Without dmesg there is nothing to parse
	if not DataSource.has_dmesg():
		return {}

	returncode, output = DataSource.dmesg_a()
	if output is None or returncode != 0:
		return {}

	return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Filter out invalid characters from output
		# (the oddly ordered 'abcfed' is just a hex-digit character set;
		# membership testing does not care about order)
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Get data converted to Uint32 chunks
		# (assumes at least 16 hex chars survive the filter — shorter
		# output raises and falls into the except below)
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),

			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),

			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		info = {k: v for k, v in info.items() if v}

		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
    '''
    Returns the CPU info gathered from /var/run/dmesg.boot.
    Returns {} if dmesg is not found or does not have the desired info.
    '''
    # Bail out early when the boot log file is not available on this platform.
    if not DataSource.has_var_run_dmesg_boot():
        return {}

    # Read the boot log; a failed read is treated exactly like a missing file.
    returncode, output = DataSource.cat_var_run_dmesg_boot()
    if returncode != 0 or output == None:
        return {}

    # Delegate the actual parsing to the shared dmesg parser.
    return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
    '''
    Returns the CPU info gathered from sysctl.
    Returns {} if sysctl is not found.

    Reads the macOS-style ``machdep.cpu.*`` and ``hw.cpufrequency`` keys.
    '''
    try:
        # Just return {} if there is no sysctl
        if not DataSource.has_sysctl():
            return {}

        # If sysctl fails return {}
        returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
        if output == None or returncode != 0:
            return {}

        # Various fields
        vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
        processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
        cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
        model = _get_field(False, output, int, 0, 'machdep.cpu.model')
        family = _get_field(False, output, int, 0, 'machdep.cpu.family')

        # Flags: merge the base, leaf7 and extended feature lists into one
        # sorted, lower-cased list.
        flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
        flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
        flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
        flags.sort()

        # Convert from GHz/MHz string to Hz
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        # hw.cpufrequency appears to already be in Hz, hence scale 0 below
        # — TODO confirm on macOS.
        hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
        hz_actual = _to_decimal_string(hz_actual)

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),
        'l2_cache_size' : _to_friendly_bytes(cache_size),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop any fields that came back empty/falsy.
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        # Best-effort probe: any parsing failure degrades to "no info".
        return {}
def _get_cpu_info_from_sysinfo():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.
    '''
    # Start from the v1 parser's fields, then let the v2 parser
    # override/extend them.
    combined = _get_cpu_info_from_sysinfo_v1()
    combined.update(_get_cpu_info_from_sysinfo_v2())
    return combined
def _get_cpu_info_from_sysinfo_v1():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.

    Parses the older (v1) Haiku ``sysinfo -cpu`` output layout via raw
    string splitting, so any format change makes this silently return {}.
    '''
    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields (vendor id and cache size are not present in this
        # output format, so they are left empty and filtered out below)
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
        model = int(output.split(', model ')[1].split(',')[0].strip())
        family = int(output.split(', family ')[1].split(',')[0].strip())

        # Flags: feature names live on double-tab-indented lines
        flags = []
        for line in output.split('\n'):
            if line.startswith('\t\t'):
                for flag in line.strip().lower().split():
                    flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz; no separate "actual" frequency
        # is reported, so advertised is reused.
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = hz_advertised

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),
        'l2_cache_size' : _to_friendly_bytes(cache_size),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop any fields that came back empty/falsy.
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_sysinfo_v2():
    '''
    Returns the CPU info gathered from sysinfo.
    Returns {} if sysinfo is not found.

    Parses the newer (v2) ``sysinfo -cpu`` output layout, including the
    "Signature:" line and the per-section feature lists.
    '''
    try:
        # Just return {} if there is no sysinfo
        if not DataSource.has_sysinfo():
            return {}

        # If sysinfo fails return {}
        returncode, output = DataSource.sysinfo_cpu()
        if output == None or returncode != 0:
            return {}

        # Various fields (vendor id and cache size are not present in this
        # output format, so they are left empty and filtered out below)
        vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
        cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
        signature = output.split('Signature:')[1].split('\n')[0].strip()
        #
        stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
        model = int(signature.split('model ')[1].split(',')[0].strip())
        family = int(signature.split('family ')[1].split(',')[0].strip())

        # Flags
        def get_subsection_flags(output):
            # Collect flag tokens from the indented lines that follow a
            # section header, stopping at the first non-indented line.
            retval = []
            for line in output.split('\n')[1:]:
                if not line.startswith('   ') and not line.startswith('  '): break
                for entry in line.strip().lower().split(' '):
                    retval.append(entry)
            return retval

        flags = get_subsection_flags(output.split('Features: ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
            get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
        flags.sort()

        # Convert from GHz/MHz string to Hz; frequency comes from the
        # "running at NNNmhz/ghz" phrase on the first non-empty line.
        lines = [n for n in output.split('\n') if n]
        raw_hz = lines[0].split('running at ')[1].strip().lower()
        hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
        hz_advertised = _to_decimal_string(hz_advertised)
        hz_actual = hz_advertised

        scale = 0
        if raw_hz.endswith('mhz'):
            scale = 6
        elif raw_hz.endswith('ghz'):
            scale = 9

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, scale),
        'l2_cache_size' : _to_friendly_bytes(cache_size),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop any fields that came back empty/falsy.
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        #raise # NOTE: To have this throw on error, uncomment this line
        return {}
def _get_cpu_info_from_registry():
    '''
    FIXME: Is missing many of the newer CPU flags like sse3
    Returns the CPU info gathered from the Windows Registry.
    Returns {} if not on Windows.
    '''
    try:
        # Just return {} if not on Windows
        if not DataSource.is_windows:
            return {}

        # Get the CPU name
        processor_brand = DataSource.winreg_processor_brand().strip()

        # Get the CPU vendor id
        vendor_id = DataSource.winreg_vendor_id_raw()

        # Get the CPU arch and bits
        arch_string_raw = DataSource.winreg_arch_string_raw()
        arch, bits = _parse_arch(arch_string_raw)

        # Get the actual CPU Hz (the registry stores it in MHz, hence the
        # scale of 6 used when formatting it below)
        hz_actual = DataSource.winreg_hz_actual()
        hz_actual = _to_decimal_string(hz_actual)

        # Get the advertised CPU Hz from the brand string
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

        # If advertised hz not found, use the actual hz
        if hz_advertised == '0.0':
            scale = 6
            hz_advertised = _to_decimal_string(hz_actual)

        # Get the CPU features from the FeatureSet bitmask
        feature_bits = DataSource.winreg_feature_bits()

        def is_set(bit):
            # Bit 0 is the most significant bit of the 32-bit mask.
            mask = 0x80000000 >> bit
            retval = mask & feature_bits > 0
            return retval

        # http://en.wikipedia.org/wiki/CPUID
        # http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
        # http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
        flags = {
        'fpu' : is_set(0), # Floating Point Unit
        'vme' : is_set(1), # V86 Mode Extensions
        'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
        'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
        'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
        'msr' : is_set(5), # Model Specific Registers
        'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
        'mce' : is_set(7), # Machine Check Exception supported
        'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
        'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
        'sepamd' : is_set(10), # Fast system calls (AMD only)
        'sep' : is_set(11), # Fast system calls
        'mtrr' : is_set(12), # Memory Type Range Registers
        'pge' : is_set(13), # Page Global Enable
        'mca' : is_set(14), # Machine Check Architecture
        'cmov' : is_set(15), # Conditional MOVe instructions
        'pat' : is_set(16), # Page Attribute Table
        'pse36' : is_set(17), # 36 bit Page Size Extensions
        'serial' : is_set(18), # Processor Serial Number
        'clflush' : is_set(19), # Cache Flush
        #'reserved1' : is_set(20), # reserved
        'dts' : is_set(21), # Debug Trace Store
        'acpi' : is_set(22), # ACPI support
        'mmx' : is_set(23), # MultiMedia Extensions
        'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
        'sse' : is_set(25), # SSE instructions
        'sse2' : is_set(26), # SSE2 (WNI) instructions
        'ss' : is_set(27), # self snoop
        #'reserved2' : is_set(28), # reserved
        'tm' : is_set(29), # Automatic clock control
        'ia64' : is_set(30), # IA64 instructions
        '3dnow' : is_set(31) # 3DNow! instructions available
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 6),
        'flags' : flags
        }

        # Drop any fields that came back empty/falsy.
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        # Best-effort probe: any registry/parsing failure degrades to "no info".
        return {}
def _get_cpu_info_from_kstat():
    '''
    Returns the CPU info gathered from isainfo and kstat.
    Returns {} if isainfo or kstat are not found.

    Solaris/illumos path: isainfo supplies the flag list, kstat the
    identification and clock fields.
    '''
    try:
        # Just return {} if there is no isainfo or kstat
        if not DataSource.has_isainfo() or not DataSource.has_kstat():
            return {}

        # If isainfo fails return {}
        returncode, flag_output = DataSource.isainfo_vb()
        if flag_output == None or returncode != 0:
            return {}

        # If kstat fails return {}
        returncode, kstat = DataSource.kstat_m_cpu_info()
        if kstat == None or returncode != 0:
            return {}

        # Various fields, parsed out of kstat's tab-prefixed "name  value" lines
        vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
        processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
        stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
        model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
        family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

        # Flags come from the last line of isainfo -vb output
        flags = flag_output.strip().split('\n')[-1].strip().lower().split()
        flags.sort()

        # Convert from GHz/MHz string to Hz: clock_MHz is in MHz (scale 6)...
        scale = 6
        hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
        hz_advertised = _to_decimal_string(hz_advertised)

        # ...while current_clock_Hz is already in Hz (scale 0 below)
        hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
        hz_actual = _to_decimal_string(hz_actual)

        info = {
        'vendor_id_raw' : vendor_id,
        'brand_raw' : processor_brand,
        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'flags' : flags
        }

        # Drop any fields that came back empty/falsy.
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        # Best-effort probe: any parsing failure degrades to "no info".
        return {}
def _get_cpu_info_from_platform_uname():
    '''
    Returns the family/model/stepping parsed out of platform.uname()'s
    processor string (e.g. "... Family 6 Model 158 Stepping 10, ...").
    Returns {} on any failure.
    '''
    try:
        # Only the first comma separated section carries the identifiers.
        entries = DataSource.uname_string_raw.split(',')[0].split(' ')

        def _int_after(label):
            # Return the int token that follows *label*, or None when the
            # label is absent or has nothing after it.
            if label in entries and entries.index(label) < len(entries) - 1:
                return int(entries[entries.index(label) + 1])
            return None

        info = {
        'family' : _int_after('Family'),
        'model' : _int_after('Model'),
        'stepping' : _int_after('Stepping')
        }

        # Drop any fields that were not found.
        info = {k: v for k, v in info.items() if v}
        return info
    except:
        return {}
def _get_cpu_info_internal():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns {} if nothing is found.

    Probes every known data source in priority order; _copy_new_fields only
    copies fields that are still missing, so earlier sources win.
    '''
    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Describe the running interpreter (word size derived from sys.maxsize)
    friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
    friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
    PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

    # Seed the result with the always-available platform facts
    info = {
    'python_version' : PYTHON_VERSION,
    'cpuinfo_version' : CPUINFO_VERSION,
    'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
    'arch' : arch,
    'bits' : bits,
    'count' : DataSource.cpu_count,
    'arch_string_raw' : DataSource.arch_string_raw,
    }

    # Try the Windows wmic
    _copy_new_fields(info, _get_cpu_info_from_wmic())

    # Try the Windows registry
    _copy_new_fields(info, _get_cpu_info_from_registry())

    # Try /proc/cpuinfo
    _copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())

    # Try cpufreq-info
    _copy_new_fields(info, _get_cpu_info_from_cpufreq_info())

    # Try LSCPU
    _copy_new_fields(info, _get_cpu_info_from_lscpu())

    # Try sysctl
    _copy_new_fields(info, _get_cpu_info_from_sysctl())

    # Try kstat
    _copy_new_fields(info, _get_cpu_info_from_kstat())

    # Try dmesg
    _copy_new_fields(info, _get_cpu_info_from_dmesg())

    # Try /var/run/dmesg.boot
    _copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())

    # Try lsprop ibm,pa-features
    _copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())

    # Try sysinfo
    _copy_new_fields(info, _get_cpu_info_from_sysinfo())

    # Try querying the CPU cpuid register
    _copy_new_fields(info, _get_cpu_info_from_cpuid())

    # Try platform.uname
    _copy_new_fields(info, _get_cpu_info_from_platform_uname())

    return info
def get_cpu_info_json():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a json string
    '''
    import json

    output = None

    # If running under pyinstaller, run normally
    if getattr(sys, 'frozen', False):
        info = _get_cpu_info_internal()
        output = json.dumps(info)
        output = "{0}".format(output)
    # if not running under pyinstaller, run in another process.
    # This is done because multiprocesing has a design flaw that
    # causes non main programs to run multiple times on Windows.
    else:
        from subprocess import Popen, PIPE

        # Re-invoke this very file with --json and capture its stdout.
        command = [sys.executable, __file__, '--json']
        p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        output = p1.communicate()[0]

        if p1.returncode != 0:
            # Child failed; return an empty JSON object rather than raising.
            return "{}"

        if not IS_PY2:
            output = output.decode(encoding='UTF-8')

    return output
def get_cpu_info():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a dict
    '''
    import json

    # Decode the JSON produced by get_cpu_info_json(); the _utf_to_str hook
    # converts unicode keys/values to byte strings on Python 2.
    return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
    '''
    Command line entry point: parses flags, gathers CPU info and prints it
    either as JSON (--json), as the version (--version), or as a
    human-readable field listing. Exits with status 1 on unsupported
    architectures or when no info could be found.
    '''
    from argparse import ArgumentParser
    import json

    # Parse args
    parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
    parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
    parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
    args = parser.parse_args()

    # Refuse to run on architectures the parsers do not understand.
    try:
        _check_arch()
    except Exception as err:
        sys.stderr.write(str(err) + "\n")
        sys.exit(1)

    info = _get_cpu_info_internal()

    if not info:
        sys.stderr.write("Failed to find cpu info\n")
        sys.exit(1)

    if args.json:
        print(json.dumps(info))
    elif args.version:
        print(CPUINFO_VERSION_STRING)
    else:
        # Human readable dump; missing fields print as empty strings.
        print('Python Version: {0}'.format(info.get('python_version', '')))
        print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
        print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
        print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
        print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
        print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
        print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
        print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
        print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
        print('Arch: {0}'.format(info.get('arch', '')))
        print('Bits: {0}'.format(info.get('bits', '')))
        print('Count: {0}'.format(info.get('count', '')))
        print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
        print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
        print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
        print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
        print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
        print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
        print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
        print('Stepping: {0}'.format(info.get('stepping', '')))
        print('Model: {0}'.format(info.get('model', '')))
        print('Family: {0}'.format(info.get('family', '')))
        print('Processor Type: {0}'.format(info.get('processor_type', '')))
        print('Extended Model: {0}'.format(info.get('extended_model', '')))
        print('Extended Family: {0}'.format(info.get('extended_family', '')))
        print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
    main()
else:
    # Imported as a library: verify the architecture up front so callers
    # fail fast on unsupported CPUs.
    _check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_registry
|
python
|
def _get_cpu_info_from_registry():
'''
FIXME: Is missing many of the newer CPU flags like sse3
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand().strip()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id_raw()
# Get the CPU arch and bits
arch_string_raw = DataSource.winreg_arch_string_raw()
arch, bits = _parse_arch(arch_string_raw)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = _to_decimal_string(hz_actual)
# Get the advertised CPU Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 6),
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
|
FIXME: Is missing many of the newer CPU flags like sse3
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L2022-L2120
|
[
"def _to_decimal_string(ticks):\n\ttry:\n\t\t# Convert to string\n\t\tticks = '{0}'.format(ticks)\n\n\t\t# Strip off non numbers and decimal places\n\t\tticks = \"\".join(n for n in ticks if n.isdigit() or n=='.').strip()\n\t\tif ticks == '':\n\t\t\tticks = '0'\n\n\t\t# Add decimal if missing\n\t\tif '.' not in ticks:\n\t\t\tticks = '{0}.0'.format(ticks)\n\n\t\t# Remove trailing zeros\n\t\tticks = ticks.rstrip('0')\n\n\t\t# Add one trailing zero for empty right side\n\t\tif ticks.endswith('.'):\n\t\t\tticks = '{0}0'.format(ticks)\n\n\t\t# Make sure the number can be converted to a float\n\t\tticks = float(ticks)\n\t\tticks = '{0}'.format(ticks)\n\t\treturn ticks\n\texcept:\n\t\treturn '0.0'\n",
"def _parse_arch(arch_string_raw):\n\timport re\n\n\tarch, bits = None, None\n\tarch_string_raw = arch_string_raw.lower()\n\n\t# X86\n\tif re.match('^i\\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):\n\t\tarch = 'X86_32'\n\t\tbits = 32\n\telif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):\n\t\tarch = 'X86_64'\n\t\tbits = 64\n\t# ARM\n\telif re.match('^armv8-a|aarch64$', arch_string_raw):\n\t\tarch = 'ARM_8'\n\t\tbits = 64\n\telif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):\n\t\tarch = 'ARM_7'\n\t\tbits = 32\n\telif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):\n\t\tarch = 'ARM_8'\n\t\tbits = 32\n\t# PPC\n\telif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):\n\t\tarch = 'PPC_32'\n\t\tbits = 32\n\telif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):\n\t\tarch = 'PPC_64'\n\t\tbits = 64\n\t# SPARC\n\telif re.match('^sparc32$|^sparc$', arch_string_raw):\n\t\tarch = 'SPARC_32'\n\t\tbits = 32\n\telif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):\n\t\tarch = 'SPARC_64'\n\t\tbits = 64\n\n\treturn (arch, bits)\n",
"def _hz_short_to_full(ticks, scale):\n\ttry:\n\t\t# Make sure the number can be converted to a float\n\t\tticks = float(ticks)\n\t\tticks = '{0}'.format(ticks)\n\n\t\t# Scale the numbers\n\t\thz = ticks.lstrip('0')\n\t\told_index = hz.index('.')\n\t\thz = hz.replace('.', '')\n\t\thz = hz.ljust(scale + old_index+1, '0')\n\t\tnew_index = old_index + scale\n\t\thz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])\n\t\tleft, right = hz.split('.')\n\t\tleft, right = int(left), int(right)\n\t\treturn (left, right)\n\texcept:\n\t\treturn (0, 0)\n",
"def _hz_short_to_friendly(ticks, scale):\n\ttry:\n\t\t# Get the raw Hz as a string\n\t\tleft, right = _hz_short_to_full(ticks, scale)\n\t\tresult = '{0}.{1}'.format(left, right)\n\n\t\t# Get the location of the dot, and remove said dot\n\t\tdot_index = result.index('.')\n\t\tresult = result.replace('.', '')\n\n\t\t# Get the Hz symbol and scale\n\t\tsymbol = \"Hz\"\n\t\tscale = 0\n\t\tif dot_index > 9:\n\t\t\tsymbol = \"GHz\"\n\t\t\tscale = 9\n\t\telif dot_index > 6:\n\t\t\tsymbol = \"MHz\"\n\t\t\tscale = 6\n\t\telif dot_index > 3:\n\t\t\tsymbol = \"KHz\"\n\t\t\tscale = 3\n\n\t\t# Get the Hz with the dot at the new scaled point\n\t\tresult = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])\n\n\t\t# Format the ticks to have 4 numbers after the decimal\n\t\t# and remove any superfluous zeroes.\n\t\tresult = '{0:.4f} {1}'.format(float(result), symbol)\n\t\tresult = result.rstrip('0')\n\t\treturn result\n\texcept:\n\t\treturn '0.0000 Hz'\n",
"def _parse_cpu_brand_string(cpu_string):\n\t# Just return 0 if the processor brand does not have the Hz\n\tif not 'hz' in cpu_string.lower():\n\t\treturn ('0.0', 0)\n\n\thz = cpu_string.lower()\n\tscale = 0\n\n\tif hz.endswith('mhz'):\n\t\tscale = 6\n\telif hz.endswith('ghz'):\n\t\tscale = 9\n\tif '@' in hz:\n\t\thz = hz.split('@')[1]\n\telse:\n\t\thz = hz.rsplit(None, 1)[1]\n\n\thz = hz.rstrip('mhz').rstrip('ghz').strip()\n\thz = _to_decimal_string(hz)\n\n\treturn (hz, scale)\n",
"def winreg_processor_brand():\n\tkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r\"Hardware\\Description\\System\\CentralProcessor\\0\")\n\tprocessor_brand = winreg.QueryValueEx(key, \"ProcessorNameString\")[0]\n\twinreg.CloseKey(key)\n\treturn processor_brand.strip()\n",
"def winreg_vendor_id_raw():\n\tkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r\"Hardware\\Description\\System\\CentralProcessor\\0\")\n\tvendor_id_raw = winreg.QueryValueEx(key, \"VendorIdentifier\")[0]\n\twinreg.CloseKey(key)\n\treturn vendor_id_raw\n",
"def winreg_arch_string_raw():\n\tkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r\"SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment\")\n\tarch_string_raw = winreg.QueryValueEx(key, \"PROCESSOR_ARCHITECTURE\")[0]\n\twinreg.CloseKey(key)\n\treturn arch_string_raw\n",
"def winreg_hz_actual():\n\tkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r\"Hardware\\Description\\System\\CentralProcessor\\0\")\n\thz_actual = winreg.QueryValueEx(key, \"~Mhz\")[0]\n\twinreg.CloseKey(key)\n\thz_actual = _to_decimal_string(hz_actual)\n\treturn hz_actual\n",
"def winreg_feature_bits():\n\tkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r\"Hardware\\Description\\System\\CentralProcessor\\0\")\n\tfeature_bits = winreg.QueryValueEx(key, \"FeatureSet\")[0]\n\twinreg.CloseKey(key)\n\treturn feature_bits\n",
"def is_set(bit):\n\tmask = 0x80000000 >> bit\n\tretval = mask & feature_bits > 0\n\treturn retval\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
    '''
    Central collection of raw data probes.

    Class attributes capture platform facts once at import time. The
    ``has_*`` static methods report whether a given OS tool/file is
    available; the remaining static methods shell out to that tool (via
    _run_and_get_stdout, returning a (returncode, output) tuple) or read
    the Windows registry. Keeping all external access here makes the
    parsers above testable/mockable.
    '''
    bits = platform.architecture()[0]
    cpu_count = multiprocessing.cpu_count()
    is_windows = platform.system().lower() == 'windows'
    arch_string_raw = platform.machine()
    # platform.uname()[5] is the "processor" description string
    uname_string_raw = platform.uname()[5]
    can_cpuid = True

    # --- availability probes -------------------------------------------------

    @staticmethod
    def has_proc_cpuinfo():
        return os.path.exists('/proc/cpuinfo')

    @staticmethod
    def has_dmesg():
        return len(_program_paths('dmesg')) > 0

    @staticmethod
    def has_var_run_dmesg_boot():
        uname = platform.system().strip().strip('"').strip("'").strip().lower()
        return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')

    @staticmethod
    def has_cpufreq_info():
        return len(_program_paths('cpufreq-info')) > 0

    @staticmethod
    def has_sestatus():
        return len(_program_paths('sestatus')) > 0

    @staticmethod
    def has_sysctl():
        return len(_program_paths('sysctl')) > 0

    @staticmethod
    def has_isainfo():
        return len(_program_paths('isainfo')) > 0

    @staticmethod
    def has_kstat():
        return len(_program_paths('kstat')) > 0

    @staticmethod
    def has_sysinfo():
        return len(_program_paths('sysinfo')) > 0

    @staticmethod
    def has_lscpu():
        return len(_program_paths('lscpu')) > 0

    @staticmethod
    def has_ibm_pa_features():
        return len(_program_paths('lsprop')) > 0

    @staticmethod
    def has_wmic():
        # wmic availability is probed by actually running it.
        returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
        return returncode == 0 and len(output) > 0

    # --- command runners (each returns (returncode, stdout)) -----------------

    @staticmethod
    def cat_proc_cpuinfo():
        return _run_and_get_stdout(['cat', '/proc/cpuinfo'])

    @staticmethod
    def cpufreq_info():
        return _run_and_get_stdout(['cpufreq-info'])

    @staticmethod
    def sestatus_b():
        return _run_and_get_stdout(['sestatus', '-b'])

    @staticmethod
    def dmesg_a():
        return _run_and_get_stdout(['dmesg', '-a'])

    @staticmethod
    def cat_var_run_dmesg_boot():
        return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])

    @staticmethod
    def sysctl_machdep_cpu_hw_cpufrequency():
        return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])

    @staticmethod
    def isainfo_vb():
        return _run_and_get_stdout(['isainfo', '-vb'])

    @staticmethod
    def kstat_m_cpu_info():
        return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])

    @staticmethod
    def sysinfo_cpu():
        return _run_and_get_stdout(['sysinfo', '-cpu'])

    @staticmethod
    def lscpu():
        return _run_and_get_stdout(['lscpu'])

    @staticmethod
    def ibm_pa_features():
        # PowerPC only: read the first cpu node's ibm,pa-features property.
        # Returns None implicitly when the device-tree path does not exist.
        import glob
        ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
        if ibm_features:
            return _run_and_get_stdout(['lsprop', ibm_features[0]])

    @staticmethod
    def wmic_cpu():
        return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

    # --- Windows registry readers (only valid when is_windows) ---------------

    @staticmethod
    def winreg_processor_brand():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
        winreg.CloseKey(key)
        return processor_brand.strip()

    @staticmethod
    def winreg_vendor_id_raw():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
        winreg.CloseKey(key)
        return vendor_id_raw

    @staticmethod
    def winreg_arch_string_raw():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
        arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
        winreg.CloseKey(key)
        return arch_string_raw

    @staticmethod
    def winreg_hz_actual():
        # "~Mhz" is the current clock speed in MHz; normalized to a decimal string.
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
        winreg.CloseKey(key)
        hz_actual = _to_decimal_string(hz_actual)
        return hz_actual

    @staticmethod
    def winreg_feature_bits():
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
        winreg.CloseKey(key)
        return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''
	Run *command*, optionally piping its stdout into *pipe_command*.

	Returns (returncode, stdout_text) of the last process in the chain.
	stdout is decoded as UTF-8 on Python 3 and left as bytes on Python 2.

	Refactor: the original duplicated the Popen construction and decode
	logic in both branches; the common parts are now shared.
	'''
	from subprocess import Popen, PIPE
	p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
		# Close our copy of p1's stdout so p1 sees SIGPIPE if p2 exits early.
		p1.stdout.close()
		last = p2
	else:
		last = p1
	output = last.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	'''Raise an Exception if the current CPU architecture is unsupported.'''
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	# Idiomatic membership test ("not in") over "not x in [...]".
	if arch not in ('X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64'):
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	'''Recursively convert unicode strings to UTF-8 byte strings on
	Python 2; a no-op passthrough on Python 3.'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(item) for item in input]
	if isinstance(input, dict):
		return dict(
			(_utf_to_str(k), _utf_to_str(v)) for k, v in input.items()
		)
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''Look up a field via _get_field_actual, optionally converting it.

	*convert_to* is a callable (e.g. int) applied to the found value.
	Falls back to *default_value* when the field is missing or the
	conversion fails.

	Fix: narrowed the bare except around the conversion to Exception so
	SystemExit/KeyboardInterrupt are no longer swallowed.
	'''
	retval = _get_field_actual(cant_be_number, raw_string, field_names)
	if retval is None:
		return default_value
	if convert_to:
		try:
			retval = convert_to(retval)
		except Exception:
			retval = default_value
	return retval
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''Parse a human string such as "2.80GHz" into the (int, int) Hz pair
	form used elsewhere in this module. Returns (0, 0) on failure.'''
	try:
		text = hz_string.strip().lower()
		# Map the unit suffix to a power-of-ten scale
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0
		else:
			scale = None
		# Keep only the numeric part and guarantee a fractional digit
		number = "".join(n for n in text if n.isdigit() or n == '.').strip()
		if '.' not in number:
			number += '.0'
		# A None scale makes this return (0, 0), matching the old behavior
		return _hz_short_to_full(number, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''Return a human readable Hz string (e.g. "2.8000 GHz") for a decimal
	string *ticks* scaled by 10**scale. Returns '0.0000 Hz' on failure.'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)
		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')
		# Get the Hz symbol and scale
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3
		# Get the Hz with the dot at the new scaled point
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		# NOTE(review): at this point the string already ends with the unit
		# symbol, so this rstrip('0') never strips anything -- the four
		# decimals always survive (e.g. "2.8000 GHz").
		result = result.rstrip('0')
		return result
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''Pull the advertised speed out of a CPU brand string.

	Returns (decimal_string, scale) where scale is the power of ten
	implied by a GHz/MHz suffix, or ('0.0', 0) when no Hz is present.
	'''
	lowered = cpu_string.lower()
	# Just return 0 if the processor brand does not have the Hz
	if 'hz' not in lowered:
		return ('0.0', 0)
	scale = 0
	if lowered.endswith('mhz'):
		scale = 6
	elif lowered.endswith('ghz'):
		scale = 9
	# The speed is either after an "@" or is the last word
	hz = lowered.split('@')[1] if '@' in lowered else lowered.rsplit(None, 1)[1]
	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	return (_to_decimal_string(hz), scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''Parse a dmesg-style CPU line into the tuple
	(hz_brand, scale, brand, vendor_id, stepping, model, family).
	Fields that cannot be found stay None ('0.0'/0 for the Hz pair).'''
	import re
	# Find all the strings inside brackets ()
	starts = [m.start() for m in re.finditer('\(', cpu_string)]
	ends = [m.start() for m in re.finditer('\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]
	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# NOTE(review): lstrip('0x') strips the *characters* 0 and
					# x, so a value like '0x0' becomes '' and int() raises;
					# the caller (_parse_dmesg_output) wraps this in try/except.
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)
	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True
	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)
	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					# Fake a "CPU @ <speed>" string so the regular parser works
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break
	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''Extract CPU info fields from BSD-style dmesg output.

	Scans "CPU:" style lines for the brand string, then the "Origin=" and
	"Features=" lines for vendor/stepping/model/family and flags. Returns
	a dict with only the truthy fields; returns {} on any parse failure.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]
		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string
		# If no CPU string was found, return {}
		if not best_string:
			return {}
		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
		# Origin
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)
		# Features
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])
		flags = []
		for line in flag_lines:
			# Flags appear as a comma list between angle brackets, e.g. <SSE2,HTT>
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
		# Drop empty fields
		return {k: v for k, v in info.items() if v}
	except:
		# Deliberate best-effort: any parse failure means "no dmesg info".
		#raise
		pass
	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''Return True when SE Linux is present and in enforcing mode.

	Parses `sestatus -b` output. An explicit "current mode:" line is
	authoritative; otherwise enforcing is assumed unless both the
	execheap and execmem booleans are on. Returns False when the
	sestatus tool is missing or fails to run.
	'''
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False
	# Run the sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False
	lines = [line.strip().lower() for line in output.splitlines()]
	# An explicit mode line is authoritative
	for line in lines:
		if line.startswith("current mode:"):
			return line.endswith("enforcing")
	# Otherwise check whether we may execute heap and memory
	exec_heap = any(l.startswith("allow_execheap") and l.endswith("on") for l in lines)
	exec_memory = any(l.startswith("allow_execmem") and l.endswith("on") for l in lines)
	# Enforcing unless both are allowed
	return not (exec_heap and exec_memory)
class CPUID(object):
	'''
	Reads x86 CPUID leaves by generating tiny machine-code stubs at
	runtime, mapping them into executable memory with ctypes, and calling
	them as functions.

	Warning: this executes raw machine code and can crash the interpreter
	on unsupported CPUs; callers run it in a throwaway subprocess
	(see _actual_get_cpu_info_from_cpuid).
	'''
	def __init__(self):
		# Windows process handle, created lazily for FlushInstructionCache.
		self.prochandle = None
		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()
	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		# Map *byte_code* into executable memory and return (callable, address).
		# NOTE(review): the mutable [] default is harmless here -- byte_code
		# is only joined, never mutated.
		byte_code = bytes.join(b'', byte_code)
		address = None
		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000
			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")
			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")
			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")
			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")
			# Mark the memory segment as writeable only
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")
			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")
			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")
		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address
	def _run_asm(self, *byte_code):
		# Map the byte code, call it once, free the memory, return the result.
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)
		# Call the byte code like a function
		retval = func()
		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))
		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")
			ctypes.pythonapi.free(ctypes.c_void_p(address))
		return retval
	# FIXME: We should not have to use different instructions to
	# set eax to 0 or 1, on 32bit and 64bit machines.
	def _zero_eax(self):
		return (
			b"\x31\xC0"         # xor eax,eax
		)
	def _zero_ecx(self):
		return (
			b"\x31\xC9"         # xor ecx,ecx
		)
	def _one_eax(self):
		return (
			b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
		)
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		# Runs CPUID leaf 0 three times to collect EBX, EDX, ECX, then
		# decodes the 12 ASCII bytes of the vendor string from them.
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2"         # cpuid
			b"\x89\xD8"         # mov ax,bx
			b"\xC3"             # ret
		)
		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)
		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)
		# Each 4bits is a ascii letter in the name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)
		return vendor_id
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\xC3"             # ret
		)
		# Get the CPU info
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits
		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2"             # cpuid
			b"\xC3"                 # ret
		)
		return max_extension_support
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		# Decodes feature flags from leaf 1 (EDX/ECX), then the extended
		# leaves 7 and 0x80000001 when the CPU reports support for them.
		# Returns a sorted list of the flag names that are set.
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)
		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)
		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),
			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}
		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)
			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),
				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)
			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),
				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		flags.sort()
		return flags
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		# Builds the 48-byte brand string from leaves 0x80000002..4,
		# 16 bytes (EAX,EBX,ECX,EDX) per leaf.
		processor_brand = ""
		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC0"   # mov ax,ax
					b"\xC3"       # ret
				)
				# EBX
				ebx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD8"   # mov ax,bx
					b"\xC3"       # ret
				)
				# ECX
				ecx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC8"   # mov ax,cx
					b"\xC3"       # ret
				)
				# EDX
				edx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD0"   # mov ax,dx
					b"\xC3"       # ret
				)
				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)
		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()
		return processor_brand
	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
	def get_cache(self, max_extension_support):
		# Returns L2 cache info from leaf 0x80000006, or {} if unsupported.
		cache_info = {}
		# Just return if the cache feature is not supported
		if max_extension_support < 0x80000006:
			return cache_info
		# ECX
		ecx = self._run_asm(
			b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
			b"\x0f\xa2"              # cpuid
			b"\x89\xC8"              # mov ax,cx
			b"\xC3"                   # ret
		)
		cache_info = {
			'size_kb' : ecx & 0xFF,
			'line_size_b' : (ecx >> 12) & 0xF,
			'associativity' : (ecx >> 16) & 0xFFFF
		}
		return cache_info
	def get_ticks(self):
		# Read the CPU's time stamp counter (RDTSC) as a 64 bit integer,
		# using a different code stub for 32 bit and 64 bit processes.
		retval = None
		if DataSource.bits == '32bit':
			# Works on x86_32
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)
			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)
			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)
			retval = get_ticks_x86_64()
		return retval
	def get_raw_hz(self):
		# Estimate the CPU frequency by counting TSC ticks over one second.
		import time
		start = self.get_ticks()
		time.sleep(1)
		end = self.get_ticks()
		ticks = (end - start)
		return ticks
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.
	'''
	# Pipe all output to nothing (this runs in a child process; any noise
	# from the CPUID machinery would corrupt the parent's streams)
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return
	# Return none if SE Linux is in enforcing mode
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return
	# Get the cpu info from the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()
	processor_brand = cpuid.get_processor_brand(max_extension_support)
	# Get the Hz and scale
	hz_actual = cpuid.get_raw_hz()
	hz_actual = _to_decimal_string(hz_actual)
	# Get the Hz and scale
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
	# Rebuild info as the full result dict (the leaf-1 fields from
	# cpuid.get_info() are folded in below)
	info = {
	'vendor_id_raw' : cpuid.get_vendor_id(),
	'hardware_raw' : '',
	'brand_raw' : processor_brand,
	'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
	'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
	'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
	'hz_actual' : _hz_short_to_full(hz_actual, 0),
	'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
	'l2_cache_line_size' : cache_info['line_size_b'],
	'l2_cache_associativity' : hex(cache_info['associativity']),
	'stepping' : info['stepping'],
	'model' : info['model'],
	'family' : info['family'],
	'processor_type' : info['processor_type'],
	'extended_model' : info['extended_model'],
	'extended_family' : info['extended_family'],
	'flags' : cpuid.get_flags(max_extension_support)
	}
	# Drop empty fields, then ship the result to the parent process as a
	# base64-encoded pickle (survives the multiprocessing queue safely)
	info = {k: v for k, v in info.items() if v}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue
	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		return {}
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		return {}
	try:
		# Start running the function in a subprocess
		queue = Queue()
		p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		p.start()
		# Wait for the process to end, while it is still alive
		# NOTE(review): join(0) in a loop is a busy-wait; a plain p.join()
		# would block without spinning -- confirm before changing, since the
		# child can crash the interpreter outright.
		while p.is_alive():
			p.join(0)
		# Return {} if it failed
		if p.exitcode != 0:
			return {}
		# Return the result, only if there is something to read
		if not queue.empty():
			output = queue.get()
			return _b64_to_obj(output)
	except:
		# Deliberate best-effort: a crashed/failed child just means no info
		pass
	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields (field names differ between kernels/architectures,
		# so several aliases are tried for each value)
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags ("flags" on x86, "Features" on ARM)
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		# NOTE(review): rstrip strips a trailing character *set*, not the
		# literal suffix "mhz" -- safe here only because the text before
		# the unit is numeric. Confirm before reusing this pattern.
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		# (the brand string may not contain a frequency at all)
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
			'hardware_raw' : hardware,
			'brand_raw' : processor_brand,
			'l3_cache_size' : _to_friendly_bytes(cache_size),
			'flags' : flags,
			'vendor_id_raw' : vendor_id,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		# /proc/cpuinfo reports the actual speed in MHz, hence the fixed
		# scale of 6 for hz_actual below
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		freq_text, scale = '0.0', 0

		# Nothing to do without the cpufreq-info binary
		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Pull out the frequency text (e.g. "2.40 GHz") following the marker
		freq_text = output.split('current CPU frequency is')[1].split('\n')[0]
		marker = freq_text.find('Hz')
		assert(marker != -1)
		freq_text = freq_text[0 : marker + 2].strip().lower()

		# Work out the unit scale, then strip the unit characters off
		if freq_text.endswith('ghz'):
			scale = 9
		elif freq_text.endswith('mhz'):
			scale = 6
		freq_text = freq_text.rstrip('mhz').rstrip('ghz').strip()
		freq_text = _to_decimal_string(freq_text)

		# Only a single frequency is available, so it doubles as both
		# the advertised and the actual speed
		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(freq_text, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(freq_text, scale),
			'hz_advertised' : _hz_short_to_full(freq_text, scale),
			'hz_actual' : _hz_short_to_full(freq_text, scale),
		}

		# Drop empty fields
		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''
	try:
		# Just return {} if there is no lscpu
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# Speed: prefer the max frequency row, fall back to the current one.
		# lscpu reports MHz, hence the fixed scale of 6.
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			scale = 6
			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		vendor_id = _get_field(False, output, None, None, 'Vendor ID')
		if vendor_id:
			info['vendor_id_raw'] = vendor_id

		brand = _get_field(False, output, None, None, 'Model name')
		if brand:
			info['brand_raw'] = brand

		# family/stepping/model are only stored when they are pure digits
		family = _get_field(False, output, None, None, 'CPU family')
		if family and family.isdigit():
			info['family'] = int(family)

		stepping = _get_field(False, output, None, None, 'Stepping')
		if stepping and stepping.isdigit():
			info['stepping'] = int(stepping)

		model = _get_field(False, output, None, None, 'Model')
		if model and model.isdigit():
			info['model'] = int(model)

		# Cache sizes, one lscpu row each
		l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
		if l1_data_cache_size:
			info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)

		l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
		if l1_instruction_cache_size:
			info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)

		l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
		if l2_cache_size:
			info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)

		l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
		if l3_cache_size:
			info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)

		# Flags ("flags" on x86, "Features" on ARM)
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Nothing to do without the dmesg binary
	if not DataSource.has_dmesg():
		return {}

	# Only parse the output when the command actually succeeded
	returncode, output = DataSource.dmesg_a()
	ran_ok = returncode == 0 and output != None
	return _parse_dmesg_output(output) if ran_ok else {}
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
	'''
	try:
		# Nothing to do without the lsprop binary
		if not DataSource.has_ibm_pa_features():
			return {}

		# Nothing to do when reading ibm,pa-features failed
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Keep only the hex digits that follow the property name
		value = output.split("ibm,pa-features")[1].lower()
		value = ''.join([s for s in value if s in list('0123456789abcfed')])

		# Interpret the data as two Uint32 chunks
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Feature name -> (chunk, bit position), per LoPAPR page 767.
		# Reserved bits are skipped.
		feature_bits = {
			# Byte 0
			'mmu' : (left, 0), 'fpu' : (left, 1), 'slb' : (left, 2),
			'run' : (left, 3), 'dabr' : (left, 5), 'ne' : (left, 6),
			'wtr' : (left, 7),
			# Byte 1
			'mcr' : (left, 8), 'dsisr' : (left, 9), 'lp' : (left, 10),
			'ri' : (left, 11), 'dabrx' : (left, 12), 'sprg3' : (left, 13),
			'rislb' : (left, 14), 'pp' : (left, 15),
			# Byte 2
			'vpm' : (left, 16), 'dss_2.05' : (left, 17), 'dar' : (left, 19),
			'ppr' : (left, 21), 'dss_2.02' : (left, 22), 'dss_2.06' : (left, 23),
			# Byte 3
			'lsd_in_dscr' : (left, 24), 'ugr_in_dscr' : (left, 25),
			# Byte 4
			'sso_2.06' : (right, 0),
			# Byte 5
			'le' : (right, 8), 'cfar' : (right, 9), 'eb' : (right, 10),
			'lsq_2.07' : (right, 11),
			# Byte 6
			'dss_2.07' : (right, 16),
		}

		# Keep only the features whose bit is actually set
		flags = [name for name, (chunk, bit) in feature_bits.items() if _is_bit_set(chunk, bit)]
		flags.sort()

		info = {
			'flags' : flags
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Nothing to do without the boot log file
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	# Only parse the output when the read actually succeeded
	returncode, output = DataSource.cat_var_run_dmesg_boot()
	read_ok = returncode == 0 and output != None
	return _parse_dmesg_output(output) if read_ok else {}
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	'''
	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Flags: merge the three feature lists sysctl exposes
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()

		# Convert from GHz/MHz string to Hz.
		# hw.cpufrequency is already in plain Hz, hence the scale of 0 below.
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),
			'l2_cache_size' : _to_friendly_bytes(cache_size),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	# Merge both sysinfo output layouts; v2 entries win on conflicts
	combined = dict(_get_cpu_info_from_sysinfo_v1())
	combined.update(_get_cpu_info_from_sysinfo_v2())
	return combined
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	# NOTE(review): this parses the older sysinfo output layout; confirm
	# against a real dump before changing any of the marker strings.
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags: every double tab indented line holds whitespace separated flags
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz.
		# No separate actual speed is available, so the advertised one is reused.
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),
			'l2_cache_size' : _to_friendly_bytes(cache_size),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags
		# Collects the indented flag lines that follow a section header,
		# stopping at the first unindented line.
		# NOTE(review): the whitespace inside the two startswith literals
		# appears mangled in this copy of the file -- verify them against
		# the upstream source before relying on this loop.
		def get_subsection_flags(output):
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith(' ') and not line.startswith(' '): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)
			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz.
		# The frequency and its unit come from the "running at" phrase on
		# the first non empty line of output.
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),
			'l2_cache_size' : _to_friendly_bytes(cache_size),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Break the list into key values pairs
		value = output.split("\n")
		value = [s.rstrip().split('=') for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz (parsed out of the brand string)
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz
		# wmic reports CurrentClockSpeed in MHz, hence scale 6
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes
		# wmic reports cache sizes as bare numbers of KB
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Get family, model, and stepping, parsed out of a description
		# like "Intel64 Family 6 Model 158 Stepping 10"
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.
	'''
	try:
		# Both tools have to be present
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}

		# Bail out when isainfo fails
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}

		# Bail out when kstat fails
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}

		# Grabs the value printed after "\t<name> " on its own line
		def kstat_field(name):
			return kstat.split('\t' + name + ' ')[1].split('\n')[0].strip()

		# Various fields
		vendor_id = kstat_field('vendor_id')
		processor_brand = kstat_field('brand')
		stepping = int(kstat_field('stepping'))
		model = int(kstat_field('model'))
		family = int(kstat_field('family'))

		# The flags are whitespace separated on the last line of isainfo output
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# kstat reports the advertised speed in MHz ...
		scale = 6
		hz_advertised = _to_decimal_string(kstat_field('clock_MHz'))

		# ... and the actual speed directly in Hz, hence scale 0 below
		hz_actual = _to_decimal_string(kstat_field('current_clock_Hz'))

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields
		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns the family, model, and stepping parsed out of platform.uname().
	Returns {} when they cannot be parsed.
	'''
	try:
		# Only the first comma separated chunk holds the CPU description
		uname = DataSource.uname_string_raw.split(',')[0]
		entries = uname.split(' ')

		# Returns the int that follows the label, or None when absent
		def value_after(label):
			if label in entries and entries.index(label) < len(entries) - 1:
				return int(entries[entries.index(label) + 1])
			return None

		info = {
			'family' : value_after('Family'),
			'model' : value_after('Model'),
			'stepping' : value_after('Stepping')
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Describe the running interpreter, e.g. "3.8.10.final.0 (64 bit)"
	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	# Fields that are always available, no probing required
	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}

	# Query every source in priority order; earlier sources win on
	# conflicting fields because _copy_new_fields only adds new keys.
	sources = [
		_get_cpu_info_from_wmic,                    # Windows wmic
		_get_cpu_info_from_registry,                # Windows registry
		_get_cpu_info_from_proc_cpuinfo,            # /proc/cpuinfo
		_get_cpu_info_from_cpufreq_info,            # cpufreq-info
		_get_cpu_info_from_lscpu,                   # lscpu
		_get_cpu_info_from_sysctl,                  # sysctl
		_get_cpu_info_from_kstat,                   # kstat
		_get_cpu_info_from_dmesg,                   # dmesg
		_get_cpu_info_from_cat_var_run_dmesg_boot,  # /var/run/dmesg.boot
		_get_cpu_info_from_ibm_pa_features,         # lsprop ibm,pa-features
		_get_cpu_info_from_sysinfo,                 # sysinfo
		_get_cpu_info_from_cpuid,                   # cpuid register
		_get_cpu_info_from_platform_uname,          # platform.uname
	]
	for source in sources:
		_copy_new_fields(info, source())

	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''
	import json

	# Under pyinstaller there is no re-exec problem, so gather directly
	if getattr(sys, 'frozen', False):
		return "{0}".format(json.dumps(_get_cpu_info_internal()))

	# Otherwise gather in a child process. This works around a design
	# flaw in multiprocessing that causes non main programs to run
	# multiple times on Windows.
	from subprocess import Popen, PIPE
	child = Popen([sys.executable, __file__, '--json'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
	output = child.communicate()[0]
	if child.returncode != 0:
		return "{}"
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''
	import json

	# Parse the JSON, converting unicode strings back to plain str
	return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
	'''
	Command line entry point: prints the CPU report, the JSON form
	(--json), or the library version (--version).
	'''
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()

	# Bail out early on unsupported architectures
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# (label, field) pairs, in display order
		report_fields = [
			('Python Version', 'python_version'),
			('Cpuinfo Version', 'cpuinfo_version_string'),
			('Vendor ID Raw', 'vendor_id_raw'),
			('Hardware Raw', 'hardware_raw'),
			('Brand Raw', 'brand_raw'),
			('Hz Advertised Friendly', 'hz_advertised_friendly'),
			('Hz Actual Friendly', 'hz_actual_friendly'),
			('Hz Advertised', 'hz_advertised'),
			('Hz Actual', 'hz_actual'),
			('Arch', 'arch'),
			('Bits', 'bits'),
			('Count', 'count'),
			('Arch String Raw', 'arch_string_raw'),
			('L1 Data Cache Size', 'l1_data_cache_size'),
			('L1 Instruction Cache Size', 'l1_instruction_cache_size'),
			('L2 Cache Size', 'l2_cache_size'),
			('L2 Cache Line Size', 'l2_cache_line_size'),
			('L2 Cache Associativity', 'l2_cache_associativity'),
			('L3 Cache Size', 'l3_cache_size'),
			('Stepping', 'stepping'),
			('Model', 'model'),
			('Family', 'family'),
			('Processor Type', 'processor_type'),
			('Extended Model', 'extended_model'),
			('Extended Family', 'extended_family'),
		]
		for label, field in report_fields:
			print('{0}: {1}'.format(label, info.get(field, '')))
		# Flags are printed as one comma separated line
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# When executed directly, run the command line report. When imported as
# a module, only verify that the CPU architecture is supported.
if __name__ == '__main__':
	main()
else:
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_from_kstat
|
python
|
def _get_cpu_info_from_kstat():
'''
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
'''
try:
# Just return {} if there is no isainfo or kstat
if not DataSource.has_isainfo() or not DataSource.has_kstat():
return {}
# If isainfo fails return {}
returncode, flag_output = DataSource.isainfo_vb()
if flag_output == None or returncode != 0:
return {}
# If kstat fails return {}
returncode, kstat = DataSource.kstat_m_cpu_info()
if kstat == None or returncode != 0:
return {}
# Various fields
vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
# Flags
flags = flag_output.strip().split('\n')[-1].strip().lower().split()
flags.sort()
# Convert from GHz/MHz string to Hz
scale = 6
hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
hz_advertised = _to_decimal_string(hz_advertised)
# Convert from GHz/MHz string to Hz
hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
|
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L2122-L2180
|
[
"def has_isainfo():\n\treturn len(_program_paths('isainfo')) > 0\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
bits = platform.architecture()[0]
cpu_count = multiprocessing.cpu_count()
is_windows = platform.system().lower() == 'windows'
arch_string_raw = platform.machine()
uname_string_raw = platform.uname()[5]
can_cpuid = True
@staticmethod
def has_proc_cpuinfo():
return os.path.exists('/proc/cpuinfo')
@staticmethod
def has_dmesg():
return len(_program_paths('dmesg')) > 0
@staticmethod
def has_var_run_dmesg_boot():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
@staticmethod
def has_cpufreq_info():
return len(_program_paths('cpufreq-info')) > 0
@staticmethod
def has_sestatus():
return len(_program_paths('sestatus')) > 0
@staticmethod
def has_sysctl():
return len(_program_paths('sysctl')) > 0
@staticmethod
def has_isainfo():
return len(_program_paths('isainfo')) > 0
@staticmethod
def has_kstat():
return len(_program_paths('kstat')) > 0
@staticmethod
def has_sysinfo():
return len(_program_paths('sysinfo')) > 0
@staticmethod
def has_lscpu():
return len(_program_paths('lscpu')) > 0
@staticmethod
def has_ibm_pa_features():
return len(_program_paths('lsprop')) > 0
@staticmethod
def has_wmic():
returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
return returncode == 0 and len(output) > 0
@staticmethod
def cat_proc_cpuinfo():
return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
@staticmethod
def cpufreq_info():
return _run_and_get_stdout(['cpufreq-info'])
@staticmethod
def sestatus_b():
return _run_and_get_stdout(['sestatus', '-b'])
@staticmethod
def dmesg_a():
return _run_and_get_stdout(['dmesg', '-a'])
@staticmethod
def cat_var_run_dmesg_boot():
return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
@staticmethod
def sysctl_machdep_cpu_hw_cpufrequency():
return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
@staticmethod
def isainfo_vb():
return _run_and_get_stdout(['isainfo', '-vb'])
@staticmethod
def kstat_m_cpu_info():
return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
@staticmethod
def sysinfo_cpu():
return _run_and_get_stdout(['sysinfo', '-cpu'])
@staticmethod
def lscpu():
return _run_and_get_stdout(['lscpu'])
@staticmethod
def ibm_pa_features():
import glob
ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
if ibm_features:
return _run_and_get_stdout(['lsprop', ibm_features[0]])
@staticmethod
def wmic_cpu():
return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
@staticmethod
def winreg_processor_brand():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
winreg.CloseKey(key)
return processor_brand.strip()
	@staticmethod
	def winreg_vendor_id_raw():
		# Read the raw vendor identifier (e.g. the CPUID vendor string) from
		# the Windows registry.
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw
	@staticmethod
	def winreg_arch_string_raw():
		# Read the raw architecture string from the session environment key.
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw
	@staticmethod
	def winreg_hz_actual():
		# Read the CPU clock in MHz from the registry and normalize it to a
		# decimal string (e.g. 2800 -> '2800.0') via _to_decimal_string.
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual
	@staticmethod
	def winreg_feature_bits():
		# Read the raw CPU feature bitmask recorded by Windows.
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
	"""Run *command*, optionally piping its stdout into *pipe_command*.

	Returns (returncode, stdout) where stdout is text on Python 3 and raw
	bytes on Python 2. The returncode/output are taken from the last process
	in the pipeline. (Refactored to share the common Popen/decode path that
	was previously duplicated in both branches.)
	"""
	from subprocess import Popen, PIPE
	p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
		# Close our copy so p1 sees SIGPIPE if p2 exits early.
		p1.stdout.close()
		last = p2
	else:
		last = p1
	output = last.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	"""Raise when running on a CPU architecture py-cpuinfo cannot handle."""
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	supported = ('X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64')
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
	"""Recursively convert Python 2 unicode values to UTF-8 byte strings.

	Lists and dicts are rebuilt with converted members; on Python 3 (or for
	any other type) the value passes through unchanged.
	"""
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	if isinstance(input, list):
		return [_utf_to_str(item) for item in input]
	if isinstance(input, dict):
		return {_utf_to_str(k): _utf_to_str(v) for k, v in input.items()}
	return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	"""Find the first of *field_names* in *raw_string* and convert its value.

	*convert_to* (e.g. int) is applied when given; any conversion failure or
	a missing field yields *default_value*.
	"""
	retval = _get_field_actual(cant_be_number, raw_string, field_names)
	# Convert the return value
	if retval and convert_to:
		try:
			retval = convert_to(retval)
		# Narrowed from a bare 'except:'; conversion failures fall back to
		# the caller-supplied default.
		except Exception:
			retval = default_value
	# Return the default if there is no return value
	if retval is None:
		retval = default_value
	return retval
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	"""Parse a human string like '2.8 GHz' into the short (hz, scale) form.

	Returns (0, 0) when the string cannot be parsed.
	"""
	try:
		text = hz_string.strip().lower()
		# The unit suffix picks the power-of-ten exponent; an unknown suffix
		# leaves scale as None, which _hz_short_to_full rejects as (0, 0).
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0
		else:
			scale = None
		digits = "".join(ch for ch in hz_string if ch.isdigit() or ch == '.').strip()
		if '.' not in digits:
			digits += '.0'
		return _hz_short_to_full(digits, scale)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	"""Render short-form (ticks, scale) as a display string like '2.8000 GHz'.

	Returns '0.0000 Hz' on any failure.
	"""
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)
		# Get the location of the dot, and remove said dot
		# (the dot position is the digit count of the whole part, which
		# selects the display unit below)
		dot_index = result.index('.')
		result = result.replace('.', '')
		# Get the Hz symbol and scale
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3
		# Get the Hz with the dot at the new scaled point
		# (negative slicing: scale=0 splits off the last digit)
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		# NOTE: this rstrip is a no-op — the string always ends with the
		# unit symbol ('Hz'), so there are never trailing '0' chars to strip.
		result = result.rstrip('0')
		return result
	except:
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	"""Pull (hz_string, scale) out of a brand string like '... @ 2.80GHz'.

	Returns ('0.0', 0) when no 'hz' marker is present at all.
	"""
	lowered = cpu_string.lower()
	# No frequency marker anywhere -> report zero Hz.
	if 'hz' not in lowered:
		return ('0.0', 0)
	# Unit suffix picks the exponent; plain 'hz' keeps scale 0.
	if lowered.endswith('mhz'):
		scale = 6
	elif lowered.endswith('ghz'):
		scale = 9
	else:
		scale = 0
	# The number is either after an '@' or is the last whitespace token.
	if '@' in lowered:
		raw = lowered.split('@')[1]
	else:
		raw = lowered.rsplit(None, 1)[1]
	raw = raw.rstrip('mhz').rstrip('ghz').strip()
	return (_to_decimal_string(raw), scale)
def _parse_cpu_brand_string_dx(cpu_string):
	"""Parse an extended BSD-style CPU line with '(...)' annotations.

	Returns (hz, scale, brand, vendor_id, stepping, model, family); fields
	that cannot be found stay None (hz falls back to '0.0').
	"""
	import re
	# Find all the strings inside brackets ()
	# (raw strings: '\(' / '\)' were invalid escape sequences and warn on
	# modern Pythons)
	starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
	ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]
	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				# NOTE(review): lstrip('0x') strips ANY leading '0'/'x' chars,
				# not just a '0x' prefix; a value of '0' becomes '' and
				# int('', 16) raises. The only caller in view wraps this in a
				# try/except, but confirm the intent before relying on it.
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)
	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True
	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)
	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break
	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	"""Extract CPU info (brand, Hz, vendor, flags, ...) from BSD dmesg text.

	Best-effort: returns a dict with only the truthy fields found, or {} when
	nothing usable is present or parsing fails.
	"""
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]
		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string
		# If no CPU string was found, return {}
		if not best_string:
			return {}
		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
		# Origin line, e.g. ' Origin="GenuineIntel" stepping=7 model=2 fam=6'
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)
		# Features: each category line carries a <flag,flag,...> list
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])
		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
		# Drop falsy entries so callers only see fields that were found
		return {k: v for k, v in info.items() if v}
	except:
		# Deliberate best-effort: any parsing failure yields {} (uncomment
		# the raise below while debugging).
		#raise
		pass
	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	"""Best-effort check whether SELinux blocks executable memory.

	Uses the 'sestatus' tool; when it is missing or fails, assume SELinux is
	not enforcing.
	"""
	if not DataSource.has_sestatus():
		return False
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False
	cleaned = [line.strip().lower() for line in output.splitlines()]
	# An explicit "Current mode:" line is authoritative.
	for line in cleaned:
		if line.startswith("current mode:"):
			return line.endswith("enforcing")
	# Otherwise infer from whether exec-heap/exec-mem booleans are on.
	can_exec_heap = any(l.startswith("allow_execheap") and l.endswith("on") for l in cleaned)
	can_exec_memory = any(l.startswith("allow_execmem") and l.endswith("on") for l in cleaned)
	return (not can_exec_heap or not can_exec_memory)
class CPUID(object):
	def __init__(self):
		# Windows process handle; created lazily in _asm_func when the
		# instruction cache needs flushing.
		self.prochandle = None
		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()
	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		"""Turn raw machine-code bytes into a callable ctypes function.

		Allocates executable memory (VirtualAlloc on Windows, valloc+mprotect
		elsewhere), copies *byte_code* into it, and casts the address to a
		CFUNCTYPE with the given *restype*/*argtypes*.
		Returns (callable, address); the caller must free the address
		(see _run_asm).
		"""
		# NOTE: the mutable list default is safe here — it is immediately
		# rebound to bytes and never mutated.
		byte_code = bytes.join(b'', byte_code)
		address = None
		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000
			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")
			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")
			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")
			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")
			# Mark the memory segment as writeable only
			# (skipped under SELinux enforcing, which forbids these mprotects)
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")
			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")
			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")
		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address
	def _run_asm(self, *byte_code):
		"""Execute the given machine-code byte strings and return EAX.

		The pieces of *byte_code* are concatenated, mapped executable via
		_asm_func, called with no arguments, and the memory is freed before
		the 32-bit unsigned result is returned.
		"""
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)
		# Call the byte code like a function
		retval = func()
		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))
		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory before freeing it
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")
			ctypes.pythonapi.free(ctypes.c_void_p(address))
		return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
return (
b"\x31\xC0" # xor eax,eax
)
def _zero_ecx(self):
return (
b"\x31\xC9" # xor ecx,ecx
)
def _one_eax(self):
return (
b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
self._zero_eax(),
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Each 4bits is a ascii letter in the name
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model = (eax >> 16) & 0xF # 4 bits
extended_family = (eax >> 20) & 0xFF # 8 bits
return {
'stepping' : stepping,
'model' : model,
'family' : family,
'processor_type' : processor_type,
'extended_model' : extended_model,
'extended_family' : extended_family
}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		"""Return a sorted list of CPU feature-flag names.

		Reads CPUID leaf 1 (EDX/ECX), leaf 7 sub-leaf 0 (EBX/ECX) when
		*max_extension_support* >= 7, and extended leaf 0x80000001 (EBX/ECX)
		when supported. Each bit maps to a Linux-style flag name; only the
		set bits are returned.
		"""
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)
		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)
		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),
			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}
		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)
			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),
				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)
			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)
			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),
				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}
			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags
		flags.sort()
		return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		"""Return the 48-byte processor brand string from CPUID leaves
		0x80000002-0x80000004, or '' when those leaves are unsupported.
		"""
		processor_brand = ""
		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			# Each leaf yields 16 characters spread across EAX..EDX.
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction,        # mov ax,0x8000000?
					b"\x0f\xa2"         # cpuid
					b"\x89\xC0"         # mov ax,ax
					b"\xC3"             # ret
				)
				# EBX
				ebx = self._run_asm(
					instruction,        # mov ax,0x8000000?
					b"\x0f\xa2"         # cpuid
					b"\x89\xD8"         # mov ax,bx
					b"\xC3"             # ret
				)
				# ECX
				ecx = self._run_asm(
					instruction,        # mov ax,0x8000000?
					b"\x0f\xa2"         # cpuid
					b"\x89\xC8"         # mov ax,cx
					b"\xC3"             # ret
				)
				# EDX
				edx = self._run_asm(
					instruction,        # mov ax,0x8000000?
					b"\x0f\xa2"         # cpuid
					b"\x89\xD0"         # mov ax,dx
					b"\xC3"             # ret
				)
				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)
		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()
		return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
	def get_cache(self, max_extension_support):
		"""Return L2 cache details from CPUID leaf 0x80000006 ECX.

		Keys: size_kb, line_size_b, associativity. Returns {} when the leaf
		is unsupported.
		"""
		cache_info = {}
		# Just return if the cache feature is not supported
		if max_extension_support < 0x80000006:
			return cache_info
		# ECX
		ecx = self._run_asm(
			b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
			b"\x0f\xa2"              # cpuid
			b"\x89\xC8"              # mov ax,cx
			b"\xC3"                   # ret
		)
		cache_info = {
			'size_kb' : ecx & 0xFF,
			'line_size_b' : (ecx >> 12) & 0xF,
			'associativity' : (ecx >> 16) & 0xFFFF
		}
		return cache_info
	def get_ticks(self):
		"""Read the CPU time-stamp counter (RDTSC) as a 64-bit integer.

		Uses a different stub for 32-bit vs 64-bit processes; a serializing
		CPUID precedes RDTSC in both. DataSource.bits is compared against the
		platform module's '32bit'/'64bit' strings.
		"""
		retval = None
		if DataSource.bits == '32bit':
			# Works on x86_32: the stub writes EDX:EAX through the two
			# pointer arguments, since a 32-bit return can't hold 64 bits.
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)
			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)
			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64: combine EDX:EAX into RAX and return it.
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)
			retval = get_ticks_x86_64()
		return retval
def get_raw_hz(self):
import time
start = self.get_ticks()
time.sleep(1)
end = self.get_ticks()
ticks = (end - start)
return ticks
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.

	Puts a base64-pickled info dict on *queue* ({} on non-X86 or when
	SELinux is enforcing).
	'''
	# Pipe all output to nothing (this runs in a throwaway subprocess,
	# so the devnull handles are reclaimed when the process exits)
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return
	# Return none if SE Linux is in enforcing mode
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return
	# Get the cpu info from the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()
	processor_brand = cpuid.get_processor_brand(max_extension_support)
	# Get the Hz and scale
	hz_actual = cpuid.get_raw_hz()
	hz_actual = _to_decimal_string(hz_actual)
	# Get the Hz and scale
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
	# Rebuild 'info' as the final payload; the stepping/model/family values
	# come from the leaf-1 dict fetched above.
	info = {
	'vendor_id_raw' : cpuid.get_vendor_id(),
	'hardware_raw' : '',
	'brand_raw' : processor_brand,
	'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
	'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
	'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
	'hz_actual' : _hz_short_to_full(hz_actual, 0),
	'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
	'l2_cache_line_size' : cache_info['line_size_b'],
	'l2_cache_associativity' : hex(cache_info['associativity']),
	'stepping' : info['stepping'],
	'model' : info['model'],
	'family' : info['family'],
	'processor_type' : info['processor_type'],
	'extended_model' : info['extended_model'],
	'extended_family' : info['extended_family'],
	'flags' : cpuid.get_flags(max_extension_support)
	}
	# Keep only truthy fields, then ship the payload back to the parent
	info = {k: v for k, v in info.items() if v}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue
	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		return {}
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		return {}
	try:
		# Run the CPUID probe in a subprocess: executing raw machine code
		# can crash the interpreter, so keep it out of this process.
		queue = Queue()
		p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		p.start()
		# Block until the child exits. (Replaces the old
		# 'while p.is_alive(): p.join(0)' loop, which busy-spun a full CPU
		# core for the ~1 second the child takes.)
		p.join()
		# Return {} if it failed
		if p.exitcode != 0:
			return {}
		# Return the result, only if there is something to read
		if not queue.empty():
			output = queue.get()
			return _b64_to_obj(output)
	# Narrowed from a bare 'except:'; any multiprocessing failure degrades
	# to "no info" rather than crashing the caller.
	except Exception:
		pass
	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields
		# Field name variants cover differences between kernels/architectures
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')
		# Flags ('flags' on x86, 'Features' on ARM)
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		# NOTE(review): rstrip('mhz') strips a character set, not a suffix;
		# harmless here because the numeric part contains no 'm'/'h'/'z'.
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		# The brand string may not contain a frequency at all, so failure is tolerated
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
		'hardware_raw' : hardware,
		'brand_raw' : processor_brand,

		'l3_cache_size' : _to_friendly_bytes(cache_size),
		'flags' : flags,
		'vendor_id_raw' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		# (when falling back to the MHz value from cpuinfo, scale becomes 6)
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one; actual Hz always uses scale 6 (MHz source)
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Pull out the frequency text, e.g. "1.60 GHz"
		frequency = output.split('current CPU frequency is')[1].split('\n')[0]
		end = frequency.find('Hz')
		assert(end != -1)
		frequency = frequency[0 : end + 2].strip().lower()

		# Work out the scale from the unit suffix
		scale = 0
		if frequency.endswith('mhz'):
			scale = 6
		elif frequency.endswith('ghz'):
			scale = 9

		# Drop the unit suffix, keeping just the number
		frequency = frequency.rstrip('mhz').rstrip('ghz').strip()
		frequency = _to_decimal_string(frequency)

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(frequency, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(frequency, scale),
			'hz_advertised' : _hz_short_to_full(frequency, scale),
			'hz_actual' : _hz_short_to_full(frequency, scale),
		}

		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''
	try:
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# Clock speed: lscpu reports MHz, so the scale is fixed at 10^6
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			scale = 6
			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		# Plain string fields, stored as-is when present
		for key, field in (('vendor_id_raw', 'Vendor ID'), ('brand_raw', 'Model name')):
			value = _get_field(False, output, None, None, field)
			if value:
				info[key] = value

		# Integer fields, only stored when the text is a valid number
		for key, field in (('family', 'CPU family'), ('stepping', 'Stepping'), ('model', 'Model')):
			value = _get_field(False, output, None, None, field)
			if value and value.isdigit():
				info[key] = int(value)

		# Cache sizes, converted to a human friendly byte string
		for key, field in (
				('l1_data_cache_size', 'L1d cache'),
				('l1_instruction_cache_size', 'L1i cache'),
				('l2_cache_size', 'L2 cache'),
				('l3_cache_size', 'L3 cache')):
			value = _get_field(False, output, None, None, field)
			if value:
				info[key] = _to_friendly_bytes(value)

		# Flags ('flags' on x86, 'Features' on ARM)
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Nothing to do when dmesg is unavailable
	if not DataSource.has_dmesg():
		return {}

	# Give up if running dmesg fails or yields no output
	rc, text = DataSource.dmesg_a()
	if rc != 0 or text == None:
		return {}

	return _parse_dmesg_output(text)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.

	The bit layout follows LoPAPR (see the spec link in the comment above this
	function); each flag below maps to one bit of the first two 32 bit words.
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Filter out invalid characters from output
		# (keeps only hex digits appearing after the "ibm,pa-features" marker)
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Get data converted to Uint32 chunks
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags
		# Commented out entries are reserved bits in the spec.
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),

			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),

			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		# Drop empty fields
		info = {k: v for k, v in info.items() if v}

		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Nothing to do when the boot log file is absent
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	# Give up if reading the file fails or yields no output
	rc, text = DataSource.cat_var_run_dmesg_boot()
	if rc != 0 or text == None:
		return {}

	return _parse_dmesg_output(text)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	'''
	try:
		if not DataSource.has_sysctl():
			return {}

		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Scalar fields reported by sysctl
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Feature flags are spread across three sysctl keys
		flags = []
		for field in ('machdep.cpu.features', 'machdep.cpu.leaf7_features', 'machdep.cpu.extfeatures'):
			flags.extend(_get_field(False, output, None, '', field).lower().split())
		flags.sort()

		# Advertised speed comes from the brand string; actual from hw.cpufrequency
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags,
		}

		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	# Merge both parser versions, letting v2 override v1 where keys collide
	merged = {}
	merged.update(_get_cpu_info_from_sysinfo_v1())
	merged.update(_get_cpu_info_from_sysinfo_v2())
	return merged
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Parses the older (v1) sysinfo output layout; any parse failure is
	swallowed and reported as {}.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		# NOTE(review): parsing is positional on literal markers like 'CPU #0: "'
		# and ', stepping ' — fragile against output format changes.
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags
		# Flag lines are the ones indented with two tabs
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# No separate measured speed is available, so actual mirrors advertised
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Parses the newer (v2) sysinfo output layout; any parse failure is
	swallowed and reported as {}.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		# NOTE(review): parsing is positional on literal markers ('CPU #0: "',
		# 'Signature:', 'stepping ') — fragile against output format changes.
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags
		def get_subsection_flags(output):
			# Collects the indented flag lines that follow a section heading,
			# stopping at the first non-indented line.
			# NOTE(review): the two startswith prefixes below appear identical
			# here — whitespace may have been lost in transit; confirm against
			# the original file (they should match the two indent widths used).
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith(' ') and not line.startswith(' '): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)
			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# No separate measured speed is available, so actual mirrors advertised
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Parse the "key=value" lines into a dict, skipping empty values
		pairs = [line.rstrip().split('=') for line in output.split("\n") if '=' in line]
		value = {k: v for k, v in pairs if v}

		# Advertised speed is embedded in the brand string
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Actual speed is reported directly, in MHz
		scale_actual = 6
		hz_actual = value.get('CurrentClockSpeed')
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Cache sizes are reported as bare KB counts
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Family, model, and stepping are embedded in the description text,
		# as "... Family X Model Y Stepping Z ..."
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		def lookup(label):
			# Returns the int following the label word, or '' when absent
			if label in entries and entries.index(label) < len(entries) - 1:
				return int(entries[entries.index(label) + 1])
			return ''

		family = lookup('Family')
		model = lookup('Model')
		stepping = lookup('Stepping')

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		return {k: v for k, v in info.items() if v}
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz (registry reports MHz, hence scale 6 below)
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz (parsed out of the brand string)
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features
		# The registry exposes the raw CPUID feature bitfield; bits are
		# numbered from the most significant end here.
		feature_bits = DataSource.winreg_feature_bits()

		def is_set(bit):
			# True when the given bit (0 = MSB) is set in feature_bits
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 6),

			'flags' : flags
		}

		# Drop empty fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns the family, model, and stepping parsed out of the raw uname string.
	Returns {} on failure.
	'''
	try:
		# The first comma separated chunk holds "... Family X Model Y Stepping Z"
		uname = DataSource.uname_string_raw.split(',')[0]
		entries = uname.split(' ')

		def lookup(label):
			# Returns the int following the label word, or None when absent
			if label in entries and entries.index(label) < len(entries) - 1:
				return int(entries[entries.index(label) + 1])
			return None

		info = {
			'family' : lookup('Family'),
			'model' : lookup('Model'),
			'stepping' : lookup('Stepping'),
		}

		return {k: v for k, v in info.items() if v}
	except:
		return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Describe the running Python interpreter
	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}

	# Each source is tried in order; earlier sources win when fields collide
	sources = (
		_get_cpu_info_from_wmic,                   # Windows wmic
		_get_cpu_info_from_registry,               # Windows registry
		_get_cpu_info_from_proc_cpuinfo,           # /proc/cpuinfo
		_get_cpu_info_from_cpufreq_info,           # cpufreq-info
		_get_cpu_info_from_lscpu,                  # lscpu
		_get_cpu_info_from_sysctl,                 # sysctl
		_get_cpu_info_from_kstat,                  # kstat
		_get_cpu_info_from_dmesg,                  # dmesg
		_get_cpu_info_from_cat_var_run_dmesg_boot, # /var/run/dmesg.boot
		_get_cpu_info_from_ibm_pa_features,        # lsprop ibm,pa-features
		_get_cpu_info_from_sysinfo,                # sysinfo
		_get_cpu_info_from_cpuid,                  # cpuid register
		_get_cpu_info_from_platform_uname,         # platform.uname
	)
	for source in sources:
		_copy_new_fields(info, source())

	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''
	import json

	output = None

	if getattr(sys, 'frozen', False):
		# Running under pyinstaller: just gather the info in-process
		output = "{0}".format(json.dumps(_get_cpu_info_internal()))
	else:
		# Otherwise gather in a child process. This sidesteps a multiprocessing
		# design flaw that makes non main programs run multiple times on Windows.
		from subprocess import Popen, PIPE
		command = [sys.executable, __file__, '--json']
		p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
		output = p1.communicate()[0]
		if p1.returncode != 0:
			return "{}"
		if not IS_PY2:
			output = output.decode(encoding='UTF-8')

	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''
	import json

	# Round-trip through JSON, converting unicode strings back to native str
	return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()

	# Refuse to run on unsupported architectures
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# Human readable report: one "Label: value" line per field
		fields = [
			('Python Version', 'python_version'),
			('Cpuinfo Version', 'cpuinfo_version_string'),
			('Vendor ID Raw', 'vendor_id_raw'),
			('Hardware Raw', 'hardware_raw'),
			('Brand Raw', 'brand_raw'),
			('Hz Advertised Friendly', 'hz_advertised_friendly'),
			('Hz Actual Friendly', 'hz_actual_friendly'),
			('Hz Advertised', 'hz_advertised'),
			('Hz Actual', 'hz_actual'),
			('Arch', 'arch'),
			('Bits', 'bits'),
			('Count', 'count'),
			('Arch String Raw', 'arch_string_raw'),
			('L1 Data Cache Size', 'l1_data_cache_size'),
			('L1 Instruction Cache Size', 'l1_instruction_cache_size'),
			('L2 Cache Size', 'l2_cache_size'),
			('L2 Cache Line Size', 'l2_cache_line_size'),
			('L2 Cache Associativity', 'l2_cache_associativity'),
			('L3 Cache Size', 'l3_cache_size'),
			('Stepping', 'stepping'),
			('Model', 'model'),
			('Family', 'family'),
			('Processor Type', 'processor_type'),
			('Extended Model', 'extended_model'),
			('Extended Family', 'extended_family'),
		]
		for label, key in fields:
			print('{0}: {1}'.format(label, info.get(key, '')))
		# Flags are printed as a comma separated list
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# Print the gathered info when run as a script; when imported as a module,
# only verify the current architecture is supported.
if __name__ == '__main__':
	main()
else:
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
_get_cpu_info_internal
|
python
|
def _get_cpu_info_internal():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
'''
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
info = {
'python_version' : PYTHON_VERSION,
'cpuinfo_version' : CPUINFO_VERSION,
'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
'arch' : arch,
'bits' : bits,
'count' : DataSource.cpu_count,
'arch_string_raw' : DataSource.arch_string_raw,
}
# Try the Windows wmic
_copy_new_fields(info, _get_cpu_info_from_wmic())
# Try the Windows registry
_copy_new_fields(info, _get_cpu_info_from_registry())
# Try /proc/cpuinfo
_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())
# Try cpufreq-info
_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())
# Try LSCPU
_copy_new_fields(info, _get_cpu_info_from_lscpu())
# Try sysctl
_copy_new_fields(info, _get_cpu_info_from_sysctl())
# Try kstat
_copy_new_fields(info, _get_cpu_info_from_kstat())
# Try dmesg
_copy_new_fields(info, _get_cpu_info_from_dmesg())
# Try /var/run/dmesg.boot
_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
# Try lsprop ibm,pa-features
_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())
# Try sysinfo
_copy_new_fields(info, _get_cpu_info_from_sysinfo())
# Try querying the CPU cpuid register
_copy_new_fields(info, _get_cpu_info_from_cpuid())
# Try platform.uname
_copy_new_fields(info, _get_cpu_info_from_platform_uname())
return info
|
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L2211-L2273
|
[
"def _parse_arch(arch_string_raw):\n\timport re\n\n\tarch, bits = None, None\n\tarch_string_raw = arch_string_raw.lower()\n\n\t# X86\n\tif re.match('^i\\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):\n\t\tarch = 'X86_32'\n\t\tbits = 32\n\telif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):\n\t\tarch = 'X86_64'\n\t\tbits = 64\n\t# ARM\n\telif re.match('^armv8-a|aarch64$', arch_string_raw):\n\t\tarch = 'ARM_8'\n\t\tbits = 64\n\telif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):\n\t\tarch = 'ARM_7'\n\t\tbits = 32\n\telif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):\n\t\tarch = 'ARM_8'\n\t\tbits = 32\n\t# PPC\n\telif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):\n\t\tarch = 'PPC_32'\n\t\tbits = 32\n\telif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):\n\t\tarch = 'PPC_64'\n\t\tbits = 64\n\t# SPARC\n\telif re.match('^sparc32$|^sparc$', arch_string_raw):\n\t\tarch = 'SPARC_32'\n\t\tbits = 32\n\telif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):\n\t\tarch = 'SPARC_64'\n\t\tbits = 64\n\n\treturn (arch, bits)\n",
"def _copy_new_fields(info, new_info):\n\tkeys = [\n\t\t'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',\n\t\t'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',\n\t\t'arch_string_raw', 'uname_string_raw',\n\t\t'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',\n\t\t'stepping', 'model', 'family',\n\t\t'processor_type', 'extended_model', 'extended_family', 'flags',\n\t\t'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'\n\t]\n\n\tfor key in keys:\n\t\tif new_info.get(key, None) and not info.get(key, None):\n\t\t\tinfo[key] = new_info[key]\n\t\telif key == 'flags' and new_info.get('flags'):\n\t\t\tfor f in new_info['flags']:\n\t\t\t\tif f not in info['flags']: info['flags'].append(f)\n\t\t\tinfo['flags'].sort()\n",
"def _get_cpu_info_from_cpuid():\n\t'''\n\tReturns the CPU info gathered by querying the X86 cpuid register in a new process.\n\tReturns {} on non X86 cpus.\n\tReturns {} if SELinux is in enforcing mode.\n\t'''\n\tfrom multiprocessing import Process, Queue\n\n\t# Return {} if can't cpuid\n\tif not DataSource.can_cpuid:\n\t\treturn {}\n\n\t# Get the CPU arch and bits\n\tarch, bits = _parse_arch(DataSource.arch_string_raw)\n\n\t# Return {} if this is not an X86 CPU\n\tif not arch in ['X86_32', 'X86_64']:\n\t\treturn {}\n\n\ttry:\n\t\t# Start running the function in a subprocess\n\t\tqueue = Queue()\n\t\tp = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))\n\t\tp.start()\n\n\t\t# Wait for the process to end, while it is still alive\n\t\twhile p.is_alive():\n\t\t\tp.join(0)\n\n\t\t# Return {} if it failed\n\t\tif p.exitcode != 0:\n\t\t\treturn {}\n\n\t\t# Return the result, only if there is something to read\n\t\tif not queue.empty():\n\t\t\toutput = queue.get()\n\t\t\treturn _b64_to_obj(output)\n\texcept:\n\t\tpass\n\n\t# Return {} if everything failed\n\treturn {}\n",
"def _get_cpu_info_from_proc_cpuinfo():\n\t'''\n\tReturns the CPU info gathered from /proc/cpuinfo.\n\tReturns {} if /proc/cpuinfo is not found.\n\t'''\n\ttry:\n\t\t# Just return {} if there is no cpuinfo\n\t\tif not DataSource.has_proc_cpuinfo():\n\t\t\treturn {}\n\n\t\treturncode, output = DataSource.cat_proc_cpuinfo()\n\t\tif returncode != 0:\n\t\t\treturn {}\n\n\t\t# Various fields\n\t\tvendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')\n\t\tprocessor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')\n\t\tcache_size = _get_field(False, output, None, '', 'cache size')\n\t\tstepping = _get_field(False, output, int, 0, 'stepping')\n\t\tmodel = _get_field(False, output, int, 0, 'model')\n\t\tfamily = _get_field(False, output, int, 0, 'cpu family')\n\t\thardware = _get_field(False, output, None, '', 'Hardware')\n\t\t# Flags\n\t\tflags = _get_field(False, output, None, None, 'flags', 'Features')\n\t\tif flags:\n\t\t\tflags = flags.split()\n\t\t\tflags.sort()\n\n\t\t# Convert from MHz string to Hz\n\t\thz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')\n\t\thz_actual = hz_actual.lower().rstrip('mhz').strip()\n\t\thz_actual = _to_decimal_string(hz_actual)\n\n\t\t# Convert from GHz/MHz string to Hz\n\t\thz_advertised, scale = (None, 0)\n\t\ttry:\n\t\t\thz_advertised, scale = _parse_cpu_brand_string(processor_brand)\n\t\texcept Exception:\n\t\t\tpass\n\n\t\tinfo = {\n\t\t'hardware_raw' : hardware,\n\t\t'brand_raw' : processor_brand,\n\n\t\t'l3_cache_size' : _to_friendly_bytes(cache_size),\n\t\t'flags' : flags,\n\t\t'vendor_id_raw' : vendor_id,\n\t\t'stepping' : stepping,\n\t\t'model' : model,\n\t\t'family' : family,\n\t\t}\n\n\t\t# Make the Hz the same for actual and advertised if missing any\n\t\tif not hz_advertised or hz_advertised == '0.0':\n\t\t\thz_advertised = hz_actual\n\t\t\tscale = 6\n\t\telif not hz_actual or hz_actual == '0.0':\n\t\t\thz_actual = hz_advertised\n\n\t\t# 
Add the Hz if there is one\n\t\tif _hz_short_to_full(hz_advertised, scale) > (0, 0):\n\t\t\tinfo['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)\n\t\t\tinfo['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)\n\t\tif _hz_short_to_full(hz_actual, scale) > (0, 0):\n\t\t\tinfo['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)\n\t\t\tinfo['hz_actual'] = _hz_short_to_full(hz_actual, 6)\n\n\t\tinfo = {k: v for k, v in info.items() if v}\n\t\treturn info\n\texcept:\n\t\t#raise # NOTE: To have this throw on error, uncomment this line\n\t\treturn {}\n",
"def _get_cpu_info_from_cpufreq_info():\n\t'''\n\tReturns the CPU info gathered from cpufreq-info.\n\tReturns {} if cpufreq-info is not found.\n\t'''\n\ttry:\n\t\thz_brand, scale = '0.0', 0\n\n\t\tif not DataSource.has_cpufreq_info():\n\t\t\treturn {}\n\n\t\treturncode, output = DataSource.cpufreq_info()\n\t\tif returncode != 0:\n\t\t\treturn {}\n\n\t\thz_brand = output.split('current CPU frequency is')[1].split('\\n')[0]\n\t\ti = hz_brand.find('Hz')\n\t\tassert(i != -1)\n\t\thz_brand = hz_brand[0 : i+2].strip().lower()\n\n\t\tif hz_brand.endswith('mhz'):\n\t\t\tscale = 6\n\t\telif hz_brand.endswith('ghz'):\n\t\t\tscale = 9\n\t\thz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()\n\t\thz_brand = _to_decimal_string(hz_brand)\n\n\t\tinfo = {\n\t\t\t'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),\n\t\t\t'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),\n\t\t\t'hz_advertised' : _hz_short_to_full(hz_brand, scale),\n\t\t\t'hz_actual' : _hz_short_to_full(hz_brand, scale),\n\t\t}\n\n\t\tinfo = {k: v for k, v in info.items() if v}\n\t\treturn info\n\texcept:\n\t\t#raise # NOTE: To have this throw on error, uncomment this line\n\t\treturn {}\n",
"def _get_cpu_info_from_lscpu():\n\t'''\n\tReturns the CPU info gathered from lscpu.\n\tReturns {} if lscpu is not found.\n\t'''\n\ttry:\n\t\tif not DataSource.has_lscpu():\n\t\t\treturn {}\n\n\t\treturncode, output = DataSource.lscpu()\n\t\tif returncode != 0:\n\t\t\treturn {}\n\n\t\tinfo = {}\n\n\t\tnew_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')\n\t\tif new_hz:\n\t\t\tnew_hz = _to_decimal_string(new_hz)\n\t\t\tscale = 6\n\t\t\tinfo['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)\n\t\t\tinfo['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)\n\t\t\tinfo['hz_advertised'] = _hz_short_to_full(new_hz, scale)\n\t\t\tinfo['hz_actual'] = _hz_short_to_full(new_hz, scale)\n\n\t\tvendor_id = _get_field(False, output, None, None, 'Vendor ID')\n\t\tif vendor_id:\n\t\t\tinfo['vendor_id_raw'] = vendor_id\n\n\t\tbrand = _get_field(False, output, None, None, 'Model name')\n\t\tif brand:\n\t\t\tinfo['brand_raw'] = brand\n\n\t\tfamily = _get_field(False, output, None, None, 'CPU family')\n\t\tif family and family.isdigit():\n\t\t\tinfo['family'] = int(family)\n\n\t\tstepping = _get_field(False, output, None, None, 'Stepping')\n\t\tif stepping and stepping.isdigit():\n\t\t\tinfo['stepping'] = int(stepping)\n\n\t\tmodel = _get_field(False, output, None, None, 'Model')\n\t\tif model and model.isdigit():\n\t\t\tinfo['model'] = int(model)\n\n\t\tl1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')\n\t\tif l1_data_cache_size:\n\t\t\tinfo['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)\n\n\t\tl1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')\n\t\tif l1_instruction_cache_size:\n\t\t\tinfo['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)\n\n\t\tl2_cache_size = _get_field(False, output, None, None, 'L2 cache')\n\t\tif l2_cache_size:\n\t\t\tinfo['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)\n\n\t\tl3_cache_size = _get_field(False, output, None, 
None, 'L3 cache')\n\t\tif l3_cache_size:\n\t\t\tinfo['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)\n\n\t\t# Flags\n\t\tflags = _get_field(False, output, None, None, 'flags', 'Features')\n\t\tif flags:\n\t\t\tflags = flags.split()\n\t\t\tflags.sort()\n\t\t\tinfo['flags'] = flags\n\n\t\tinfo = {k: v for k, v in info.items() if v}\n\t\treturn info\n\texcept:\n\t\t#raise # NOTE: To have this throw on error, uncomment this line\n\t\treturn {}\n",
"def _get_cpu_info_from_dmesg():\n\t'''\n\tReturns the CPU info gathered from dmesg.\n\tReturns {} if dmesg is not found or does not have the desired info.\n\t'''\n\t# Just return {} if there is no dmesg\n\tif not DataSource.has_dmesg():\n\t\treturn {}\n\n\t# If dmesg fails return {}\n\treturncode, output = DataSource.dmesg_a()\n\tif output == None or returncode != 0:\n\t\treturn {}\n\n\treturn _parse_dmesg_output(output)\n",
"def _get_cpu_info_from_ibm_pa_features():\n\t'''\n\tReturns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features\n\tReturns {} if lsprop is not found or ibm,pa-features does not have the desired info.\n\t'''\n\ttry:\n\t\t# Just return {} if there is no lsprop\n\t\tif not DataSource.has_ibm_pa_features():\n\t\t\treturn {}\n\n\t\t# If ibm,pa-features fails return {}\n\t\treturncode, output = DataSource.ibm_pa_features()\n\t\tif output == None or returncode != 0:\n\t\t\treturn {}\n\n\t\t# Filter out invalid characters from output\n\t\tvalue = output.split(\"ibm,pa-features\")[1].lower()\n\t\tvalue = [s for s in value if s in list('0123456789abcfed')]\n\t\tvalue = ''.join(value)\n\n\t\t# Get data converted to Uint32 chunks\n\t\tleft = int(value[0 : 8], 16)\n\t\tright = int(value[8 : 16], 16)\n\n\t\t# Get the CPU flags\n\t\tflags = {\n\t\t\t# Byte 0\n\t\t\t'mmu' : _is_bit_set(left, 0),\n\t\t\t'fpu' : _is_bit_set(left, 1),\n\t\t\t'slb' : _is_bit_set(left, 2),\n\t\t\t'run' : _is_bit_set(left, 3),\n\t\t\t#'reserved' : _is_bit_set(left, 4),\n\t\t\t'dabr' : _is_bit_set(left, 5),\n\t\t\t'ne' : _is_bit_set(left, 6),\n\t\t\t'wtr' : _is_bit_set(left, 7),\n\n\t\t\t# Byte 1\n\t\t\t'mcr' : _is_bit_set(left, 8),\n\t\t\t'dsisr' : _is_bit_set(left, 9),\n\t\t\t'lp' : _is_bit_set(left, 10),\n\t\t\t'ri' : _is_bit_set(left, 11),\n\t\t\t'dabrx' : _is_bit_set(left, 12),\n\t\t\t'sprg3' : _is_bit_set(left, 13),\n\t\t\t'rislb' : _is_bit_set(left, 14),\n\t\t\t'pp' : _is_bit_set(left, 15),\n\n\t\t\t# Byte 2\n\t\t\t'vpm' : _is_bit_set(left, 16),\n\t\t\t'dss_2.05' : _is_bit_set(left, 17),\n\t\t\t#'reserved' : _is_bit_set(left, 18),\n\t\t\t'dar' : _is_bit_set(left, 19),\n\t\t\t#'reserved' : _is_bit_set(left, 20),\n\t\t\t'ppr' : _is_bit_set(left, 21),\n\t\t\t'dss_2.02' : _is_bit_set(left, 22),\n\t\t\t'dss_2.06' : _is_bit_set(left, 23),\n\n\t\t\t# Byte 3\n\t\t\t'lsd_in_dscr' : _is_bit_set(left, 24),\n\t\t\t'ugr_in_dscr' : _is_bit_set(left, 25),\n\t\t\t#'reserved' : 
_is_bit_set(left, 26),\n\t\t\t#'reserved' : _is_bit_set(left, 27),\n\t\t\t#'reserved' : _is_bit_set(left, 28),\n\t\t\t#'reserved' : _is_bit_set(left, 29),\n\t\t\t#'reserved' : _is_bit_set(left, 30),\n\t\t\t#'reserved' : _is_bit_set(left, 31),\n\n\t\t\t# Byte 4\n\t\t\t'sso_2.06' : _is_bit_set(right, 0),\n\t\t\t#'reserved' : _is_bit_set(right, 1),\n\t\t\t#'reserved' : _is_bit_set(right, 2),\n\t\t\t#'reserved' : _is_bit_set(right, 3),\n\t\t\t#'reserved' : _is_bit_set(right, 4),\n\t\t\t#'reserved' : _is_bit_set(right, 5),\n\t\t\t#'reserved' : _is_bit_set(right, 6),\n\t\t\t#'reserved' : _is_bit_set(right, 7),\n\n\t\t\t# Byte 5\n\t\t\t'le' : _is_bit_set(right, 8),\n\t\t\t'cfar' : _is_bit_set(right, 9),\n\t\t\t'eb' : _is_bit_set(right, 10),\n\t\t\t'lsq_2.07' : _is_bit_set(right, 11),\n\t\t\t#'reserved' : _is_bit_set(right, 12),\n\t\t\t#'reserved' : _is_bit_set(right, 13),\n\t\t\t#'reserved' : _is_bit_set(right, 14),\n\t\t\t#'reserved' : _is_bit_set(right, 15),\n\n\t\t\t# Byte 6\n\t\t\t'dss_2.07' : _is_bit_set(right, 16),\n\t\t\t#'reserved' : _is_bit_set(right, 17),\n\t\t\t#'reserved' : _is_bit_set(right, 18),\n\t\t\t#'reserved' : _is_bit_set(right, 19),\n\t\t\t#'reserved' : _is_bit_set(right, 20),\n\t\t\t#'reserved' : _is_bit_set(right, 21),\n\t\t\t#'reserved' : _is_bit_set(right, 22),\n\t\t\t#'reserved' : _is_bit_set(right, 23),\n\n\t\t\t# Byte 7\n\t\t\t#'reserved' : _is_bit_set(right, 24),\n\t\t\t#'reserved' : _is_bit_set(right, 25),\n\t\t\t#'reserved' : _is_bit_set(right, 26),\n\t\t\t#'reserved' : _is_bit_set(right, 27),\n\t\t\t#'reserved' : _is_bit_set(right, 28),\n\t\t\t#'reserved' : _is_bit_set(right, 29),\n\t\t\t#'reserved' : _is_bit_set(right, 30),\n\t\t\t#'reserved' : _is_bit_set(right, 31),\n\t\t}\n\n\t\t# Get a list of only the flags that are true\n\t\tflags = [k for k, v in flags.items() if v]\n\t\tflags.sort()\n\n\t\tinfo = {\n\t\t\t'flags' : flags\n\t\t}\n\t\tinfo = {k: v for k, v in info.items() if v}\n\n\t\treturn info\n\texcept:\n\t\treturn {}\n",
"def _get_cpu_info_from_cat_var_run_dmesg_boot():\n\t'''\n\tReturns the CPU info gathered from /var/run/dmesg.boot.\n\tReturns {} if dmesg is not found or does not have the desired info.\n\t'''\n\t# Just return {} if there is no /var/run/dmesg.boot\n\tif not DataSource.has_var_run_dmesg_boot():\n\t\treturn {}\n\n\t# If dmesg.boot fails return {}\n\treturncode, output = DataSource.cat_var_run_dmesg_boot()\n\tif output == None or returncode != 0:\n\t\treturn {}\n\n\treturn _parse_dmesg_output(output)\n",
"def _get_cpu_info_from_sysctl():\n\t'''\n\tReturns the CPU info gathered from sysctl.\n\tReturns {} if sysctl is not found.\n\t'''\n\ttry:\n\t\t# Just return {} if there is no sysctl\n\t\tif not DataSource.has_sysctl():\n\t\t\treturn {}\n\n\t\t# If sysctl fails return {}\n\t\treturncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()\n\t\tif output == None or returncode != 0:\n\t\t\treturn {}\n\n\t\t# Various fields\n\t\tvendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')\n\t\tprocessor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')\n\t\tcache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')\n\t\tstepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')\n\t\tmodel = _get_field(False, output, int, 0, 'machdep.cpu.model')\n\t\tfamily = _get_field(False, output, int, 0, 'machdep.cpu.family')\n\n\t\t# Flags\n\t\tflags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()\n\t\tflags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())\n\t\tflags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())\n\t\tflags.sort()\n\n\t\t# Convert from GHz/MHz string to Hz\n\t\thz_advertised, scale = _parse_cpu_brand_string(processor_brand)\n\t\thz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')\n\t\thz_actual = _to_decimal_string(hz_actual)\n\n\t\tinfo = {\n\t\t'vendor_id_raw' : vendor_id,\n\t\t'brand_raw' : processor_brand,\n\n\t\t'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),\n\t\t'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),\n\t\t'hz_advertised' : _hz_short_to_full(hz_advertised, scale),\n\t\t'hz_actual' : _hz_short_to_full(hz_actual, 0),\n\n\t\t'l2_cache_size' : _to_friendly_bytes(cache_size),\n\n\t\t'stepping' : stepping,\n\t\t'model' : model,\n\t\t'family' : family,\n\t\t'flags' : flags\n\t\t}\n\n\t\tinfo = {k: v for k, v in info.items() if 
v}\n\t\treturn info\n\texcept:\n\t\treturn {}\n",
"def _get_cpu_info_from_sysinfo():\n\t'''\n\tReturns the CPU info gathered from sysinfo.\n\tReturns {} if sysinfo is not found.\n\t'''\n\tinfo = _get_cpu_info_from_sysinfo_v1()\n\tinfo.update(_get_cpu_info_from_sysinfo_v2())\n\treturn info\n",
"def _get_cpu_info_from_wmic():\n\t'''\n\tReturns the CPU info gathered from WMI.\n\tReturns {} if not on Windows, or wmic is not installed.\n\t'''\n\n\ttry:\n\t\t# Just return {} if not Windows or there is no wmic\n\t\tif not DataSource.is_windows or not DataSource.has_wmic():\n\t\t\treturn {}\n\n\t\treturncode, output = DataSource.wmic_cpu()\n\t\tif output == None or returncode != 0:\n\t\t\treturn {}\n\n\t\t# Break the list into key values pairs\n\t\tvalue = output.split(\"\\n\")\n\t\tvalue = [s.rstrip().split('=') for s in value if '=' in s]\n\t\tvalue = {k: v for k, v in value if v}\n\n\t\t# Get the advertised MHz\n\t\tprocessor_brand = value.get('Name')\n\t\thz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)\n\n\t\t# Get the actual MHz\n\t\thz_actual = value.get('CurrentClockSpeed')\n\t\tscale_actual = 6\n\t\tif hz_actual:\n\t\t\thz_actual = _to_decimal_string(hz_actual)\n\n\t\t# Get cache sizes\n\t\tl2_cache_size = value.get('L2CacheSize')\n\t\tif l2_cache_size:\n\t\t\tl2_cache_size = l2_cache_size + ' KB'\n\n\t\tl3_cache_size = value.get('L3CacheSize')\n\t\tif l3_cache_size:\n\t\t\tl3_cache_size = l3_cache_size + ' KB'\n\n\t\t# Get family, model, and stepping\n\t\tfamily, model, stepping = '', '', ''\n\t\tdescription = value.get('Description') or value.get('Caption')\n\t\tentries = description.split(' ')\n\n\t\tif 'Family' in entries and entries.index('Family') < len(entries)-1:\n\t\t\ti = entries.index('Family')\n\t\t\tfamily = int(entries[i + 1])\n\n\t\tif 'Model' in entries and entries.index('Model') < len(entries)-1:\n\t\t\ti = entries.index('Model')\n\t\t\tmodel = int(entries[i + 1])\n\n\t\tif 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:\n\t\t\ti = entries.index('Stepping')\n\t\t\tstepping = int(entries[i + 1])\n\n\t\tinfo = {\n\t\t\t'vendor_id_raw' : value.get('Manufacturer'),\n\t\t\t'brand_raw' : processor_brand,\n\n\t\t\t'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, 
scale_advertised),\n\t\t\t'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),\n\t\t\t'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),\n\t\t\t'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),\n\n\t\t\t'l2_cache_size' : l2_cache_size,\n\t\t\t'l3_cache_size' : l3_cache_size,\n\n\t\t\t'stepping' : stepping,\n\t\t\t'model' : model,\n\t\t\t'family' : family,\n\t\t}\n\n\t\tinfo = {k: v for k, v in info.items() if v}\n\t\treturn info\n\texcept:\n\t\t#raise # NOTE: To have this throw on error, uncomment this line\n\t\treturn {}\n",
"def _get_cpu_info_from_registry():\n\t'''\n\tFIXME: Is missing many of the newer CPU flags like sse3\n\tReturns the CPU info gathered from the Windows Registry.\n\tReturns {} if not on Windows.\n\t'''\n\ttry:\n\t\t# Just return {} if not on Windows\n\t\tif not DataSource.is_windows:\n\t\t\treturn {}\n\n\t\t# Get the CPU name\n\t\tprocessor_brand = DataSource.winreg_processor_brand().strip()\n\n\t\t# Get the CPU vendor id\n\t\tvendor_id = DataSource.winreg_vendor_id_raw()\n\n\t\t# Get the CPU arch and bits\n\t\tarch_string_raw = DataSource.winreg_arch_string_raw()\n\t\tarch, bits = _parse_arch(arch_string_raw)\n\n\t\t# Get the actual CPU Hz\n\t\thz_actual = DataSource.winreg_hz_actual()\n\t\thz_actual = _to_decimal_string(hz_actual)\n\n\t\t# Get the advertised CPU Hz\n\t\thz_advertised, scale = _parse_cpu_brand_string(processor_brand)\n\n\t\t# If advertised hz not found, use the actual hz\n\t\tif hz_advertised == '0.0':\n\t\t\tscale = 6\n\t\t\thz_advertised = _to_decimal_string(hz_actual)\n\n\t\t# Get the CPU features\n\t\tfeature_bits = DataSource.winreg_feature_bits()\n\n\t\tdef is_set(bit):\n\t\t\tmask = 0x80000000 >> bit\n\t\t\tretval = mask & feature_bits > 0\n\t\t\treturn retval\n\n\t\t# http://en.wikipedia.org/wiki/CPUID\n\t\t# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean\n\t\t# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm\n\t\tflags = {\n\t\t\t'fpu' : is_set(0), # Floating Point Unit\n\t\t\t'vme' : is_set(1), # V86 Mode Extensions\n\t\t\t'de' : is_set(2), # Debug Extensions - I/O breakpoints supported\n\t\t\t'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)\n\t\t\t'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available\n\t\t\t'msr' : is_set(5), # Model Specific Registers\n\t\t\t'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)\n\t\t\t'mce' : is_set(7), # Machine Check Exception supported\n\t\t\t'cx8' : is_set(8), # Compare Exchange Eight 
Byte instruction available\n\t\t\t'apic' : is_set(9), # Local APIC present (multiprocessor operation support)\n\t\t\t'sepamd' : is_set(10), # Fast system calls (AMD only)\n\t\t\t'sep' : is_set(11), # Fast system calls\n\t\t\t'mtrr' : is_set(12), # Memory Type Range Registers\n\t\t\t'pge' : is_set(13), # Page Global Enable\n\t\t\t'mca' : is_set(14), # Machine Check Architecture\n\t\t\t'cmov' : is_set(15), # Conditional MOVe instructions\n\t\t\t'pat' : is_set(16), # Page Attribute Table\n\t\t\t'pse36' : is_set(17), # 36 bit Page Size Extensions\n\t\t\t'serial' : is_set(18), # Processor Serial Number\n\t\t\t'clflush' : is_set(19), # Cache Flush\n\t\t\t#'reserved1' : is_set(20), # reserved\n\t\t\t'dts' : is_set(21), # Debug Trace Store\n\t\t\t'acpi' : is_set(22), # ACPI support\n\t\t\t'mmx' : is_set(23), # MultiMedia Extensions\n\t\t\t'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions\n\t\t\t'sse' : is_set(25), # SSE instructions\n\t\t\t'sse2' : is_set(26), # SSE2 (WNI) instructions\n\t\t\t'ss' : is_set(27), # self snoop\n\t\t\t#'reserved2' : is_set(28), # reserved\n\t\t\t'tm' : is_set(29), # Automatic clock control\n\t\t\t'ia64' : is_set(30), # IA64 instructions\n\t\t\t'3dnow' : is_set(31) # 3DNow! instructions available\n\t\t}\n\n\t\t# Get a list of only the flags that are true\n\t\tflags = [k for k, v in flags.items() if v]\n\t\tflags.sort()\n\n\t\tinfo = {\n\t\t'vendor_id_raw' : vendor_id,\n\t\t'brand_raw' : processor_brand,\n\n\t\t'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),\n\t\t'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),\n\t\t'hz_advertised' : _hz_short_to_full(hz_advertised, scale),\n\t\t'hz_actual' : _hz_short_to_full(hz_actual, 6),\n\n\t\t'flags' : flags\n\t\t}\n\n\t\tinfo = {k: v for k, v in info.items() if v}\n\t\treturn info\n\texcept:\n\t\treturn {}\n",
"def _get_cpu_info_from_kstat():\n\t'''\n\tReturns the CPU info gathered from isainfo and kstat.\n\tReturns {} if isainfo or kstat are not found.\n\t'''\n\ttry:\n\t\t# Just return {} if there is no isainfo or kstat\n\t\tif not DataSource.has_isainfo() or not DataSource.has_kstat():\n\t\t\treturn {}\n\n\t\t# If isainfo fails return {}\n\t\treturncode, flag_output = DataSource.isainfo_vb()\n\t\tif flag_output == None or returncode != 0:\n\t\t\treturn {}\n\n\t\t# If kstat fails return {}\n\t\treturncode, kstat = DataSource.kstat_m_cpu_info()\n\t\tif kstat == None or returncode != 0:\n\t\t\treturn {}\n\n\t\t# Various fields\n\t\tvendor_id = kstat.split('\\tvendor_id ')[1].split('\\n')[0].strip()\n\t\tprocessor_brand = kstat.split('\\tbrand ')[1].split('\\n')[0].strip()\n\t\tstepping = int(kstat.split('\\tstepping ')[1].split('\\n')[0].strip())\n\t\tmodel = int(kstat.split('\\tmodel ')[1].split('\\n')[0].strip())\n\t\tfamily = int(kstat.split('\\tfamily ')[1].split('\\n')[0].strip())\n\n\t\t# Flags\n\t\tflags = flag_output.strip().split('\\n')[-1].strip().lower().split()\n\t\tflags.sort()\n\n\t\t# Convert from GHz/MHz string to Hz\n\t\tscale = 6\n\t\thz_advertised = kstat.split('\\tclock_MHz ')[1].split('\\n')[0].strip()\n\t\thz_advertised = _to_decimal_string(hz_advertised)\n\n\t\t# Convert from GHz/MHz string to Hz\n\t\thz_actual = kstat.split('\\tcurrent_clock_Hz ')[1].split('\\n')[0].strip()\n\t\thz_actual = _to_decimal_string(hz_actual)\n\n\t\tinfo = {\n\t\t'vendor_id_raw' : vendor_id,\n\t\t'brand_raw' : processor_brand,\n\n\t\t'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),\n\t\t'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),\n\t\t'hz_advertised' : _hz_short_to_full(hz_advertised, scale),\n\t\t'hz_actual' : _hz_short_to_full(hz_actual, 0),\n\n\t\t'stepping' : stepping,\n\t\t'model' : model,\n\t\t'family' : family,\n\t\t'flags' : flags\n\t\t}\n\n\t\tinfo = {k: v for k, v in info.items() if v}\n\t\treturn 
info\n\texcept:\n\t\treturn {}\n",
"def _get_cpu_info_from_platform_uname():\n\ttry:\n\t\tuname = DataSource.uname_string_raw.split(',')[0]\n\n\t\tfamily, model, stepping = (None, None, None)\n\t\tentries = uname.split(' ')\n\n\t\tif 'Family' in entries and entries.index('Family') < len(entries)-1:\n\t\t\ti = entries.index('Family')\n\t\t\tfamily = int(entries[i + 1])\n\n\t\tif 'Model' in entries and entries.index('Model') < len(entries)-1:\n\t\t\ti = entries.index('Model')\n\t\t\tmodel = int(entries[i + 1])\n\n\t\tif 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:\n\t\t\ti = entries.index('Stepping')\n\t\t\tstepping = int(entries[i + 1])\n\n\t\tinfo = {\n\t\t\t'family' : family,\n\t\t\t'model' : model,\n\t\t\t'stepping' : stepping\n\t\t}\n\t\tinfo = {k: v for k, v in info.items() if v}\n\t\treturn info\n\texcept:\n\t\treturn {}\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Library version, exposed both as a tuple and a dotted string
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
# Windows registry access: Python 2 names the module _winreg, Python 3 winreg.
# On non-Windows platforms both imports fail and the name stays undefined;
# the DataSource.winreg_* methods are only reached when DataSource.is_windows.
try:
	import _winreg as winreg
except ImportError as err:
	try:
		import winreg
	except ImportError as err:
		pass
# True when running under Python 2; used to decide whether subprocess
# output needs decoding from bytes to str.
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''
	Namespace of static probes for CPU information sources.

	Each ``has_*`` method reports whether a data source (a file or an
	external program) is available, and each command method returns the
	``(returncode, output)`` pair produced by ``_run_and_get_stdout``.
	The class is used purely as a namespace and is never instantiated.
	'''
	# Basic platform facts, snapshotted once at import time
	bits = platform.architecture()[0]  # e.g. '64bit'
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()  # raw machine string, e.g. 'x86_64'
	uname_string_raw = platform.uname()[5]  # the 'processor' field of uname
	# NOTE(review): presumably cleared elsewhere when querying cpuid is
	# unsafe -- the toggle is not visible in this chunk of the file.
	can_cpuid = True
	# --- availability checks ------------------------------------------
	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')
	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0
	@staticmethod
	def has_var_run_dmesg_boot():
		# Only meaningful on Linux-flavored systems exposing dmesg.boot
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0
	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0
	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0
	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0
	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0
	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0
	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0
	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0
	@staticmethod
	def has_wmic():
		# Unlike the other has_* checks, this actually runs wmic to be sure
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0
	# --- command runners: each returns (returncode, output) ----------
	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])
	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])
	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])
	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])
	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])
	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])
	@staticmethod
	def ibm_pa_features():
		# NOTE(review): implicitly returns None when no ibm,pa-features
		# node exists; callers appear to rely on catching the resulting
		# tuple-unpack failure -- confirm before changing.
		import glob
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])
	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
	# --- Windows registry readers (require winreg; Windows only) ------
	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()
	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw
	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw
	@staticmethod
	def winreg_hz_actual():
		# "~Mhz" holds the current CPU speed in MHz as an integer
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual
	@staticmethod
	def winreg_feature_bits():
		# Bitmask of CPU features; decoded by _get_cpu_info_from_registry
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
from subprocess import Popen, PIPE
if not pipe_command:
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return p1.returncode, output
else:
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
p2 = Popen(pipe_command, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
p1.stdout.close()
output = p2.communicate()[0]
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return p2.returncode, output
def _check_arch():
	'''Raise an Exception when the current CPU architecture is one this
	library cannot handle.'''
	supported = ('X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64')
	arch, _bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
if IS_PY2 and isinstance(input, unicode):
return input.encode('utf-8')
elif isinstance(input, list):
return [_utf_to_str(element) for element in input]
elif isinstance(input, dict):
return {_utf_to_str(key): _utf_to_str(value)
for key, value in input.items()}
else:
return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''Look up the first of field_names in raw_string via _get_field_actual,
	optionally converting the value with convert_to.  Falls back to
	default_value when the field is missing or the conversion fails.'''
	value = _get_field_actual(cant_be_number, raw_string, field_names)

	# _get_field_actual yields either None or a non-empty string
	if value is None:
		return default_value

	if convert_to:
		try:
			value = convert_to(value)
		except:
			value = default_value

	return value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	# Convert a friendly frequency string such as "2.80GHz" into the
	# (whole_hz, fraction) integer pair used internally.
	# Returns (0, 0) on any failure.
	try:
		text = hz_string.strip().lower()

		# Pick the power-of-ten scale from the unit suffix
		exponent = None
		if text.endswith('ghz'):
			exponent = 9
		elif text.endswith('mhz'):
			exponent = 6
		elif text.endswith('hz'):
			exponent = 0

		# Keep only the numeric part and force a decimal point
		digits = "".join(c for c in text if c.isdigit() or c == '.').strip()
		if '.' not in digits:
			digits += '.0'

		return _hz_short_to_full(digits, exponent)
	except:
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''
	Renders a decimal-string frequency plus power-of-ten scale as a
	human friendly string, e.g. ('2.8', 9) -> '2.8000 GHz'.

	Returns '0.0000 Hz' on any failure.
	'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)

		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')

		# Get the Hz symbol and scale from the digit count before the dot
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3

		# Get the Hz with the dot at the new scaled point
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])

		# Format the ticks to have 4 numbers after the decimal
		# and remove any superfluous zeroes.
		result = '{0:.4f} {1}'.format(float(result), symbol)
		result = result.rstrip('0')
		return result
	except Exception:
		# Narrowed from a bare except; malformed input yields the zero string.
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	# Pull the advertised frequency out of a CPU brand string such as
	# "Intel(R) Core(TM) i7 CPU @ 2.80GHz". Returns (decimal_string, scale)
	# where scale is a power of ten; ('0.0', 0) when no frequency appears.
	lowered = cpu_string.lower()

	# No "hz" anywhere means no frequency to extract
	if 'hz' not in lowered:
		return ('0.0', 0)

	scale = 0
	if lowered.endswith('mhz'):
		scale = 6
	elif lowered.endswith('ghz'):
		scale = 9

	# The frequency is either after an "@" or is the last token
	if '@' in lowered:
		hz = lowered.split('@')[1]
	else:
		hz = lowered.rsplit(None, 1)[1]

	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	return (_to_decimal_string(hz), scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''
	Parses an extended CPU brand line (BSD dmesg style) that carries extra
	"name: value" fields inside parentheses.

	Returns (hz, scale, brand, vendor_id, stepping, model, family); any
	field that cannot be found is None (hz is '0.0' when missing).
	'''
	import re

	# Find all the strings inside brackets ()
	# (raw strings: '\(' in a normal string is an invalid escape sequence
	# and a SyntaxError in future Python versions)
	starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
	ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]

	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				# int(value, 16) accepts an optional "0x" prefix natively.
				# The previous value.lstrip('0x') stripped ALL leading '0'
				# and 'x' characters and crashed on values like "0x0".
				elif name == 'stepping':
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)

	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True

	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)

	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break

	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Extracts CPU information from a BSD-style dmesg boot log.

	Picks the "CPU:" line with the most parsable fields, then scans for
	" Origin=" and the "Features" categories. Returns a dict with only the
	truthy fields; returns {} when nothing usable is found.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]

		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string

		# If no CPU string was found, return {}
		if not best_string:
			return {}

		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

		# Origin
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]

				if name == 'origin':
					vendor_id = value.strip('"')
				# NOTE(review): lstrip('0x') strips ALL leading '0'/'x'
				# characters, so a value of "0x0" becomes '' and raises
				# ValueError (caught below) -- consider int(value, 16).
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)

		# Features: each category line looks like "Features=0x...<flag,flag,...>"
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])

		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			# The dmesg clock value is in MHz
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Only truthy fields are kept in the result
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

		return {k: v for k, v in info.items() if v}
	except:
		#raise
		pass

	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	# Decide whether SELinux is actively enforcing, based on sestatus output.

	# SELinux can only be enforcing if the sestatus tool is present at all
	if not DataSource.has_sestatus():
		return False

	# A failed sestatus run tells us nothing, so assume not enforcing
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False

	# An explicit "current mode:" line is authoritative
	for raw_line in output.splitlines():
		entry = raw_line.strip().lower()
		if entry.startswith("current mode:"):
			return entry.endswith("enforcing")

	# Otherwise infer from whether heap/memory execution is allowed
	exec_heap = False
	exec_mem = False
	for raw_line in output.splitlines():
		entry = raw_line.strip().lower()
		if entry.startswith("allow_execheap") and entry.endswith("on"):
			exec_heap = True
		elif entry.startswith("allow_execmem") and entry.endswith("on"):
			exec_mem = True

	# Enforcing if either executable-heap or executable-memory is denied
	return (not exec_heap or not exec_mem)
class CPUID(object):
	'''
	Queries the x86 CPUID instruction by writing raw machine code into an
	executable memory page at runtime and calling it through ctypes.

	Only meaningful on X86_32/X86_64. Mapping self-generated code is
	blocked when SELinux is enforcing, hence is_selinux_enforcing.
	'''
	def __init__(self):
		# Windows process handle, created lazily in _asm_func
		self.prochandle = None

		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()

	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Copies byte_code into an executable memory segment and returns
		(callable, address). The caller is responsible for freeing the
		segment (see _run_asm). The mutable default [] is never mutated
		here, so it is harmless.
		'''
		byte_code = bytes.join(b'', byte_code)
		address = None

		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000
			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")

			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")

			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")

			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")

			# Mark the memory segment as writeable only
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")

			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")

			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")

		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address

	def _run_asm(self, *byte_code):
		'''
		Executes the given byte code fragments as one zero-argument
		function returning a uint32, then frees the memory segment.
		'''
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)

		# Call the byte code like a function
		retval = func()

		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))

		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")

			ctypes.pythonapi.free(ctypes.c_void_p(address))

		return retval

	# FIXME: We should not have to use different instructions to
	# set eax to 0 or 1, on 32bit and 64bit machines.
	def _zero_eax(self):
		# Byte code that clears eax before a cpuid call
		return (
			b"\x31\xC0"         # xor eax,eax
		)

	def _zero_ecx(self):
		# Byte code that clears ecx before a cpuid call
		return (
			b"\x31\xC9"         # xor ecx,ecx
		)

	def _one_eax(self):
		# Byte code that sets eax to 1 before a cpuid call
		return (
			b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
		)

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		'''
		Returns the 12-character CPU vendor ID string (e.g. from
		CPUID leaf 0, registers EBX, EDX, ECX in that order).
		'''
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2"         # cpuid
			b"\x89\xD8"         # mov ax,bx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# Each 4bits is a ascii letter in the name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)

		return vendor_id

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		'''
		Returns the stepping/model/family fields decoded from the EAX
		register of CPUID leaf 1.
		'''
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\xC3"             # ret
		)

		# Get the CPU info
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits

		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		'''Returns the highest extended CPUID leaf this CPU supports.'''
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2"             # cpuid
			b"\xC3"                 # ret
		)

		return max_extension_support

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		'''
		Returns a sorted list of CPU feature flag names, decoded from
		CPUID leaf 1, leaf 7 and extended leaf 0x80000001 (the latter
		two only when supported).
		'''
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),

			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),

				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),

				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		flags.sort()
		return flags

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		'''
		Returns the 48-byte processor brand string from extended leaves
		0x80000002-0x80000004, or "" when those leaves are unsupported.
		'''
		processor_brand = ""

		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC0"   # mov ax,ax
					b"\xC3"       # ret
				)

				# EBX
				ebx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD8"   # mov ax,bx
					b"\xC3"       # ret
				)

				# ECX
				ecx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC8"   # mov ax,cx
					b"\xC3"       # ret
				)

				# EDX
				edx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD0"   # mov ax,dx
					b"\xC3"       # ret
				)

				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)

		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()

		return processor_brand

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
	def get_cache(self, max_extension_support):
		'''
		Returns the L2 cache size/line-size/associativity fields from
		extended leaf 0x80000006, or {} when unsupported.
		'''
		cache_info = {}

		# Just return if the cache feature is not supported
		if max_extension_support < 0x80000006:
			return cache_info

		# ECX
		ecx = self._run_asm(
			b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
			b"\x0f\xa2"              # cpuid
			b"\x89\xC8"              # mov ax,cx
			b"\xC3"                   # ret
		)

		cache_info = {
			'size_kb' : ecx & 0xFF,
			'line_size_b' : (ecx >> 12) & 0xF,
			'associativity' : (ecx >> 16) & 0xFFFF
		}

		return cache_info

	def get_ticks(self):
		'''
		Returns the current 64-bit time-stamp counter value (rdtsc),
		using byte code specific to the interpreter's bitness.
		NOTE(review): the mnemonic comments look like 16-bit renderings
		of the 32/64-bit instructions actually encoded -- verify against
		a disassembler before editing the byte code.
		'''
		retval = None

		if DataSource.bits == '32bit':
			# Works on x86_32
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)

			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)

			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)
			retval = get_ticks_x86_64()

		return retval

	def get_raw_hz(self):
		'''
		Estimates the CPU frequency in Hz by counting time-stamp-counter
		ticks over a one second sleep.
		'''
		import time

		start = self.get_ticks()

		time.sleep(1)

		end = self.get_ticks()

		ticks = (end - start)

		return ticks
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.
	'''

	# Silence this worker process entirely
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')

	# Only X86 CPUs carry a CPUID instruction
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return

	# Running self-generated machine code is blocked under enforcing SELinux
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return

	# Query the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	cpuid_fields = cpuid.get_info()

	processor_brand = cpuid.get_processor_brand(max_extension_support)

	# Measured ticks-per-second frequency
	hz_actual = _to_decimal_string(cpuid.get_raw_hz())

	# Advertised frequency from the brand string
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

	info = {
	'vendor_id_raw' : cpuid.get_vendor_id(),
	'hardware_raw' : '',
	'brand_raw' : processor_brand,

	'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
	'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
	'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
	'hz_actual' : _hz_short_to_full(hz_actual, 0),

	'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
	'l2_cache_line_size' : cache_info['line_size_b'],
	'l2_cache_associativity' : hex(cache_info['associativity']),

	'stepping' : cpuid_fields['stepping'],
	'model' : cpuid_fields['model'],
	'family' : cpuid_fields['family'],
	'processor_type' : cpuid_fields['processor_type'],
	'extended_model' : cpuid_fields['extended_model'],
	'extended_family' : cpuid_fields['extended_family'],
	'flags' : cpuid.get_flags(max_extension_support)
	}

	# Hand only the truthy fields back to the parent process
	info = {key: value for key, value in info.items() if value}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue

	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		return {}

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		return {}

	try:
		# Start running the function in a subprocess
		queue = Queue()
		p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		p.start()

		# Wait for the process to end, while it is still alive
		# NOTE(review): join(0) in a loop busy-spins; a plain p.join() would
		# block without burning CPU -- kept as-is to preserve behavior.
		while p.is_alive():
			p.join(0)

		# Return {} if it failed
		if p.exitcode != 0:
			return {}

		# Return the result, only if there is something to read
		if not queue.empty():
			output = queue.get()
			return _b64_to_obj(output)
	except Exception:
		# Narrowed from a bare except so Ctrl+C is not swallowed here.
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		# (the "cpu MHz" field is always in MHz, hence scale 6 below)
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
		'hardware_raw' : hardware,
		'brand_raw' : processor_brand,

		'l3_cache_size' : _to_friendly_bytes(cache_size),
		'flags' : flags,
		'vendor_id_raw' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		# NOTE(review): the guard uses `scale` (possibly 9, from the brand
		# string) while the values below hardcode 6 because hz_actual came
		# from the MHz-denominated fields above -- confirm this mismatch
		# is intentional before changing either.
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		hz_brand, scale = '0.0', 0

		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		# Bail out explicitly instead of asserting: an assert is stripped
		# under "python -O" and would let the bad index slip through.
		if i == -1:
			return {}
		hz_brand = hz_brand[0 : i+2].strip().lower()

		if hz_brand.endswith('mhz'):
			scale = 6
		elif hz_brand.endswith('ghz'):
			scale = 9
		hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
		hz_brand = _to_decimal_string(hz_brand)

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_advertised' : _hz_short_to_full(hz_brand, scale),
			'hz_actual' : _hz_short_to_full(hz_brand, scale),
		}

		info = {k: v for k, v in info.items() if v}
		return info
	except Exception:
		# Narrowed from a bare except so system-exiting exceptions propagate.
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''
	try:
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# Frequency: prefer "CPU max MHz" over the current "CPU MHz"
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			# lscpu reports MHz, so the scale is always 10^6
			scale = 6
			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		# Identification fields; numeric ones are only kept when they
		# are purely digits
		vendor_id = _get_field(False, output, None, None, 'Vendor ID')
		if vendor_id:
			info['vendor_id_raw'] = vendor_id

		brand = _get_field(False, output, None, None, 'Model name')
		if brand:
			info['brand_raw'] = brand

		family = _get_field(False, output, None, None, 'CPU family')
		if family and family.isdigit():
			info['family'] = int(family)

		stepping = _get_field(False, output, None, None, 'Stepping')
		if stepping and stepping.isdigit():
			info['stepping'] = int(stepping)

		model = _get_field(False, output, None, None, 'Model')
		if model and model.isdigit():
			info['model'] = int(model)

		# Cache sizes, normalized to friendly strings like "32 KB"
		l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
		if l1_data_cache_size:
			info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)

		l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
		if l1_instruction_cache_size:
			info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)

		l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
		if l2_cache_size:
			info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)

		l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
		if l3_cache_size:
			info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''

	# Bail out early when the dmesg binary is not on the PATH
	if not DataSource.has_dmesg():
		return {}

	# A failed or empty run is treated the same as "no info"
	returncode, output = DataSource.dmesg_a()
	if returncode != 0 or output is None:
		return {}

	return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.

	The property is a packed bit field; the first two 32 bit words are
	decoded into named POWER CPU feature flags per the LoPAPR table above.
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Filter out invalid characters from output
		# (keeps only the hex digits, dropping lsprop's formatting)
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Get data converted to Uint32 chunks
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags
		# NOTE(review): _is_bit_set is defined elsewhere in this file; the
		# numbering below assumes bit 0 is the most significant bit of each
		# word (big endian numbering, as in the LoPAPR spec) -- confirm there.
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),

			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),

			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		# Drop empty fields so callers only see populated values
		info = {k: v for k, v in info.items() if v}

		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''

	# Nothing to do when the boot log file is absent
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	# A failed or empty read is treated the same as "no info"
	returncode, output = DataSource.cat_var_run_dmesg_boot()
	if returncode != 0 or output is None:
		return {}

	return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.

	NOTE(review): the machdep.cpu.* keys queried here suggest this targets
	macOS style sysctl output -- confirm against DataSource.
	'''
	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Flags are spread over three sysctl keys; merge and sort them
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# Advertised speed comes from the brand string, actual from hw.cpufrequency
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),
			'l2_cache_size' : _to_friendly_bytes(cache_size),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields so callers only see populated values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''

	# Merge both sysinfo parsers; v2 values win where both report a key.
	merged = {}
	merged.update(_get_cpu_info_from_sysinfo_v1())
	merged.update(_get_cpu_info_from_sysinfo_v2())
	return merged
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	NOTE(review): the 'CPU #0: "..."' layout parsed here looks like Haiku's
	sysinfo output -- confirm before relying on this elsewhere.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		# Brand/stepping/model/family are sliced straight out of the text;
		# any format mismatch raises and is swallowed by the except below.
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags: every double-tab indented line holds feature names
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),
			'l2_cache_size' : _to_friendly_bytes(cache_size),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields so callers only see populated values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Differs from v1 in that stepping/model/family come from the
	'Signature:' line and flags come from three labelled subsections.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags
		# Collects the indented lines that follow a section header, stopping
		# at the first non-indented line.
		def get_subsection_flags(output):
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith('                ') and not line.startswith('		'): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)

			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# The first non-empty line ends in "running at NNNNmhz/ghz"
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),
			'l2_cache_size' : _to_friendly_bytes(cache_size),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields so callers only see populated values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Break the "key=value" lines into a dict, keeping only set values
		value = output.split("\n")
		value = [s.rstrip().split('=') for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz (parsed out of the brand string)
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz (CurrentClockSpeed is reported in MHz)
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes (wmic reports them as KB counts without a unit)
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Get family, model, and stepping from the description text,
		# e.g. "... Family 6 Model 158 Stepping 10"
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Drop empty fields so callers only see populated values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz (the registry stores it in MHz)
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz (parsed out of the brand string)
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features from the FeatureSet bitmask
		feature_bits = DataSource.winreg_feature_bits()

		def is_set(bit):
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 6),
			'flags' : flags
		}

		# Drop empty fields so callers only see populated values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}

		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}

		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}

		# Various fields, sliced out of kstat's tab-prefixed "name value" lines
		vendor_id = kstat.split('	vendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('	brand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('	stepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('	model ')[1].split('\n')[0].strip())
		family = int(kstat.split('	family ')[1].split('\n')[0].strip())

		# Flags: the last line of isainfo -vb output lists the features
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# Convert from GHz/MHz string to Hz (clock_MHz is in MHz)
		scale = 6
		hz_advertised = kstat.split('	clock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)

		# Convert from GHz/MHz string to Hz (current_clock_Hz is already Hz)
		hz_actual = kstat.split('	current_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		# Drop empty fields so callers only see populated values
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns family/model/stepping parsed out of platform.uname(),
	or {} when they cannot be found.
	'''
	try:
		uname = DataSource.uname_string_raw.split(',')[0]

		# Returns the integer that follows a label such as 'Family' in the
		# space-separated uname words, or None when the label is absent.
		def parse_tagged_int(words, label):
			if label in words and words.index(label) < len(words) - 1:
				return int(words[words.index(label) + 1])
			return None

		entries = uname.split(' ')
		info = {
			'family' : parse_tagged_int(entries, 'Family'),
			'model' : parse_tagged_int(entries, 'Model'),
			'stepping' : parse_tagged_int(entries, 'Stepping')
		}

		# Drop the entries that were not found
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''

	import json

	output = None

	# If running under pyinstaller, run normally
	if getattr(sys, 'frozen', False):
		info = _get_cpu_info_internal()
		output = json.dumps(info)
		output = "{0}".format(output)
	# if not running under pyinstaller, run in another process.
	# This is done because multiprocesing has a design flaw that
	# causes non main programs to run multiple times on Windows.
	else:
		from subprocess import Popen, PIPE

		# Re-run this very file with --json and capture its stdout
		command = [sys.executable, __file__, '--json']
		p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
		output = p1.communicate()[0]

		# A failed child process yields an empty JSON object
		if p1.returncode != 0:
			return "{}"

		if not IS_PY2:
			output = output.decode(encoding='UTF-8')

	return output
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''

	import json

	# Fetch the info as a JSON string, then rebuild it as a dict whose
	# strings are converted by the _utf_to_str hook (non unicode on Python 2).
	raw_json = get_cpu_info_json()
	return json.loads(raw_json, object_hook = _utf_to_str)
def main():
	'''
	Command line entry point: parses arguments, gathers the CPU info and
	prints it either as JSON (--json), as the version (--version), or as a
	human readable field listing.
	'''
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()

	# Exit with an error message on unsupported architectures
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# Each field falls back to '' when a source could not provide it
		print('Python Version: {0}'.format(info.get('python_version', '')))
		print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
		print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
		print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
		print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
		print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
		print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
		print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
		print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
		print('Arch: {0}'.format(info.get('arch', '')))
		print('Bits: {0}'.format(info.get('bits', '')))
		print('Count: {0}'.format(info.get('count', '')))
		print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
		print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
		print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
		print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
		print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
		print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
		print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
		print('Stepping: {0}'.format(info.get('stepping', '')))
		print('Model: {0}'.format(info.get('model', '')))
		print('Family: {0}'.format(info.get('family', '')))
		print('Processor Type: {0}'.format(info.get('processor_type', '')))
		print('Extended Model: {0}'.format(info.get('extended_model', '')))
		print('Extended Family: {0}'.format(info.get('extended_family', '')))
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# When executed as a script, print the CPU info. When imported as a
# module, only verify that this architecture is supported.
if __name__ == '__main__':
	main()
else:
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
get_cpu_info_json
|
python
|
def get_cpu_info_json():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a json string
'''
import json
output = None
# If running under pyinstaller, run normally
if getattr(sys, 'frozen', False):
info = _get_cpu_info_internal()
output = json.dumps(info)
output = "{0}".format(output)
# if not running under pyinstaller, run in another process.
# This is done because multiprocesing has a design flaw that
# causes non main programs to run multiple times on Windows.
else:
from subprocess import Popen, PIPE
command = [sys.executable, __file__, '--json']
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if p1.returncode != 0:
return "{}"
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return output
|
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a json string
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L2275-L2306
|
[
"def _get_cpu_info_internal():\n\t'''\n\tReturns the CPU info by using the best sources of information for your OS.\n\tReturns {} if nothing is found.\n\t'''\n\n\t# Get the CPU arch and bits\n\tarch, bits = _parse_arch(DataSource.arch_string_raw)\n\n\tfriendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'\n\tfriendly_version = \"{0}.{1}.{2}.{3}.{4}\".format(*sys.version_info)\n\tPYTHON_VERSION = \"{0} ({1})\".format(friendly_version, friendly_maxsize)\n\n\tinfo = {\n\t\t'python_version' : PYTHON_VERSION,\n\t\t'cpuinfo_version' : CPUINFO_VERSION,\n\t\t'cpuinfo_version_string' : CPUINFO_VERSION_STRING,\n\t\t'arch' : arch,\n\t\t'bits' : bits,\n\t\t'count' : DataSource.cpu_count,\n\t\t'arch_string_raw' : DataSource.arch_string_raw,\n\t}\n\n\t# Try the Windows wmic\n\t_copy_new_fields(info, _get_cpu_info_from_wmic())\n\n\t# Try the Windows registry\n\t_copy_new_fields(info, _get_cpu_info_from_registry())\n\n\t# Try /proc/cpuinfo\n\t_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())\n\n\t# Try cpufreq-info\n\t_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())\n\n\t# Try LSCPU\n\t_copy_new_fields(info, _get_cpu_info_from_lscpu())\n\n\t# Try sysctl\n\t_copy_new_fields(info, _get_cpu_info_from_sysctl())\n\n\t# Try kstat\n\t_copy_new_fields(info, _get_cpu_info_from_kstat())\n\n\t# Try dmesg\n\t_copy_new_fields(info, _get_cpu_info_from_dmesg())\n\n\t# Try /var/run/dmesg.boot\n\t_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())\n\n\t# Try lsprop ibm,pa-features\n\t_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())\n\n\t# Try sysinfo\n\t_copy_new_fields(info, _get_cpu_info_from_sysinfo())\n\n\t# Try querying the CPU cpuid register\n\t_copy_new_fields(info, _get_cpu_info_from_cpuid())\n\n\t# Try platform.uname\n\t_copy_new_fields(info, _get_cpu_info_from_platform_uname())\n\n\treturn info\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
	'''
	Central access point for all OS level data used by the parsers.

	The has_*() probes report whether a tool or file is available, and the
	accessor methods run the tool and return a (returncode, stdout) tuple
	from _run_and_get_stdout. The winreg_* methods read the Windows
	registry directly. Keeping everything here makes it easy to stub this
	class out in tests.
	'''
	# Machine facts captured once at import time
	bits = platform.architecture()[0]
	cpu_count = multiprocessing.cpu_count()
	is_windows = platform.system().lower() == 'windows'
	arch_string_raw = platform.machine()
	uname_string_raw = platform.uname()[5]
	can_cpuid = True

	@staticmethod
	def has_proc_cpuinfo():
		return os.path.exists('/proc/cpuinfo')

	@staticmethod
	def has_dmesg():
		return len(_program_paths('dmesg')) > 0

	@staticmethod
	def has_var_run_dmesg_boot():
		uname = platform.system().strip().strip('"').strip("'").strip().lower()
		return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')

	@staticmethod
	def has_cpufreq_info():
		return len(_program_paths('cpufreq-info')) > 0

	@staticmethod
	def has_sestatus():
		return len(_program_paths('sestatus')) > 0

	@staticmethod
	def has_sysctl():
		return len(_program_paths('sysctl')) > 0

	@staticmethod
	def has_isainfo():
		return len(_program_paths('isainfo')) > 0

	@staticmethod
	def has_kstat():
		return len(_program_paths('kstat')) > 0

	@staticmethod
	def has_sysinfo():
		return len(_program_paths('sysinfo')) > 0

	@staticmethod
	def has_lscpu():
		return len(_program_paths('lscpu')) > 0

	@staticmethod
	def has_ibm_pa_features():
		return len(_program_paths('lsprop')) > 0

	@staticmethod
	def has_wmic():
		# wmic is probed by actually running it, not just finding it on PATH
		returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
		return returncode == 0 and len(output) > 0

	@staticmethod
	def cat_proc_cpuinfo():
		return _run_and_get_stdout(['cat', '/proc/cpuinfo'])

	@staticmethod
	def cpufreq_info():
		return _run_and_get_stdout(['cpufreq-info'])

	@staticmethod
	def sestatus_b():
		return _run_and_get_stdout(['sestatus', '-b'])

	@staticmethod
	def dmesg_a():
		return _run_and_get_stdout(['dmesg', '-a'])

	@staticmethod
	def cat_var_run_dmesg_boot():
		return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])

	@staticmethod
	def sysctl_machdep_cpu_hw_cpufrequency():
		return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])

	@staticmethod
	def isainfo_vb():
		return _run_and_get_stdout(['isainfo', '-vb'])

	@staticmethod
	def kstat_m_cpu_info():
		return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])

	@staticmethod
	def sysinfo_cpu():
		return _run_and_get_stdout(['sysinfo', '-cpu'])

	@staticmethod
	def lscpu():
		return _run_and_get_stdout(['lscpu'])

	@staticmethod
	def ibm_pa_features():
		import glob

		# NOTE: implicitly returns None when no cpu node has the property
		ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
		if ibm_features:
			return _run_and_get_stdout(['lsprop', ibm_features[0]])

	@staticmethod
	def wmic_cpu():
		return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

	@staticmethod
	def winreg_processor_brand():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
		winreg.CloseKey(key)
		return processor_brand.strip()

	@staticmethod
	def winreg_vendor_id_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
		winreg.CloseKey(key)
		return vendor_id_raw

	@staticmethod
	def winreg_arch_string_raw():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
		arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
		winreg.CloseKey(key)
		return arch_string_raw

	@staticmethod
	def winreg_hz_actual():
		# The registry stores the clock speed in MHz
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
		winreg.CloseKey(key)
		hz_actual = _to_decimal_string(hz_actual)
		return hz_actual

	@staticmethod
	def winreg_feature_bits():
		key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
		feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
		winreg.CloseKey(key)
		return feature_bits
def _program_paths(program_name):
	'''
	Returns every executable on the PATH that matches program_name,
	including Windows PATHEXT variants (program_name + .EXE etc).
	'''

	paths = []
	# Materialize PATHEXT into a list. The original used filter(None, ...),
	# which on Python 3 is a lazy iterator: it was exhausted after the
	# first PATH directory, so extension variants were never checked in
	# the remaining directories.
	exts = [e for e in os.environ.get('PATHEXT', '').split(os.pathsep) if e]
	for p in os.environ['PATH'].split(os.pathsep):
		p = os.path.join(p, program_name)
		if os.access(p, os.X_OK):
			paths.append(p)
		for e in exts:
			pext = p + e
			if os.access(pext, os.X_OK):
				paths.append(pext)
	return paths
def _run_and_get_stdout(command, pipe_command=None):
	'''
	Runs command, optionally piping its stdout into pipe_command, and
	returns a (returncode, stdout_text) tuple for the last process.
	'''

	from subprocess import Popen, PIPE

	first = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
	if pipe_command:
		last = Popen(pipe_command, stdin=first.stdout, stdout=PIPE, stderr=PIPE)
		# Closing our handle lets the first process see a broken pipe
		# if the second one exits early
		first.stdout.close()
	else:
		last = first

	output = last.communicate()[0]
	if not IS_PY2:
		output = output.decode(encoding='UTF-8')
	return last.returncode, output
# Make sure we are running on a supported system
def _check_arch():
	'''
	Raises an Exception when the detected CPU architecture is one that
	py-cpuinfo cannot handle.
	'''
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	supported = ('X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64')
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
	'''
	Serializes thing with pickle and returns it as a base64 text string.
	'''

	import pickle
	import base64

	pickled = pickle.dumps(thing)
	return base64.b64encode(pickled).decode('utf8')
def _b64_to_obj(thing):
	'''
	Decodes a base64 string created by _obj_to_b64 back into an object.
	Returns {} when the input cannot be decoded or unpickled.

	WARNING: pickle.loads must never be fed untrusted data -- it can
	execute arbitrary code. Only use this on strings this process made.
	'''

	import pickle
	import base64

	try:
		raw = base64.b64decode(thing)
		return pickle.loads(raw)
	# Exception (not a bare except) so KeyboardInterrupt/SystemExit still
	# propagate while any decode/unpickle failure falls back to {}
	except Exception:
		return {}
def _utf_to_str(input):
	'''
	Recursively converts unicode strings inside input into UTF-8 byte
	strings on Python 2. On Python 3 strings pass through unchanged.
	Lists and dicts are rebuilt with their elements/keys/values converted.
	'''
	if IS_PY2 and isinstance(input, unicode):
		return input.encode('utf-8')
	elif isinstance(input, list):
		return [_utf_to_str(element) for element in input]
	elif isinstance(input, dict):
		return {_utf_to_str(key): _utf_to_str(value)
			for key, value in input.items()}
	else:
		return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	'''
	Look up a "name : value" field in raw_string and optionally convert it.

	Args:
		cant_be_number (bool): when True, skip values that are pure digits
		raw_string (str): multi-line text such as /proc/cpuinfo output
		convert_to (callable or None): converter applied to the raw value
		default_value: returned when the field is missing or conversion fails
		*field_names (str): candidate field names (case-insensitive)
	Returns:
		The converted value, the raw string value, or default_value.
	'''
	retval = _get_field_actual(cant_be_number, raw_string, field_names)

	# Convert the return value
	if retval and convert_to:
		try:
			retval = convert_to(retval)
		except Exception:
			# Narrowed from a bare `except:`; a failed conversion still
			# falls back to the default instead of propagating.
			retval = default_value

	# Return the default if there is no return value
	if retval is None:
		retval = default_value

	return retval
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
	'''
	Parse a human friendly frequency string like "2.8 GHz" into a full
	Hz value via _hz_short_to_full.

	Returns:
		tuple of (int, int): the Hz value, or (0, 0) on failure (including
		strings with no recognized Hz/MHz/GHz suffix).
	'''
	try:
		hz_string = hz_string.strip().lower()
		hz, scale = (None, None)

		# Map the unit suffix to a power of ten; scale stays None for
		# unrecognized suffixes, which makes _hz_short_to_full fail -> (0, 0)
		if hz_string.endswith('ghz'):
			scale = 9
		elif hz_string.endswith('mhz'):
			scale = 6
		elif hz_string.endswith('hz'):
			scale = 0

		# Keep only the numeric part
		hz = "".join(n for n in hz_string if n.isdigit() or n == '.').strip()
		if not '.' in hz:
			hz += '.0'

		hz, scale = _hz_short_to_full(hz, scale)

		return (hz, scale)
	except Exception:
		# Narrowed from a bare `except:`; bad input still yields (0, 0).
		return (0, 0)
def _hz_short_to_friendly(ticks, scale):
	'''
	Convert a decimal-string frequency and scale into a human friendly
	string, e.g. ('2.8', 9) -> '2.8000 GHz'.

	Returns:
		str: the formatted frequency, or '0.0000 Hz' on failure.
	'''
	try:
		# Get the raw Hz as a string
		left, right = _hz_short_to_full(ticks, scale)
		result = '{0}.{1}'.format(left, right)

		# Get the location of the dot, and remove said dot
		dot_index = result.index('.')
		result = result.replace('.', '')

		# Pick the Hz symbol and scale from the magnitude
		symbol = "Hz"
		scale = 0
		if dot_index > 9:
			symbol = "GHz"
			scale = 9
		elif dot_index > 6:
			symbol = "MHz"
			scale = 6
		elif dot_index > 3:
			symbol = "KHz"
			scale = 3

		# Re-insert the dot at the new scaled position
		result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])

		# Format the ticks to have 4 numbers after the decimal.
		# NOTE: the original also called result.rstrip('0') afterwards,
		# but that was always a no-op because the string ends with the
		# 'Hz' symbol; the dead statement has been removed.
		result = '{0:.4f} {1}'.format(float(result), symbol)

		return result
	except Exception:
		# Narrowed from a bare `except:`; bad input still yields a zero string.
		return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
	'''
	Pull the advertised frequency out of a CPU brand string such as
	"Intel(R) Core(TM) i7 CPU @ 2.80GHz".

	Returns:
		tuple of (str, int): decimal-string frequency and power-of-ten
		scale, or ('0.0', 0) when the string carries no frequency.
	'''
	# Bail out early when the brand string mentions no frequency at all
	if 'hz' not in cpu_string.lower():
		return ('0.0', 0)

	hz = cpu_string.lower()

	# The unit suffix determines the scale
	scale = 0
	if hz.endswith('mhz'):
		scale = 6
	elif hz.endswith('ghz'):
		scale = 9

	# The number is either after an '@' or is the last whitespace token
	if '@' in hz:
		hz = hz.split('@')[1]
	else:
		hz = hz.rsplit(None, 1)[1]

	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	hz = _to_decimal_string(hz)

	return (hz, scale)
def _parse_cpu_brand_string_dx(cpu_string):
	'''
	Parse a BSD-dmesg style CPU line into its component fields.

	Returns:
		tuple: (hz string, scale, brand, vendor_id, stepping, model,
		family) where unavailable fields are None (hz defaults to '0.0').
	'''
	import re

	# Find all the strings inside brackets ()
	starts = [m.start() for m in re.finditer('\(', cpu_string)]
	ends = [m.start() for m in re.finditer('\)', cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]

	# Find all the fields
	# NOTE(review): value.lstrip('0x') strips ANY leading '0'/'x' chars,
	# not just a literal "0x" prefix -- fine for values like "0x8" but
	# would mangle odd inputs; confirm upstream data shape.
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)

	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)

			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True

	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)

	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					# Reformat so _parse_cpu_brand_string's '@' branch applies
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break

	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
	'''
	Parse BSD style dmesg output for CPU brand, Hz, vendor, stepping,
	model, family, and feature flags.

	Args:
		output (str): raw dmesg text
	Returns:
		dict: the fields that could be parsed (falsy values dropped),
		or {} when nothing useful was found or parsing failed.
	'''
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]

		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string

		# If no CPU string was found, return {}
		if not best_string:
			return {}

		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

		# Origin
		# NOTE(review): lstrip('0x') strips any leading '0'/'x' chars,
		# not just a "0x" prefix -- same caveat as _parse_cpu_brand_string_dx.
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]

				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)

		# Features
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])

		flags = []
		for line in flag_lines:
			# Flags are the comma separated names between < and >
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

		return {k: v for k, v in info.items() if v}
	except:
		#raise
		pass

	return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
	'''
	Return True when SE Linux reports enforcing mode, or when heap/memory
	execution is disallowed (either would break the CPUID JIT). Returns
	False when sestatus is unavailable or fails.
	'''
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		return False

	# Run the sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		return False

	# Figure out if explicitly in enforcing mode
	for line in output.splitlines():
		line = line.strip().lower()
		if line.startswith("current mode:"):
			if line.endswith("enforcing"):
				return True
			else:
				return False

	# Figure out if we can execute heap and execute memory
	can_selinux_exec_heap = False
	can_selinux_exec_memory = False
	for line in output.splitlines():
		line = line.strip().lower()
		if line.startswith("allow_execheap") and line.endswith("on"):
			can_selinux_exec_heap = True
		elif line.startswith("allow_execmem") and line.endswith("on"):
			can_selinux_exec_memory = True

	# Treat a missing exec permission as effectively enforcing
	return (not can_selinux_exec_heap or not can_selinux_exec_memory)
class CPUID(object):
	'''
	Queries the x86 CPUID instruction directly by JIT-ing small blobs of
	machine code into an executable buffer and calling them via ctypes.

	WARNING: executing raw machine code can crash the interpreter;
	callers run this in a sacrificial subprocess (see
	_actual_get_cpu_info_from_cpuid). Only meaningful on X86 CPUs.
	'''
	def __init__(self):
		# Cached Windows process handle for FlushInstructionCache
		self.prochandle = None

		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing()

	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		'''
		Copy raw machine code into executable memory and wrap it in a
		ctypes function. Returns (callable, buffer address); the caller
		must free the address (see _run_asm).
		'''
		# NOTE(review): mutable default byte_code=[] is shared across calls;
		# harmless here because it is never mutated in place.
		byte_code = bytes.join(b'', byte_code)
		address = None

		if DataSource.is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000

			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")

			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")

			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")

			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())

			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			pfnvalloc = ctypes.pythonapi.valloc
			pfnvalloc.restype = ctypes.c_void_p
			address = pfnvalloc(ctypes.c_size_t(size))
			if not address:
				raise Exception("Failed to valloc")

			# Mark the memory segment as writeable only
			if not self.is_selinux_enforcing:
				WRITE = 0x2
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
					raise Exception("Failed to mprotect")

			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
				raise Exception("Failed to memmove")

			# Mark the memory segment as writeable and executable only
			if not self.is_selinux_enforcing:
				WRITE_EXECUTE = 0x2 | 0x4
				if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")

		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address

	def _run_asm(self, *byte_code):
		'''
		JIT the given byte code, call it with no arguments, free the
		buffer, and return the function's uint32 result.
		'''
		# Convert the byte code into a function that returns an int
		restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)

		# Call the byte code like a function
		retval = func()

		byte_code = bytes.join(b'', byte_code)
		size = ctypes.c_size_t(len(byte_code))

		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			# Remove the executable tag on the memory
			READ_WRITE = 0x1 | 0x2
			if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
				raise Exception("Failed to mprotect")

			ctypes.pythonapi.free(ctypes.c_void_p(address))

		return retval

	# FIXME: We should not have to use different instructions to
	# set eax to 0 or 1, on 32bit and 64bit machines.
	def _zero_eax(self):
		return (
			b"\x31\xC0"         # xor eax,eax
		)

	def _zero_ecx(self):
		return (
			b"\x31\xC9"         # xor ecx,ecx
		)

	def _one_eax(self):
		return (
			b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
		)

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		'''
		Return the 12-character CPU vendor ID string from CPUID leaf 0
		(the ASCII letters packed into EBX, EDX, ECX).
		'''
		# EBX
		ebx = self._run_asm(
			self._zero_eax(),
			b"\x0F\xA2"         # cpuid
			b"\x89\xD8"         # mov ax,bx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# EDX
		edx = self._run_asm(
			self._zero_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# Each 4bits is a ascii letter in the name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)

		return vendor_id

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		'''
		Return stepping/model/family and the extended variants decoded
		from the EAX register of CPUID leaf 1.
		'''
		# EAX
		eax = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\xC3"             # ret
		)

		# Get the CPU info
		stepping = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model = (eax >> 16) & 0xF # 4 bits
		extended_family = (eax >> 20) & 0xFF # 8 bits

		return {
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type,
			'extended_model' : extended_model,
			'extended_family' : extended_family
		}

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		'''
		Return the highest supported extended CPUID function
		(CPUID leaf 0x80000000).
		'''
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2"             # cpuid
			b"\xC3"                 # ret
		)

		return max_extension_support

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		'''
		Return a sorted list of CPU feature flag names decoded from
		CPUID leaves 1, 7, and 0x80000001 (the latter two only when
		supported per max_extension_support).
		'''
		# EDX
		edx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			self._one_eax(),
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),

			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				self._zero_ecx(),
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),

				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xD8"         # mov ax,bx
				b"\xC3"             # ret
			)

			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"         # cpuid
				b"\x89\xC8"         # mov ax,cx
				b"\xC3"             # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),

				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		flags.sort()
		return flags

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		'''
		Return the 48-byte processor brand string assembled from CPUID
		leaves 0x80000002..0x80000004, or '' when unsupported.
		'''
		processor_brand = ""

		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC0"   # mov ax,ax
					b"\xC3"       # ret
				)

				# EBX
				ebx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD8"   # mov ax,bx
					b"\xC3"       # ret
				)

				# ECX
				ecx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC8"   # mov ax,cx
					b"\xC3"       # ret
				)

				# EDX
				edx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD0"   # mov ax,dx
					b"\xC3"       # ret
				)

				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)

		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()

		return processor_brand

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
	def get_cache(self, max_extension_support):
		'''
		Return L2 cache size/line-size/associativity decoded from CPUID
		leaf 0x80000006, or {} when that leaf is unsupported.
		'''
		cache_info = {}

		# Just return if the cache feature is not supported
		if max_extension_support < 0x80000006:
			return cache_info

		# ECX
		ecx = self._run_asm(
			b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
			b"\x0f\xa2"             # cpuid
			b"\x89\xC8"             # mov ax,cx
			b"\xC3"                 # ret
		)

		cache_info = {
			'size_kb' : ecx & 0xFF,
			'line_size_b' : (ecx >> 12) & 0xF,
			'associativity' : (ecx >> 16) & 0xFFFF
		}

		return cache_info

	def get_ticks(self):
		'''
		Return the current 64-bit Time Stamp Counter value (RDTSC),
		serialized with a preceding CPUID. Returns None on unknown
		bit widths.
		'''
		retval = None

		if DataSource.bits == '32bit':
			# Works on x86_32
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32, address = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)

			high = ctypes.c_uint32(0)
			low = ctypes.c_uint32(0)

			get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
			retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
		elif DataSource.bits == '64bit':
			# Works on x86_64
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64, address = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)
			retval = get_ticks_x86_64()

		return retval

	def get_raw_hz(self):
		'''
		Estimate the CPU frequency in Hz by counting TSC ticks over a
		one second sleep. Blocks for ~1 second.
		'''
		import time

		start = self.get_ticks()

		time.sleep(1)

		end = self.get_ticks()

		ticks = (end - start)

		return ticks
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.

	Puts one base64-pickled dict onto `queue`: the gathered CPU info, or
	{} when the CPU is not X86 or SE Linux is enforcing.
	'''
	# Pipe all output to nothing (the devnull handles are intentionally
	# left open for the lifetime of this sacrificial process)
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(_obj_to_b64({}))
		return

	# Return none if SE Linux is in enforcing mode
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(_obj_to_b64({}))
		return

	# Get the cpu info from the CPUID register
	# NOTE(review): get_cache returns {} when leaf 0x80000006 is missing;
	# the cache_info[...] lookups below would then raise inside this
	# process (surfacing as a non-zero exit code) -- confirm intended.
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()

	processor_brand = cpuid.get_processor_brand(max_extension_support)

	# Get the Hz and scale
	hz_actual = cpuid.get_raw_hz()
	hz_actual = _to_decimal_string(hz_actual)

	# Get the Hz and scale
	hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

	# `info` on the right-hand side below is the leaf-1 dict from
	# get_info(); it is evaluated before `info` is rebound to the result.
	info = {
		'vendor_id_raw' : cpuid.get_vendor_id(),
		'hardware_raw' : '',
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),

		'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
		'l2_cache_line_size' : cache_info['line_size_b'],
		'l2_cache_associativity' : hex(cache_info['associativity']),

		'stepping' : info['stepping'],
		'model' : info['model'],
		'family' : info['family'],
		'processor_type' : info['processor_type'],
		'extended_model' : info['extended_model'],
		'extended_family' : info['extended_family'],
		'flags' : cpuid.get_flags(max_extension_support)
	}

	info = {k: v for k, v in info.items() if v}
	queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue

	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		return {}

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		return {}

	try:
		# Start running the function in a subprocess so a crash while
		# executing raw machine code cannot take this process down
		queue = Queue()
		p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		p.start()

		# Wait for the process to end, while it is still alive
		# NOTE(review): join(0) in a loop is a busy wait; a plain join()
		# looks equivalent, but joining before the queue is drained can
		# deadlock on large payloads -- confirm before changing.
		while p.is_alive():
			p.join(0)

		# Return {} if it failed
		if p.exitcode != 0:
			return {}

		# Return the result, only if there is something to read
		if not queue.empty():
			output = queue.get()
			return _b64_to_obj(output)
	except:
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found or cannot be parsed.
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz. /proc/cpuinfo always reports the
		# actual frequency in MHz, hence the fixed scale of 6 below.
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
			'hardware_raw' : hardware,
			'brand_raw' : processor_brand,

			'l3_cache_size' : _to_friendly_bytes(cache_size),
			'flags' : flags,
			'vendor_id_raw' : vendor_id,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		# BUGFIX: the guard previously tested _hz_short_to_full(hz_actual,
		# scale) with the brand-derived scale while the stored values used
		# the fixed MHz scale of 6; use 6 in both places so the check
		# matches what is actually stored.
		if _hz_short_to_full(hz_actual, 6) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		info = {k: v for k, v in info.items() if v}
		return info
	except Exception:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	Only frequency fields can be recovered from this tool, so the returned
	dict (when non-empty) carries just the four hz_* entries.
	'''
	try:
		hz_brand, scale = '0.0', 0
		if not DataSource.has_cpufreq_info():
			return {}
		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}
		# Grab the rest of the line after "current CPU frequency is",
		# e.g. " 2.93 GHz."
		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		assert(i != -1)
		# Keep the text up to and including the "Hz" token, lowercased
		hz_brand = hz_brand[0 : i+2].strip().lower()
		# Map the unit suffix to a power-of-ten scale (MHz -> 10^6, GHz -> 10^9)
		if hz_brand.endswith('mhz'):
			scale = 6
		elif hz_brand.endswith('ghz'):
			scale = 9
		hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
		hz_brand = _to_decimal_string(hz_brand)
		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_advertised' : _hz_short_to_full(hz_brand, scale),
			'hz_actual' : _hz_short_to_full(hz_brand, scale),
		}
		# Drop falsy entries so callers only see fields that were found
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	Each field is added to the result only when lscpu actually reported it.
	'''
	try:
		if not DataSource.has_lscpu():
			return {}
		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}
		info = {}
		# lscpu reports MHz; prefer the max frequency when present
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = _to_decimal_string(new_hz)
			scale = 6
			info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
			info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
			info['hz_actual'] = _hz_short_to_full(new_hz, scale)
		vendor_id = _get_field(False, output, None, None, 'Vendor ID')
		if vendor_id:
			info['vendor_id_raw'] = vendor_id
		brand = _get_field(False, output, None, None, 'Model name')
		if brand:
			info['brand_raw'] = brand
		# family/stepping/model are only stored when they parse as integers
		family = _get_field(False, output, None, None, 'CPU family')
		if family and family.isdigit():
			info['family'] = int(family)
		stepping = _get_field(False, output, None, None, 'Stepping')
		if stepping and stepping.isdigit():
			info['stepping'] = int(stepping)
		model = _get_field(False, output, None, None, 'Model')
		if model and model.isdigit():
			info['model'] = int(model)
		# Cache sizes, converted to human friendly byte strings
		l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
		if l1_data_cache_size:
			info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)
		l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
		if l1_instruction_cache_size:
			info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)
		l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
		if l2_cache_size:
			info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)
		l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
		if l3_cache_size:
			info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)
		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Without a dmesg binary there is nothing to read
	if not DataSource.has_dmesg():
		return {}
	# Run "dmesg -a" and bail out on any failure
	rc, raw = DataSource.dmesg_a()
	if raw is None or rc != 0:
		return {}
	return _parse_dmesg_output(raw)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
	Only the "flags" field is recovered, by decoding the first 64 bits of the
	PowerPC ibm,pa-features property (see LoPAPR, referenced above).
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}
		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}
		# Filter out invalid characters from output, keeping only hex digits
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)
		# Get data converted to Uint32 chunks (first 8 hex chars each)
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)
		# Get the CPU flags; bit positions follow the LoPAPR byte layout
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),
			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),
			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),
			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),
			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),
			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),
			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),
			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}
		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()
		info = {
			'flags' : flags
		}
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# The boot log file only exists on some Linux systems
	if not DataSource.has_var_run_dmesg_boot():
		return {}
	# Read the file and bail out on any failure
	rc, raw = DataSource.cat_var_run_dmesg_boot()
	if raw is None or rc != 0:
		return {}
	return _parse_dmesg_output(raw)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	Reads the machdep.cpu.* and hw.cpufrequency keys (BSD/macOS style).
	'''
	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			return {}
		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}
		# Various fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')
		# Flags come from three separate keys, merged and sorted together
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()
		# Advertised Hz comes from the brand string; actual Hz is already in Hz
		# (scale 0), so no unit conversion is applied to it
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)
		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),
			'l2_cache_size' : _to_friendly_bytes(cache_size),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}
		# Drop falsy entries so callers only see fields that were found
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	# Merge both sysinfo output formats; v2 values win when both report a key
	v1 = _get_cpu_info_from_sysinfo_v1()
	v2 = _get_cpu_info_from_sysinfo_v2()
	v1.update(v2)
	return v1
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	Parses the older (v1) sysinfo -cpu output format (Haiku OS).
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}
		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}
		# Various fields; vendor id and cache size are not present in this format
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())
		# Flags: lines indented with two tabs hold the feature names
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz; only the brand string has a
		# frequency, so actual is assumed equal to advertised
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised
		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),
			'l2_cache_size' : _to_friendly_bytes(cache_size),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	Parses the newer (v2) sysinfo -cpu output format (Haiku OS), which puts
	family/model/stepping on a "Signature:" line and groups flags in
	indented subsections.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}
		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}
		# Various fields; vendor id and cache size are not present in this format
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())
		# Flags: collect entries from the indented lines that follow a
		# section header, stopping at the first non-indented line
		def get_subsection_flags(output):
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith('  ') and not line.startswith('	'): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)
			return retval
		flags = get_subsection_flags(output.split('Features: ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()
		# Convert from GHz/MHz string to Hz; frequency is on the first line
		# after "running at", and actual is assumed equal to advertised
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised
		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9
		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),
			'l2_cache_size' : _to_friendly_bytes(cache_size),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	Parses the "key=value" lines produced by wmic's /format:list output.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}
		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}
		# Break the list into key values pairs, keeping only non-empty values
		value = output.split("\n")
		value = [s.rstrip().split('=') for s in value if '=' in s]
		value = {k: v for k, v in value if v}
		# Get the advertised MHz from the brand string, e.g. "... @ 2.80GHz"
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)
		# Get the actual MHz; CurrentClockSpeed is reported in MHz (scale 6)
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)
		# Get cache sizes; wmic reports them as a bare number of KB
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'
		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'
		# Get family, model, and stepping by scanning the Description string
		# for "Family N", "Model N", "Stepping N" word pairs
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')
		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])
		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])
		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])
		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	CPU flags are decoded from the CentralProcessor FeatureSet bitmask.
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}
		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()
		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()
		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)
		# Get the actual CPU Hz (registry reports MHz, hence scale 6 below)
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)
		# Get the advertised CPU Hz from the brand string
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)
		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()
		# FeatureSet is tested most-significant-bit first, so bit 0 is 0x80000000
		def is_set(bit):
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval
		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}
		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()
		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 6),
			'flags' : flags
		}
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.
	Used on Solaris-like systems: flags come from "isainfo -vb" and the
	remaining fields from "kstat -m cpu_info".
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}
		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}
		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}
		# Various fields; kstat output is tab-prefixed "name value" lines
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
		# Flags are on the last line of the isainfo output
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()
		# Convert from GHz/MHz string to Hz; clock_MHz is in MHz (scale 6)
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		# current_clock_Hz is already in Hz, so scale 0 is used below
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)
		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),
			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns family/model/stepping parsed from platform.uname()'s processor
	string (e.g. "Intel64 Family 6 Model 58 Stepping 9, GenuineIntel").
	Returns {} on any parsing failure.
	'''
	try:
		# Only the part before the first comma holds the Family/Model/Stepping words
		uname = DataSource.uname_string_raw.split(',')[0]
		family, model, stepping = (None, None, None)
		entries = uname.split(' ')
		# Each value is the word right after its label, when present
		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])
		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])
		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])
		info = {
			'family' : family,
			'model' : model,
			'stepping' : stepping
		}
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	Each source below is tried in turn; _copy_new_fields only fills in keys
	that earlier (higher priority) sources did not already provide.
	'''
	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
	# Fields that can always be determined without probing external tools
	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}
	# Try the Windows wmic
	_copy_new_fields(info, _get_cpu_info_from_wmic())
	# Try the Windows registry
	_copy_new_fields(info, _get_cpu_info_from_registry())
	# Try /proc/cpuinfo
	_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())
	# Try cpufreq-info
	_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())
	# Try LSCPU
	_copy_new_fields(info, _get_cpu_info_from_lscpu())
	# Try sysctl
	_copy_new_fields(info, _get_cpu_info_from_sysctl())
	# Try kstat
	_copy_new_fields(info, _get_cpu_info_from_kstat())
	# Try dmesg
	_copy_new_fields(info, _get_cpu_info_from_dmesg())
	# Try /var/run/dmesg.boot
	_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
	# Try lsprop ibm,pa-features
	_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())
	# Try sysinfo
	_copy_new_fields(info, _get_cpu_info_from_sysinfo())
	# Try querying the CPU cpuid register
	_copy_new_fields(info, _get_cpu_info_from_cpuid())
	# Try platform.uname
	_copy_new_fields(info, _get_cpu_info_from_platform_uname())
	return info
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''
	import json
	# Round trip through JSON so every string comes back as a non unicode str
	return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
	'''
	Command line entry point: gathers the CPU info and prints it either as
	JSON (--json), as the library version (--version), or as a human
	readable field listing (default). Exits 1 on unsupported architectures
	or when no CPU info could be found.
	'''
	from argparse import ArgumentParser
	import json
	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	args = parser.parse_args()
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)
	info = _get_cpu_info_internal()
	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)
	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# Default: print every known field, blank when a field was not found
		print('Python Version: {0}'.format(info.get('python_version', '')))
		print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
		print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
		print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
		print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
		print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
		print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
		print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
		print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
		print('Arch: {0}'.format(info.get('arch', '')))
		print('Bits: {0}'.format(info.get('bits', '')))
		print('Count: {0}'.format(info.get('count', '')))
		print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
		print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
		print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
		print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
		print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
		print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
		print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
		print('Stepping: {0}'.format(info.get('stepping', '')))
		print('Model: {0}'.format(info.get('model', '')))
		print('Family: {0}'.format(info.get('family', '')))
		print('Processor Type: {0}'.format(info.get('processor_type', '')))
		print('Extended Model: {0}'.format(info.get('extended_model', '')))
		print('Extended Family: {0}'.format(info.get('extended_family', '')))
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
	# Run as a script: print or dump the gathered CPU info
	main()
else:
	# Imported as a module: fail fast on unsupported architectures
	_check_arch()
|
workhorsy/py-cpuinfo
|
cpuinfo/cpuinfo.py
|
get_cpu_info
|
python
|
def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a dict
'''
import json
output = get_cpu_info_json()
# Convert JSON to Python with non unicode strings
output = json.loads(output, object_hook = _utf_to_str)
return output
|
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a dict
|
train
|
https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L2308-L2321
|
[
"def get_cpu_info_json():\n\t'''\n\tReturns the CPU info by using the best sources of information for your OS.\n\tReturns the result in a json string\n\t'''\n\n\timport json\n\n\toutput = None\n\n\t# If running under pyinstaller, run normally\n\tif getattr(sys, 'frozen', False):\n\t\tinfo = _get_cpu_info_internal()\n\t\toutput = json.dumps(info)\n\t\toutput = \"{0}\".format(output)\n\t# if not running under pyinstaller, run in another process.\n\t# This is done because multiprocesing has a design flaw that\n\t# causes non main programs to run multiple times on Windows.\n\telse:\n\t\tfrom subprocess import Popen, PIPE\n\n\t\tcommand = [sys.executable, __file__, '--json']\n\t\tp1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n\t\toutput = p1.communicate()[0]\n\n\t\tif p1.returncode != 0:\n\t\t\treturn \"{}\"\n\n\t\tif not IS_PY2:\n\t\t\toutput = output.decode(encoding='UTF-8')\n\n\treturn output\n"
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2019, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (5, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
IS_PY2 = sys.version_info[0] == 2
class DataSource(object):
bits = platform.architecture()[0]
cpu_count = multiprocessing.cpu_count()
is_windows = platform.system().lower() == 'windows'
arch_string_raw = platform.machine()
uname_string_raw = platform.uname()[5]
can_cpuid = True
@staticmethod
def has_proc_cpuinfo():
return os.path.exists('/proc/cpuinfo')
@staticmethod
def has_dmesg():
return len(_program_paths('dmesg')) > 0
@staticmethod
def has_var_run_dmesg_boot():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
@staticmethod
def has_cpufreq_info():
return len(_program_paths('cpufreq-info')) > 0
@staticmethod
def has_sestatus():
return len(_program_paths('sestatus')) > 0
@staticmethod
def has_sysctl():
return len(_program_paths('sysctl')) > 0
@staticmethod
def has_isainfo():
return len(_program_paths('isainfo')) > 0
@staticmethod
def has_kstat():
return len(_program_paths('kstat')) > 0
@staticmethod
def has_sysinfo():
return len(_program_paths('sysinfo')) > 0
@staticmethod
def has_lscpu():
return len(_program_paths('lscpu')) > 0
@staticmethod
def has_ibm_pa_features():
return len(_program_paths('lsprop')) > 0
@staticmethod
def has_wmic():
returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
return returncode == 0 and len(output) > 0
@staticmethod
def cat_proc_cpuinfo():
return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
@staticmethod
def cpufreq_info():
return _run_and_get_stdout(['cpufreq-info'])
@staticmethod
def sestatus_b():
return _run_and_get_stdout(['sestatus', '-b'])
@staticmethod
def dmesg_a():
return _run_and_get_stdout(['dmesg', '-a'])
@staticmethod
def cat_var_run_dmesg_boot():
return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
@staticmethod
def sysctl_machdep_cpu_hw_cpufrequency():
return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
@staticmethod
def isainfo_vb():
return _run_and_get_stdout(['isainfo', '-vb'])
@staticmethod
def kstat_m_cpu_info():
return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
@staticmethod
def sysinfo_cpu():
return _run_and_get_stdout(['sysinfo', '-cpu'])
@staticmethod
def lscpu():
return _run_and_get_stdout(['lscpu'])
@staticmethod
def ibm_pa_features():
import glob
ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
if ibm_features:
return _run_and_get_stdout(['lsprop', ibm_features[0]])
@staticmethod
def wmic_cpu():
return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
@staticmethod
def winreg_processor_brand():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
winreg.CloseKey(key)
return processor_brand.strip()
@staticmethod
def winreg_vendor_id_raw():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
vendor_id_raw = winreg.QueryValueEx(key, "VendorIdentifier")[0]
winreg.CloseKey(key)
return vendor_id_raw
@staticmethod
def winreg_arch_string_raw():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
arch_string_raw = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
winreg.CloseKey(key)
return arch_string_raw
@staticmethod
def winreg_hz_actual():
    # Read the actual CPU frequency (MHz) from the Windows registry and
    # normalize it to a decimal string.
    reg_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
    mhz = winreg.QueryValueEx(reg_key, "~Mhz")[0]
    winreg.CloseKey(reg_key)
    return _to_decimal_string(mhz)
@staticmethod
def winreg_feature_bits():
    # Read the raw CPU feature bitmask from the Windows registry.
    reg_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
    features = winreg.QueryValueEx(reg_key, "FeatureSet")[0]
    winreg.CloseKey(reg_key)
    return features
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _run_and_get_stdout(command, pipe_command=None):
    # Run *command* (optionally piping its stdout into *pipe_command*)
    # and return (returncode, stdout), with stdout decoded as UTF-8 text
    # on Python 3.
    from subprocess import Popen, PIPE

    first = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
    if pipe_command:
        last = Popen(pipe_command, stdin=first.stdout, stdout=PIPE, stderr=PIPE)
        # Let the first process receive SIGPIPE if the consumer exits early
        first.stdout.close()
    else:
        last = first

    output = last.communicate()[0]
    if not IS_PY2:
        output = output.decode(encoding='UTF-8')
    return last.returncode, output
# Make sure we are running on a supported system
def _check_arch():
    """Raise an Exception if the host CPU architecture is unsupported."""
    arch, bits = _parse_arch(DataSource.arch_string_raw)
    # Idiomatic membership test ("not in" rather than "not ... in")
    if arch not in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']:
        raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def _utf_to_str(input):
    # Recursively convert unicode strings to UTF-8 byte strings on
    # Python 2; containers are rebuilt with converted members. On
    # Python 3 everything is returned unchanged.
    if IS_PY2 and isinstance(input, unicode):
        return input.encode('utf-8')
    elif isinstance(input, list):
        return [_utf_to_str(item) for item in input]
    elif isinstance(input, dict):
        converted = {}
        for key, value in input.items():
            converted[_utf_to_str(key)] = _utf_to_str(value)
        return converted
    else:
        return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
    # Look up one of *field_names* in cpuinfo-style output, optionally
    # converting the value with *convert_to*. Falls back to
    # *default_value* when the field is missing or unconvertible.
    value = _get_field_actual(cant_be_number, raw_string, field_names)

    # Convert the return value
    if value and convert_to:
        try:
            value = convert_to(value)
        except:
            value = default_value

    # Return the default if there is no return value
    if value is None:
        value = default_value

    return value
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
def _hz_friendly_to_full(hz_string):
    # Convert a human readable frequency string such as "2.4 GHz" into
    # the internal (hz, scale) pair. Invalid input yields (0, 0).
    try:
        text = hz_string.strip().lower()

        # Determine the power-of-ten multiplier from the unit suffix
        scale = None
        if text.endswith('ghz'):
            scale = 9
        elif text.endswith('mhz'):
            scale = 6
        elif text.endswith('hz'):
            scale = 0

        # Keep only the numeric part, forcing a fractional component
        digits = "".join(ch for ch in text if ch.isdigit() or ch == '.').strip()
        if '.' not in digits:
            digits += '.0'

        return _hz_short_to_full(digits, scale)
    except:
        return (0, 0)
def _hz_short_to_friendly(ticks, scale):
    # Format a (ticks, scale) frequency as a human readable string such
    # as "2.4 GHz". Invalid input yields '0.0000 Hz'.
    try:
        # Expand to the raw Hz value as a decimal string
        whole, fraction = _hz_short_to_full(ticks, scale)
        raw = '{0}.{1}'.format(whole, fraction)

        # The length of the integer part picks the unit
        dot_index = raw.index('.')
        digits = raw.replace('.', '')

        symbol, unit_scale = "Hz", 0
        if dot_index > 9:
            symbol, unit_scale = "GHz", 9
        elif dot_index > 6:
            symbol, unit_scale = "MHz", 6
        elif dot_index > 3:
            symbol, unit_scale = "KHz", 3

        # Re-insert the dot at the unit-scaled position
        digits = '{0}.{1}'.format(digits[:-unit_scale-1], digits[-unit_scale-1:])

        # Four decimals, with superfluous trailing zeroes removed
        friendly = '{0:.4f} {1}'.format(float(digits), symbol)
        return friendly.rstrip('0')
    except:
        return '0.0000 Hz'
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_brand_string(cpu_string):
    # Pull the advertised frequency out of a CPU brand string such as
    # "Intel(R) Core(TM) i7 CPU @ 2.40GHz". Returns (hz, scale) where
    # hz is a decimal string and scale the power-of-ten multiplier.
    lowered = cpu_string.lower()

    # No frequency present at all
    if 'hz' not in lowered:
        return ('0.0', 0)

    scale = 0
    if lowered.endswith('mhz'):
        scale = 6
    elif lowered.endswith('ghz'):
        scale = 9

    # The number follows an '@' when present, else it is the last word
    if '@' in lowered:
        hz = lowered.split('@')[1]
    else:
        hz = lowered.rsplit(None, 1)[1]

    hz = hz.rstrip('mhz').rstrip('ghz').strip()
    hz = _to_decimal_string(hz)

    return (hz, scale)
def _parse_cpu_brand_string_dx(cpu_string):
    """Parse a dmesg-style CPU line with embedded "(...)" field groups.

    Returns (hz, scale, brand, vendor_id, stepping, model, family);
    fields that cannot be found stay None (or '0.0'/0 for hz/scale).
    """
    import re

    # Find all the strings inside brackets () — raw strings so the
    # regex escapes are not interpreted by Python (fixes the invalid
    # '\(' escape-sequence warning on modern Python).
    starts = [m.start() for m in re.finditer(r'\(', cpu_string)]
    ends = [m.start() for m in re.finditer(r'\)', cpu_string)]
    insides = {k: v for k, v in zip(starts, ends)}
    insides = [cpu_string[start+1 : end] for start, end in insides.items()]

    # Find all the fields
    vendor_id, stepping, model, family = (None, None, None, None)
    for inside in insides:
        for pair in inside.split(','):
            pair = [n.strip() for n in pair.split(':')]
            if len(pair) > 1:
                name, value = pair[0], pair[1]
                # int(value, 16) accepts both "0x9" and "9". The old
                # value.lstrip('0x') stripped a *character set*, so a
                # value like "0x0" became "" and crashed int().
                if name == 'origin':
                    vendor_id = value.strip('"')
                elif name == 'stepping':
                    stepping = int(value, 16)
                elif name == 'model':
                    model = int(value, 16)
                elif name in ['fam', 'family']:
                    family = int(value, 16)

    # Find the Processor Brand:
    # strip off the bracketed groups trailing the brand text
    brand = cpu_string.strip()
    is_working = True
    while is_working:
        is_working = False
        for inside in insides:
            full = "({0})".format(inside)
            if brand.endswith(full):
                brand = brand[ :-len(full)].strip()
                is_working = True

    # Find the Hz in the brand string
    hz_brand, scale = _parse_cpu_brand_string(brand)

    # Otherwise look for a Hz value inside the bracketed groups
    if hz_brand == '0.0':
        for inside in insides:
            hz = inside
            for entry in ['GHz', 'MHz', 'Hz']:
                if entry in hz:
                    hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
                    hz_brand, scale = _parse_cpu_brand_string(hz)
                    break

    return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
    """Extract CPU info fields from raw dmesg/boot-log text.

    Returns a dict with any of vendor_id_raw, brand_raw, stepping,
    model, family, flags and the hz_* fields that could be found;
    {} when nothing usable is present or parsing fails.
    """
    try:
        # Get all the dmesg lines that might contain a CPU string
        lines = output.split(' CPU0:')[1:] + \
                output.split(' CPU1:')[1:] + \
                output.split(' CPU:')[1:] + \
                output.split('\nCPU0:')[1:] + \
                output.split('\nCPU1:')[1:] + \
                output.split('\nCPU:')[1:]
        lines = [l.split('\n')[0].strip() for l in lines]

        # Convert the lines to CPU strings
        cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

        # Find the CPU string that has the most fields filled in
        best_string = None
        highest_count = 0
        for cpu_string in cpu_strings:
            count = sum([n is not None for n in cpu_string])
            if count > highest_count:
                highest_count = count
                best_string = cpu_string

        # If no CPU string was found, return {}
        if not best_string:
            return {}

        hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

        # Origin line (FreeBSD-style "Origin=... Id=... Family=..."):
        # values here override what the CPU string provided
        if ' Origin=' in output:
            fields = output[output.find(' Origin=') : ].split('\n')[0]
            fields = fields.strip().split()
            fields = [n.strip().split('=') for n in fields]
            fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

            for field in fields:
                name = list(field.keys())[0]
                value = list(field.values())[0]

                # NOTE(review): value.lstrip('0x') strips a character
                # *set*, so a value of "0x0" becomes "" and int() raises
                # (caught by the outer except) — confirm intended.
                if name == 'origin':
                    vendor_id = value.strip('"')
                elif name == 'stepping':
                    stepping = int(value.lstrip('0x'), 16)
                elif name == 'model':
                    model = int(value.lstrip('0x'), 16)
                elif name in ['fam', 'family']:
                    family = int(value.lstrip('0x'), 16)

        # Feature flags come from "<flag1,flag2,...>" groups after the
        # Features/AMD Features markers
        flag_lines = []
        for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
            if category in output:
                flag_lines.append(output.split(category)[1].split('\n')[0])

        flags = []
        for line in flag_lines:
            line = line.split('<')[1].split('>')[0].lower()
            for flag in line.split(','):
                flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

        # If advertised hz not found, use the actual hz
        if hz_advertised == '0.0':
            scale = 6
            hz_advertised = _to_decimal_string(hz_actual)

        info = {
            'vendor_id_raw' : vendor_id,
            'brand_raw' : processor_brand,
            'stepping' : stepping,
            'model' : model,
            'family' : family,
            'flags' : flags
        }

        if hz_advertised and hz_advertised != '0.0':
            info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
            info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

        if hz_advertised and hz_advertised != '0.0':
            info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
            info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

        # Drop falsy fields entirely
        return {k: v for k, v in info.items() if v}
    except:
        #raise
        pass

    return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
def _is_selinux_enforcing():
    # Determine whether SE Linux will prevent us from mapping executable
    # memory: either sestatus reports enforcing mode explicitly, or the
    # allow_execheap/allow_execmem booleans are not both on.
    # Just return if the SE Linux Status Tool is not installed
    if not DataSource.has_sestatus():
        return False

    # Run sestatus, and just return if it failed to run
    returncode, output = DataSource.sestatus_b()
    if returncode != 0:
        return False

    lines = [line.strip().lower() for line in output.splitlines()]

    # An explicit mode report wins
    for line in lines:
        if line.startswith("current mode:"):
            return line.endswith("enforcing")

    # Otherwise fall back to the exec-heap / exec-memory booleans
    can_exec_heap = False
    can_exec_memory = False
    for line in lines:
        if line.startswith("allow_execheap") and line.endswith("on"):
            can_exec_heap = True
        elif line.startswith("allow_execmem") and line.endswith("on"):
            can_exec_memory = True

    return (not can_exec_heap or not can_exec_memory)
class CPUID(object):
    """Query the x86 CPUID instruction (and RDTSC) directly.

    Small machine-code stubs are written into an executable memory
    mapping at runtime and called through ctypes. Only meaningful on
    X86 CPUs; on SE Linux in enforcing mode the executable mappings
    are refused, so callers check ``is_selinux_enforcing`` first.
    """

    def __init__(self):
        # Process handle for FlushInstructionCache on Windows (created lazily)
        self.prochandle = None

        # Figure out if SE Linux is on and in enforcing mode
        self.is_selinux_enforcing = _is_selinux_enforcing()

    def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
        # Map *byte_code* into executable memory and wrap it in a ctypes
        # function with the given signature. Returns (function, address);
        # the caller must free *address* (see _run_asm).
        # NOTE(review): the mutable default for byte_code is never
        # mutated here, so it is harmless in practice.
        byte_code = bytes.join(b'', byte_code)
        address = None

        if DataSource.is_windows:
            # Allocate a memory segment the size of the byte code, and make it executable
            size = len(byte_code)
            # Alloc at least 1 page to ensure we own all pages that we want to change protection on
            if size < 0x1000: size = 0x1000

            MEM_COMMIT = ctypes.c_ulong(0x1000)
            PAGE_READWRITE = ctypes.c_ulong(0x4)
            pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
            pfnVirtualAlloc.restype = ctypes.c_void_p
            address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
            if not address:
                raise Exception("Failed to VirtualAlloc")

            # Copy the byte code into the memory segment
            memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
            if memmove(address, byte_code, size) < 0:
                raise Exception("Failed to memmove")

            # Enable execute permissions
            PAGE_EXECUTE = ctypes.c_ulong(0x10)
            old_protect = ctypes.c_ulong(0)
            pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
            res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
            if not res:
                raise Exception("Failed VirtualProtect")

            # Flush Instruction Cache
            # First, get process Handle
            if not self.prochandle:
                pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
                pfnGetCurrentProcess.restype = ctypes.c_void_p
                self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())

            # Actually flush cache
            res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
            if not res:
                raise Exception("Failed FlushInstructionCache")
        else:
            # Allocate a memory segment the size of the byte code
            size = len(byte_code)
            pfnvalloc = ctypes.pythonapi.valloc
            pfnvalloc.restype = ctypes.c_void_p
            address = pfnvalloc(ctypes.c_size_t(size))
            if not address:
                raise Exception("Failed to valloc")

            # Mark the memory segment as writeable only
            if not self.is_selinux_enforcing:
                WRITE = 0x2
                if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
                    raise Exception("Failed to mprotect")

            # Copy the byte code into the memory segment
            if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
                raise Exception("Failed to memmove")

            # Mark the memory segment as writeable and executable only
            if not self.is_selinux_enforcing:
                WRITE_EXECUTE = 0x2 | 0x4
                if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
                    raise Exception("Failed to mprotect")

        # Cast the memory segment into a function
        functype = ctypes.CFUNCTYPE(restype, *argtypes)
        fun = functype(address)
        return fun, address

    def _run_asm(self, *byte_code):
        # Map the byte code, call it once as a zero-argument function
        # returning a 32-bit unsigned int, then free the mapping.
        # Convert the byte code into a function that returns an int
        restype = ctypes.c_uint32
        argtypes = ()
        func, address = self._asm_func(restype, argtypes, byte_code)

        # Call the byte code like a function
        retval = func()

        byte_code = bytes.join(b'', byte_code)
        size = ctypes.c_size_t(len(byte_code))

        # Free the function memory segment
        if DataSource.is_windows:
            MEM_RELEASE = ctypes.c_ulong(0x8000)
            ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
        else:
            # Remove the executable tag on the memory
            READ_WRITE = 0x1 | 0x2
            if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
                raise Exception("Failed to mprotect")

            ctypes.pythonapi.free(ctypes.c_void_p(address))

        return retval

    # FIXME: We should not have to use different instructions to
    # set eax to 0 or 1, on 32bit and 64bit machines.
    def _zero_eax(self):
        # Byte code that clears EAX (selects CPUID leaf 0)
        return (
            b"\x31\xC0"         # xor eax,eax
        )

    def _zero_ecx(self):
        # Byte code that clears ECX (selects CPUID sub-leaf 0)
        return (
            b"\x31\xC9"         # xor ecx,ecx
        )

    def _one_eax(self):
        # Byte code that sets EAX to 1 (selects CPUID leaf 1)
        return (
            b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
        )

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
    def get_vendor_id(self):
        # Return the 12-character vendor string (e.g. "GenuineIntel")
        # assembled from EBX, EDX, ECX of CPUID leaf 0.
        # EBX
        ebx = self._run_asm(
            self._zero_eax(),
            b"\x0F\xA2"     # cpuid
            b"\x89\xD8"     # mov ax,bx
            b"\xC3"         # ret
        )

        # ECX
        ecx = self._run_asm(
            self._zero_eax(),
            b"\x0f\xa2"     # cpuid
            b"\x89\xC8"     # mov ax,cx
            b"\xC3"         # ret
        )

        # EDX
        edx = self._run_asm(
            self._zero_eax(),
            b"\x0f\xa2"     # cpuid
            b"\x89\xD0"     # mov ax,dx
            b"\xC3"         # ret
        )

        # Each 4bits is a ascii letter in the name
        vendor_id = []
        for reg in [ebx, edx, ecx]:
            for n in [0, 8, 16, 24]:
                vendor_id.append(chr((reg >> n) & 0xFF))
        vendor_id = ''.join(vendor_id)

        return vendor_id

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
    def get_info(self):
        # Decode stepping/model/family fields from EAX of CPUID leaf 1.
        # EAX
        eax = self._run_asm(
            self._one_eax(),
            b"\x0f\xa2"     # cpuid
            b"\xC3"         # ret
        )

        # Get the CPU info
        stepping = (eax >> 0) & 0xF # 4 bits
        model = (eax >> 4) & 0xF # 4 bits
        family = (eax >> 8) & 0xF # 4 bits
        processor_type = (eax >> 12) & 0x3 # 2 bits
        extended_model = (eax >> 16) & 0xF # 4 bits
        extended_family = (eax >> 20) & 0xFF # 8 bits

        return {
            'stepping' : stepping,
            'model' : model,
            'family' : family,
            'processor_type' : processor_type,
            'extended_model' : extended_model,
            'extended_family' : extended_family
        }

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
    def get_max_extension_support(self):
        # Return the highest supported extended CPUID leaf (0x8000000N).
        # Check for extension support
        max_extension_support = self._run_asm(
            b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
            b"\x0f\xa2"             # cpuid
            b"\xC3"                 # ret
        )

        return max_extension_support

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
    def get_flags(self, max_extension_support):
        # Return a sorted list of feature-flag names. Leaf 1 flags are
        # always read; leaf 7 and extended leaf 0x80000001 flags are
        # added when max_extension_support says they exist.
        # EDX
        edx = self._run_asm(
            self._one_eax(),
            b"\x0f\xa2"     # cpuid
            b"\x89\xD0"     # mov ax,dx
            b"\xC3"         # ret
        )

        # ECX
        ecx = self._run_asm(
            self._one_eax(),
            b"\x0f\xa2"     # cpuid
            b"\x89\xC8"     # mov ax,cx
            b"\xC3"         # ret
        )

        # Get the CPU flags
        flags = {
            'fpu' : _is_bit_set(edx, 0),
            'vme' : _is_bit_set(edx, 1),
            'de' : _is_bit_set(edx, 2),
            'pse' : _is_bit_set(edx, 3),
            'tsc' : _is_bit_set(edx, 4),
            'msr' : _is_bit_set(edx, 5),
            'pae' : _is_bit_set(edx, 6),
            'mce' : _is_bit_set(edx, 7),
            'cx8' : _is_bit_set(edx, 8),
            'apic' : _is_bit_set(edx, 9),
            #'reserved1' : _is_bit_set(edx, 10),
            'sep' : _is_bit_set(edx, 11),
            'mtrr' : _is_bit_set(edx, 12),
            'pge' : _is_bit_set(edx, 13),
            'mca' : _is_bit_set(edx, 14),
            'cmov' : _is_bit_set(edx, 15),
            'pat' : _is_bit_set(edx, 16),
            'pse36' : _is_bit_set(edx, 17),
            'pn' : _is_bit_set(edx, 18),
            'clflush' : _is_bit_set(edx, 19),
            #'reserved2' : _is_bit_set(edx, 20),
            'dts' : _is_bit_set(edx, 21),
            'acpi' : _is_bit_set(edx, 22),
            'mmx' : _is_bit_set(edx, 23),
            'fxsr' : _is_bit_set(edx, 24),
            'sse' : _is_bit_set(edx, 25),
            'sse2' : _is_bit_set(edx, 26),
            'ss' : _is_bit_set(edx, 27),
            'ht' : _is_bit_set(edx, 28),
            'tm' : _is_bit_set(edx, 29),
            'ia64' : _is_bit_set(edx, 30),
            'pbe' : _is_bit_set(edx, 31),

            'pni' : _is_bit_set(ecx, 0),
            'pclmulqdq' : _is_bit_set(ecx, 1),
            'dtes64' : _is_bit_set(ecx, 2),
            'monitor' : _is_bit_set(ecx, 3),
            'ds_cpl' : _is_bit_set(ecx, 4),
            'vmx' : _is_bit_set(ecx, 5),
            'smx' : _is_bit_set(ecx, 6),
            'est' : _is_bit_set(ecx, 7),
            'tm2' : _is_bit_set(ecx, 8),
            'ssse3' : _is_bit_set(ecx, 9),
            'cid' : _is_bit_set(ecx, 10),
            #'reserved3' : _is_bit_set(ecx, 11),
            'fma' : _is_bit_set(ecx, 12),
            'cx16' : _is_bit_set(ecx, 13),
            'xtpr' : _is_bit_set(ecx, 14),
            'pdcm' : _is_bit_set(ecx, 15),
            #'reserved4' : _is_bit_set(ecx, 16),
            'pcid' : _is_bit_set(ecx, 17),
            'dca' : _is_bit_set(ecx, 18),
            'sse4_1' : _is_bit_set(ecx, 19),
            'sse4_2' : _is_bit_set(ecx, 20),
            'x2apic' : _is_bit_set(ecx, 21),
            'movbe' : _is_bit_set(ecx, 22),
            'popcnt' : _is_bit_set(ecx, 23),
            'tscdeadline' : _is_bit_set(ecx, 24),
            'aes' : _is_bit_set(ecx, 25),
            'xsave' : _is_bit_set(ecx, 26),
            'osxsave' : _is_bit_set(ecx, 27),
            'avx' : _is_bit_set(ecx, 28),
            'f16c' : _is_bit_set(ecx, 29),
            'rdrnd' : _is_bit_set(ecx, 30),
            'hypervisor' : _is_bit_set(ecx, 31)
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]

        # http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
        if max_extension_support >= 7:
            # EBX
            ebx = self._run_asm(
                self._zero_ecx(),
                b"\xB8\x07\x00\x00\x00" # mov eax,7
                b"\x0f\xa2"         # cpuid
                b"\x89\xD8"         # mov ax,bx
                b"\xC3"             # ret
            )

            # ECX
            ecx = self._run_asm(
                self._zero_ecx(),
                b"\xB8\x07\x00\x00\x00" # mov eax,7
                b"\x0f\xa2"         # cpuid
                b"\x89\xC8"         # mov ax,cx
                b"\xC3"             # ret
            )

            # Get the extended CPU flags
            extended_flags = {
                #'fsgsbase' : _is_bit_set(ebx, 0),
                #'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
                'sgx' : _is_bit_set(ebx, 2),
                'bmi1' : _is_bit_set(ebx, 3),
                'hle' : _is_bit_set(ebx, 4),
                'avx2' : _is_bit_set(ebx, 5),
                #'reserved' : _is_bit_set(ebx, 6),
                'smep' : _is_bit_set(ebx, 7),
                'bmi2' : _is_bit_set(ebx, 8),
                'erms' : _is_bit_set(ebx, 9),
                'invpcid' : _is_bit_set(ebx, 10),
                'rtm' : _is_bit_set(ebx, 11),
                'pqm' : _is_bit_set(ebx, 12),
                #'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
                'mpx' : _is_bit_set(ebx, 14),
                'pqe' : _is_bit_set(ebx, 15),
                'avx512f' : _is_bit_set(ebx, 16),
                'avx512dq' : _is_bit_set(ebx, 17),
                'rdseed' : _is_bit_set(ebx, 18),
                'adx' : _is_bit_set(ebx, 19),
                'smap' : _is_bit_set(ebx, 20),
                'avx512ifma' : _is_bit_set(ebx, 21),
                'pcommit' : _is_bit_set(ebx, 22),
                'clflushopt' : _is_bit_set(ebx, 23),
                'clwb' : _is_bit_set(ebx, 24),
                'intel_pt' : _is_bit_set(ebx, 25),
                'avx512pf' : _is_bit_set(ebx, 26),
                'avx512er' : _is_bit_set(ebx, 27),
                'avx512cd' : _is_bit_set(ebx, 28),
                'sha' : _is_bit_set(ebx, 29),
                'avx512bw' : _is_bit_set(ebx, 30),
                'avx512vl' : _is_bit_set(ebx, 31),

                'prefetchwt1' : _is_bit_set(ecx, 0),
                'avx512vbmi' : _is_bit_set(ecx, 1),
                'umip' : _is_bit_set(ecx, 2),
                'pku' : _is_bit_set(ecx, 3),
                'ospke' : _is_bit_set(ecx, 4),
                #'reserved' : _is_bit_set(ecx, 5),
                'avx512vbmi2' : _is_bit_set(ecx, 6),
                #'reserved' : _is_bit_set(ecx, 7),
                'gfni' : _is_bit_set(ecx, 8),
                'vaes' : _is_bit_set(ecx, 9),
                'vpclmulqdq' : _is_bit_set(ecx, 10),
                'avx512vnni' : _is_bit_set(ecx, 11),
                'avx512bitalg' : _is_bit_set(ecx, 12),
                #'reserved' : _is_bit_set(ecx, 13),
                'avx512vpopcntdq' : _is_bit_set(ecx, 14),
                #'reserved' : _is_bit_set(ecx, 15),
                #'reserved' : _is_bit_set(ecx, 16),
                #'mpx0' : _is_bit_set(ecx, 17),
                #'mpx1' : _is_bit_set(ecx, 18),
                #'mpx2' : _is_bit_set(ecx, 19),
                #'mpx3' : _is_bit_set(ecx, 20),
                #'mpx4' : _is_bit_set(ecx, 21),
                'rdpid' : _is_bit_set(ecx, 22),
                #'reserved' : _is_bit_set(ecx, 23),
                #'reserved' : _is_bit_set(ecx, 24),
                #'reserved' : _is_bit_set(ecx, 25),
                #'reserved' : _is_bit_set(ecx, 26),
                #'reserved' : _is_bit_set(ecx, 27),
                #'reserved' : _is_bit_set(ecx, 28),
                #'reserved' : _is_bit_set(ecx, 29),
                'sgx_lc' : _is_bit_set(ecx, 30),
                #'reserved' : _is_bit_set(ecx, 31)
            }

            # Get a list of only the flags that are true
            extended_flags = [k for k, v in extended_flags.items() if v]
            flags += extended_flags

        # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
        if max_extension_support >= 0x80000001:
            # EBX
            ebx = self._run_asm(
                b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
                b"\x0f\xa2"         # cpuid
                b"\x89\xD8"         # mov ax,bx
                b"\xC3"             # ret
            )

            # ECX
            ecx = self._run_asm(
                b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
                b"\x0f\xa2"         # cpuid
                b"\x89\xC8"         # mov ax,cx
                b"\xC3"             # ret
            )

            # Get the extended CPU flags
            extended_flags = {
                'fpu' : _is_bit_set(ebx, 0),
                'vme' : _is_bit_set(ebx, 1),
                'de' : _is_bit_set(ebx, 2),
                'pse' : _is_bit_set(ebx, 3),
                'tsc' : _is_bit_set(ebx, 4),
                'msr' : _is_bit_set(ebx, 5),
                'pae' : _is_bit_set(ebx, 6),
                'mce' : _is_bit_set(ebx, 7),
                'cx8' : _is_bit_set(ebx, 8),
                'apic' : _is_bit_set(ebx, 9),
                #'reserved' : _is_bit_set(ebx, 10),
                'syscall' : _is_bit_set(ebx, 11),
                'mtrr' : _is_bit_set(ebx, 12),
                'pge' : _is_bit_set(ebx, 13),
                'mca' : _is_bit_set(ebx, 14),
                'cmov' : _is_bit_set(ebx, 15),
                'pat' : _is_bit_set(ebx, 16),
                'pse36' : _is_bit_set(ebx, 17),
                #'reserved' : _is_bit_set(ebx, 18),
                'mp' : _is_bit_set(ebx, 19),
                'nx' : _is_bit_set(ebx, 20),
                #'reserved' : _is_bit_set(ebx, 21),
                'mmxext' : _is_bit_set(ebx, 22),
                'mmx' : _is_bit_set(ebx, 23),
                'fxsr' : _is_bit_set(ebx, 24),
                'fxsr_opt' : _is_bit_set(ebx, 25),
                'pdpe1gp' : _is_bit_set(ebx, 26),
                'rdtscp' : _is_bit_set(ebx, 27),
                #'reserved' : _is_bit_set(ebx, 28),
                'lm' : _is_bit_set(ebx, 29),
                '3dnowext' : _is_bit_set(ebx, 30),
                '3dnow' : _is_bit_set(ebx, 31),

                'lahf_lm' : _is_bit_set(ecx, 0),
                'cmp_legacy' : _is_bit_set(ecx, 1),
                'svm' : _is_bit_set(ecx, 2),
                'extapic' : _is_bit_set(ecx, 3),
                'cr8_legacy' : _is_bit_set(ecx, 4),
                'abm' : _is_bit_set(ecx, 5),
                'sse4a' : _is_bit_set(ecx, 6),
                'misalignsse' : _is_bit_set(ecx, 7),
                '3dnowprefetch' : _is_bit_set(ecx, 8),
                'osvw' : _is_bit_set(ecx, 9),
                'ibs' : _is_bit_set(ecx, 10),
                'xop' : _is_bit_set(ecx, 11),
                'skinit' : _is_bit_set(ecx, 12),
                'wdt' : _is_bit_set(ecx, 13),
                #'reserved' : _is_bit_set(ecx, 14),
                'lwp' : _is_bit_set(ecx, 15),
                'fma4' : _is_bit_set(ecx, 16),
                'tce' : _is_bit_set(ecx, 17),
                #'reserved' : _is_bit_set(ecx, 18),
                'nodeid_msr' : _is_bit_set(ecx, 19),
                #'reserved' : _is_bit_set(ecx, 20),
                'tbm' : _is_bit_set(ecx, 21),
                'topoext' : _is_bit_set(ecx, 22),
                'perfctr_core' : _is_bit_set(ecx, 23),
                'perfctr_nb' : _is_bit_set(ecx, 24),
                #'reserved' : _is_bit_set(ecx, 25),
                'dbx' : _is_bit_set(ecx, 26),
                'perftsc' : _is_bit_set(ecx, 27),
                'pci_l2i' : _is_bit_set(ecx, 28),
                #'reserved' : _is_bit_set(ecx, 29),
                #'reserved' : _is_bit_set(ecx, 30),
                #'reserved' : _is_bit_set(ecx, 31)
            }

            # Get a list of only the flags that are true
            extended_flags = [k for k, v in extended_flags.items() if v]
            flags += extended_flags

        flags.sort()
        return flags

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
    def get_processor_brand(self, max_extension_support):
        # Return the 48-byte brand string from extended leaves
        # 0x80000002..0x80000004, or "" when unsupported.
        processor_brand = ""

        # Processor brand string
        if max_extension_support >= 0x80000004:
            instructions = [
                b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
                b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
                b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
            ]
            for instruction in instructions:
                # EAX
                eax = self._run_asm(
                    instruction,  # mov ax,0x8000000?
                    b"\x0f\xa2"   # cpuid
                    b"\x89\xC0"   # mov ax,ax
                    b"\xC3"       # ret
                )

                # EBX
                ebx = self._run_asm(
                    instruction,  # mov ax,0x8000000?
                    b"\x0f\xa2"   # cpuid
                    b"\x89\xD8"   # mov ax,bx
                    b"\xC3"       # ret
                )

                # ECX
                ecx = self._run_asm(
                    instruction,  # mov ax,0x8000000?
                    b"\x0f\xa2"   # cpuid
                    b"\x89\xC8"   # mov ax,cx
                    b"\xC3"       # ret
                )

                # EDX
                edx = self._run_asm(
                    instruction,  # mov ax,0x8000000?
                    b"\x0f\xa2"   # cpuid
                    b"\x89\xD0"   # mov ax,dx
                    b"\xC3"       # ret
                )

                # Combine each of the 4 bytes in each register into the string
                for reg in [eax, ebx, ecx, edx]:
                    for n in [0, 8, 16, 24]:
                        processor_brand += chr((reg >> n) & 0xFF)

        # Strip off any trailing NULL terminators and white space
        processor_brand = processor_brand.strip("\0").strip()

        return processor_brand

    # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
    def get_cache(self, max_extension_support):
        # Return L2 cache info from extended leaf 0x80000006, or {}.
        # NOTE(review): the field widths/offsets here look narrower than
        # the documented layout of this CPUID leaf — confirm.
        cache_info = {}

        # Just return if the cache feature is not supported
        if max_extension_support < 0x80000006:
            return cache_info

        # ECX
        ecx = self._run_asm(
            b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
            b"\x0f\xa2"             # cpuid
            b"\x89\xC8"             # mov ax,cx
            b"\xC3"                 # ret
        )

        cache_info = {
            'size_kb' : ecx & 0xFF,
            'line_size_b' : (ecx >> 12) & 0xF,
            'associativity' : (ecx >> 16) & 0xFFFF
        }

        return cache_info

    def get_ticks(self):
        # Read the 64-bit time stamp counter via RDTSC (with a CPUID
        # serializing instruction first). Uses different stubs for
        # 32-bit and 64-bit processes.
        retval = None

        if DataSource.bits == '32bit':
            # Works on x86_32
            restype = None
            argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
            get_ticks_x86_32, address = self._asm_func(restype, argtypes,
                [
                b"\x55",         # push bp
                b"\x89\xE5",     # mov bp,sp
                b"\x31\xC0",     # xor ax,ax
                b"\x0F\xA2",     # cpuid
                b"\x0F\x31",     # rdtsc
                b"\x8B\x5D\x08", # mov bx,[di+0x8]
                b"\x8B\x4D\x0C", # mov cx,[di+0xc]
                b"\x89\x13",     # mov [bp+di],dx
                b"\x89\x01",     # mov [bx+di],ax
                b"\x5D",         # pop bp
                b"\xC3"          # ret
                ]
            )

            high = ctypes.c_uint32(0)
            low = ctypes.c_uint32(0)

            get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
            retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
        elif DataSource.bits == '64bit':
            # Works on x86_64
            restype = ctypes.c_uint64
            argtypes = ()
            get_ticks_x86_64, address = self._asm_func(restype, argtypes,
                [
                b"\x48",         # dec ax
                b"\x31\xC0",     # xor ax,ax
                b"\x0F\xA2",     # cpuid
                b"\x0F\x31",     # rdtsc
                b"\x48",         # dec ax
                b"\xC1\xE2\x20", # shl dx,byte 0x20
                b"\x48",         # dec ax
                b"\x09\xD0",     # or ax,dx
                b"\xC3",         # ret
                ]
            )
            retval = get_ticks_x86_64()

        return retval

    def get_raw_hz(self):
        # Estimate the CPU frequency by counting TSC ticks over one second.
        import time

        start = self.get_ticks()

        time.sleep(1)

        end = self.get_ticks()

        ticks = (end - start)

        return ticks
def _actual_get_cpu_info_from_cpuid(queue):
    '''
    Warning! This function has the potential to crash the Python runtime.
    Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
    It will safely call this function in another process.
    The result is put on *queue* as a base64-encoded pickle (see
    _obj_to_b64); an empty dict is sent for unsupported systems.
    '''

    # Pipe all output to nothing, since this runs inside a throwaway child process
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return none if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        queue.put(_obj_to_b64({}))
        return

    # Return none if SE Linux is in enforcing mode
    cpuid = CPUID()
    if cpuid.is_selinux_enforcing:
        queue.put(_obj_to_b64({}))
        return

    # Get the cpu info from the CPUID register
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()

    processor_brand = cpuid.get_processor_brand(max_extension_support)

    # Get the Hz and scale
    hz_actual = cpuid.get_raw_hz()
    hz_actual = _to_decimal_string(hz_actual)

    # Get the Hz and scale
    hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

    # Rebind `info` to the full result dict (the leaf-1 fields are read
    # from the old dict while building the new one)
    info = {
        'vendor_id_raw' : cpuid.get_vendor_id(),
        'hardware_raw' : '',
        'brand_raw' : processor_brand,

        'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
        'hz_actual' : _hz_short_to_full(hz_actual, 0),

        'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
        'l2_cache_line_size' : cache_info['line_size_b'],
        'l2_cache_associativity' : hex(cache_info['associativity']),

        'stepping' : info['stepping'],
        'model' : info['model'],
        'family' : info['family'],
        'processor_type' : info['processor_type'],
        'extended_model' : info['extended_model'],
        'extended_family' : info['extended_family'],
        'flags' : cpuid.get_flags(max_extension_support)
    }

    # Drop falsy fields before sending the result back
    info = {k: v for k, v in info.items() if v}
    queue.put(_obj_to_b64(info))
def _get_cpu_info_from_cpuid():
    '''
    Returns the CPU info gathered by querying the X86 cpuid register in a new process.
    Returns {} on non X86 cpus.
    Returns {} if SELinux is in enforcing mode.
    '''
    from multiprocessing import Process, Queue

    # Return {} if can't cpuid
    if not DataSource.can_cpuid:
        return {}

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return {} if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        return {}

    try:
        # Start running the function in a subprocess, so a crash while
        # executing the generated machine code cannot kill this process
        queue = Queue()
        p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
        p.start()

        # Wait for the process to end, while it is still alive
        while p.is_alive():
            p.join(0)

        # Return {} if it failed
        if p.exitcode != 0:
            return {}

        # Return the result, only if there is something to read
        if not queue.empty():
            output = queue.get()
            # The child sends its result as a base64-encoded pickle
            return _b64_to_obj(output)
    except:
        pass

    # Return {} if everything failed
    return {}
def _get_cpu_info_from_proc_cpuinfo():
'''
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
'''
try:
# Just return {} if there is no cpuinfo
if not DataSource.has_proc_cpuinfo():
return {}
returncode, output = DataSource.cat_proc_cpuinfo()
if returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
cache_size = _get_field(False, output, None, '', 'cache size')
stepping = _get_field(False, output, int, 0, 'stepping')
model = _get_field(False, output, int, 0, 'model')
family = _get_field(False, output, int, 0, 'cpu family')
hardware = _get_field(False, output, None, '', 'Hardware')
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
# Convert from MHz string to Hz
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
hz_actual = hz_actual.lower().rstrip('mhz').strip()
hz_actual = _to_decimal_string(hz_actual)
# Convert from GHz/MHz string to Hz
hz_advertised, scale = (None, 0)
try:
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
except Exception:
pass
info = {
'hardware_raw' : hardware,
'brand_raw' : processor_brand,
'l3_cache_size' : _to_friendly_bytes(cache_size),
'flags' : flags,
'vendor_id_raw' : vendor_id,
'stepping' : stepping,
'model' : model,
'family' : family,
}
# Make the Hz the same for actual and advertised if missing any
if not hz_advertised or hz_advertised == '0.0':
hz_advertised = hz_actual
scale = 6
elif not hz_actual or hz_actual == '0.0':
hz_actual = hz_advertised
# Add the Hz if there is one
if _hz_short_to_full(hz_advertised, scale) > (0, 0):
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
if _hz_short_to_full(hz_actual, scale) > (0, 0):
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
info['hz_actual'] = _hz_short_to_full(hz_actual, 6)
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_cpufreq_info():
'''
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
'''
try:
hz_brand, scale = '0.0', 0
if not DataSource.has_cpufreq_info():
return {}
returncode, output = DataSource.cpufreq_info()
if returncode != 0:
return {}
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
i = hz_brand.find('Hz')
assert(i != -1)
hz_brand = hz_brand[0 : i+2].strip().lower()
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = _to_decimal_string(hz_brand)
info = {
'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_advertised' : _hz_short_to_full(hz_brand, scale),
'hz_actual' : _hz_short_to_full(hz_brand, scale),
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_lscpu():
'''
Returns the CPU info gathered from lscpu.
Returns {} if lscpu is not found.
'''
try:
if not DataSource.has_lscpu():
return {}
returncode, output = DataSource.lscpu()
if returncode != 0:
return {}
info = {}
new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
if new_hz:
new_hz = _to_decimal_string(new_hz)
scale = 6
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
vendor_id = _get_field(False, output, None, None, 'Vendor ID')
if vendor_id:
info['vendor_id_raw'] = vendor_id
brand = _get_field(False, output, None, None, 'Model name')
if brand:
info['brand_raw'] = brand
family = _get_field(False, output, None, None, 'CPU family')
if family and family.isdigit():
info['family'] = int(family)
stepping = _get_field(False, output, None, None, 'Stepping')
if stepping and stepping.isdigit():
info['stepping'] = int(stepping)
model = _get_field(False, output, None, None, 'Model')
if model and model.isdigit():
info['model'] = int(model)
l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
if l1_data_cache_size:
info['l1_data_cache_size'] = _to_friendly_bytes(l1_data_cache_size)
l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
if l1_instruction_cache_size:
info['l1_instruction_cache_size'] = _to_friendly_bytes(l1_instruction_cache_size)
l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
if l2_cache_size:
info['l2_cache_size'] = _to_friendly_bytes(l2_cache_size)
l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
if l3_cache_size:
info['l3_cache_size'] = _to_friendly_bytes(l3_cache_size)
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
info['flags'] = flags
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_dmesg():
'''
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no dmesg
if not DataSource.has_dmesg():
return {}
# If dmesg fails return {}
returncode, output = DataSource.dmesg_a()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
'''
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
'''
try:
# Just return {} if there is no lsprop
if not DataSource.has_ibm_pa_features():
return {}
# If ibm,pa-features fails return {}
returncode, output = DataSource.ibm_pa_features()
if output == None or returncode != 0:
return {}
# Filter out invalid characters from output
value = output.split("ibm,pa-features")[1].lower()
value = [s for s in value if s in list('0123456789abcfed')]
value = ''.join(value)
# Get data converted to Uint32 chunks
left = int(value[0 : 8], 16)
right = int(value[8 : 16], 16)
# Get the CPU flags
flags = {
# Byte 0
'mmu' : _is_bit_set(left, 0),
'fpu' : _is_bit_set(left, 1),
'slb' : _is_bit_set(left, 2),
'run' : _is_bit_set(left, 3),
#'reserved' : _is_bit_set(left, 4),
'dabr' : _is_bit_set(left, 5),
'ne' : _is_bit_set(left, 6),
'wtr' : _is_bit_set(left, 7),
# Byte 1
'mcr' : _is_bit_set(left, 8),
'dsisr' : _is_bit_set(left, 9),
'lp' : _is_bit_set(left, 10),
'ri' : _is_bit_set(left, 11),
'dabrx' : _is_bit_set(left, 12),
'sprg3' : _is_bit_set(left, 13),
'rislb' : _is_bit_set(left, 14),
'pp' : _is_bit_set(left, 15),
# Byte 2
'vpm' : _is_bit_set(left, 16),
'dss_2.05' : _is_bit_set(left, 17),
#'reserved' : _is_bit_set(left, 18),
'dar' : _is_bit_set(left, 19),
#'reserved' : _is_bit_set(left, 20),
'ppr' : _is_bit_set(left, 21),
'dss_2.02' : _is_bit_set(left, 22),
'dss_2.06' : _is_bit_set(left, 23),
# Byte 3
'lsd_in_dscr' : _is_bit_set(left, 24),
'ugr_in_dscr' : _is_bit_set(left, 25),
#'reserved' : _is_bit_set(left, 26),
#'reserved' : _is_bit_set(left, 27),
#'reserved' : _is_bit_set(left, 28),
#'reserved' : _is_bit_set(left, 29),
#'reserved' : _is_bit_set(left, 30),
#'reserved' : _is_bit_set(left, 31),
# Byte 4
'sso_2.06' : _is_bit_set(right, 0),
#'reserved' : _is_bit_set(right, 1),
#'reserved' : _is_bit_set(right, 2),
#'reserved' : _is_bit_set(right, 3),
#'reserved' : _is_bit_set(right, 4),
#'reserved' : _is_bit_set(right, 5),
#'reserved' : _is_bit_set(right, 6),
#'reserved' : _is_bit_set(right, 7),
# Byte 5
'le' : _is_bit_set(right, 8),
'cfar' : _is_bit_set(right, 9),
'eb' : _is_bit_set(right, 10),
'lsq_2.07' : _is_bit_set(right, 11),
#'reserved' : _is_bit_set(right, 12),
#'reserved' : _is_bit_set(right, 13),
#'reserved' : _is_bit_set(right, 14),
#'reserved' : _is_bit_set(right, 15),
# Byte 6
'dss_2.07' : _is_bit_set(right, 16),
#'reserved' : _is_bit_set(right, 17),
#'reserved' : _is_bit_set(right, 18),
#'reserved' : _is_bit_set(right, 19),
#'reserved' : _is_bit_set(right, 20),
#'reserved' : _is_bit_set(right, 21),
#'reserved' : _is_bit_set(right, 22),
#'reserved' : _is_bit_set(right, 23),
# Byte 7
#'reserved' : _is_bit_set(right, 24),
#'reserved' : _is_bit_set(right, 25),
#'reserved' : _is_bit_set(right, 26),
#'reserved' : _is_bit_set(right, 27),
#'reserved' : _is_bit_set(right, 28),
#'reserved' : _is_bit_set(right, 29),
#'reserved' : _is_bit_set(right, 30),
#'reserved' : _is_bit_set(right, 31),
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
'''
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no /var/run/dmesg.boot
if not DataSource.has_var_run_dmesg_boot():
return {}
# If dmesg.boot fails return {}
returncode, output = DataSource.cat_var_run_dmesg_boot()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_sysinfo():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
info = _get_cpu_info_from_sysinfo_v1()
info.update(_get_cpu_info_from_sysinfo_v2())
return info
def _get_cpu_info_from_sysinfo_v1():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
model = int(output.split(', model ')[1].split(',')[0].strip())
family = int(output.split(', family ')[1].split(',')[0].strip())
# Flags
flags = []
for line in output.split('\n'):
if line.startswith('\t\t'):
for flag in line.strip().lower().split():
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith(' ') and not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
lines = [n for n in output.split('\n') if n]
raw_hz = lines[0].split('running at ')[1].strip().lower()
hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
hz_advertised = _to_decimal_string(hz_advertised)
hz_actual = hz_advertised
scale = 0
if raw_hz.endswith('mhz'):
scale = 6
elif raw_hz.endswith('ghz'):
scale = 9
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_wmic():
'''
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
'''
try:
# Just return {} if not Windows or there is no wmic
if not DataSource.is_windows or not DataSource.has_wmic():
return {}
returncode, output = DataSource.wmic_cpu()
if output == None or returncode != 0:
return {}
# Break the list into key values pairs
value = output.split("\n")
value = [s.rstrip().split('=') for s in value if '=' in s]
value = {k: v for k, v in value if v}
# Get the advertised MHz
processor_brand = value.get('Name')
hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)
# Get the actual MHz
hz_actual = value.get('CurrentClockSpeed')
scale_actual = 6
if hz_actual:
hz_actual = _to_decimal_string(hz_actual)
# Get cache sizes
l2_cache_size = value.get('L2CacheSize')
if l2_cache_size:
l2_cache_size = l2_cache_size + ' KB'
l3_cache_size = value.get('L3CacheSize')
if l3_cache_size:
l3_cache_size = l3_cache_size + ' KB'
# Get family, model, and stepping
family, model, stepping = '', '', ''
description = value.get('Description') or value.get('Caption')
entries = description.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'vendor_id_raw' : value.get('Manufacturer'),
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
'l2_cache_size' : l2_cache_size,
'l3_cache_size' : l3_cache_size,
'stepping' : stepping,
'model' : model,
'family' : family,
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_registry():
'''
FIXME: Is missing many of the newer CPU flags like sse3
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand().strip()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id_raw()
# Get the CPU arch and bits
arch_string_raw = DataSource.winreg_arch_string_raw()
arch, bits = _parse_arch(arch_string_raw)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = _to_decimal_string(hz_actual)
# Get the advertised CPU Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 6),
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_kstat():
'''
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
'''
try:
# Just return {} if there is no isainfo or kstat
if not DataSource.has_isainfo() or not DataSource.has_kstat():
return {}
# If isainfo fails return {}
returncode, flag_output = DataSource.isainfo_vb()
if flag_output == None or returncode != 0:
return {}
# If kstat fails return {}
returncode, kstat = DataSource.kstat_m_cpu_info()
if kstat == None or returncode != 0:
return {}
# Various fields
vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
# Flags
flags = flag_output.strip().split('\n')[-1].strip().lower().split()
flags.sort()
# Convert from GHz/MHz string to Hz
scale = 6
hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
hz_advertised = _to_decimal_string(hz_advertised)
# Convert from GHz/MHz string to Hz
hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_platform_uname():
try:
uname = DataSource.uname_string_raw.split(',')[0]
family, model, stepping = (None, None, None)
entries = uname.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'family' : family,
'model' : model,
'stepping' : stepping
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_internal():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
'''
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
info = {
'python_version' : PYTHON_VERSION,
'cpuinfo_version' : CPUINFO_VERSION,
'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
'arch' : arch,
'bits' : bits,
'count' : DataSource.cpu_count,
'arch_string_raw' : DataSource.arch_string_raw,
}
# Try the Windows wmic
_copy_new_fields(info, _get_cpu_info_from_wmic())
# Try the Windows registry
_copy_new_fields(info, _get_cpu_info_from_registry())
# Try /proc/cpuinfo
_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())
# Try cpufreq-info
_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())
# Try LSCPU
_copy_new_fields(info, _get_cpu_info_from_lscpu())
# Try sysctl
_copy_new_fields(info, _get_cpu_info_from_sysctl())
# Try kstat
_copy_new_fields(info, _get_cpu_info_from_kstat())
# Try dmesg
_copy_new_fields(info, _get_cpu_info_from_dmesg())
# Try /var/run/dmesg.boot
_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
# Try lsprop ibm,pa-features
_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())
# Try sysinfo
_copy_new_fields(info, _get_cpu_info_from_sysinfo())
# Try querying the CPU cpuid register
_copy_new_fields(info, _get_cpu_info_from_cpuid())
# Try platform.uname
_copy_new_fields(info, _get_cpu_info_from_platform_uname())
return info
def get_cpu_info_json():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a json string
'''
import json
output = None
# If running under pyinstaller, run normally
if getattr(sys, 'frozen', False):
info = _get_cpu_info_internal()
output = json.dumps(info)
output = "{0}".format(output)
# if not running under pyinstaller, run in another process.
# This is done because multiprocesing has a design flaw that
# causes non main programs to run multiple times on Windows.
else:
from subprocess import Popen, PIPE
command = [sys.executable, __file__, '--json']
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if p1.returncode != 0:
return "{}"
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return output
def main():
from argparse import ArgumentParser
import json
# Parse args
parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
args = parser.parse_args()
try:
_check_arch()
except Exception as err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
info = _get_cpu_info_internal()
if not info:
sys.stderr.write("Failed to find cpu info\n")
sys.exit(1)
if args.json:
print(json.dumps(info))
elif args.version:
print(CPUINFO_VERSION_STRING)
else:
print('Python Version: {0}'.format(info.get('python_version', '')))
print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
print('Arch: {0}'.format(info.get('arch', '')))
print('Bits: {0}'.format(info.get('bits', '')))
print('Count: {0}'.format(info.get('count', '')))
print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
print('Stepping: {0}'.format(info.get('stepping', '')))
print('Model: {0}'.format(info.get('model', '')))
print('Family: {0}'.format(info.get('family', '')))
print('Processor Type: {0}'.format(info.get('processor_type', '')))
print('Extended Model: {0}'.format(info.get('extended_model', '')))
print('Extended Family: {0}'.format(info.get('extended_family', '')))
print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
main()
else:
_check_arch()
|
yuma-m/pychord
|
pychord/quality.py
|
Quality.get_components
|
python
|
def get_components(self, root='C', visible=False):
root_val = note_to_val(root)
components = [v + root_val for v in self.components]
if visible:
components = [val_to_note(c, scale=root) for c in components]
return components
|
Get components of chord quality
:param str root: the root note of the chord
:param bool visible: returns the name of notes if True
:rtype: list[str|int]
:return: components of chord quality
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/quality.py#L40-L54
|
[
"def note_to_val(note):\n \"\"\" Convert note to int\n\n >>> note_to_val(\"C\")\n 0\n >>> note_to_val(\"B\")\n 11\n\n :type note: str\n :rtype: int\n \"\"\"\n if note not in NOTE_VAL_DICT:\n raise ValueError(\"Unknown note {}\".format(note))\n return NOTE_VAL_DICT[note]\n"
] |
class Quality(object):
""" Chord quality
:param str _quality: str expression of chord quality
"""
def __init__(self, quality):
""" Constructor of chord quality
:param str quality: name of quality
"""
if quality not in QUALITY_DICT:
raise ValueError("unknown quality {}".format(quality))
self._quality = quality
self.components = list(QUALITY_DICT[quality])
def __unicode__(self):
return self._quality
def __str__(self):
return self._quality
def __eq__(self, other):
if not isinstance(other, Quality):
raise TypeError("Cannot compare Quality object with {} object".format(type(other)))
return self._quality == other.quality
def __ne__(self, other):
return not self.__eq__(other)
@property
def quality(self):
""" Get name of quality """
return self._quality
def append_on_chord(self, on_chord, root):
""" Append on chord
To create Am7/G
q = Quality('m7')
q.append_on_chord('G', root='A')
:param str on_chord: bass note of the chord
:param str root: root note of the chord
"""
root_val = note_to_val(root)
on_chord_val = note_to_val(on_chord) - root_val
list_ = list(self.components)
for idx, val in enumerate(list_):
if val % 12 == on_chord_val:
self.components.remove(val)
break
if on_chord_val > root_val:
on_chord_val -= 12
if on_chord_val not in self.components:
self.components.insert(0, on_chord_val)
def append_note(self, note, root, scale=0):
""" Append a note to quality
:param str note: note to append on quality
:param str root: root note of chord
:param int scale: key scale
"""
root_val = note_to_val(root)
note_val = note_to_val(note) - root_val + scale * 12
if note_val not in self.components:
self.components.append(note_val)
self.components.sort()
def append_notes(self, notes, root, scale=0):
""" Append notes to quality
:param list[str] notes: notes to append on quality
:param str root: root note of chord
:param int scale: key scale
"""
for note in notes:
self.append_note(note, root, scale)
|
yuma-m/pychord
|
pychord/quality.py
|
Quality.append_on_chord
|
python
|
def append_on_chord(self, on_chord, root):
root_val = note_to_val(root)
on_chord_val = note_to_val(on_chord) - root_val
list_ = list(self.components)
for idx, val in enumerate(list_):
if val % 12 == on_chord_val:
self.components.remove(val)
break
if on_chord_val > root_val:
on_chord_val -= 12
if on_chord_val not in self.components:
self.components.insert(0, on_chord_val)
|
Append on chord
To create Am7/G
q = Quality('m7')
q.append_on_chord('G', root='A')
:param str on_chord: bass note of the chord
:param str root: root note of the chord
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/quality.py#L56-L79
|
[
"def note_to_val(note):\n \"\"\" Convert note to int\n\n >>> note_to_val(\"C\")\n 0\n >>> note_to_val(\"B\")\n 11\n\n :type note: str\n :rtype: int\n \"\"\"\n if note not in NOTE_VAL_DICT:\n raise ValueError(\"Unknown note {}\".format(note))\n return NOTE_VAL_DICT[note]\n"
] |
class Quality(object):
""" Chord quality
:param str _quality: str expression of chord quality
"""
def __init__(self, quality):
""" Constructor of chord quality
:param str quality: name of quality
"""
if quality not in QUALITY_DICT:
raise ValueError("unknown quality {}".format(quality))
self._quality = quality
self.components = list(QUALITY_DICT[quality])
def __unicode__(self):
return self._quality
def __str__(self):
return self._quality
def __eq__(self, other):
if not isinstance(other, Quality):
raise TypeError("Cannot compare Quality object with {} object".format(type(other)))
return self._quality == other.quality
def __ne__(self, other):
return not self.__eq__(other)
@property
def quality(self):
""" Get name of quality """
return self._quality
def get_components(self, root='C', visible=False):
""" Get components of chord quality
:param str root: the root note of the chord
:param bool visible: returns the name of notes if True
:rtype: list[str|int]
:return: components of chord quality
"""
root_val = note_to_val(root)
components = [v + root_val for v in self.components]
if visible:
components = [val_to_note(c, scale=root) for c in components]
return components
def append_note(self, note, root, scale=0):
""" Append a note to quality
:param str note: note to append on quality
:param str root: root note of chord
:param int scale: key scale
"""
root_val = note_to_val(root)
note_val = note_to_val(note) - root_val + scale * 12
if note_val not in self.components:
self.components.append(note_val)
self.components.sort()
def append_notes(self, notes, root, scale=0):
""" Append notes to quality
:param list[str] notes: notes to append on quality
:param str root: root note of chord
:param int scale: key scale
"""
for note in notes:
self.append_note(note, root, scale)
|
yuma-m/pychord
|
pychord/quality.py
|
Quality.append_note
|
python
|
def append_note(self, note, root, scale=0):
root_val = note_to_val(root)
note_val = note_to_val(note) - root_val + scale * 12
if note_val not in self.components:
self.components.append(note_val)
self.components.sort()
|
Append a note to quality
:param str note: note to append on quality
:param str root: root note of chord
:param int scale: key scale
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/quality.py#L81-L92
|
[
"def note_to_val(note):\n \"\"\" Convert note to int\n\n >>> note_to_val(\"C\")\n 0\n >>> note_to_val(\"B\")\n 11\n\n :type note: str\n :rtype: int\n \"\"\"\n if note not in NOTE_VAL_DICT:\n raise ValueError(\"Unknown note {}\".format(note))\n return NOTE_VAL_DICT[note]\n"
] |
class Quality(object):
""" Chord quality
:param str _quality: str expression of chord quality
"""
def __init__(self, quality):
""" Constructor of chord quality
:param str quality: name of quality
"""
if quality not in QUALITY_DICT:
raise ValueError("unknown quality {}".format(quality))
self._quality = quality
self.components = list(QUALITY_DICT[quality])
def __unicode__(self):
return self._quality
def __str__(self):
return self._quality
def __eq__(self, other):
if not isinstance(other, Quality):
raise TypeError("Cannot compare Quality object with {} object".format(type(other)))
return self._quality == other.quality
def __ne__(self, other):
return not self.__eq__(other)
@property
def quality(self):
""" Get name of quality """
return self._quality
def get_components(self, root='C', visible=False):
""" Get components of chord quality
:param str root: the root note of the chord
:param bool visible: returns the name of notes if True
:rtype: list[str|int]
:return: components of chord quality
"""
root_val = note_to_val(root)
components = [v + root_val for v in self.components]
if visible:
components = [val_to_note(c, scale=root) for c in components]
return components
def append_on_chord(self, on_chord, root):
""" Append on chord
To create Am7/G
q = Quality('m7')
q.append_on_chord('G', root='A')
:param str on_chord: bass note of the chord
:param str root: root note of the chord
"""
root_val = note_to_val(root)
on_chord_val = note_to_val(on_chord) - root_val
list_ = list(self.components)
for idx, val in enumerate(list_):
if val % 12 == on_chord_val:
self.components.remove(val)
break
if on_chord_val > root_val:
on_chord_val -= 12
if on_chord_val not in self.components:
self.components.insert(0, on_chord_val)
def append_notes(self, notes, root, scale=0):
""" Append notes to quality
:param list[str] notes: notes to append on quality
:param str root: root note of chord
:param int scale: key scale
"""
for note in notes:
self.append_note(note, root, scale)
|
yuma-m/pychord
|
pychord/quality.py
|
Quality.append_notes
|
python
|
def append_notes(self, notes, root, scale=0):
for note in notes:
self.append_note(note, root, scale)
|
Append notes to quality
:param list[str] notes: notes to append on quality
:param str root: root note of chord
:param int scale: key scale
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/quality.py#L94-L102
|
[
"def append_note(self, note, root, scale=0):\n \"\"\" Append a note to quality\n\n :param str note: note to append on quality\n :param str root: root note of chord\n :param int scale: key scale\n \"\"\"\n root_val = note_to_val(root)\n note_val = note_to_val(note) - root_val + scale * 12\n if note_val not in self.components:\n self.components.append(note_val)\n self.components.sort()\n"
] |
class Quality(object):
""" Chord quality
:param str _quality: str expression of chord quality
"""
def __init__(self, quality):
""" Constructor of chord quality
:param str quality: name of quality
"""
if quality not in QUALITY_DICT:
raise ValueError("unknown quality {}".format(quality))
self._quality = quality
self.components = list(QUALITY_DICT[quality])
def __unicode__(self):
return self._quality
def __str__(self):
return self._quality
def __eq__(self, other):
if not isinstance(other, Quality):
raise TypeError("Cannot compare Quality object with {} object".format(type(other)))
return self._quality == other.quality
def __ne__(self, other):
return not self.__eq__(other)
@property
def quality(self):
""" Get name of quality """
return self._quality
def get_components(self, root='C', visible=False):
""" Get components of chord quality
:param str root: the root note of the chord
:param bool visible: returns the name of notes if True
:rtype: list[str|int]
:return: components of chord quality
"""
root_val = note_to_val(root)
components = [v + root_val for v in self.components]
if visible:
components = [val_to_note(c, scale=root) for c in components]
return components
def append_on_chord(self, on_chord, root):
""" Append on chord
To create Am7/G
q = Quality('m7')
q.append_on_chord('G', root='A')
:param str on_chord: bass note of the chord
:param str root: root note of the chord
"""
root_val = note_to_val(root)
on_chord_val = note_to_val(on_chord) - root_val
list_ = list(self.components)
for idx, val in enumerate(list_):
if val % 12 == on_chord_val:
self.components.remove(val)
break
if on_chord_val > root_val:
on_chord_val -= 12
if on_chord_val not in self.components:
self.components.insert(0, on_chord_val)
def append_note(self, note, root, scale=0):
""" Append a note to quality
:param str note: note to append on quality
:param str root: root note of chord
:param int scale: key scale
"""
root_val = note_to_val(root)
note_val = note_to_val(note) - root_val + scale * 12
if note_val not in self.components:
self.components.append(note_val)
self.components.sort()
|
yuma-m/pychord
|
pychord/progression.py
|
ChordProgression.insert
|
python
|
def insert(self, index, chord):
self._chords.insert(index, as_chord(chord))
|
Insert a chord to chord progressions
:param int index: Index to insert a chord
:type chord: str|pychord.Chord
:param chord: A chord to insert
:return:
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/progression.py#L81-L89
|
[
"def as_chord(chord):\n \"\"\" convert from str to Chord instance if input is str\n\n :type chord: str|pychord.Chord\n :param chord: Chord name or Chord instance\n :rtype: pychord.Chord\n :return: Chord instance\n \"\"\"\n if isinstance(chord, Chord):\n return chord\n elif isinstance(chord, str):\n return Chord(chord)\n else:\n raise TypeError(\"input type should be str or Chord instance.\")\n"
] |
class ChordProgression(object):
""" Class to handle chord progressions.
:param list[pychord.Chord] _chords: component chords of chord progression.
"""
def __init__(self, initial_chords=None):
""" Constructor of ChordProgression instance.
:type initial_chords: str|pychord.Chord|list
:param initial_chords: Initial chord or chords of the chord progressions
"""
if initial_chords is None:
initial_chords = []
if isinstance(initial_chords, Chord):
self._chords = [initial_chords]
elif isinstance(initial_chords, str):
self._chords = [as_chord(initial_chords)]
elif isinstance(initial_chords, list):
self._chords = [as_chord(chord) for chord in initial_chords]
else:
raise TypeError("Cannot initialize ChordProgression with argument of {} type".format(type(initial_chords)))
def __unicode__(self):
return " | ".join([chord.chord for chord in self._chords])
def __str__(self):
return " | ".join([chord.chord for chord in self._chords])
def __repr__(self):
return "<ChordProgression: {}>".format(" | ".join([chord.chord for chord in self._chords]))
def __add__(self, other):
self._chords += other.chords
return self
def __len__(self):
return len(self._chords)
def __getitem__(self, item):
return self._chords[item]
def __setitem__(self, key, value):
self._chords[key] = value
def __eq__(self, other):
if not isinstance(other, ChordProgression):
raise TypeError("Cannot compare ChordProgression object with {} object".format(type(other)))
if len(self) != len(other):
return False
for c, o in zip(self, other):
if c != o:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@property
def chords(self):
""" Get component chords of chord progression
:rtype: list[pychord.Chord]
"""
return self._chords
def append(self, chord):
""" Append a chord to chord progressions
:type chord: str|pychord.Chord
:param chord: A chord to append
:return:
"""
self._chords.append(as_chord(chord))
def pop(self, index=-1):
""" Pop a chord from chord progressions
:param int index: Index of the chord to pop (default: -1)
:return: pychord.Chord
"""
return self._chords.pop(index)
def transpose(self, trans):
""" Transpose whole chord progressions
:param int trans: Transpose key
:return:
"""
for chord in self._chords:
chord.transpose(trans)
|
yuma-m/pychord
|
pychord/chord.py
|
as_chord
|
python
|
def as_chord(chord):
if isinstance(chord, Chord):
return chord
elif isinstance(chord, str):
return Chord(chord)
else:
raise TypeError("input type should be str or Chord instance.")
|
convert from str to Chord instance if input is str
:type chord: str|pychord.Chord
:param chord: Chord name or Chord instance
:rtype: pychord.Chord
:return: Chord instance
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/chord.py#L130-L143
| null |
# -*- coding: utf-8 -*-
from .parser import parse
from .utils import transpose_note, display_appended, display_on, note_to_val
class Chord(object):
""" Class to handle a chord.
:param str _chord: Name of the chord. (e.g. C, Am7, F#m7-5/A)
:param str _root: The root note of chord.
:param pychord.Quality _quality: The quality of chord. (e.g. m7, 6, M9, ...)
:param list[str] _appended: The appended notes on chord.
:param str _on: The base note of slash chord.
"""
def __init__(self, chord):
""" Constructor of Chord instance
:param str chord: Name of chord.
"""
self._chord = chord
self._root, self._quality, self._appended, self._on = "", "", "", ""
self._parse(chord)
def __unicode__(self):
return self._chord
def __str__(self):
return self._chord
def __repr__(self):
return "<Chord: {}>".format(self._chord)
def __eq__(self, other):
if not isinstance(other, Chord):
raise TypeError("Cannot compare Chord object with {} object".format(type(other)))
if note_to_val(self._root) != note_to_val(other.root):
return False
if self._quality != other.quality:
return False
if self._appended != other.appended:
return False
if self._on and other.on:
if note_to_val(self._on) != note_to_val(other.on):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@property
def chord(self):
""" The name of chord """
return self._chord
@property
def root(self):
""" The root note of chord """
return self._root
@property
def quality(self):
""" The quality of chord """
return self._quality
@property
def appended(self):
""" The appended notes on chord """
return self._appended
@property
def on(self):
""" The base note of slash chord """
return self._on
def info(self):
""" Return information of chord to display """
return """{}
root={}
quality={}
appended={}
on={}""".format(self._chord, self._root, self._quality, self._appended, self._on)
def transpose(self, trans, scale="C"):
""" Transpose the chord
:param int trans: Transpose key
:param str scale: key scale
:return:
"""
if not isinstance(trans, int):
raise TypeError("Expected integers, not {}".format(type(trans)))
self._root = transpose_note(self._root, trans, scale)
if self._on:
self._on = transpose_note(self._on, trans, scale)
self._reconfigure_chord()
def components(self, visible=True):
""" Return the component notes of chord
:param bool visible: returns the name of notes if True else list of int
:rtype: list[(str or int)]
:return: component notes of chord
"""
if self._on:
self._quality.append_on_chord(self.on, self.root)
return self._quality.get_components(root=self._root, visible=visible)
def _parse(self, chord):
""" parse a chord
:param str chord: Name of chord.
"""
root, quality, appended, on = parse(chord)
self._root = root
self._quality = quality
self._appended = appended
self._on = on
def _reconfigure_chord(self):
# TODO: Use appended
self._chord = "{}{}{}{}".format(self._root,
self._quality._quality,
display_appended(self._appended),
display_on(self._on))
|
yuma-m/pychord
|
pychord/chord.py
|
Chord.info
|
python
|
def info(self):
return """{}
root={}
quality={}
appended={}
on={}""".format(self._chord, self._root, self._quality, self._appended, self._on)
|
Return information of chord to display
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/chord.py#L77-L83
| null |
class Chord(object):
""" Class to handle a chord.
:param str _chord: Name of the chord. (e.g. C, Am7, F#m7-5/A)
:param str _root: The root note of chord.
:param pychord.Quality _quality: The quality of chord. (e.g. m7, 6, M9, ...)
:param list[str] _appended: The appended notes on chord.
:param str _on: The base note of slash chord.
"""
def __init__(self, chord):
""" Constructor of Chord instance
:param str chord: Name of chord.
"""
self._chord = chord
self._root, self._quality, self._appended, self._on = "", "", "", ""
self._parse(chord)
def __unicode__(self):
return self._chord
def __str__(self):
return self._chord
def __repr__(self):
return "<Chord: {}>".format(self._chord)
def __eq__(self, other):
if not isinstance(other, Chord):
raise TypeError("Cannot compare Chord object with {} object".format(type(other)))
if note_to_val(self._root) != note_to_val(other.root):
return False
if self._quality != other.quality:
return False
if self._appended != other.appended:
return False
if self._on and other.on:
if note_to_val(self._on) != note_to_val(other.on):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@property
def chord(self):
""" The name of chord """
return self._chord
@property
def root(self):
""" The root note of chord """
return self._root
@property
def quality(self):
""" The quality of chord """
return self._quality
@property
def appended(self):
""" The appended notes on chord """
return self._appended
@property
def on(self):
""" The base note of slash chord """
return self._on
def transpose(self, trans, scale="C"):
""" Transpose the chord
:param int trans: Transpose key
:param str scale: key scale
:return:
"""
if not isinstance(trans, int):
raise TypeError("Expected integers, not {}".format(type(trans)))
self._root = transpose_note(self._root, trans, scale)
if self._on:
self._on = transpose_note(self._on, trans, scale)
self._reconfigure_chord()
def components(self, visible=True):
""" Return the component notes of chord
:param bool visible: returns the name of notes if True else list of int
:rtype: list[(str or int)]
:return: component notes of chord
"""
if self._on:
self._quality.append_on_chord(self.on, self.root)
return self._quality.get_components(root=self._root, visible=visible)
def _parse(self, chord):
""" parse a chord
:param str chord: Name of chord.
"""
root, quality, appended, on = parse(chord)
self._root = root
self._quality = quality
self._appended = appended
self._on = on
def _reconfigure_chord(self):
# TODO: Use appended
self._chord = "{}{}{}{}".format(self._root,
self._quality._quality,
display_appended(self._appended),
display_on(self._on))
|
yuma-m/pychord
|
pychord/chord.py
|
Chord.transpose
|
python
|
def transpose(self, trans, scale="C"):
if not isinstance(trans, int):
raise TypeError("Expected integers, not {}".format(type(trans)))
self._root = transpose_note(self._root, trans, scale)
if self._on:
self._on = transpose_note(self._on, trans, scale)
self._reconfigure_chord()
|
Transpose the chord
:param int trans: Transpose key
:param str scale: key scale
:return:
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/chord.py#L85-L97
|
[
"def transpose_note(note, transpose, scale=\"C\"):\n \"\"\" Transpose a note\n\n :param str note: note to transpose\n :type transpose: int\n :param str scale: key scale\n :rtype: str\n :return: transposed note\n \"\"\"\n val = note_to_val(note)\n val += transpose\n return val_to_note(val, scale)\n",
"def _reconfigure_chord(self):\n # TODO: Use appended\n self._chord = \"{}{}{}{}\".format(self._root,\n self._quality._quality,\n display_appended(self._appended),\n display_on(self._on))\n"
] |
class Chord(object):
""" Class to handle a chord.
:param str _chord: Name of the chord. (e.g. C, Am7, F#m7-5/A)
:param str _root: The root note of chord.
:param pychord.Quality _quality: The quality of chord. (e.g. m7, 6, M9, ...)
:param list[str] _appended: The appended notes on chord.
:param str _on: The base note of slash chord.
"""
def __init__(self, chord):
""" Constructor of Chord instance
:param str chord: Name of chord.
"""
self._chord = chord
self._root, self._quality, self._appended, self._on = "", "", "", ""
self._parse(chord)
def __unicode__(self):
return self._chord
def __str__(self):
return self._chord
def __repr__(self):
return "<Chord: {}>".format(self._chord)
def __eq__(self, other):
if not isinstance(other, Chord):
raise TypeError("Cannot compare Chord object with {} object".format(type(other)))
if note_to_val(self._root) != note_to_val(other.root):
return False
if self._quality != other.quality:
return False
if self._appended != other.appended:
return False
if self._on and other.on:
if note_to_val(self._on) != note_to_val(other.on):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@property
def chord(self):
""" The name of chord """
return self._chord
@property
def root(self):
""" The root note of chord """
return self._root
@property
def quality(self):
""" The quality of chord """
return self._quality
@property
def appended(self):
""" The appended notes on chord """
return self._appended
@property
def on(self):
""" The base note of slash chord """
return self._on
def info(self):
""" Return information of chord to display """
return """{}
root={}
quality={}
appended={}
on={}""".format(self._chord, self._root, self._quality, self._appended, self._on)
def components(self, visible=True):
""" Return the component notes of chord
:param bool visible: returns the name of notes if True else list of int
:rtype: list[(str or int)]
:return: component notes of chord
"""
if self._on:
self._quality.append_on_chord(self.on, self.root)
return self._quality.get_components(root=self._root, visible=visible)
def _parse(self, chord):
""" parse a chord
:param str chord: Name of chord.
"""
root, quality, appended, on = parse(chord)
self._root = root
self._quality = quality
self._appended = appended
self._on = on
def _reconfigure_chord(self):
# TODO: Use appended
self._chord = "{}{}{}{}".format(self._root,
self._quality._quality,
display_appended(self._appended),
display_on(self._on))
|
yuma-m/pychord
|
pychord/chord.py
|
Chord.components
|
python
|
def components(self, visible=True):
if self._on:
self._quality.append_on_chord(self.on, self.root)
return self._quality.get_components(root=self._root, visible=visible)
|
Return the component notes of chord
:param bool visible: returns the name of notes if True else list of int
:rtype: list[(str or int)]
:return: component notes of chord
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/chord.py#L99-L109
| null |
class Chord(object):
""" Class to handle a chord.
:param str _chord: Name of the chord. (e.g. C, Am7, F#m7-5/A)
:param str _root: The root note of chord.
:param pychord.Quality _quality: The quality of chord. (e.g. m7, 6, M9, ...)
:param list[str] _appended: The appended notes on chord.
:param str _on: The base note of slash chord.
"""
def __init__(self, chord):
""" Constructor of Chord instance
:param str chord: Name of chord.
"""
self._chord = chord
self._root, self._quality, self._appended, self._on = "", "", "", ""
self._parse(chord)
def __unicode__(self):
return self._chord
def __str__(self):
return self._chord
def __repr__(self):
return "<Chord: {}>".format(self._chord)
def __eq__(self, other):
if not isinstance(other, Chord):
raise TypeError("Cannot compare Chord object with {} object".format(type(other)))
if note_to_val(self._root) != note_to_val(other.root):
return False
if self._quality != other.quality:
return False
if self._appended != other.appended:
return False
if self._on and other.on:
if note_to_val(self._on) != note_to_val(other.on):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@property
def chord(self):
""" The name of chord """
return self._chord
@property
def root(self):
""" The root note of chord """
return self._root
@property
def quality(self):
""" The quality of chord """
return self._quality
@property
def appended(self):
""" The appended notes on chord """
return self._appended
@property
def on(self):
""" The base note of slash chord """
return self._on
def info(self):
""" Return information of chord to display """
return """{}
root={}
quality={}
appended={}
on={}""".format(self._chord, self._root, self._quality, self._appended, self._on)
def transpose(self, trans, scale="C"):
""" Transpose the chord
:param int trans: Transpose key
:param str scale: key scale
:return:
"""
if not isinstance(trans, int):
raise TypeError("Expected integers, not {}".format(type(trans)))
self._root = transpose_note(self._root, trans, scale)
if self._on:
self._on = transpose_note(self._on, trans, scale)
self._reconfigure_chord()
def _parse(self, chord):
""" parse a chord
:param str chord: Name of chord.
"""
root, quality, appended, on = parse(chord)
self._root = root
self._quality = quality
self._appended = appended
self._on = on
def _reconfigure_chord(self):
# TODO: Use appended
self._chord = "{}{}{}{}".format(self._root,
self._quality._quality,
display_appended(self._appended),
display_on(self._on))
|
yuma-m/pychord
|
pychord/chord.py
|
Chord._parse
|
python
|
def _parse(self, chord):
root, quality, appended, on = parse(chord)
self._root = root
self._quality = quality
self._appended = appended
self._on = on
|
parse a chord
:param str chord: Name of chord.
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/chord.py#L111-L120
|
[
"def parse(chord):\n \"\"\" Parse a string to get chord component\n\n :param str chord: str expression of a chord\n :rtype: (str, pychord.Quality, str, str)\n :return: (root, quality, appended, on)\n \"\"\"\n if len(chord) > 1 and chord[1] in (\"b\", \"#\"):\n root = chord[:2]\n rest = chord[2:]\n else:\n root = chord[:1]\n rest = chord[1:]\n check_note(root, chord)\n on_chord_idx = rest.find(\"/\")\n if on_chord_idx >= 0:\n on = rest[on_chord_idx + 1:]\n rest = rest[:on_chord_idx]\n check_note(on, chord)\n else:\n on = None\n if rest in QUALITY_DICT:\n quality = Quality(rest)\n else:\n raise ValueError(\"Invalid chord {}: Unknown quality {}\".format(chord, rest))\n # TODO: Implement parser for appended notes\n appended = []\n return root, quality, appended, on\n"
] |
class Chord(object):
""" Class to handle a chord.
:param str _chord: Name of the chord. (e.g. C, Am7, F#m7-5/A)
:param str _root: The root note of chord.
:param pychord.Quality _quality: The quality of chord. (e.g. m7, 6, M9, ...)
:param list[str] _appended: The appended notes on chord.
:param str _on: The base note of slash chord.
"""
def __init__(self, chord):
""" Constructor of Chord instance
:param str chord: Name of chord.
"""
self._chord = chord
self._root, self._quality, self._appended, self._on = "", "", "", ""
self._parse(chord)
def __unicode__(self):
return self._chord
def __str__(self):
return self._chord
def __repr__(self):
return "<Chord: {}>".format(self._chord)
def __eq__(self, other):
if not isinstance(other, Chord):
raise TypeError("Cannot compare Chord object with {} object".format(type(other)))
if note_to_val(self._root) != note_to_val(other.root):
return False
if self._quality != other.quality:
return False
if self._appended != other.appended:
return False
if self._on and other.on:
if note_to_val(self._on) != note_to_val(other.on):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@property
def chord(self):
""" The name of chord """
return self._chord
@property
def root(self):
""" The root note of chord """
return self._root
@property
def quality(self):
""" The quality of chord """
return self._quality
@property
def appended(self):
""" The appended notes on chord """
return self._appended
@property
def on(self):
""" The base note of slash chord """
return self._on
def info(self):
""" Return information of chord to display """
return """{}
root={}
quality={}
appended={}
on={}""".format(self._chord, self._root, self._quality, self._appended, self._on)
def transpose(self, trans, scale="C"):
""" Transpose the chord
:param int trans: Transpose key
:param str scale: key scale
:return:
"""
if not isinstance(trans, int):
raise TypeError("Expected integers, not {}".format(type(trans)))
self._root = transpose_note(self._root, trans, scale)
if self._on:
self._on = transpose_note(self._on, trans, scale)
self._reconfigure_chord()
def components(self, visible=True):
""" Return the component notes of chord
:param bool visible: returns the name of notes if True else list of int
:rtype: list[(str or int)]
:return: component notes of chord
"""
if self._on:
self._quality.append_on_chord(self.on, self.root)
return self._quality.get_components(root=self._root, visible=visible)
def _reconfigure_chord(self):
# TODO: Use appended
self._chord = "{}{}{}{}".format(self._root,
self._quality._quality,
display_appended(self._appended),
display_on(self._on))
|
yuma-m/pychord
|
pychord/utils.py
|
transpose_note
|
python
|
def transpose_note(note, transpose, scale="C"):
val = note_to_val(note)
val += transpose
return val_to_note(val, scale)
|
Transpose a note
:param str note: note to transpose
:type transpose: int
:param str scale: key scale
:rtype: str
:return: transposed note
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/utils.py#L38-L49
|
[
"def note_to_val(note):\n \"\"\" Convert note to int\n\n >>> note_to_val(\"C\")\n 0\n >>> note_to_val(\"B\")\n 11\n\n :type note: str\n :rtype: int\n \"\"\"\n if note not in NOTE_VAL_DICT:\n raise ValueError(\"Unknown note {}\".format(note))\n return NOTE_VAL_DICT[note]\n",
"def val_to_note(val, scale=\"C\"):\n \"\"\" Convert int to note\n\n >>> val_to_note(0)\n \"C\"\n >>> val_to_note(11, \"D\")\n \"D#\"\n\n :type val: int\n :param str scale: key scale\n :rtype: str\n \"\"\"\n val %= 12\n return SCALE_VAL_DICT[scale][val]\n"
] |
# -*- coding: utf-8 -*-
from .constants import NOTE_VAL_DICT, SCALE_VAL_DICT
def note_to_val(note):
""" Convert note to int
>>> note_to_val("C")
0
>>> note_to_val("B")
11
:type note: str
:rtype: int
"""
if note not in NOTE_VAL_DICT:
raise ValueError("Unknown note {}".format(note))
return NOTE_VAL_DICT[note]
def val_to_note(val, scale="C"):
""" Convert int to note
>>> val_to_note(0)
"C"
>>> val_to_note(11, "D")
"D#"
:type val: int
:param str scale: key scale
:rtype: str
"""
val %= 12
return SCALE_VAL_DICT[scale][val]
def display_appended(appended):
# TODO: Implement this
return ""
def display_on(on_note):
if on_note:
return "/{}".format(on_note)
return ""
|
yuma-m/pychord
|
pychord/parser.py
|
parse
|
python
|
def parse(chord):
if len(chord) > 1 and chord[1] in ("b", "#"):
root = chord[:2]
rest = chord[2:]
else:
root = chord[:1]
rest = chord[1:]
check_note(root, chord)
on_chord_idx = rest.find("/")
if on_chord_idx >= 0:
on = rest[on_chord_idx + 1:]
rest = rest[:on_chord_idx]
check_note(on, chord)
else:
on = None
if rest in QUALITY_DICT:
quality = Quality(rest)
else:
raise ValueError("Invalid chord {}: Unknown quality {}".format(chord, rest))
# TODO: Implement parser for appended notes
appended = []
return root, quality, appended, on
|
Parse a string to get chord component
:param str chord: str expression of a chord
:rtype: (str, pychord.Quality, str, str)
:return: (root, quality, appended, on)
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/parser.py#L8-L35
|
[
"def check_note(note, chord):\n \"\"\" Return True if the note is valid.\n\n :param str note: note to check its validity\n :param str chord: the chord which includes the note\n :rtype: bool\n \"\"\"\n if note not in NOTE_VAL_DICT:\n raise ValueError(\"Invalid chord {}: Unknown note {}\".format(chord, note))\n return True\n"
] |
# -*- coding: utf-8 -*-
from .quality import Quality
from .constants import QUALITY_DICT
from .utils import NOTE_VAL_DICT
def check_note(note, chord):
""" Return True if the note is valid.
:param str note: note to check its validity
:param str chord: the chord which includes the note
:rtype: bool
"""
if note not in NOTE_VAL_DICT:
raise ValueError("Invalid chord {}: Unknown note {}".format(chord, note))
return True
|
yuma-m/pychord
|
pychord/parser.py
|
check_note
|
python
|
def check_note(note, chord):
if note not in NOTE_VAL_DICT:
raise ValueError("Invalid chord {}: Unknown note {}".format(chord, note))
return True
|
Return True if the note is valid.
:param str note: note to check its validity
:param str chord: the chord which includes the note
:rtype: bool
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/parser.py#L38-L47
| null |
# -*- coding: utf-8 -*-
from .quality import Quality
from .constants import QUALITY_DICT
from .utils import NOTE_VAL_DICT
def parse(chord):
""" Parse a string to get chord component
:param str chord: str expression of a chord
:rtype: (str, pychord.Quality, str, str)
:return: (root, quality, appended, on)
"""
if len(chord) > 1 and chord[1] in ("b", "#"):
root = chord[:2]
rest = chord[2:]
else:
root = chord[:1]
rest = chord[1:]
check_note(root, chord)
on_chord_idx = rest.find("/")
if on_chord_idx >= 0:
on = rest[on_chord_idx + 1:]
rest = rest[:on_chord_idx]
check_note(on, chord)
else:
on = None
if rest in QUALITY_DICT:
quality = Quality(rest)
else:
raise ValueError("Invalid chord {}: Unknown quality {}".format(chord, rest))
# TODO: Implement parser for appended notes
appended = []
return root, quality, appended, on
|
yuma-m/pychord
|
pychord/analyzer.py
|
note_to_chord
|
python
|
def note_to_chord(notes):
if not notes:
raise ValueError("Please specify notes which consist a chord.")
root = notes[0]
root_and_positions = []
for rotated_notes in get_all_rotated_notes(notes):
rotated_root = rotated_notes[0]
root_and_positions.append([rotated_root, notes_to_positions(rotated_notes, rotated_notes[0])])
chords = []
for temp_root, positions in root_and_positions:
quality = find_quality(positions)
if quality is None:
continue
if temp_root == root:
chord = "{}{}".format(root, quality)
else:
chord = "{}{}/{}".format(temp_root, quality, root)
chords.append(Chord(chord))
return chords
|
Convert note list to chord list
:param list[str] notes: list of note arranged from lower note. ex) ["C", "Eb", "G"]
:rtype: list[pychord.Chord]
:return: list of chord
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/analyzer.py#L8-L32
|
[
"def get_all_rotated_notes(notes):\n \"\"\" Get all rotated notes\n\n get_all_rotated_notes([1,3,5]) -> [[1,3,5],[3,5,1],[5,1,3]]\n\n :type notes: list[str]\n :rtype: list[list[str]]\n \"\"\"\n notes_list = []\n for x in range(len(notes)):\n notes_list.append(notes[x:] + notes[:x])\n return notes_list\n",
"def notes_to_positions(notes, root):\n \"\"\" Get notes positions.\n\n ex) notes_to_positions([\"C\", \"E\", \"G\"], \"C\") -> [0, 4, 7]\n\n :param list[str] notes: list of notes\n :param str root: the root note\n :rtype: list[int]\n :return: list of note positions\n \"\"\"\n root_pos = note_to_val(root)\n current_pos = root_pos\n positions = []\n for note in notes:\n note_pos = note_to_val(note)\n if note_pos < current_pos:\n note_pos += 12 * ((current_pos - note_pos) // 12 + 1)\n positions.append(note_pos - root_pos)\n current_pos = note_pos\n return positions\n",
"def find_quality(positions):\n \"\"\" Find a quality consists of positions\n\n :param list[int] positions: note positions\n :rtype: str|None\n \"\"\"\n for q, p in QUALITY_DICT.items():\n if positions == list(p):\n return q\n return None\n"
] |
# -*- coding: utf-8 -*-
from .chord import Chord
from .constants.qualities import QUALITY_DICT
from .utils import note_to_val
def notes_to_positions(notes, root):
""" Get notes positions.
ex) notes_to_positions(["C", "E", "G"], "C") -> [0, 4, 7]
:param list[str] notes: list of notes
:param str root: the root note
:rtype: list[int]
:return: list of note positions
"""
root_pos = note_to_val(root)
current_pos = root_pos
positions = []
for note in notes:
note_pos = note_to_val(note)
if note_pos < current_pos:
note_pos += 12 * ((current_pos - note_pos) // 12 + 1)
positions.append(note_pos - root_pos)
current_pos = note_pos
return positions
def get_all_rotated_notes(notes):
""" Get all rotated notes
get_all_rotated_notes([1,3,5]) -> [[1,3,5],[3,5,1],[5,1,3]]
:type notes: list[str]
:rtype: list[list[str]]
"""
notes_list = []
for x in range(len(notes)):
notes_list.append(notes[x:] + notes[:x])
return notes_list
def find_quality(positions):
""" Find a quality consists of positions
:param list[int] positions: note positions
:rtype: str|None
"""
for q, p in QUALITY_DICT.items():
if positions == list(p):
return q
return None
|
yuma-m/pychord
|
pychord/analyzer.py
|
notes_to_positions
|
python
|
def notes_to_positions(notes, root):
root_pos = note_to_val(root)
current_pos = root_pos
positions = []
for note in notes:
note_pos = note_to_val(note)
if note_pos < current_pos:
note_pos += 12 * ((current_pos - note_pos) // 12 + 1)
positions.append(note_pos - root_pos)
current_pos = note_pos
return positions
|
Get notes positions.
ex) notes_to_positions(["C", "E", "G"], "C") -> [0, 4, 7]
:param list[str] notes: list of notes
:param str root: the root note
:rtype: list[int]
:return: list of note positions
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/analyzer.py#L35-L54
|
[
"def note_to_val(note):\n \"\"\" Convert note to int\n\n >>> note_to_val(\"C\")\n 0\n >>> note_to_val(\"B\")\n 11\n\n :type note: str\n :rtype: int\n \"\"\"\n if note not in NOTE_VAL_DICT:\n raise ValueError(\"Unknown note {}\".format(note))\n return NOTE_VAL_DICT[note]\n"
] |
# -*- coding: utf-8 -*-
from .chord import Chord
from .constants.qualities import QUALITY_DICT
from .utils import note_to_val
def note_to_chord(notes):
""" Convert note list to chord list
:param list[str] notes: list of note arranged from lower note. ex) ["C", "Eb", "G"]
:rtype: list[pychord.Chord]
:return: list of chord
"""
if not notes:
raise ValueError("Please specify notes which consist a chord.")
root = notes[0]
root_and_positions = []
for rotated_notes in get_all_rotated_notes(notes):
rotated_root = rotated_notes[0]
root_and_positions.append([rotated_root, notes_to_positions(rotated_notes, rotated_notes[0])])
chords = []
for temp_root, positions in root_and_positions:
quality = find_quality(positions)
if quality is None:
continue
if temp_root == root:
chord = "{}{}".format(root, quality)
else:
chord = "{}{}/{}".format(temp_root, quality, root)
chords.append(Chord(chord))
return chords
def get_all_rotated_notes(notes):
""" Get all rotated notes
get_all_rotated_notes([1,3,5]) -> [[1,3,5],[3,5,1],[5,1,3]]
:type notes: list[str]
:rtype: list[list[str]]
"""
notes_list = []
for x in range(len(notes)):
notes_list.append(notes[x:] + notes[:x])
return notes_list
def find_quality(positions):
""" Find a quality consists of positions
:param list[int] positions: note positions
:rtype: str|None
"""
for q, p in QUALITY_DICT.items():
if positions == list(p):
return q
return None
|
yuma-m/pychord
|
pychord/analyzer.py
|
get_all_rotated_notes
|
python
|
def get_all_rotated_notes(notes):
notes_list = []
for x in range(len(notes)):
notes_list.append(notes[x:] + notes[:x])
return notes_list
|
Get all rotated notes
get_all_rotated_notes([1,3,5]) -> [[1,3,5],[3,5,1],[5,1,3]]
:type notes: list[str]
:rtype: list[list[str]]
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/analyzer.py#L57-L68
| null |
# -*- coding: utf-8 -*-
from .chord import Chord
from .constants.qualities import QUALITY_DICT
from .utils import note_to_val
def note_to_chord(notes):
""" Convert note list to chord list
:param list[str] notes: list of note arranged from lower note. ex) ["C", "Eb", "G"]
:rtype: list[pychord.Chord]
:return: list of chord
"""
if not notes:
raise ValueError("Please specify notes which consist a chord.")
root = notes[0]
root_and_positions = []
for rotated_notes in get_all_rotated_notes(notes):
rotated_root = rotated_notes[0]
root_and_positions.append([rotated_root, notes_to_positions(rotated_notes, rotated_notes[0])])
chords = []
for temp_root, positions in root_and_positions:
quality = find_quality(positions)
if quality is None:
continue
if temp_root == root:
chord = "{}{}".format(root, quality)
else:
chord = "{}{}/{}".format(temp_root, quality, root)
chords.append(Chord(chord))
return chords
def notes_to_positions(notes, root):
""" Get notes positions.
ex) notes_to_positions(["C", "E", "G"], "C") -> [0, 4, 7]
:param list[str] notes: list of notes
:param str root: the root note
:rtype: list[int]
:return: list of note positions
"""
root_pos = note_to_val(root)
current_pos = root_pos
positions = []
for note in notes:
note_pos = note_to_val(note)
if note_pos < current_pos:
note_pos += 12 * ((current_pos - note_pos) // 12 + 1)
positions.append(note_pos - root_pos)
current_pos = note_pos
return positions
def find_quality(positions):
""" Find a quality consists of positions
:param list[int] positions: note positions
:rtype: str|None
"""
for q, p in QUALITY_DICT.items():
if positions == list(p):
return q
return None
|
yuma-m/pychord
|
pychord/analyzer.py
|
find_quality
|
python
|
def find_quality(positions):
for q, p in QUALITY_DICT.items():
if positions == list(p):
return q
return None
|
Find a quality consists of positions
:param list[int] positions: note positions
:rtype: str|None
|
train
|
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/analyzer.py#L71-L80
| null |
# -*- coding: utf-8 -*-
from .chord import Chord
from .constants.qualities import QUALITY_DICT
from .utils import note_to_val
def note_to_chord(notes):
""" Convert note list to chord list
:param list[str] notes: list of note arranged from lower note. ex) ["C", "Eb", "G"]
:rtype: list[pychord.Chord]
:return: list of chord
"""
if not notes:
raise ValueError("Please specify notes which consist a chord.")
root = notes[0]
root_and_positions = []
for rotated_notes in get_all_rotated_notes(notes):
rotated_root = rotated_notes[0]
root_and_positions.append([rotated_root, notes_to_positions(rotated_notes, rotated_notes[0])])
chords = []
for temp_root, positions in root_and_positions:
quality = find_quality(positions)
if quality is None:
continue
if temp_root == root:
chord = "{}{}".format(root, quality)
else:
chord = "{}{}/{}".format(temp_root, quality, root)
chords.append(Chord(chord))
return chords
def notes_to_positions(notes, root):
""" Get notes positions.
ex) notes_to_positions(["C", "E", "G"], "C") -> [0, 4, 7]
:param list[str] notes: list of notes
:param str root: the root note
:rtype: list[int]
:return: list of note positions
"""
root_pos = note_to_val(root)
current_pos = root_pos
positions = []
for note in notes:
note_pos = note_to_val(note)
if note_pos < current_pos:
note_pos += 12 * ((current_pos - note_pos) // 12 + 1)
positions.append(note_pos - root_pos)
current_pos = note_pos
return positions
def get_all_rotated_notes(notes):
""" Get all rotated notes
get_all_rotated_notes([1,3,5]) -> [[1,3,5],[3,5,1],[5,1,3]]
:type notes: list[str]
:rtype: list[list[str]]
"""
notes_list = []
for x in range(len(notes)):
notes_list.append(notes[x:] + notes[:x])
return notes_list
|
Linaro/squad
|
squad/core/statistics.py
|
geomean
|
python
|
def geomean(values):
values = [v for v in values if v > 0]
if len(values) == 0:
return 0
n = len(values)
log_sum = 0.0
for v in values:
log_sum = log_sum + log(v)
return exp(log_sum / n)
|
The intuitive/naive way of calculating a geometric mean (first
multiply the n values, then take the nth-root of the result) does not
work in practice. When you multiple an large enough amount of large
enough numbers, their product will oferflow the float representation,
and the result will be Infinity.
We use the alternative method described in
https://en.wikipedia.org/wiki/Geometric_mean -- topic "Relationship with
arithmetic mean of logarithms" -- which is exp(sum(log(x_i)/n))
Zeros are excluded from the calculation. Since for us numbers are usually
measurements (time, counts, etc), we interpret 0 as "does not exist".
Negative numbers are also excluded on the basis that they most probably
represent anomalies in the data.
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/statistics.py#L4-L30
| null |
from math import log, exp
|
Linaro/squad
|
squad/core/notification.py
|
Notification.message
|
python
|
def message(self, do_html=True, custom_email_template=None):
context = {
'build': self.build,
'important_metadata': self.important_metadata,
'metadata': self.metadata,
'notification': self,
'previous_build': self.previous_build,
'regressions_grouped_by_suite': self.comparison.regressions_grouped_by_suite,
'fixes_grouped_by_suite': self.comparison.fixes_grouped_by_suite,
'known_issues': self.known_issues,
'regressions': self.comparison.regressions,
'fixes': self.comparison.fixes,
'thresholds': self.thresholds,
'settings': settings,
'summary': self.summary,
}
html_message = ''
if custom_email_template:
text_template = jinja2.from_string(custom_email_template.plain_text)
text_message = text_template.render(context)
if do_html:
html_template = jinja2.from_string(custom_email_template.html)
html_message = html_template.render(context)
else:
text_message = render_to_string(
'squad/notification/diff.txt.jinja2',
context=context,
)
if do_html:
html_message = render_to_string(
'squad/notification/diff.html.jinja2',
context=context,
)
return (text_message, html_message)
|
Returns a tuple with (text_message,html_message)
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/notification.py#L113-L151
| null |
class Notification(object):
"""
Represents a notification about a project status change, that may or may
not need to be sent.
"""
def __init__(self, status, previous=None):
self.status = status
self.build = status.build
if previous is None:
previous = status.get_previous()
self.previous_build = previous and previous.build or None
__comparison__ = None
@property
def comparison(self):
if self.__comparison__ is None:
self.__comparison__ = TestComparison.compare_builds(
self.previous_build,
self.build,
)
return self.__comparison__
@property
def diff(self):
return self.comparison.diff
@property
def project(self):
return self.build.project
@property
def metadata(self):
if self.build.metadata is not None:
return OrderedDict(sorted(self.build.metadata.items()))
else:
return {}
@property
def important_metadata(self):
return self.build.important_metadata
@property
def summary(self):
return self.build.test_summary
@property
def recipients(self):
emails = []
for subscription in self.project.subscriptions.all():
if subscription.notification_strategy == Subscription.NOTIFY_ON_CHANGE:
if not self.previous_build or not self.diff:
continue
elif subscription.notification_strategy == Subscription.NOTIFY_ON_REGRESSION:
if not self.previous_build or \
len(self.comparison.regressions) == 0:
continue
email = subscription.get_email()
if email:
emails.append(email)
return emails
@property
def known_issues(self):
return KnownIssue.active_by_project_and_test(self.project)
@property
def thresholds(self):
return self.status.get_exceeded_thresholds()
@property
def subject(self):
return self.create_subject()
def create_subject(self, custom_email_template=None):
summary = self.summary
subject_data = {
'build': self.build.version,
'important_metadata': self.important_metadata,
'metadata': self.metadata,
'project': self.project,
'regressions': len(self.comparison.regressions),
'tests_fail': summary.tests_fail,
'tests_pass': summary.tests_pass,
'tests_total': summary.tests_total,
}
if custom_email_template is None and self.project.custom_email_template is not None:
custom_email_template = self.project.custom_email_template
if custom_email_template and custom_email_template.subject:
template = custom_email_template.subject
else:
template = '{{project}}: {{tests_total}} tests, {{tests_fail}} failed, {{tests_pass}} passed (build {{build}})'
return jinja2.from_string(template).render(subject_data)
def send(self):
recipients = self.recipients
if not recipients:
return
sender = "%s <%s>" % (settings.SITE_NAME, settings.EMAIL_FROM)
subject = self.subject
txt, html = self.message(self.project.html_mail, self.project.custom_email_template)
if NotificationDelivery.exists(self.status, subject, txt, html):
return
message = Message(subject, txt, sender, recipients)
if self.project.html_mail:
message.attach_alternative(html, "text/html")
message.send()
self.mark_as_notified()
def mark_as_notified(self):
self.status.notified = True
self.status.save()
|
Linaro/squad
|
squad/api/filters.py
|
decode_complex_ops
|
python
|
def decode_complex_ops(encoded_querystring, operators=None, negation=True):
complex_op_re = COMPLEX_OP_NEG_RE if negation else COMPLEX_OP_RE
if operators is None:
operators = COMPLEX_OPERATORS
# decode into: (a%3D1) & (b%3D2) | ~(c%3D3)
decoded_querystring = unquote(encoded_querystring)
matches = [m for m in complex_op_re.finditer(decoded_querystring)]
if not matches:
msg = _("Unable to parse querystring. Decoded: '%(decoded)s'.")
raise SerializerValidationError(msg % {'decoded': decoded_querystring})
results, errors = [], []
for match, has_next in lookahead(matches):
negate, querystring, op = match.groups()
negate = negate == '~'
querystring = unquote(querystring)
op_func = operators.get(op.strip()) if op else None
if op_func is None and has_next:
msg = _("Invalid querystring operator. Matched: '%(op)s'.")
errors.append(msg % {'op': op})
results.append(ComplexOp(querystring, negate, op_func))
trailing_chars = decoded_querystring[matches[-1].end():]
if trailing_chars:
msg = _("Ending querystring must not have trailing characters. Matched: '%(chars)s'.")
errors.append(msg % {'chars': trailing_chars})
if errors:
raise SerializerValidationError(errors)
return results
|
Returns a list of (querystring, negate, op) tuples that represent complex operations.
This function will raise a `ValidationError`s if:
- the individual querystrings are not wrapped in parentheses
- the set operators do not match the provided `operators`
- there is trailing content after the ending querysting
Ex::
# unencoded query: (a=1) & (b=2) | ~(c=3)
>>> s = '%28a%253D1%29%20%26%20%28b%253D2%29%20%7C%20%7E%28c%253D3%29'
>>> decode_querystring_ops(s)
[
('a=1', False, QuerySet.__and__),
('b=2', False, QuerySet.__or__),
('c=3', True, None),
]
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/api/filters.py#L71-L121
|
[
"def lookahead(iterable):\n it = iter(iterable)\n try:\n current = next(it)\n except StopIteration:\n return\n\n for value in it:\n yield current, True\n current = value\n yield current, False\n"
] |
"""
Copyright (c) 2013-2015 Philip Neustrom <philipn@gmail.com>,
2016-2017 Ryan P Kilby <rpkilby@ncsu.edu>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This piece of code was copied from master branch of
https://github.com/philipn/django-rest-framework-filters
SQUAD currently uses version 0.10.2 which doesn't have the required features.
Once version 1.0 is release and SQUAD moves to this version this file
should be removed.
"""
import re
from collections import namedtuple
from urllib.parse import unquote
from django.db.models import QuerySet
from django.http import QueryDict
from django_filters.rest_framework import backends
from django.utils.translation import ugettext as _
from rest_framework.serializers import ValidationError as SerializerValidationError
from rest_framework_filters.filterset import FilterSet
from rest_framework.exceptions import ValidationError
# originally based on: https://regex101.com/r/5rPycz/1
# current iteration: https://regex101.com/r/5rPycz/3
# special thanks to @JohnDoe2 on the #regex IRC channel!
# matches groups of "<negate>(<encoded querystring>)<set op>"
COMPLEX_OP_RE = re.compile(r'()\(([^)]+)\)([^(]*?(?=\())?')
COMPLEX_OP_NEG_RE = re.compile(r'(~?)\(([^)]+)\)([^(]*?(?=~\(|\())?')
COMPLEX_OPERATORS = {
'&': QuerySet.__and__,
'|': QuerySet.__or__,
}
ComplexOp = namedtuple('ComplexOp', ['querystring', 'negate', 'op'])
def lookahead(iterable):
it = iter(iterable)
try:
current = next(it)
except StopIteration:
return
for value in it:
yield current, True
current = value
yield current, False
def combine_complex_queryset(querysets, complex_ops, negation=True):
# Negate querysets
for queryset, op in zip(querysets, complex_ops):
if negation and op.negate:
queryset.query.where.negate()
# Combine querysets
combined = querysets[0]
for queryset, op in zip(querysets[1:], complex_ops[:-1]):
combined = op.op(combined, queryset)
return combined
class RestFrameworkFilterBackend(backends.DjangoFilterBackend):
filterset_base = FilterSet
def to_html(self, request, queryset, view):
return super().to_html(request, queryset, view)
class ComplexFilterBackend(RestFrameworkFilterBackend):
complex_filter_param = 'filters'
operators = None
negation = True
def filter_queryset(self, request, queryset, view):
if self.complex_filter_param not in request.query_params:
return super().filter_queryset(request, queryset, view)
# Decode the set of complex operations
encoded_querystring = request.query_params[self.complex_filter_param]
try:
complex_ops = decode_complex_ops(encoded_querystring, self.operators, self.negation)
except ValidationError as exc:
raise ValidationError({self.complex_filter_param: exc.detail})
# Collect the individual filtered querysets
querystrings = [op.querystring for op in complex_ops]
try:
querysets = self.get_filtered_querysets(querystrings, request, queryset, view)
except ValidationError as exc:
raise ValidationError({self.complex_filter_param: exc.detail})
return combine_complex_queryset(querysets, complex_ops)
def get_filtered_querysets(self, querystrings, request, queryset, view):
original_GET = request._request.GET
querysets, errors = [], {}
for qs in querystrings:
request._request.GET = QueryDict(qs)
try:
result = super().filter_queryset(request, queryset, view)
querysets.append(result)
except ValidationError as exc:
errors[qs] = exc.detail
finally:
request._request.GET = original_GET
if errors:
raise ValidationError(errors)
return querysets
|
Linaro/squad
|
squad/core/management/commands/users.py
|
Command.handle
|
python
|
def handle(self, *args, **options):
if options["sub_command"] == "add":
self.handle_add(options)
elif options["sub_command"] == "update":
self.handle_update(options)
elif options["sub_command"] == "details":
self.handle_details(options["username"])
elif options["sub_command"] == "list":
self.handle_list(options["all"], options["csv"])
|
Forward to the right sub-handler
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/management/commands/users.py#L148-L157
| null |
class Command(BaseCommand):
help = "Manage users"
def add_arguments(self, parser):
cmd = self
class SubParser(CommandParser):
"""
Sub-parsers constructor that mimic Django constructor.
See http://stackoverflow.com/a/37414551
"""
def __init__(self, **kwargs):
super().__init__(cmd, **kwargs)
sub = parser.add_subparsers(
dest="sub_command", help="Sub commands", parser_class=SubParser
)
sub.required = True
# "add" sub-command
add_parser = sub.add_parser("add", help="Add a user")
add_parser.add_argument("username", help="Username of the user")
add_parser.add_argument(
"--email", type=str, default=None, help="email of the user"
)
add_parser.add_argument(
"--passwd",
type=str,
default=None,
help="Password for this user. If empty, a random password is generated.",
)
add_parser.add_argument(
"--staff",
default=False,
action="store_true",
help="Make this user a staff member",
)
add_parser.add_argument(
"--superuser",
default=False,
action="store_true",
help="Make this user a super user",
)
# "update" sub-command
update_parser = sub.add_parser("update", help="Update an existing user")
update_parser.add_argument("username", help="Username of the user")
update_parser.add_argument(
"--email", type=str, default=None, help="Change email of the user"
)
active_parser = update_parser.add_mutually_exclusive_group(required=False)
active_parser.add_argument(
"--active",
dest="active",
action="store_const",
const=True, # not a boolean
help="Make this user active",
)
active_parser.add_argument(
"--not-active",
dest="active",
action="store_const",
const=False,
help="Make this user inactive",
)
active_parser.set_defaults(active=None) # tri-state - None, True, False
staff_parser = update_parser.add_mutually_exclusive_group(required=False)
staff_parser.add_argument(
"--staff",
dest="staff",
action="store_const",
const=True,
help="Make this user a staff member",
)
staff_parser.add_argument(
"--not-staff",
dest="staff",
action="store_const",
const=False,
help="Make this user no longer a staff member",
)
staff_parser.set_defaults(staff=None)
superuser_parser = update_parser.add_mutually_exclusive_group(required=False)
superuser_parser.add_argument(
"--superuser",
dest="superuser",
action="store_const",
const=True,
help="Make this user a superuser",
)
superuser_parser.add_argument(
"--not-superuser",
dest="superuser",
action="store_const",
const=False,
help="Make this user no longer a superuser",
)
superuser_parser.set_defaults(superuser=None)
# "details" sub-command
details_parser = sub.add_parser("details", help="User details")
details_parser.add_argument("username", help="Username of the user")
# "list" sub-command
list_parser = sub.add_parser("list", help="List users")
list_parser.add_argument(
"--all",
dest="all",
default=False,
action="store_true",
help="Show all users including inactive ones",
)
list_parser.add_argument(
"--csv", dest="csv", default=False, action="store_true", help="Print as csv"
)
def handle_add(self, options):
""" Create a new user """
username = options["username"]
passwd = options["passwd"]
if passwd is None:
passwd = User.objects.make_random_password()
user = User.objects.create_user(username, options["email"], passwd)
if options["staff"]:
user.is_staff = True
if options["superuser"]:
user.is_superuser = True
user.save()
if options["passwd"] is None:
self.stdout.write(passwd)
def handle_update(self, options): # pylint: disable=no-self-use
""" Update existing user"""
username = options["username"]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("User %s does not exist" % username)
if options["email"]:
user.email = options["email"]
# False is an allowed value, but not None.
if options["active"] in [True, False]:
user.is_active = options["active"]
if options["staff"] in [True, False]:
user.is_staff = options["staff"]
if options["superuser"] in [True, False]:
user.is_superuser = options["superuser"]
user.save()
def handle_details(self, username):
""" Print user details """
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("Unable to find user '%s'" % username)
self.stdout.write("username : %s" % username)
self.stdout.write("is_active : %s" % user.is_active)
self.stdout.write("is_staff : %s" % user.is_staff)
self.stdout.write("is_superuser: %s" % user.is_superuser)
groups = [g.name for g in user.groups.all().order_by("name")]
self.stdout.write("groups : [%s]" % ", ".join(groups))
def handle_list(self, show_all, format_as_csv):
""" List users """
users = User.objects.all().order_by("username")
if not show_all:
users = users.exclude(is_active=False)
if format_as_csv:
fields = ["username", "fullname", "email", "staff", "superuser"]
writer = csv.DictWriter(self.stdout, fieldnames=fields)
writer.writeheader()
for user in users:
writer.writerow(
{
"username": user.username,
"email": user.email,
"staff": user.is_staff,
"superuser": user.is_superuser,
}
)
else:
self.stdout.write("List of users:")
for user in users:
out = "* %s" % user.username
if user.get_full_name():
out = "%s (%s)" % (out, user.get_full_name())
if not user.is_active:
out = "%s [inactive]" % out
self.stdout.write(out)
|
Linaro/squad
|
squad/core/management/commands/users.py
|
Command.handle_add
|
python
|
def handle_add(self, options):
username = options["username"]
passwd = options["passwd"]
if passwd is None:
passwd = User.objects.make_random_password()
user = User.objects.create_user(username, options["email"], passwd)
if options["staff"]:
user.is_staff = True
if options["superuser"]:
user.is_superuser = True
user.save()
if options["passwd"] is None:
self.stdout.write(passwd)
|
Create a new user
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/management/commands/users.py#L159-L174
| null |
class Command(BaseCommand):
help = "Manage users"
def add_arguments(self, parser):
cmd = self
class SubParser(CommandParser):
"""
Sub-parsers constructor that mimic Django constructor.
See http://stackoverflow.com/a/37414551
"""
def __init__(self, **kwargs):
super().__init__(cmd, **kwargs)
sub = parser.add_subparsers(
dest="sub_command", help="Sub commands", parser_class=SubParser
)
sub.required = True
# "add" sub-command
add_parser = sub.add_parser("add", help="Add a user")
add_parser.add_argument("username", help="Username of the user")
add_parser.add_argument(
"--email", type=str, default=None, help="email of the user"
)
add_parser.add_argument(
"--passwd",
type=str,
default=None,
help="Password for this user. If empty, a random password is generated.",
)
add_parser.add_argument(
"--staff",
default=False,
action="store_true",
help="Make this user a staff member",
)
add_parser.add_argument(
"--superuser",
default=False,
action="store_true",
help="Make this user a super user",
)
# "update" sub-command
update_parser = sub.add_parser("update", help="Update an existing user")
update_parser.add_argument("username", help="Username of the user")
update_parser.add_argument(
"--email", type=str, default=None, help="Change email of the user"
)
active_parser = update_parser.add_mutually_exclusive_group(required=False)
active_parser.add_argument(
"--active",
dest="active",
action="store_const",
const=True, # not a boolean
help="Make this user active",
)
active_parser.add_argument(
"--not-active",
dest="active",
action="store_const",
const=False,
help="Make this user inactive",
)
active_parser.set_defaults(active=None) # tri-state - None, True, False
staff_parser = update_parser.add_mutually_exclusive_group(required=False)
staff_parser.add_argument(
"--staff",
dest="staff",
action="store_const",
const=True,
help="Make this user a staff member",
)
staff_parser.add_argument(
"--not-staff",
dest="staff",
action="store_const",
const=False,
help="Make this user no longer a staff member",
)
staff_parser.set_defaults(staff=None)
superuser_parser = update_parser.add_mutually_exclusive_group(required=False)
superuser_parser.add_argument(
"--superuser",
dest="superuser",
action="store_const",
const=True,
help="Make this user a superuser",
)
superuser_parser.add_argument(
"--not-superuser",
dest="superuser",
action="store_const",
const=False,
help="Make this user no longer a superuser",
)
superuser_parser.set_defaults(superuser=None)
# "details" sub-command
details_parser = sub.add_parser("details", help="User details")
details_parser.add_argument("username", help="Username of the user")
# "list" sub-command
list_parser = sub.add_parser("list", help="List users")
list_parser.add_argument(
"--all",
dest="all",
default=False,
action="store_true",
help="Show all users including inactive ones",
)
list_parser.add_argument(
"--csv", dest="csv", default=False, action="store_true", help="Print as csv"
)
def handle(self, *args, **options):
""" Forward to the right sub-handler """
if options["sub_command"] == "add":
self.handle_add(options)
elif options["sub_command"] == "update":
self.handle_update(options)
elif options["sub_command"] == "details":
self.handle_details(options["username"])
elif options["sub_command"] == "list":
self.handle_list(options["all"], options["csv"])
def handle_update(self, options): # pylint: disable=no-self-use
""" Update existing user"""
username = options["username"]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("User %s does not exist" % username)
if options["email"]:
user.email = options["email"]
# False is an allowed value, but not None.
if options["active"] in [True, False]:
user.is_active = options["active"]
if options["staff"] in [True, False]:
user.is_staff = options["staff"]
if options["superuser"] in [True, False]:
user.is_superuser = options["superuser"]
user.save()
def handle_details(self, username):
""" Print user details """
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("Unable to find user '%s'" % username)
self.stdout.write("username : %s" % username)
self.stdout.write("is_active : %s" % user.is_active)
self.stdout.write("is_staff : %s" % user.is_staff)
self.stdout.write("is_superuser: %s" % user.is_superuser)
groups = [g.name for g in user.groups.all().order_by("name")]
self.stdout.write("groups : [%s]" % ", ".join(groups))
def handle_list(self, show_all, format_as_csv):
""" List users """
users = User.objects.all().order_by("username")
if not show_all:
users = users.exclude(is_active=False)
if format_as_csv:
fields = ["username", "fullname", "email", "staff", "superuser"]
writer = csv.DictWriter(self.stdout, fieldnames=fields)
writer.writeheader()
for user in users:
writer.writerow(
{
"username": user.username,
"email": user.email,
"staff": user.is_staff,
"superuser": user.is_superuser,
}
)
else:
self.stdout.write("List of users:")
for user in users:
out = "* %s" % user.username
if user.get_full_name():
out = "%s (%s)" % (out, user.get_full_name())
if not user.is_active:
out = "%s [inactive]" % out
self.stdout.write(out)
|
Linaro/squad
|
squad/core/management/commands/users.py
|
Command.handle_update
|
python
|
def handle_update(self, options): # pylint: disable=no-self-use
username = options["username"]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("User %s does not exist" % username)
if options["email"]:
user.email = options["email"]
# False is an allowed value, but not None.
if options["active"] in [True, False]:
user.is_active = options["active"]
if options["staff"] in [True, False]:
user.is_staff = options["staff"]
if options["superuser"] in [True, False]:
user.is_superuser = options["superuser"]
user.save()
|
Update existing user
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/management/commands/users.py#L176-L192
| null |
class Command(BaseCommand):
help = "Manage users"
def add_arguments(self, parser):
cmd = self
class SubParser(CommandParser):
"""
Sub-parsers constructor that mimic Django constructor.
See http://stackoverflow.com/a/37414551
"""
def __init__(self, **kwargs):
super().__init__(cmd, **kwargs)
sub = parser.add_subparsers(
dest="sub_command", help="Sub commands", parser_class=SubParser
)
sub.required = True
# "add" sub-command
add_parser = sub.add_parser("add", help="Add a user")
add_parser.add_argument("username", help="Username of the user")
add_parser.add_argument(
"--email", type=str, default=None, help="email of the user"
)
add_parser.add_argument(
"--passwd",
type=str,
default=None,
help="Password for this user. If empty, a random password is generated.",
)
add_parser.add_argument(
"--staff",
default=False,
action="store_true",
help="Make this user a staff member",
)
add_parser.add_argument(
"--superuser",
default=False,
action="store_true",
help="Make this user a super user",
)
# "update" sub-command
update_parser = sub.add_parser("update", help="Update an existing user")
update_parser.add_argument("username", help="Username of the user")
update_parser.add_argument(
"--email", type=str, default=None, help="Change email of the user"
)
active_parser = update_parser.add_mutually_exclusive_group(required=False)
active_parser.add_argument(
"--active",
dest="active",
action="store_const",
const=True, # not a boolean
help="Make this user active",
)
active_parser.add_argument(
"--not-active",
dest="active",
action="store_const",
const=False,
help="Make this user inactive",
)
active_parser.set_defaults(active=None) # tri-state - None, True, False
staff_parser = update_parser.add_mutually_exclusive_group(required=False)
staff_parser.add_argument(
"--staff",
dest="staff",
action="store_const",
const=True,
help="Make this user a staff member",
)
staff_parser.add_argument(
"--not-staff",
dest="staff",
action="store_const",
const=False,
help="Make this user no longer a staff member",
)
staff_parser.set_defaults(staff=None)
superuser_parser = update_parser.add_mutually_exclusive_group(required=False)
superuser_parser.add_argument(
"--superuser",
dest="superuser",
action="store_const",
const=True,
help="Make this user a superuser",
)
superuser_parser.add_argument(
"--not-superuser",
dest="superuser",
action="store_const",
const=False,
help="Make this user no longer a superuser",
)
superuser_parser.set_defaults(superuser=None)
# "details" sub-command
details_parser = sub.add_parser("details", help="User details")
details_parser.add_argument("username", help="Username of the user")
# "list" sub-command
list_parser = sub.add_parser("list", help="List users")
list_parser.add_argument(
"--all",
dest="all",
default=False,
action="store_true",
help="Show all users including inactive ones",
)
list_parser.add_argument(
"--csv", dest="csv", default=False, action="store_true", help="Print as csv"
)
def handle(self, *args, **options):
""" Forward to the right sub-handler """
if options["sub_command"] == "add":
self.handle_add(options)
elif options["sub_command"] == "update":
self.handle_update(options)
elif options["sub_command"] == "details":
self.handle_details(options["username"])
elif options["sub_command"] == "list":
self.handle_list(options["all"], options["csv"])
def handle_add(self, options):
""" Create a new user """
username = options["username"]
passwd = options["passwd"]
if passwd is None:
passwd = User.objects.make_random_password()
user = User.objects.create_user(username, options["email"], passwd)
if options["staff"]:
user.is_staff = True
if options["superuser"]:
user.is_superuser = True
user.save()
if options["passwd"] is None:
self.stdout.write(passwd)
def handle_details(self, username):
""" Print user details """
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("Unable to find user '%s'" % username)
self.stdout.write("username : %s" % username)
self.stdout.write("is_active : %s" % user.is_active)
self.stdout.write("is_staff : %s" % user.is_staff)
self.stdout.write("is_superuser: %s" % user.is_superuser)
groups = [g.name for g in user.groups.all().order_by("name")]
self.stdout.write("groups : [%s]" % ", ".join(groups))
def handle_list(self, show_all, format_as_csv):
""" List users """
users = User.objects.all().order_by("username")
if not show_all:
users = users.exclude(is_active=False)
if format_as_csv:
fields = ["username", "fullname", "email", "staff", "superuser"]
writer = csv.DictWriter(self.stdout, fieldnames=fields)
writer.writeheader()
for user in users:
writer.writerow(
{
"username": user.username,
"email": user.email,
"staff": user.is_staff,
"superuser": user.is_superuser,
}
)
else:
self.stdout.write("List of users:")
for user in users:
out = "* %s" % user.username
if user.get_full_name():
out = "%s (%s)" % (out, user.get_full_name())
if not user.is_active:
out = "%s [inactive]" % out
self.stdout.write(out)
|
Linaro/squad
|
squad/core/management/commands/users.py
|
Command.handle_details
|
python
|
def handle_details(self, username):
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("Unable to find user '%s'" % username)
self.stdout.write("username : %s" % username)
self.stdout.write("is_active : %s" % user.is_active)
self.stdout.write("is_staff : %s" % user.is_staff)
self.stdout.write("is_superuser: %s" % user.is_superuser)
groups = [g.name for g in user.groups.all().order_by("name")]
self.stdout.write("groups : [%s]" % ", ".join(groups))
|
Print user details
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/management/commands/users.py#L194-L206
| null |
class Command(BaseCommand):
help = "Manage users"
def add_arguments(self, parser):
cmd = self
class SubParser(CommandParser):
"""
Sub-parsers constructor that mimic Django constructor.
See http://stackoverflow.com/a/37414551
"""
def __init__(self, **kwargs):
super().__init__(cmd, **kwargs)
sub = parser.add_subparsers(
dest="sub_command", help="Sub commands", parser_class=SubParser
)
sub.required = True
# "add" sub-command
add_parser = sub.add_parser("add", help="Add a user")
add_parser.add_argument("username", help="Username of the user")
add_parser.add_argument(
"--email", type=str, default=None, help="email of the user"
)
add_parser.add_argument(
"--passwd",
type=str,
default=None,
help="Password for this user. If empty, a random password is generated.",
)
add_parser.add_argument(
"--staff",
default=False,
action="store_true",
help="Make this user a staff member",
)
add_parser.add_argument(
"--superuser",
default=False,
action="store_true",
help="Make this user a super user",
)
# "update" sub-command
update_parser = sub.add_parser("update", help="Update an existing user")
update_parser.add_argument("username", help="Username of the user")
update_parser.add_argument(
"--email", type=str, default=None, help="Change email of the user"
)
active_parser = update_parser.add_mutually_exclusive_group(required=False)
active_parser.add_argument(
"--active",
dest="active",
action="store_const",
const=True, # not a boolean
help="Make this user active",
)
active_parser.add_argument(
"--not-active",
dest="active",
action="store_const",
const=False,
help="Make this user inactive",
)
active_parser.set_defaults(active=None) # tri-state - None, True, False
staff_parser = update_parser.add_mutually_exclusive_group(required=False)
staff_parser.add_argument(
"--staff",
dest="staff",
action="store_const",
const=True,
help="Make this user a staff member",
)
staff_parser.add_argument(
"--not-staff",
dest="staff",
action="store_const",
const=False,
help="Make this user no longer a staff member",
)
staff_parser.set_defaults(staff=None)
superuser_parser = update_parser.add_mutually_exclusive_group(required=False)
superuser_parser.add_argument(
"--superuser",
dest="superuser",
action="store_const",
const=True,
help="Make this user a superuser",
)
superuser_parser.add_argument(
"--not-superuser",
dest="superuser",
action="store_const",
const=False,
help="Make this user no longer a superuser",
)
superuser_parser.set_defaults(superuser=None)
# "details" sub-command
details_parser = sub.add_parser("details", help="User details")
details_parser.add_argument("username", help="Username of the user")
# "list" sub-command
list_parser = sub.add_parser("list", help="List users")
list_parser.add_argument(
"--all",
dest="all",
default=False,
action="store_true",
help="Show all users including inactive ones",
)
list_parser.add_argument(
"--csv", dest="csv", default=False, action="store_true", help="Print as csv"
)
def handle(self, *args, **options):
""" Forward to the right sub-handler """
if options["sub_command"] == "add":
self.handle_add(options)
elif options["sub_command"] == "update":
self.handle_update(options)
elif options["sub_command"] == "details":
self.handle_details(options["username"])
elif options["sub_command"] == "list":
self.handle_list(options["all"], options["csv"])
def handle_add(self, options):
""" Create a new user """
username = options["username"]
passwd = options["passwd"]
if passwd is None:
passwd = User.objects.make_random_password()
user = User.objects.create_user(username, options["email"], passwd)
if options["staff"]:
user.is_staff = True
if options["superuser"]:
user.is_superuser = True
user.save()
if options["passwd"] is None:
self.stdout.write(passwd)
def handle_update(self, options): # pylint: disable=no-self-use
""" Update existing user"""
username = options["username"]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("User %s does not exist" % username)
if options["email"]:
user.email = options["email"]
# False is an allowed value, but not None.
if options["active"] in [True, False]:
user.is_active = options["active"]
if options["staff"] in [True, False]:
user.is_staff = options["staff"]
if options["superuser"] in [True, False]:
user.is_superuser = options["superuser"]
user.save()
def handle_list(self, show_all, format_as_csv):
""" List users """
users = User.objects.all().order_by("username")
if not show_all:
users = users.exclude(is_active=False)
if format_as_csv:
fields = ["username", "fullname", "email", "staff", "superuser"]
writer = csv.DictWriter(self.stdout, fieldnames=fields)
writer.writeheader()
for user in users:
writer.writerow(
{
"username": user.username,
"email": user.email,
"staff": user.is_staff,
"superuser": user.is_superuser,
}
)
else:
self.stdout.write("List of users:")
for user in users:
out = "* %s" % user.username
if user.get_full_name():
out = "%s (%s)" % (out, user.get_full_name())
if not user.is_active:
out = "%s [inactive]" % out
self.stdout.write(out)
|
Linaro/squad
|
squad/core/management/commands/users.py
|
Command.handle_list
|
python
|
def handle_list(self, show_all, format_as_csv):
users = User.objects.all().order_by("username")
if not show_all:
users = users.exclude(is_active=False)
if format_as_csv:
fields = ["username", "fullname", "email", "staff", "superuser"]
writer = csv.DictWriter(self.stdout, fieldnames=fields)
writer.writeheader()
for user in users:
writer.writerow(
{
"username": user.username,
"email": user.email,
"staff": user.is_staff,
"superuser": user.is_superuser,
}
)
else:
self.stdout.write("List of users:")
for user in users:
out = "* %s" % user.username
if user.get_full_name():
out = "%s (%s)" % (out, user.get_full_name())
if not user.is_active:
out = "%s [inactive]" % out
self.stdout.write(out)
|
List users
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/management/commands/users.py#L208-L238
| null |
class Command(BaseCommand):
help = "Manage users"
def add_arguments(self, parser):
cmd = self
class SubParser(CommandParser):
"""
Sub-parsers constructor that mimic Django constructor.
See http://stackoverflow.com/a/37414551
"""
def __init__(self, **kwargs):
super().__init__(cmd, **kwargs)
sub = parser.add_subparsers(
dest="sub_command", help="Sub commands", parser_class=SubParser
)
sub.required = True
# "add" sub-command
add_parser = sub.add_parser("add", help="Add a user")
add_parser.add_argument("username", help="Username of the user")
add_parser.add_argument(
"--email", type=str, default=None, help="email of the user"
)
add_parser.add_argument(
"--passwd",
type=str,
default=None,
help="Password for this user. If empty, a random password is generated.",
)
add_parser.add_argument(
"--staff",
default=False,
action="store_true",
help="Make this user a staff member",
)
add_parser.add_argument(
"--superuser",
default=False,
action="store_true",
help="Make this user a super user",
)
# "update" sub-command
update_parser = sub.add_parser("update", help="Update an existing user")
update_parser.add_argument("username", help="Username of the user")
update_parser.add_argument(
"--email", type=str, default=None, help="Change email of the user"
)
active_parser = update_parser.add_mutually_exclusive_group(required=False)
active_parser.add_argument(
"--active",
dest="active",
action="store_const",
const=True, # not a boolean
help="Make this user active",
)
active_parser.add_argument(
"--not-active",
dest="active",
action="store_const",
const=False,
help="Make this user inactive",
)
active_parser.set_defaults(active=None) # tri-state - None, True, False
staff_parser = update_parser.add_mutually_exclusive_group(required=False)
staff_parser.add_argument(
"--staff",
dest="staff",
action="store_const",
const=True,
help="Make this user a staff member",
)
staff_parser.add_argument(
"--not-staff",
dest="staff",
action="store_const",
const=False,
help="Make this user no longer a staff member",
)
staff_parser.set_defaults(staff=None)
superuser_parser = update_parser.add_mutually_exclusive_group(required=False)
superuser_parser.add_argument(
"--superuser",
dest="superuser",
action="store_const",
const=True,
help="Make this user a superuser",
)
superuser_parser.add_argument(
"--not-superuser",
dest="superuser",
action="store_const",
const=False,
help="Make this user no longer a superuser",
)
superuser_parser.set_defaults(superuser=None)
# "details" sub-command
details_parser = sub.add_parser("details", help="User details")
details_parser.add_argument("username", help="Username of the user")
# "list" sub-command
list_parser = sub.add_parser("list", help="List users")
list_parser.add_argument(
"--all",
dest="all",
default=False,
action="store_true",
help="Show all users including inactive ones",
)
list_parser.add_argument(
"--csv", dest="csv", default=False, action="store_true", help="Print as csv"
)
def handle(self, *args, **options):
""" Forward to the right sub-handler """
if options["sub_command"] == "add":
self.handle_add(options)
elif options["sub_command"] == "update":
self.handle_update(options)
elif options["sub_command"] == "details":
self.handle_details(options["username"])
elif options["sub_command"] == "list":
self.handle_list(options["all"], options["csv"])
def handle_add(self, options):
""" Create a new user """
username = options["username"]
passwd = options["passwd"]
if passwd is None:
passwd = User.objects.make_random_password()
user = User.objects.create_user(username, options["email"], passwd)
if options["staff"]:
user.is_staff = True
if options["superuser"]:
user.is_superuser = True
user.save()
if options["passwd"] is None:
self.stdout.write(passwd)
def handle_update(self, options): # pylint: disable=no-self-use
""" Update existing user"""
username = options["username"]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("User %s does not exist" % username)
if options["email"]:
user.email = options["email"]
# False is an allowed value, but not None.
if options["active"] in [True, False]:
user.is_active = options["active"]
if options["staff"] in [True, False]:
user.is_staff = options["staff"]
if options["superuser"] in [True, False]:
user.is_superuser = options["superuser"]
user.save()
def handle_details(self, username):
""" Print user details """
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("Unable to find user '%s'" % username)
self.stdout.write("username : %s" % username)
self.stdout.write("is_active : %s" % user.is_active)
self.stdout.write("is_staff : %s" % user.is_staff)
self.stdout.write("is_superuser: %s" % user.is_superuser)
groups = [g.name for g in user.groups.all().order_by("name")]
self.stdout.write("groups : [%s]" % ", ".join(groups))
|
Linaro/squad
|
squad/core/plugins.py
|
get_plugins_by_feature
|
python
|
def get_plugins_by_feature(features):
if not features:
return get_all_plugins()
plugins = PluginLoader.load_all().items()
names = set([f.__name__ for f in features])
return [e for e, plugin in plugins if names & set(plugin.__dict__.keys())]
|
Returns a list of plugin names where the plugins implement at least one of
the *features*. *features* must a list of Plugin methods, e.g.
[Plugin.postprocess_testrun, Plugin.postprocess_testjob]
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/plugins.py#L48-L58
|
[
"def get_all_plugins():\n plugins = PluginLoader.load_all()\n return plugins.keys()\n",
"def load_all(cls):\n if cls.__plugins__ is not None:\n return cls.__plugins__\n\n entry_points = []\n\n # builtin plugins\n for _, m, _ in iter_modules(['squad/plugins']):\n e = EntryPoint(m, 'squad.plugins.' + m, attrs=('Plugin',))\n entry_points.append(e)\n\n # external plugins\n plugins = iter_entry_points('squad_plugins')\n entry_points += list(plugins)\n\n cls.__plugins__ = {e.name: e.resolve() for e in entry_points}\n return cls.__plugins__\n"
] |
from django.db import models
from django.forms import MultipleChoiceField, ChoiceField, CheckboxSelectMultiple
from pkg_resources import EntryPoint, iter_entry_points
from pkgutil import iter_modules
class PluginNotFound(Exception):
pass
class PluginLoader(object):
__plugins__ = None
@classmethod
def load_all(cls):
if cls.__plugins__ is not None:
return cls.__plugins__
entry_points = []
# builtin plugins
for _, m, _ in iter_modules(['squad/plugins']):
e = EntryPoint(m, 'squad.plugins.' + m, attrs=('Plugin',))
entry_points.append(e)
# external plugins
plugins = iter_entry_points('squad_plugins')
entry_points += list(plugins)
cls.__plugins__ = {e.name: e.resolve() for e in entry_points}
return cls.__plugins__
def get_plugin_instance(name):
try:
plugin_class = PluginLoader.load_all()[name]
except KeyError:
raise PluginNotFound(name)
return plugin_class()
def get_all_plugins():
plugins = PluginLoader.load_all()
return plugins.keys()
def apply_plugins(plugin_names):
"""
This function should be used by code in the SQUAD core to trigger
functionality from plugins.
The ``plugin_names`` argument is list of plugins names to be used. Most
probably, you will want to pass the list of plugins enabled for a given
project, e.g. ``project.enabled_plugins``.
Example::
from squad.core.plugins import apply_plugins
# ...
for plugin in apply_plugins(project.enabled_plugins):
plugin.method(...)
"""
if plugin_names is None:
return
for p in plugin_names:
try:
plugin = get_plugin_instance(p)
yield(plugin)
except PluginNotFound:
pass
class Plugin(object):
"""
This class must be used as a superclass for all SQUAD plugins. All the
methods declared here have empty implementations (i.e. they do nothing),
and should be overriden in your plugin to provide extra functionality to
the SQUAD core.
"""
def postprocess_testrun(self, testrun):
"""
This method is called after a test run has been received by SQUAD, and
the test run data (tests, metrics, metadata, logs, etc) have been saved
to the database.
You can use this method to parse logs, do any special handling of
metadata, test results, etc.
The ``testrun`` arguments is an instance of
``squad.core.models.TestRun``.
"""
pass
def postprocess_testjob(self, testjob):
"""
This method is called after a test job has been fetched by SQUAD, and
the test run data (tests, metrics, metadata, logs, etc) have been saved
to the database.
You can use this method to do any processing that is specific to a
given CI backend (e.g. LAVA).
The ``testjob`` arguments is an instance of
``squad.ci.models.TestJob``.
"""
pass
def notify_patch_build_created(self, build):
"""
This method is called when a patch build is created. It should notify
the corresponding patch source that the checks are in progress.
The ``build`` argument is an instance of ``squad.core.Build``.
"""
pass
def notify_patch_build_finished(self, build):
"""
This method is called when a patch build is finished. It should notify
the patch source about the status of the tests (success, failure, etc).
The ``build`` argument is an instance of ``squad.core.Build``.
"""
pass
class PluginField(models.CharField):
def __init__(self, **args):
defaults = {'max_length': 256}
defaults.update(args)
self.features = defaults.pop('features', None)
return super(PluginField, self).__init__(**defaults)
def deconstruct(self):
name, path, args, kwargs = super(PluginField, self).deconstruct()
del kwargs["max_length"]
return name, path, args, kwargs
def formfield(self, **kwargs):
plugins = ((v, v) for v in get_plugins_by_feature(self.features))
return ChoiceField(choices=plugins)
class PluginListField(models.TextField):
def __init__(self, **args):
self.features = args.pop('features', None)
return super(PluginListField, self).__init__(**args)
def from_db_value(self, value, expression, connection, context):
if value is None:
return None
return [item.strip() for item in value.split(',')]
def to_python(self, value):
if isinstance(value, list):
return value
if value is None:
return None
return [item.strip() for item in value.split(',')]
def get_prep_value(self, value):
if value is None:
return value
return ', '.join(value)
def formfield(self, **kwargs):
plugins = ((v, v) for v in get_plugins_by_feature(self.features))
required = not self.null
return MultipleChoiceField(
required=required,
choices=plugins,
widget=CheckboxSelectMultiple,
)
|
Linaro/squad
|
squad/core/plugins.py
|
apply_plugins
|
python
|
def apply_plugins(plugin_names):
if plugin_names is None:
return
for p in plugin_names:
try:
plugin = get_plugin_instance(p)
yield(plugin)
except PluginNotFound:
pass
|
This function should be used by code in the SQUAD core to trigger
functionality from plugins.
The ``plugin_names`` argument is list of plugins names to be used. Most
probably, you will want to pass the list of plugins enabled for a given
project, e.g. ``project.enabled_plugins``.
Example::
from squad.core.plugins import apply_plugins
# ...
for plugin in apply_plugins(project.enabled_plugins):
plugin.method(...)
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/plugins.py#L61-L88
|
[
"def get_plugin_instance(name):\n try:\n plugin_class = PluginLoader.load_all()[name]\n except KeyError:\n raise PluginNotFound(name)\n return plugin_class()\n"
] |
from django.db import models
from django.forms import MultipleChoiceField, ChoiceField, CheckboxSelectMultiple
from pkg_resources import EntryPoint, iter_entry_points
from pkgutil import iter_modules
class PluginNotFound(Exception):
pass
class PluginLoader(object):
__plugins__ = None
@classmethod
def load_all(cls):
if cls.__plugins__ is not None:
return cls.__plugins__
entry_points = []
# builtin plugins
for _, m, _ in iter_modules(['squad/plugins']):
e = EntryPoint(m, 'squad.plugins.' + m, attrs=('Plugin',))
entry_points.append(e)
# external plugins
plugins = iter_entry_points('squad_plugins')
entry_points += list(plugins)
cls.__plugins__ = {e.name: e.resolve() for e in entry_points}
return cls.__plugins__
def get_plugin_instance(name):
try:
plugin_class = PluginLoader.load_all()[name]
except KeyError:
raise PluginNotFound(name)
return plugin_class()
def get_all_plugins():
plugins = PluginLoader.load_all()
return plugins.keys()
def get_plugins_by_feature(features):
"""
Returns a list of plugin names where the plugins implement at least one of
the *features*. *features* must a list of Plugin methods, e.g.
[Plugin.postprocess_testrun, Plugin.postprocess_testjob]
"""
if not features:
return get_all_plugins()
plugins = PluginLoader.load_all().items()
names = set([f.__name__ for f in features])
return [e for e, plugin in plugins if names & set(plugin.__dict__.keys())]
class Plugin(object):
"""
This class must be used as a superclass for all SQUAD plugins. All the
methods declared here have empty implementations (i.e. they do nothing),
and should be overriden in your plugin to provide extra functionality to
the SQUAD core.
"""
def postprocess_testrun(self, testrun):
"""
This method is called after a test run has been received by SQUAD, and
the test run data (tests, metrics, metadata, logs, etc) have been saved
to the database.
You can use this method to parse logs, do any special handling of
metadata, test results, etc.
The ``testrun`` arguments is an instance of
``squad.core.models.TestRun``.
"""
pass
def postprocess_testjob(self, testjob):
"""
This method is called after a test job has been fetched by SQUAD, and
the test run data (tests, metrics, metadata, logs, etc) have been saved
to the database.
You can use this method to do any processing that is specific to a
given CI backend (e.g. LAVA).
The ``testjob`` arguments is an instance of
``squad.ci.models.TestJob``.
"""
pass
def notify_patch_build_created(self, build):
"""
This method is called when a patch build is created. It should notify
the corresponding patch source that the checks are in progress.
The ``build`` argument is an instance of ``squad.core.Build``.
"""
pass
def notify_patch_build_finished(self, build):
"""
This method is called when a patch build is finished. It should notify
the patch source about the status of the tests (success, failure, etc).
The ``build`` argument is an instance of ``squad.core.Build``.
"""
pass
class PluginField(models.CharField):
def __init__(self, **args):
defaults = {'max_length': 256}
defaults.update(args)
self.features = defaults.pop('features', None)
return super(PluginField, self).__init__(**defaults)
def deconstruct(self):
name, path, args, kwargs = super(PluginField, self).deconstruct()
del kwargs["max_length"]
return name, path, args, kwargs
def formfield(self, **kwargs):
plugins = ((v, v) for v in get_plugins_by_feature(self.features))
return ChoiceField(choices=plugins)
class PluginListField(models.TextField):
def __init__(self, **args):
self.features = args.pop('features', None)
return super(PluginListField, self).__init__(**args)
def from_db_value(self, value, expression, connection, context):
if value is None:
return None
return [item.strip() for item in value.split(',')]
def to_python(self, value):
if isinstance(value, list):
return value
if value is None:
return None
return [item.strip() for item in value.split(',')]
def get_prep_value(self, value):
if value is None:
return value
return ', '.join(value)
def formfield(self, **kwargs):
plugins = ((v, v) for v in get_plugins_by_feature(self.features))
required = not self.null
return MultipleChoiceField(
required=required,
choices=plugins,
widget=CheckboxSelectMultiple,
)
|
Linaro/squad
|
squad/core/models.py
|
Build.metadata
|
python
|
def metadata(self):
if self.__metadata__ is None:
metadata = {}
for test_run in self.test_runs.defer(None).all():
for key, value in test_run.metadata.items():
metadata.setdefault(key, [])
if value not in metadata[key]:
metadata[key].append(value)
for key in metadata.keys():
if len(metadata[key]) == 1:
metadata[key] = metadata[key][0]
else:
metadata[key] = sorted(metadata[key], key=str)
self.__metadata__ = metadata
return self.__metadata__
|
The build metadata is the union of the metadata in its test runs.
Common keys with different values are transformed into a list with each
of the different values.
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/models.py#L352-L371
| null |
class Build(models.Model):
project = models.ForeignKey(Project, related_name='builds')
version = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
datetime = models.DateTimeField()
patch_source = models.ForeignKey(PatchSource, null=True, blank=True)
patch_baseline = models.ForeignKey('Build', null=True, blank=True)
patch_id = models.CharField(max_length=1024, null=True, blank=True)
keep_data = models.BooleanField(
default=False,
help_text="Keep this build data even after the project data retention period has passed"
)
class Meta:
unique_together = ('project', 'version',)
ordering = ['datetime']
def save(self, *args, **kwargs):
if not self.datetime:
self.datetime = timezone.now()
with transaction.atomic():
super(Build, self).save(*args, **kwargs)
ProjectStatus.objects.get_or_create(build=self)
def __str__(self):
return '%s (%s)' % (self.version, self.datetime)
def prefetch(self, *related):
prefetch_related_objects([self], *related)
@property
def test_summary(self):
return TestSummary(self)
__metadata__ = None
@property
@property
def important_metadata(self):
wanted = (self.project.important_metadata_keys or '').splitlines()
m = self.metadata
if len(wanted):
return {k: m[k] for k in wanted if k in m}
else:
return self.metadata
@property
def has_extra_metadata(self):
if set(self.important_metadata.keys()) == set(self.metadata.keys()):
return False
return True
@property
def finished(self):
"""
A finished build is a build that satisfies one of the following conditions:
* it has no pending CI test jobs.
* it has no submitted CI test jobs, and has at least N test runs for each of
the project environments, where N is configured in
Environment.expected_test_runs. Environment.expected_test_runs is
interpreted as follows:
* None (empty): there must be at least one test run for that
environment.
* 0: the environment is ignored, i.e. any amount of test runs will
be ok, including 0.
* N > 0: at least N test runs are expected for that environment
"""
reasons = []
# XXX note that by using test_jobs here, we are adding an implicit
# dependency on squad.ci, what in theory violates our architecture.
testjobs = self.test_jobs
if testjobs.count() > 0:
if testjobs.filter(fetched=False).count() > 0:
# a build that has pending CI jobs is NOT finished
reasons.append("There are unfinished CI jobs")
else:
# carry on, and check whether the number of expected test runs
# per environment is satisfied.
pass
# builds with no CI jobs are finished when each environment has
# received the expected amount of test runs
testruns = {
e.id: {
'name': str(e),
'expected': e.expected_test_runs,
'received': 0
}
for e in self.project.environments.all()
}
for t in self.test_runs.filter(completed=True).all():
testruns[t.environment_id]['received'] += 1
for env, count in testruns.items():
expected = count['expected']
received = count['received']
env_name = count['name']
if expected == 0:
continue
if received == 0:
reasons.append("No test runs for %s received so far" % env_name)
if expected and received < expected:
reasons.append(
"%d test runs expected for %s, but only %d received so far" % (
expected,
env_name,
received,
)
)
return (len(reasons) == 0, reasons)
@property
def test_suites_by_environment(self):
test_runs = self.test_runs.prefetch_related(
'tests',
'tests__suite',
'environment',
)
result = OrderedDict()
envlist = set([t.environment for t in test_runs])
for env in sorted(envlist, key=lambda env: env.slug):
result[env] = dict()
for tr in test_runs:
for t in tr.tests.all():
if t.suite in result[tr.environment].keys():
result[tr.environment][t.suite][t.status] += 1
else:
result[tr.environment].setdefault(t.suite, {'fail': 0, 'pass': 0, 'skip': 0, 'xfail': 0})
result[tr.environment][t.suite][t.status] += 1
for env in result.keys():
# there should only be one key in the most nested dict
result[env] = sorted(
result[env].items(),
key=lambda suite_dict: suite_dict[0].slug)
return result
|
Linaro/squad
|
squad/core/models.py
|
Build.finished
|
python
|
def finished(self):
reasons = []
# XXX note that by using test_jobs here, we are adding an implicit
# dependency on squad.ci, what in theory violates our architecture.
testjobs = self.test_jobs
if testjobs.count() > 0:
if testjobs.filter(fetched=False).count() > 0:
# a build that has pending CI jobs is NOT finished
reasons.append("There are unfinished CI jobs")
else:
# carry on, and check whether the number of expected test runs
# per environment is satisfied.
pass
# builds with no CI jobs are finished when each environment has
# received the expected amount of test runs
testruns = {
e.id: {
'name': str(e),
'expected': e.expected_test_runs,
'received': 0
}
for e in self.project.environments.all()
}
for t in self.test_runs.filter(completed=True).all():
testruns[t.environment_id]['received'] += 1
for env, count in testruns.items():
expected = count['expected']
received = count['received']
env_name = count['name']
if expected == 0:
continue
if received == 0:
reasons.append("No test runs for %s received so far" % env_name)
if expected and received < expected:
reasons.append(
"%d test runs expected for %s, but only %d received so far" % (
expected,
env_name,
received,
)
)
return (len(reasons) == 0, reasons)
|
A finished build is a build that satisfies one of the following conditions:
* it has no pending CI test jobs.
* it has no submitted CI test jobs, and has at least N test runs for each of
the project environments, where N is configured in
Environment.expected_test_runs. Environment.expected_test_runs is
interpreted as follows:
* None (empty): there must be at least one test run for that
environment.
* 0: the environment is ignored, i.e. any amount of test runs will
be ok, including 0.
* N > 0: at least N test runs are expected for that environment
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/models.py#L389-L450
| null |
class Build(models.Model):
project = models.ForeignKey(Project, related_name='builds')
version = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
datetime = models.DateTimeField()
patch_source = models.ForeignKey(PatchSource, null=True, blank=True)
patch_baseline = models.ForeignKey('Build', null=True, blank=True)
patch_id = models.CharField(max_length=1024, null=True, blank=True)
keep_data = models.BooleanField(
default=False,
help_text="Keep this build data even after the project data retention period has passed"
)
class Meta:
unique_together = ('project', 'version',)
ordering = ['datetime']
def save(self, *args, **kwargs):
if not self.datetime:
self.datetime = timezone.now()
with transaction.atomic():
super(Build, self).save(*args, **kwargs)
ProjectStatus.objects.get_or_create(build=self)
def __str__(self):
return '%s (%s)' % (self.version, self.datetime)
def prefetch(self, *related):
prefetch_related_objects([self], *related)
@property
def test_summary(self):
return TestSummary(self)
__metadata__ = None
@property
def metadata(self):
"""
The build metadata is the union of the metadata in its test runs.
Common keys with different values are transformed into a list with each
of the different values.
"""
if self.__metadata__ is None:
metadata = {}
for test_run in self.test_runs.defer(None).all():
for key, value in test_run.metadata.items():
metadata.setdefault(key, [])
if value not in metadata[key]:
metadata[key].append(value)
for key in metadata.keys():
if len(metadata[key]) == 1:
metadata[key] = metadata[key][0]
else:
metadata[key] = sorted(metadata[key], key=str)
self.__metadata__ = metadata
return self.__metadata__
@property
def important_metadata(self):
wanted = (self.project.important_metadata_keys or '').splitlines()
m = self.metadata
if len(wanted):
return {k: m[k] for k in wanted if k in m}
else:
return self.metadata
@property
def has_extra_metadata(self):
if set(self.important_metadata.keys()) == set(self.metadata.keys()):
return False
return True
@property
@property
def test_suites_by_environment(self):
test_runs = self.test_runs.prefetch_related(
'tests',
'tests__suite',
'environment',
)
result = OrderedDict()
envlist = set([t.environment for t in test_runs])
for env in sorted(envlist, key=lambda env: env.slug):
result[env] = dict()
for tr in test_runs:
for t in tr.tests.all():
if t.suite in result[tr.environment].keys():
result[tr.environment][t.suite][t.status] += 1
else:
result[tr.environment].setdefault(t.suite, {'fail': 0, 'pass': 0, 'skip': 0, 'xfail': 0})
result[tr.environment][t.suite][t.status] += 1
for env in result.keys():
# there should only be one key in the most nested dict
result[env] = sorted(
result[env].items(),
key=lambda suite_dict: suite_dict[0].slug)
return result
|
Linaro/squad
|
squad/core/models.py
|
ProjectStatus.create_or_update
|
python
|
def create_or_update(cls, build):
test_summary = build.test_summary
metrics_summary = MetricsSummary(build)
now = timezone.now()
test_runs_total = build.test_runs.count()
test_runs_completed = build.test_runs.filter(completed=True).count()
test_runs_incomplete = build.test_runs.filter(completed=False).count()
regressions = None
fixes = None
previous_build = Build.objects.filter(
status__finished=True,
datetime__lt=build.datetime,
project=build.project,
).order_by('datetime').last()
if previous_build is not None:
comparison = TestComparison(previous_build, build)
if comparison.regressions:
regressions = yaml.dump(comparison.regressions)
if comparison.fixes:
fixes = yaml.dump(comparison.fixes)
finished, _ = build.finished
data = {
'tests_pass': test_summary.tests_pass,
'tests_fail': test_summary.tests_fail,
'tests_xfail': test_summary.tests_xfail,
'tests_skip': test_summary.tests_skip,
'metrics_summary': metrics_summary.value,
'has_metrics': metrics_summary.has_metrics,
'last_updated': now,
'finished': finished,
'test_runs_total': test_runs_total,
'test_runs_completed': test_runs_completed,
'test_runs_incomplete': test_runs_incomplete,
'regressions': regressions,
'fixes': fixes
}
status, created = cls.objects.get_or_create(build=build, defaults=data)
if not created and test_summary.tests_total >= status.tests_total:
# XXX the test above for the new total number of tests prevents
# results that arrived earlier, but are only being processed now,
# from overwriting a ProjectStatus created by results that arrived
# later but were already processed.
status.tests_pass = test_summary.tests_pass
status.tests_fail = test_summary.tests_fail
status.tests_xfail = test_summary.tests_xfail
status.tests_skip = test_summary.tests_skip
status.metrics_summary = metrics_summary.value
status.has_metrics = metrics_summary.has_metrics
status.last_updated = now
finished, _ = build.finished
status.finished = finished
status.build = build
status.test_runs_total = test_runs_total
status.test_runs_completed = test_runs_completed
status.test_runs_incomplete = test_runs_incomplete
status.regressions = regressions
status.fixes = fixes
status.save()
return status
|
Creates (or updates) a new ProjectStatus for the given build and
returns it.
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/models.py#L917-L983
| null |
class ProjectStatus(models.Model, TestSummaryBase):
"""
Represents a "checkpoint" of a project status in time. It is used by the
notification system to know what was the project status at the time of the
last notification.
"""
build = models.OneToOneField('Build', related_name='status')
created_at = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(null=True)
finished = models.BooleanField(default=False)
notified = models.BooleanField(default=False)
notified_on_timeout = models.BooleanField(default=False)
approved = models.BooleanField(default=False)
metrics_summary = models.FloatField(null=True)
has_metrics = models.BooleanField(default=False)
tests_pass = models.IntegerField(default=0)
tests_fail = models.IntegerField(default=0)
tests_xfail = models.IntegerField(default=0)
tests_skip = models.IntegerField(default=0)
test_runs_total = models.IntegerField(default=0)
test_runs_completed = models.IntegerField(default=0)
test_runs_incomplete = models.IntegerField(default=0)
regressions = models.TextField(
null=True,
blank=True,
validators=[yaml_validator]
)
fixes = models.TextField(
null=True,
blank=True,
validators=[yaml_validator]
)
class Meta:
verbose_name_plural = "Project statuses"
@classmethod
def __str__(self):
return "%s, build %s" % (self.build.project, self.build.version)
def get_previous(self):
return ProjectStatus.objects.filter(
finished=True,
build__datetime__lt=self.build.datetime,
build__project=self.build.project,
).order_by('build__datetime').last()
def __get_yaml_field__(self, field_value):
if field_value is not None:
return yaml.load(field_value, Loader=yaml.Loader)
return {}
def get_regressions(self):
return self.__get_yaml_field__(self.regressions)
def get_fixes(self):
return self.__get_yaml_field__(self.fixes)
def get_exceeded_thresholds(self):
# Return a list of all (threshold, metric) objects for those
# thresholds that were exceeded by corresponding metrics.
thresholds_exceeded = []
if self.has_metrics:
test_runs = self.build.test_runs.all()
suites = Suite.objects.filter(test__test_run__build=self.build)
for metric in Metric.objects.filter(
Q(test_run__in=test_runs) | Q(suite__in=suites),
name__in=self.build.project.metricthreshold_set.values_list('name', flat=True)):
for threshold in self.build.project.metricthreshold_set.all():
if threshold.is_higher_better:
if metric.result < threshold.value:
thresholds_exceeded.append((threshold, metric))
else:
if metric.result > threshold.value:
thresholds_exceeded.append((threshold, metric))
return thresholds_exceeded
|
Linaro/squad
|
squad/api/rest.py
|
ModelViewSet.get_project_ids
|
python
|
def get_project_ids(self):
user = self.request.user
projects = Project.objects.accessible_to(user).values('id')
return [p['id'] for p in projects]
|
Determines which projects the current user is allowed to visualize.
Returns a list of project ids to be used in get_queryset() for
filtering.
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/api/rest.py#L200-L208
| null |
class ModelViewSet(viewsets.ModelViewSet):
|
Linaro/squad
|
squad/api/rest.py
|
ProjectViewSet.builds
|
python
|
def builds(self, request, pk=None):
builds = self.get_object().builds.prefetch_related('test_runs').order_by('-datetime')
page = self.paginate_queryset(builds)
serializer = BuildSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data)
|
List of builds for the current project.
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/api/rest.py#L356-L363
| null |
class ProjectViewSet(viewsets.ModelViewSet):
"""
List of projects. Includes public projects and projects that the current
user has access to.
"""
queryset = Project.objects
serializer_class = ProjectSerializer
filter_fields = ('group',
'slug',
'name',
'is_public',
'html_mail',
'custom_email_template',
'moderate_notifications')
filter_backends = (ComplexFilterBackend, )
filter_class = ProjectFilter
search_fields = ('slug',
'name',)
ordering_fields = ('slug',
'name',)
def get_queryset(self):
return self.queryset.accessible_to(self.request.user)
@detail_route(methods=['get'], suffix='builds')
@detail_route(methods=['get'], suffix='suites')
def suites(self, request, pk=None):
"""
List of test suite names available in this project
"""
suites_names = self.get_object().suites.values_list('slug')
suites_metadata = SuiteMetadata.objects.filter(kind='suite', suite__in=suites_names)
page = self.paginate_queryset(suites_metadata)
serializer = SuiteMetadataSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data)
@detail_route(methods=['get'], suffix='tests')
def tests(self, request, pk=None):
"""
List of test names available in this project
"""
suite_name = request.query_params.get("suite_name", None)
if suite_name is None:
suites_names = self.get_object().suites.values_list('slug')
else:
suites_names = [suite_name]
suites_metadata = SuiteMetadata.objects.filter(kind='test', suite__in=suites_names)
page = self.paginate_queryset(suites_metadata)
serializer = SuiteMetadataSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data)
@detail_route(methods=['get'], suffix='test_results')
def test_results(self, request, pk=None):
test_name = request.query_params.get("test_name", None)
builds = self.get_object().builds.prefetch_related('test_runs').order_by('-datetime')
page = self.paginate_queryset(builds)
serializer = LatestTestResultsSerializer(
page,
many=True,
context={'request': request, 'test_name': test_name}
)
return Response(serializer.data)
|
Linaro/squad
|
squad/api/rest.py
|
ProjectViewSet.suites
|
python
|
def suites(self, request, pk=None):
suites_names = self.get_object().suites.values_list('slug')
suites_metadata = SuiteMetadata.objects.filter(kind='suite', suite__in=suites_names)
page = self.paginate_queryset(suites_metadata)
serializer = SuiteMetadataSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data)
|
List of test suite names available in this project
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/api/rest.py#L366-L374
| null |
class ProjectViewSet(viewsets.ModelViewSet):
"""
List of projects. Includes public projects and projects that the current
user has access to.
"""
queryset = Project.objects
serializer_class = ProjectSerializer
filter_fields = ('group',
'slug',
'name',
'is_public',
'html_mail',
'custom_email_template',
'moderate_notifications')
filter_backends = (ComplexFilterBackend, )
filter_class = ProjectFilter
search_fields = ('slug',
'name',)
ordering_fields = ('slug',
'name',)
def get_queryset(self):
return self.queryset.accessible_to(self.request.user)
@detail_route(methods=['get'], suffix='builds')
def builds(self, request, pk=None):
"""
List of builds for the current project.
"""
builds = self.get_object().builds.prefetch_related('test_runs').order_by('-datetime')
page = self.paginate_queryset(builds)
serializer = BuildSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data)
@detail_route(methods=['get'], suffix='suites')
@detail_route(methods=['get'], suffix='tests')
def tests(self, request, pk=None):
"""
List of test names available in this project
"""
suite_name = request.query_params.get("suite_name", None)
if suite_name is None:
suites_names = self.get_object().suites.values_list('slug')
else:
suites_names = [suite_name]
suites_metadata = SuiteMetadata.objects.filter(kind='test', suite__in=suites_names)
page = self.paginate_queryset(suites_metadata)
serializer = SuiteMetadataSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data)
@detail_route(methods=['get'], suffix='test_results')
def test_results(self, request, pk=None):
test_name = request.query_params.get("test_name", None)
builds = self.get_object().builds.prefetch_related('test_runs').order_by('-datetime')
page = self.paginate_queryset(builds)
serializer = LatestTestResultsSerializer(
page,
many=True,
context={'request': request, 'test_name': test_name}
)
return Response(serializer.data)
|
Linaro/squad
|
squad/api/rest.py
|
BuildViewSet.email
|
python
|
def email(self, request, pk=None):
force = request.query_params.get("force", False)
delayed_report, created = self.__return_delayed_report(request)
if created or force:
delayed_report = prepare_report(delayed_report.pk)
if delayed_report.status_code != status.HTTP_200_OK:
return Response(yaml.safe_load(delayed_report.error_message or ''), delayed_report.status_code)
if delayed_report.output_format == "text/html" and delayed_report.output_html:
return HttpResponse(delayed_report.output_html, content_type=delayed_report.output_format)
return HttpResponse(delayed_report.output_text, content_type=delayed_report.output_format)
|
This method produces the body of email notification for the build.
By default it uses the project settings for HTML and template.
These settings can be overwritten by using GET parameters:
* output - sets the output format (text/plan, text/html)
* template - sets the template used (id of existing template or
"default" for default SQUAD templates)
* force - force email report re-generation even if there is
existing one cached
|
train
|
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/api/rest.py#L620-L639
|
[
"def __return_delayed_report(self, request):\n output_format = request.query_params.get(\"output\", \"text/plain\")\n template_id = request.query_params.get(\"template\", None)\n baseline_id = request.query_params.get(\"baseline\", None)\n email_recipient = request.query_params.get(\"email_recipient\", None)\n callback = request.query_params.get(\"callback\", None)\n callback_token = request.query_params.get(\"callback_token\", None)\n # keep the cached reports for 1 day by default\n data_retention_days = request.query_params.get(\"keep\", 1)\n if request.method == \"POST\":\n output_format = request.data.get(\"output\", \"text/plain\")\n template_id = request.data.get(\"template\", None)\n baseline_id = request.data.get(\"baseline\", None)\n email_recipient = request.data.get(\"email_recipient\", None)\n callback = request.data.get(\"callback\", None)\n callback_token = request.data.get(\"callback_token\", None)\n # keep the cached reports for 1 day by default\n data_retention_days = request.data.get(\"keep\", 1)\n\n template = None\n if template_id != \"default\":\n template = self.get_object().project.custom_email_template\n if template_id is not None:\n try:\n template = EmailTemplate.objects.get(pk=template_id)\n except EmailTemplate.DoesNotExist:\n pass\n\n baseline = None\n\n report_kwargs = {\n \"baseline\": baseline,\n \"template\": template,\n \"output_format\": output_format,\n \"email_recipient\": email_recipient,\n \"callback\": callback,\n \"callback_token\": callback_token,\n \"data_retention_days\": data_retention_days\n }\n if baseline_id is not None:\n try:\n previous_build = Build.objects.get(pk=baseline_id)\n report_kwargs[\"baseline\"] = previous_build.status\n except Build.DoesNotExist:\n data = {\n \"message\": \"Baseline build %s does not exist\" % baseline_id\n }\n report_kwargs.update({\"build\": self.get_object()})\n # return created=False to avoid running prepare_report\n return update_delayed_report(None, data, 
status.HTTP_400_BAD_REQUEST, **report_kwargs), False\n except ProjectStatus.DoesNotExist:\n data = {\n \"message\": \"Build %s has no status\" % baseline_id\n }\n report_kwargs.update({\"build\": self.get_object()})\n # return created=False to avoid running prepare_report\n return update_delayed_report(None, data, status.HTTP_400_BAD_REQUEST, **report_kwargs), False\n try:\n delayed_report, created = self.get_object().delayed_reports.get_or_create(**report_kwargs)\n except MultipleObjectsReturned:\n delayed_report = self.get_object().delayed_reports.all()[0] # return first available object\n created = False\n\n return delayed_report, created\n"
] |
class BuildViewSet(ModelViewSet):
"""
List of all builds in the system. Only builds belonging to public projects
and to projects you have access to are available.
"""
queryset = Build.objects.prefetch_related('status', 'test_runs').order_by('-datetime').all()
serializer_class = BuildSerializer
filter_fields = ('version', 'project')
filter_class = BuildFilter
search_fields = ('version',)
ordering_fields = ('id', 'version', 'created_at', 'datetime')
def get_queryset(self):
return self.queryset.filter(project__in=self.get_project_ids())
@detail_route(methods=['get'], suffix='metadata')
def metadata(self, request, pk=None):
build = self.get_object()
return Response(build.metadata)
@detail_route(methods=['get'], suffix='status')
def status(self, request, pk=None):
try:
status = self.get_object().status
serializer = ProjectStatusSerializer(status, many=False, context={'request': request})
return Response(serializer.data)
except ProjectStatus.DoesNotExist:
raise NotFound()
@detail_route(methods=['get'], suffix='test runs')
def testruns(self, request, pk=None):
testruns = self.get_object().test_runs.order_by('-id')
page = self.paginate_queryset(testruns)
serializer = TestRunSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data)
@detail_route(methods=['get'], suffix='test jobs')
def testjobs(self, request, pk=None):
testjobs = self.get_object().test_jobs.order_by('-id')
page = self.paginate_queryset(testjobs)
serializer = TestJobSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data)
def __return_delayed_report(self, request):
output_format = request.query_params.get("output", "text/plain")
template_id = request.query_params.get("template", None)
baseline_id = request.query_params.get("baseline", None)
email_recipient = request.query_params.get("email_recipient", None)
callback = request.query_params.get("callback", None)
callback_token = request.query_params.get("callback_token", None)
# keep the cached reports for 1 day by default
data_retention_days = request.query_params.get("keep", 1)
if request.method == "POST":
output_format = request.data.get("output", "text/plain")
template_id = request.data.get("template", None)
baseline_id = request.data.get("baseline", None)
email_recipient = request.data.get("email_recipient", None)
callback = request.data.get("callback", None)
callback_token = request.data.get("callback_token", None)
# keep the cached reports for 1 day by default
data_retention_days = request.data.get("keep", 1)
template = None
if template_id != "default":
template = self.get_object().project.custom_email_template
if template_id is not None:
try:
template = EmailTemplate.objects.get(pk=template_id)
except EmailTemplate.DoesNotExist:
pass
baseline = None
report_kwargs = {
"baseline": baseline,
"template": template,
"output_format": output_format,
"email_recipient": email_recipient,
"callback": callback,
"callback_token": callback_token,
"data_retention_days": data_retention_days
}
if baseline_id is not None:
try:
previous_build = Build.objects.get(pk=baseline_id)
report_kwargs["baseline"] = previous_build.status
except Build.DoesNotExist:
data = {
"message": "Baseline build %s does not exist" % baseline_id
}
report_kwargs.update({"build": self.get_object()})
# return created=False to avoid running prepare_report
return update_delayed_report(None, data, status.HTTP_400_BAD_REQUEST, **report_kwargs), False
except ProjectStatus.DoesNotExist:
data = {
"message": "Build %s has no status" % baseline_id
}
report_kwargs.update({"build": self.get_object()})
# return created=False to avoid running prepare_report
return update_delayed_report(None, data, status.HTTP_400_BAD_REQUEST, **report_kwargs), False
try:
delayed_report, created = self.get_object().delayed_reports.get_or_create(**report_kwargs)
except MultipleObjectsReturned:
delayed_report = self.get_object().delayed_reports.all()[0] # return first available object
created = False
return delayed_report, created
@detail_route(methods=['get'], suffix='email')
@detail_route(methods=['get', 'post'], suffix='report', permission_classes=[AllowAny])
def report(self, request, pk=None):
force = request.query_params.get("force", False)
delayed_report, created = self.__return_delayed_report(request)
if created or force:
prepare_report.delay(delayed_report.pk)
data = {"message": "OK", "url": rest_reverse('delayedreport-detail', args=[delayed_report.pk], request=request)}
return Response(data, status=status.HTTP_202_ACCEPTED)
|
ska-sa/spead2
|
spead2/send/__init__.py
|
HeapGenerator.add_to_heap
|
python
|
def add_to_heap(self, heap, descriptors='stale', data='stale'):
if descriptors not in ['stale', 'all', 'none']:
raise ValueError("descriptors must be one of 'stale', 'all', 'none'")
if data not in ['stale', 'all', 'none']:
raise ValueError("data must be one of 'stale', 'all', 'none'")
for item in self._item_group.values():
info = self._get_info(item)
if (descriptors == 'all') or (descriptors == 'stale'
and self._descriptor_stale(item, info)):
heap.add_descriptor(item)
info.descriptor_cnt = self._descriptor_cnt
if item.value is not None:
if (data == 'all') or (data == 'stale' and info.version != item.version):
heap.add_item(item)
info.version = item.version
self._descriptor_cnt += 1
return heap
|
Update a heap to contains all the new items and item descriptors
since the last call.
Parameters
----------
heap : :py:class:`Heap`
The heap to update.
descriptors : {'stale', 'all', 'none'}
Which descriptors to send. The default ('stale') sends only
descriptors that have not been sent, or have not been sent recently
enough according to the `descriptor_frequency` passed to the
constructor. The other options are to send all the descriptors or
none of them. Sending all descriptors is useful if a new receiver
is added which will be out of date.
data : {'stale', 'all', 'none'}
Which data items to send.
item_group : :py:class:`ItemGroup`, optional
If specified, uses the items from this item group instead of the
one passed to the constructor (which could be `None`).
Raises
------
ValueError
if `descriptors` or `data` is not one of the legal values
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/send/__init__.py#L83-L124
|
[
"def _get_info(self, item):\n if item.id not in self._info:\n self._info[item.id] = _ItemInfo(item)\n return self._info[item.id]\n",
"def _descriptor_stale(self, item, info):\n if info.descriptor_cnt is None:\n # Never been sent before\n return True\n if self._descriptor_frequency is not None \\\n and self._descriptor_cnt - info.descriptor_cnt >= self._descriptor_frequency:\n # This descriptor is due for a resend\n return True\n # Check for complete replacement of the item\n orig_item = info.item()\n if orig_item is not item:\n info.version = None\n info.item = weakref.ref(item)\n return True\n return False\n"
] |
class HeapGenerator(object):
"""Tracks which items and item values have previously been sent and
generates delta heaps.
Parameters
----------
item_group : :py:class:`spead2.ItemGroup`
Item group to monitor.
descriptor_frequency : int, optional
If specified, descriptors will be re-sent once every `descriptor_frequency` heaps
generated by this method.
flavour : :py:class:`spead2.Flavour`
The SPEAD protocol flavour used for heaps generated by :py:meth:`get_heap` and
:py:meth:`get_end`.
"""
def __init__(self, item_group, descriptor_frequency=None, flavour=_spead2.Flavour()):
self._item_group = item_group
self._info = {} # Maps ID to _ItemInfo
self._descriptor_frequency = descriptor_frequency
# Counter for calls to add_to_heap. This is independent of the
# protocol-level heap count.
self._descriptor_cnt = 0
self._flavour = flavour
def _get_info(self, item):
if item.id not in self._info:
self._info[item.id] = _ItemInfo(item)
return self._info[item.id]
def _descriptor_stale(self, item, info):
if info.descriptor_cnt is None:
# Never been sent before
return True
if self._descriptor_frequency is not None \
and self._descriptor_cnt - info.descriptor_cnt >= self._descriptor_frequency:
# This descriptor is due for a resend
return True
# Check for complete replacement of the item
orig_item = info.item()
if orig_item is not item:
info.version = None
info.item = weakref.ref(item)
return True
return False
def get_heap(self, *args, **kwargs):
"""Return a new heap which contains all the new items and item
descriptors since the last call. This is a convenience wrapper
around :meth:`add_to_heap`.
"""
heap = Heap(self._flavour)
self.add_to_heap(heap, *args, **kwargs)
return heap
def get_start(self):
"""Return a heap that contains only a start-of-stream marker.
"""
heap = Heap(self._flavour)
heap.add_start()
return heap
def get_end(self):
"""Return a heap that contains only an end-of-stream marker.
"""
heap = Heap(self._flavour)
heap.add_end()
return heap
|
ska-sa/spead2
|
spead2/send/__init__.py
|
HeapGenerator.get_heap
|
python
|
def get_heap(self, *args, **kwargs):
heap = Heap(self._flavour)
self.add_to_heap(heap, *args, **kwargs)
return heap
|
Return a new heap which contains all the new items and item
descriptors since the last call. This is a convenience wrapper
around :meth:`add_to_heap`.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/send/__init__.py#L126-L133
|
[
"def add_to_heap(self, heap, descriptors='stale', data='stale'):\n \"\"\"Update a heap to contains all the new items and item descriptors\n since the last call.\n\n Parameters\n ----------\n heap : :py:class:`Heap`\n The heap to update.\n descriptors : {'stale', 'all', 'none'}\n Which descriptors to send. The default ('stale') sends only\n descriptors that have not been sent, or have not been sent recently\n enough according to the `descriptor_frequency` passed to the\n constructor. The other options are to send all the descriptors or\n none of them. Sending all descriptors is useful if a new receiver\n is added which will be out of date.\n data : {'stale', 'all', 'none'}\n Which data items to send.\n item_group : :py:class:`ItemGroup`, optional\n If specified, uses the items from this item group instead of the\n one passed to the constructor (which could be `None`).\n\n Raises\n ------\n ValueError\n if `descriptors` or `data` is not one of the legal values\n \"\"\"\n if descriptors not in ['stale', 'all', 'none']:\n raise ValueError(\"descriptors must be one of 'stale', 'all', 'none'\")\n if data not in ['stale', 'all', 'none']:\n raise ValueError(\"data must be one of 'stale', 'all', 'none'\")\n for item in self._item_group.values():\n info = self._get_info(item)\n if (descriptors == 'all') or (descriptors == 'stale'\n and self._descriptor_stale(item, info)):\n heap.add_descriptor(item)\n info.descriptor_cnt = self._descriptor_cnt\n if item.value is not None:\n if (data == 'all') or (data == 'stale' and info.version != item.version):\n heap.add_item(item)\n info.version = item.version\n self._descriptor_cnt += 1\n return heap\n"
] |
class HeapGenerator(object):
    """Tracks which items and item values have previously been sent and
    generates delta heaps.

    Parameters
    ----------
    item_group : :py:class:`spead2.ItemGroup`
        Item group to monitor.
    descriptor_frequency : int, optional
        If specified, descriptors will be re-sent once every
        `descriptor_frequency` heaps generated by this method.
    flavour : :py:class:`spead2.Flavour`
        The SPEAD protocol flavour used for heaps generated by
        :py:meth:`get_heap` and :py:meth:`get_end`.
    """

    def __init__(self, item_group, descriptor_frequency=None, flavour=_spead2.Flavour()):
        self._item_group = item_group
        # Per-item bookkeeping records (_ItemInfo), keyed by item ID.
        self._info = {}
        self._descriptor_frequency = descriptor_frequency
        # Number of calls to add_to_heap so far. This is independent of the
        # protocol-level heap count.
        self._descriptor_cnt = 0
        self._flavour = flavour

    def _get_info(self, item):
        """Return the bookkeeping record for `item`, creating it if needed."""
        record = self._info.get(item.id)
        if record is None:
            record = _ItemInfo(item)
            self._info[item.id] = record
        return record

    def _descriptor_stale(self, item, info):
        """Return True if `item`'s descriptor should be (re)sent."""
        if info.descriptor_cnt is None:
            # Descriptor has never been sent.
            return True
        if (self._descriptor_frequency is not None and
                self._descriptor_cnt - info.descriptor_cnt >= self._descriptor_frequency):
            # Periodic re-send is due.
            return True
        # A wholesale replacement of the item object also makes it stale.
        if info.item() is not item:
            info.version = None
            info.item = weakref.ref(item)
            return True
        return False

    def add_to_heap(self, heap, descriptors='stale', data='stale'):
        """Add to `heap` every item and item descriptor that is new since
        the previous call.

        Parameters
        ----------
        heap : :py:class:`Heap`
            The heap to update.
        descriptors : {'stale', 'all', 'none'}
            Which descriptors to send: only those considered stale (the
            default, taking `descriptor_frequency` into account), all of
            them (useful when a new, out-of-date receiver joins), or none.
        data : {'stale', 'all', 'none'}
            Which data items to send.

        Raises
        ------
        ValueError
            if `descriptors` or `data` is not one of the legal values
        """
        if descriptors not in ['stale', 'all', 'none']:
            raise ValueError("descriptors must be one of 'stale', 'all', 'none'")
        if data not in ['stale', 'all', 'none']:
            raise ValueError("data must be one of 'stale', 'all', 'none'")
        for item in self._item_group.values():
            record = self._get_info(item)
            send_descriptor = (descriptors == 'all' or
                               (descriptors == 'stale' and
                                self._descriptor_stale(item, record)))
            if send_descriptor:
                heap.add_descriptor(item)
                record.descriptor_cnt = self._descriptor_cnt
            if item.value is not None:
                send_data = (data == 'all' or
                             (data == 'stale' and record.version != item.version))
                if send_data:
                    heap.add_item(item)
                    record.version = item.version
        self._descriptor_cnt += 1
        return heap

    def get_start(self):
        """Return a heap that contains only a start-of-stream marker."""
        marker = Heap(self._flavour)
        marker.add_start()
        return marker

    def get_end(self):
        """Return a heap that contains only an end-of-stream marker."""
        marker = Heap(self._flavour)
        marker.add_end()
        return marker
|
ska-sa/spead2
|
spead2/send/trollius.py
|
TcpStream.connect
|
python
|
def connect(cls, *args, **kwargs):
    """Open a connection.

    The arguments are the same as for the constructor of
    :py:class:`spead2.send.TcpStream`.
    """
    event_loop = kwargs.get('loop')
    if event_loop is None:
        event_loop = trollius.get_event_loop()
    connected = trollius.Future(loop=event_loop)

    def on_connect(result):
        # The callback may fire on a different thread, hence
        # call_soon_threadsafe to resolve the future on the event loop.
        if connected.done():
            return
        if isinstance(result, Exception):
            event_loop.call_soon_threadsafe(connected.set_exception, result)
        else:
            event_loop.call_soon_threadsafe(connected.set_result, result)

    stream = cls(on_connect, *args, **kwargs)
    yield From(connected)
    raise Return(stream)
|
Open a connection.
The arguments are the same as for the constructor of
:py:class:`spead2.send.TcpStream`.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/send/trollius.py#L132-L152
| null |
class TcpStream(_TcpStreamBase):
"""SPEAD over TCP with asynchronous connect and sends.
Most users will use :py:meth:`connect` to asynchronously create a stream.
The constructor should only be used if you wish to provide your own socket
and take care of connecting yourself.
Parameters
----------
thread_pool : :py:class:`spead2.ThreadPool`
Thread pool handling the I/O
socket : :py:class:`socket.socket`
TCP/IP Socket that is already connected to the remote end
config : :py:class:`spead2.send.StreamConfig`
Stream configuration
"""
@classmethod
@trollius.coroutine
|
ska-sa/spead2
|
spead2/recv/trollius.py
|
Stream._clear_done_waiters
|
python
|
def _clear_done_waiters(self):
    """Drop leading waiters whose futures are already done.

    This should only happen when a waiter has been cancelled. If no waiters
    remain afterwards, stop watching the file descriptor.
    """
    waiters = self._waiters
    while waiters and waiters[0].done():
        waiters.popleft()
    if not waiters:
        self._stop_listening()
|
Remove waiters that are done (should only happen if they are cancelled)
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/recv/trollius.py#L83-L88
| null |
class Stream(spead2.recv.Stream):
"""Stream where `get` is a coroutine that yields the next heap.
Internally, it maintains a queue of waiters, each represented by a future.
When a heap becomes available, it is passed to the first waiter. We use
a callback on a file descriptor being readable, which happens when there
might be data available. The callback is enabled when we have at least one
waiter, otherwise disabled.
The futures store a singleton list containing the heap rather than the heap
itself. This allows the reference to the heap to be explicitly cleared so
that the heap can be garbage collected sooner.
Parameters
----------
loop : event loop, optional
Default event loop
"""
def __init__(self, *args, **kwargs):
self._loop = kwargs.pop('loop', None)
if self._loop is None:
self._loop = trollius.get_event_loop()
super(Stream, self).__init__(*args, **kwargs)
self._waiters = collections.deque()
self._listening = False
def _start_listening(self):
if not self._listening:
self._loop.add_reader(self.fd, self._ready_callback)
self._listening = True
def _stop_listening(self):
if self._listening:
self._loop.remove_reader(self.fd)
self._listening = False
def _ready_callback(self):
self._clear_done_waiters()
if self._waiters:
try:
heap = self.get_nowait()
except spead2.Empty:
# Shouldn't happen, but poll may have been woken spuriously
pass
except spead2.Stopped as e:
for waiter in self._waiters:
waiter.set_exception(e)
self._waiters = []
self._stop_listening()
else:
waiter = self._waiters.popleft()
waiter.set_result([heap])
if not self._waiters:
self._stop_listening()
# Break cyclic references if spead2.Stopped is raised
self = None
waiter = None
@trollius.coroutine
def get(self, loop=None):
"""Coroutine that waits for a heap to become available and returns it."""
self._clear_done_waiters()
if not self._waiters:
# If something is available directly, we can avoid going back to
# the scheduler
try:
heap = self.get_nowait()
except spead2.Empty:
pass
else:
# Give the event loop a chance to run. This ensures that a
# heap-processing loop cannot live-lock the event loop.
yield
raise Return(heap)
if loop is None:
loop = self._loop
waiter = trollius.Future(loop=loop)
self._waiters.append(waiter)
self._start_listening()
heap = (yield From(waiter)).pop()
raise Return(heap)
# Asynchronous iterator support for Python 3.5+. It's not supported with
# trollius, but after passing through trollius2asyncio it becomes useful.
@_aiter_compat
def __aiter__(self):
return self
@trollius.coroutine
def __anext__(self):
try:
heap = yield From(self.get())
except spead2.Stopped:
raise StopAsyncIteration # noqa: F821 (for Python 2)
else:
raise Return(heap)
|
ska-sa/spead2
|
spead2/recv/trollius.py
|
Stream.get
|
python
|
def get(self, loop=None):
    """Coroutine that waits for a heap to become available and returns it."""
    self._clear_done_waiters()
    if not self._waiters:
        # Fast path: if a heap is already queued we can return it without
        # registering a waiter.
        try:
            ready = self.get_nowait()
        except spead2.Empty:
            pass
        else:
            # Yield once so that a tight heap-processing loop cannot
            # live-lock the event loop.
            yield
            raise Return(ready)
    target_loop = self._loop if loop is None else loop
    waiter = trollius.Future(loop=target_loop)
    self._waiters.append(waiter)
    self._start_listening()
    # The waiter resolves to a singleton list; pop() clears the list's
    # reference so the heap can be garbage collected sooner.
    result = (yield From(waiter)).pop()
    raise Return(result)
|
Coroutine that waits for a heap to become available and returns it.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/recv/trollius.py#L113-L135
| null |
class Stream(spead2.recv.Stream):
"""Stream where `get` is a coroutine that yields the next heap.
Internally, it maintains a queue of waiters, each represented by a future.
When a heap becomes available, it is passed to the first waiter. We use
a callback on a file descriptor being readable, which happens when there
might be data available. The callback is enabled when we have at least one
waiter, otherwise disabled.
The futures store a singleton list containing the heap rather than the heap
itself. This allows the reference to the heap to be explicitly cleared so
that the heap can be garbage collected sooner.
Parameters
----------
loop : event loop, optional
Default event loop
"""
def __init__(self, *args, **kwargs):
self._loop = kwargs.pop('loop', None)
if self._loop is None:
self._loop = trollius.get_event_loop()
super(Stream, self).__init__(*args, **kwargs)
self._waiters = collections.deque()
self._listening = False
def _start_listening(self):
if not self._listening:
self._loop.add_reader(self.fd, self._ready_callback)
self._listening = True
def _stop_listening(self):
if self._listening:
self._loop.remove_reader(self.fd)
self._listening = False
def _clear_done_waiters(self):
"""Remove waiters that are done (should only happen if they are cancelled)"""
while self._waiters and self._waiters[0].done():
self._waiters.popleft()
if not self._waiters:
self._stop_listening()
def _ready_callback(self):
self._clear_done_waiters()
if self._waiters:
try:
heap = self.get_nowait()
except spead2.Empty:
# Shouldn't happen, but poll may have been woken spuriously
pass
except spead2.Stopped as e:
for waiter in self._waiters:
waiter.set_exception(e)
self._waiters = []
self._stop_listening()
else:
waiter = self._waiters.popleft()
waiter.set_result([heap])
if not self._waiters:
self._stop_listening()
# Break cyclic references if spead2.Stopped is raised
self = None
waiter = None
@trollius.coroutine
# Asynchronous iterator support for Python 3.5+. It's not supported with
# trollius, but after passing through trollius2asyncio it becomes useful.
@_aiter_compat
def __aiter__(self):
return self
@trollius.coroutine
def __anext__(self):
try:
heap = yield From(self.get())
except spead2.Stopped:
raise StopAsyncIteration # noqa: F821 (for Python 2)
else:
raise Return(heap)
|
ska-sa/spead2
|
spead2/__init__.py
|
parse_range_list
|
python
|
def parse_range_list(ranges):
    """Split a string like 2,3-5,8,9-11 into a list of integers.

    Each comma-separated element is either a single integer or an inclusive
    ``start-end`` range; an empty string yields an empty list. This is
    intended to ease adding command-line options for dealing with affinity.
    """
    if not ranges:
        return []
    out = []
    for piece in ranges.split(','):
        bounds = piece.split('-', 1)
        if len(bounds) == 1:
            out.append(int(bounds[0]))
        else:
            low, high = int(bounds[0]), int(bounds[1])
            out.extend(range(low, high + 1))
    return out
|
Split a string like 2,3-5,8,9-11 into a list of integers. This is
intended to ease adding command-line options for dealing with affinity.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L81-L97
| null |
# Copyright 2015 SKA South Africa
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numbers as _numbers
import logging
import six
import numpy as _np
import spead2._spead2
from spead2._spead2 import ( # noqa: F401
Flavour, ThreadPool, Stopped, Empty,
MemoryAllocator, MmapAllocator, MemoryPool, InprocQueue,
BUG_COMPAT_DESCRIPTOR_WIDTHS,
BUG_COMPAT_SHAPE_BIT_1,
BUG_COMPAT_SWAP_ENDIAN,
BUG_COMPAT_PYSPEAD_0_5_2,
NULL_ID,
HEAP_CNT_ID,
HEAP_LENGTH_ID,
PAYLOAD_OFFSET_ID,
PAYLOAD_LENGTH_ID,
DESCRIPTOR_ID,
STREAM_CTRL_ID,
DESCRIPTOR_NAME_ID,
DESCRIPTOR_DESCRIPTION_ID,
DESCRIPTOR_SHAPE_ID,
DESCRIPTOR_FORMAT_ID,
DESCRIPTOR_ID_ID,
DESCRIPTOR_DTYPE_ID,
CTRL_STREAM_START,
CTRL_DESCRIPTOR_REISSUE,
CTRL_STREAM_STOP,
CTRL_DESCRIPTOR_UPDATE,
MEMCPY_STD,
MEMCPY_NONTEMPORAL)
try:
from spead2._spead2 import IbvContext # noqa: F401
except ImportError:
pass
from spead2._version import __version__ # noqa: F401
_logger = logging.getLogger(__name__)
_UNRESERVED_ID = 0x1000 #: First ID that can be auto-allocated
_FASTPATH_NONE = 0
_FASTPATH_IMMEDIATE = 1
_FASTPATH_NUMPY = 2
if six.PY2:
def _bytes_to_str_ascii(b):
b.decode('ascii') # Just to check validity, throw away unicode object
return b
else:
# Python 3
def _bytes_to_str_ascii(b):
return b.decode('ascii')
def _shape_elements(shape):
elements = 1
for dimension in shape:
elements *= dimension
return elements
class Descriptor(object):
"""Metadata for a SPEAD item.
There are a number of restrictions in the way the parameters combine,
which will cause `ValueError` to be raised if violated:
- At most one element of `shape` can be `None`.
- Exactly one of `dtype` and `format` must be non-`None`.
- If `dtype` is specified, `shape` cannot have any unknown dimensions.
- If `format` is specified, `order` must be 'C'
Parameters
----------
id : int
SPEAD item ID
name : str
Short item name, suitable for use as a key
description : str
Long item description
shape : sequence
Dimensions, with `None` indicating a variable-size dimension
dtype : numpy data type, optional
Data type, or `None` if `format` will be used instead
order : {'C', 'F'}
Indicates C-order or Fortran-order storage
format : list of pairs, optional
Structure fields for generic (non-numpy) type. Each element of the list
is a tuple of field code and bit length.
"""
def __init__(self, id, name, description, shape, dtype=None, order='C', format=None):
shape = tuple(shape)
unknowns = sum([x is None for x in shape])
if unknowns > 1:
raise ValueError('Cannot have multiple unknown dimensions')
if dtype is not None:
dtype = _np.dtype(dtype)
if dtype.hasobject:
raise TypeError('Cannot use dtype that has reference-counted objects')
if format is not None:
raise ValueError('Only one of dtype and format can be specified')
if unknowns > 0:
raise ValueError('Cannot have unknown dimensions when using numpy descriptor')
self._internal_dtype = dtype
else:
if format is None:
raise ValueError('One of dtype and format must be specified')
if order != 'C':
raise ValueError("When specifying format, order must be 'C'")
self._internal_dtype = self._parse_format(format)
if order not in ['C', 'F']:
raise ValueError("Order must be 'C' or 'F'")
self.id = id
self.name = name
self.description = description
self.shape = shape
self.dtype = dtype
self.order = order
self.format = format
if not self._internal_dtype.hasobject:
self._fastpath = _FASTPATH_NUMPY
elif (not shape and
dtype is None and
len(format) == 1 and
format[0][0] in ('u', 'i') and
self._internal_dtype.hasobject):
self._fastpath = _FASTPATH_IMMEDIATE
else:
self._fastpath = _FASTPATH_NONE
@classmethod
def _parse_numpy_header(cls, header):
try:
d = _np.lib.utils.safe_eval(header)
except SyntaxError as e:
msg = "Cannot parse descriptor: %r\nException: %r"
raise ValueError(msg % (header, e))
if not isinstance(d, dict):
msg = "Descriptor is not a dictionary: %r"
raise ValueError(msg % d)
keys = list(d.keys())
keys.sort()
if keys != ['descr', 'fortran_order', 'shape']:
msg = "Descriptor does not contain the correct keys: %r"
raise ValueError(msg % (keys,))
# Sanity-check the values.
if (not isinstance(d['shape'], tuple) or
not all([isinstance(x, _numbers.Integral) and x >= 0 for x in d['shape']])):
msg = "shape is not valid: %r"
raise ValueError(msg % (d['shape'],))
if not isinstance(d['fortran_order'], bool):
msg = "fortran_order is not a valid bool: %r"
raise ValueError(msg % (d['fortran_order'],))
try:
dtype = _np.dtype(d['descr'])
except TypeError:
msg = "descr is not a valid dtype descriptor: %r"
raise ValueError(msg % (d['descr'],))
order = 'F' if d['fortran_order'] else 'C'
return d['shape'], order, dtype
@classmethod
def _make_numpy_header(self, shape, dtype, order):
return "{{'descr': {!r}, 'fortran_order': {!r}, 'shape': {!r}}}".format(
_np.lib.format.dtype_to_descr(dtype), order == 'F',
tuple(shape))
@classmethod
def _parse_format(cls, fmt):
"""Attempt to convert a SPEAD format specification to a numpy dtype.
Where necessary, `O` is used.
Raises
------
ValueError
If the format is illegal
"""
fields = []
if not fmt:
raise ValueError('empty format')
for code, length in fmt:
if length == 0:
raise ValueError('zero-length field (bug_compat mismatch?)')
if ((code in ('u', 'i') and length in (8, 16, 32, 64)) or
(code == 'f' and length in (32, 64))):
fields.append('>' + code + str(length // 8))
elif code == 'b' and length == 8:
fields.append('?')
elif code == 'c' and length == 8:
fields.append('S1')
else:
if code not in ['u', 'i', 'b']:
raise ValueError('illegal format ({}, {})'.format(code, length))
fields.append('O')
return _np.dtype(','.join(fields))
@property
def itemsize_bits(self):
"""Number of bits per element"""
if self.dtype is not None:
return self.dtype.itemsize * 8
else:
return sum(x[1] for x in self.format)
def is_variable_size(self):
"""Determine whether any element of the size is dynamic"""
return any([x is None for x in self.shape])
def allow_immediate(self):
"""Called by the C++ interface to determine whether sufficiently small
items should be encoded as immediates.
Variable-size objects cannot be immediates because there is no way to
determine the true payload size. Types with a non-integral number of
bytes are banned because the protocol does not specify where the
padding should go, and PySPEAD's encoder and decoder disagree, so it
is best not to send them at all.
"""
return not self.is_variable_size() and (
self.dtype is not None or self.itemsize_bits % 8 == 0)
def dynamic_shape(self, max_elements):
"""Determine the dynamic shape, given incoming data that is big enough
to hold `max_elements` elements.
"""
known = 1
unknown_pos = -1
for i, x in enumerate(self.shape):
if x is not None:
known *= x
else:
assert unknown_pos == -1, 'Shape has multiple unknown dimensions'
unknown_pos = i
if unknown_pos == -1:
return self.shape
else:
shape = list(self.shape)
if known == 0:
shape[unknown_pos] = 0
else:
shape[unknown_pos] = max_elements // known
return shape
def compatible_shape(self, shape):
"""Determine whether `shape` is compatible with the (possibly
variable-sized) shape for this descriptor"""
if len(shape) != len(self.shape):
return False
for x, y in zip(self.shape, shape):
if x is not None and x != y:
return False
return True
@classmethod
def from_raw(cls, raw_descriptor, flavour):
dtype = None
format = None
if raw_descriptor.numpy_header:
header = _bytes_to_str_ascii(raw_descriptor.numpy_header)
shape, order, dtype = cls._parse_numpy_header(header)
if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
dtype = dtype.newbyteorder()
else:
shape = raw_descriptor.shape
order = 'C'
format = raw_descriptor.format
return cls(
raw_descriptor.id,
_bytes_to_str_ascii(raw_descriptor.name),
_bytes_to_str_ascii(raw_descriptor.description),
shape, dtype, order, format)
def to_raw(self, flavour):
raw = spead2._spead2.RawDescriptor()
raw.id = self.id
raw.name = self.name.encode('ascii')
raw.description = self.description.encode('ascii')
raw.shape = self.shape
if self.dtype is not None:
if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
dtype = self.dtype.newbyteorder()
else:
dtype = self.dtype
raw.numpy_header = self._make_numpy_header(
self.shape, dtype, self.order).encode('ascii')
else:
raw.format = self.format
return raw
class Item(Descriptor):
"""A SPEAD item with a value and a version number.
Parameters
----------
value : object, optional
Initial value
"""
def __init__(self, *args, **kw):
value = kw.pop('value', None)
super(Item, self).__init__(*args, **kw)
self._value = value
self.version = 1 #: Version number
@property
def value(self):
"""Current value. Assigning to this will increment the version number.
Assigning `None` will raise `ValueError` because there is no way to
encode this using SPEAD.
.. warning:: If you modify a mutable value in-place, the change will
not be detected, and the new value will not be transmitted. In this
case, either manually increment the version number, or reassign the
value.
"""
return self._value
@value.setter
def value(self, new_value):
if new_value is None:
raise ValueError("Item value cannot be set to None")
self._value = new_value
self.version += 1
@classmethod
def _read_bits(cls, raw_value):
"""Generator that takes a memory view and provides bitfields from it.
After creating the generator, call `send(None)` to initialise it, and
thereafter call `send(need_bits)` to obtain that many bits.
"""
have_bits = 0
bits = 0
byte_source = iter(raw_value)
result = 0
while True:
need_bits = yield result
while have_bits < need_bits:
try:
bits = (bits << 8) | int(next(byte_source))
have_bits += 8
except StopIteration:
return
result = int(bits >> (have_bits - need_bits))
bits &= (1 << (have_bits - need_bits)) - 1
have_bits -= need_bits
@classmethod
def _write_bits(cls, array):
"""Generator that fills a `bytearray` with provided bits. After
creating the generator, call `send(None)` to initialise it, and
thereafter call `send((value, bits))` to add that many bits into
the array. You must call `close()` to flush any partial bytes."""
pos = 0
current = 0 # bits not yet written into array
current_bits = 0
try:
while True:
(value, bits) = yield
if value < 0 or value >= (1 << bits):
raise ValueError('Value is out of range for number of bits')
current = (current << bits) | value
current_bits += bits
while current_bits >= 8:
array[pos] = current >> (current_bits - 8)
current &= (1 << (current_bits - 8)) - 1
current_bits -= 8
pos += 1
except GeneratorExit:
if current_bits > 0:
current <<= (8 - current_bits)
array[pos] = current
def _load_recursive(self, shape, gen):
"""Recursively create a multidimensional array (as lists of lists)
from a bit generator.
"""
if len(shape) > 0:
ans = []
for i in range(shape[0]):
ans.append(self._load_recursive(shape[1:], gen))
else:
fields = []
for code, length in self.format:
field = None
raw = gen.send(length)
if code == 'u':
field = raw
elif code == 'i':
field = raw
# Interpret as 2's complement
if field >= 1 << (length - 1):
field -= 1 << length
elif code == 'b':
field = bool(raw)
elif code == 'c':
field = six.int2byte(raw)
elif code == 'f':
if length == 32:
field = _np.uint32(raw).view(_np.float32)
elif length == 64:
field = _np.uint64(raw).view(_np.float64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
fields.append(field)
if len(fields) == 1:
ans = fields[0]
else:
ans = tuple(fields)
return ans
def _store_recursive(self, dims, value, gen):
if dims > 0:
for sub in value:
self._store_recursive(dims - 1, sub, gen)
else:
if len(self.format) == 1:
value = (value,)
for (code, length), field in zip(self.format, value):
raw = None
if code == 'u':
raw = int(field)
if raw < 0 or raw >= (1 << length):
raise ValueError('{} is out of range for u{}'.format(raw, length))
elif code == 'i':
top_bit = 1 << (length - 1)
raw = int(field)
if raw < -top_bit or raw >= top_bit:
raise ValueError('{} is out of range for i{}'.format(field, length))
# convert to 2's complement
if raw < 0:
raw += 2 * top_bit
elif code == 'b':
raw = 1 if field else 0
elif code == 'c':
raw = ord(field)
elif code == 'f':
if length == 32:
raw = _np.float32(field).view(_np.uint32)
elif length == 64:
raw = _np.float64(field).view(_np.uint64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
gen.send((raw, length))
def set_from_raw(self, raw_item):
raw_value = _np.array(raw_item, _np.uint8, copy=False)
if self._fastpath == _FASTPATH_NUMPY:
max_elements = raw_value.shape[0] // self._internal_dtype.itemsize
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
size_bytes = elements * self._internal_dtype.itemsize
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
# For some reason, np.frombuffer doesn't work on memoryview, but np.array does
array1d = raw_value[-size_bytes:]
else:
array1d = raw_value[:size_bytes]
array1d = array1d.view(dtype=self._internal_dtype)
# Force to native endian
array1d = array1d.astype(self._internal_dtype.newbyteorder('='),
casting='equiv', copy=False)
value = _np.reshape(array1d, shape, self.order)
elif (self._fastpath == _FASTPATH_IMMEDIATE and
raw_item.is_immediate and
raw_value.shape[0] * 8 == self.format[0][1]):
value = raw_item.immediate_value
if self.format[0][0] == 'i':
top = 1 << (self.format[0][1] - 1)
if value >= top:
value -= 2 * top
else:
itemsize_bits = self.itemsize_bits
max_elements = raw_value.shape[0] * 8 // itemsize_bits
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
bits = elements * itemsize_bits
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
size_bytes = (bits + 7) // 8
raw_value = raw_value[-size_bytes:]
gen = self._read_bits(raw_value)
gen.send(None) # Initialisation of the generator
value = _np.array(self._load_recursive(shape, gen), self._internal_dtype)
if len(self.shape) == 0 and isinstance(value, _np.ndarray):
# Convert zero-dimensional array to scalar
value = value[()]
elif len(self.shape) == 1 and self.format == [('c', 8)]:
# Convert array of characters to a string
value = _bytes_to_str_ascii(b''.join(value))
self.value = value
def _num_elements(self):
if isinstance(self.value, _np.ndarray):
return self.value.size
cur = self.value
ans = 1
for size in self.shape:
ans *= len(cur)
if ans == 0:
return ans # Prevents IndexError below
cur = cur[0]
return ans
def _transform_value(self):
"""Mangle the value into a numpy array. This does several things:
- If it is stringlike (bytes or unicode) and the expected shape is
1D, it is split into an array of characters.
- It is coerced to a numpy array, enforcing the dtype and order. Where
possible, no copy is made.
- The shape is checked against the expected shape.
Returns
-------
value : :py:class:`numpy.ndarray`
The transformed value
Raises
------
ValueError
if the value is `None`
ValueError
if the value has the wrong shape
TypeError
if numpy raised it when trying to convert the value
"""
value = self.value
if value is None:
raise ValueError('Cannot send a value of None')
if (isinstance(value, (six.binary_type, six.text_type)) and
len(self.shape) == 1):
# This is complicated by Python 3 not providing a simple way to
# turn a bytes object into a list of one-byte objects, the way
# list(str) does.
value = [self.value[i : i + 1] for i in range(len(self.value))]
value = _np.array(value, dtype=self._internal_dtype, order=self.order, copy=False)
if not self.compatible_shape(value.shape):
raise ValueError('Value has shape {}, expected {}'.format(value.shape, self.shape))
return value
def to_buffer(self):
"""Returns an object that implements the buffer protocol for the value.
It can be either the original value (on the numpy fast path), or a new
temporary object.
"""
value = self._transform_value()
if self._fastpath != _FASTPATH_NUMPY:
bit_length = self.itemsize_bits * self._num_elements()
out = bytearray((bit_length + 7) // 8)
gen = self._write_bits(out)
gen.send(None) # Initialise the generator
# If it's a scalar, unpack it. That way, the input to the
# final level of recursion in _store_recursive is always
# the scalar rather than the 0D array.
if len(self.shape) == 0:
value = value[()]
self._store_recursive(len(self.shape), value, gen)
gen.close()
return out
else:
if self.order == 'F':
# numpy doesn't allow buffer protocol to be used on arrays that
# aren't C-contiguous, but transposition just fiddles the
# strides of the view without creating a new array.
value = value.transpose()
return value
class ItemGroup(object):
"""
Items are collected into sets called *item groups*, which can be indexed by
either item ID or item name.
There are some subtleties with respect to re-issued item descriptors. There are
two cases:
1. The item descriptor is identical to a previous seen one. In this case, no
action is taken.
2. Otherwise, any existing items with the same name or ID (which could be two
different items) are dropped, the new item is added, and its value
becomes ``None``. The version is set to be higher than version on an item
that was removed, so that consumers who only check the version will
detect the change.
"""
def __init__(self):
self._by_id = {}
self._by_name = {}
def _remove_item(self, item):
del self._by_id[item.id]
del self._by_name[item.name]
def _add_item(self, item):
try:
old = self._by_id[item.id]
except KeyError:
old = None
try:
old_by_name = self._by_name[item.name]
except KeyError:
old_by_name = None
# Check if this is just the same thing
if (old is not None and
old.name == item.name and
old.description == item.description and
old.shape == item.shape and
old.dtype == item.dtype and
old.order == item.order and
old.format == item.format):
# Descriptor is the same, so just transfer the value. If the value
# is None, then we've only been given a descriptor to add.
if item.value is not None:
old.value = item.value
return
if old is not None or old_by_name is not None:
_logger.info('Descriptor replacement for ID %#x, name %s', item.id, item.name)
# Ensure the version number is seen to increment, regardless of
# whether accessed by name or ID.
new_version = item.version
if old is not None:
new_version = max(new_version, old.version + 1)
if old_by_name is not None:
new_version = max(new_version, old_by_name.version + 1)
item.version = new_version
# Remove previous items, under the same name or ID
if old is not None:
self._remove_item(old)
if old_by_name is not None and old_by_name is not old:
self._remove_item(old_by_name)
# Install new item
self._by_id[item.id] = item
self._by_name[item.name] = item
def add_item(self, *args, **kwargs):
"""Add a new item to the group. The parameters are used to construct an
:py:class:`Item`. If `id` is `None`, it will be automatically populated
with an ID that is not already in use.
See the class documentation for the behaviour when the name or ID
collides with an existing one. In addition, if the item descriptor is
identical to an existing one and a value, this value is assigned to
the existing item.
"""
item = Item(*args, **kwargs)
if item.id is None:
item.id = _UNRESERVED_ID
while item.id in self._by_id:
item.id += 1
self._add_item(item)
return item
def __getitem__(self, key):
"""Dictionary-style lookup by either ID or name"""
if isinstance(key, _numbers.Integral):
return self._by_id[key]
else:
return self._by_name[key]
def __contains__(self, key):
"""Dictionary-style membership test by either ID or name"""
if isinstance(key, _numbers.Integral):
return key in self._by_id
else:
return key in self._by_name
def keys(self):
"""Item names"""
return self._by_name.keys()
def ids(self):
"""Item IDs"""
return self._by_id.keys()
def values(self):
"""Item values"""
return self._by_name.values()
def items(self):
"""Dictionary style (name, value) pairs"""
return self._by_name.items()
def __len__(self):
    """Number of items in the group."""
    return len(self._by_name)
def update(self, heap):
    """Update the item descriptors and items from an incoming heap.

    Parameters
    ----------
    heap : :class:`spead2.recv.Heap`
        Incoming heap

    Returns
    -------
    dict
        Items that have been updated from this heap, indexed by name
    """
    # Descriptors first, so items in the same heap can resolve against them.
    for raw_descriptor in heap.get_descriptors():
        self._add_item(Item.from_raw(raw_descriptor, flavour=heap.flavour))
    changed = {}
    for raw_item in heap.get_items():
        if raw_item.id <= STREAM_CTRL_ID:
            continue  # Special fields, not real items
        existing = self._by_id.get(raw_item.id)
        if existing is None:
            _logger.warning('Item with ID %#x received but there is no descriptor', raw_item.id)
            continue
        existing.set_from_raw(raw_item)
        existing.version += 1
        changed[existing.name] = existing
    return changed
|
ska-sa/spead2
|
spead2/__init__.py
|
Descriptor._parse_format
|
python
|
def _parse_format(cls, fmt):
    """Attempt to convert a SPEAD format specification to a numpy dtype.

    Where necessary, `O` (Python object) is used for fields with no
    native numpy equivalent.

    Raises
    ------
    ValueError
        If the format is illegal
    """
    fields = []
    if not fmt:
        raise ValueError('empty format')
    for code, length in fmt:
        if length == 0:
            raise ValueError('zero-length field (bug_compat mismatch?)')
        # Whole-byte integer/float widths map to big-endian numpy scalars.
        if ((code in ('u', 'i') and length in (8, 16, 32, 64)) or
                (code == 'f' and length in (32, 64))):
            fields.append('>' + code + str(length // 8))
        elif code == 'b' and length == 8:
            fields.append('?')  # 8-bit boolean
        elif code == 'c' and length == 8:
            fields.append('S1')  # single character
        else:
            # Other bit widths are only legal for u/i/b, and decode to
            # Python objects rather than packed numpy scalars.
            if code not in ['u', 'i', 'b']:
                raise ValueError('illegal format ({}, {})'.format(code, length))
            fields.append('O')
    return _np.dtype(','.join(fields))
|
Attempt to convert a SPEAD format specification to a numpy dtype.
Where necessary, `O` is used.
Raises
------
ValueError
If the format is illegal
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L209-L235
| null |
class Descriptor(object):
    """Metadata for a SPEAD item.

    There are a number of restrictions in the way the parameters combine,
    which will cause `ValueError` to be raised if violated:

    - At most one element of `shape` can be `None`.
    - Exactly one of `dtype` and `format` must be non-`None`.
    - If `dtype` is specified, `shape` cannot have any unknown dimensions.
    - If `format` is specified, `order` must be 'C'

    Parameters
    ----------
    id : int
        SPEAD item ID
    name : str
        Short item name, suitable for use as a key
    description : str
        Long item description
    shape : sequence
        Dimensions, with `None` indicating a variable-size dimension
    dtype : numpy data type, optional
        Data type, or `None` if `format` will be used instead
    order : {'C', 'F'}
        Indicates C-order or Fortran-order storage
    format : list of pairs, optional
        Structure fields for generic (non-numpy) type. Each element of the list
        is a tuple of field code and bit length.
    """

    def __init__(self, id, name, description, shape, dtype=None, order='C', format=None):
        shape = tuple(shape)
        unknowns = sum([x is None for x in shape])
        if unknowns > 1:
            raise ValueError('Cannot have multiple unknown dimensions')
        if dtype is not None:
            dtype = _np.dtype(dtype)
            if dtype.hasobject:
                raise TypeError('Cannot use dtype that has reference-counted objects')
            if format is not None:
                raise ValueError('Only one of dtype and format can be specified')
            if unknowns > 0:
                raise ValueError('Cannot have unknown dimensions when using numpy descriptor')
            self._internal_dtype = dtype
        else:
            if format is None:
                raise ValueError('One of dtype and format must be specified')
            if order != 'C':
                raise ValueError("When specifying format, order must be 'C'")
            self._internal_dtype = self._parse_format(format)
        if order not in ['C', 'F']:
            raise ValueError("Order must be 'C' or 'F'")
        self.id = id
        self.name = name
        self.description = description
        self.shape = shape
        self.dtype = dtype
        self.order = order
        self.format = format
        # Select the decode fast path: pure numpy view, single immediate
        # integer, or the generic bit-level path.
        if not self._internal_dtype.hasobject:
            self._fastpath = _FASTPATH_NUMPY
        elif (not shape and
                dtype is None and
                len(format) == 1 and
                format[0][0] in ('u', 'i') and
                self._internal_dtype.hasobject):
            self._fastpath = _FASTPATH_IMMEDIATE
        else:
            self._fastpath = _FASTPATH_NONE

    @classmethod
    def _parse_numpy_header(cls, header):
        """Parse a numpy .npy-style header into (shape, order, dtype).

        Raises `ValueError` if the header is malformed.
        """
        try:
            d = _np.lib.utils.safe_eval(header)
        except SyntaxError as e:
            msg = "Cannot parse descriptor: %r\nException: %r"
            raise ValueError(msg % (header, e))
        if not isinstance(d, dict):
            msg = "Descriptor is not a dictionary: %r"
            raise ValueError(msg % d)
        keys = list(d.keys())
        keys.sort()
        if keys != ['descr', 'fortran_order', 'shape']:
            msg = "Descriptor does not contain the correct keys: %r"
            raise ValueError(msg % (keys,))
        # Sanity-check the values.
        if (not isinstance(d['shape'], tuple) or
                not all([isinstance(x, _numbers.Integral) and x >= 0 for x in d['shape']])):
            msg = "shape is not valid: %r"
            raise ValueError(msg % (d['shape'],))
        if not isinstance(d['fortran_order'], bool):
            msg = "fortran_order is not a valid bool: %r"
            raise ValueError(msg % (d['fortran_order'],))
        try:
            dtype = _np.dtype(d['descr'])
        except TypeError:
            msg = "descr is not a valid dtype descriptor: %r"
            raise ValueError(msg % (d['descr'],))
        order = 'F' if d['fortran_order'] else 'C'
        return d['shape'], order, dtype

    @classmethod
    def _make_numpy_header(cls, shape, dtype, order):
        # Fixed: first parameter of this classmethod was misleadingly
        # named `self`.
        """Build a numpy .npy-style header string for shape/dtype/order."""
        return "{{'descr': {!r}, 'fortran_order': {!r}, 'shape': {!r}}}".format(
            _np.lib.format.dtype_to_descr(dtype), order == 'F',
            tuple(shape))

    @classmethod
    def _parse_format(cls, fmt):
        # Restored: this method is required by __init__ for format-based
        # descriptors but was missing from this copy of the class.
        """Attempt to convert a SPEAD format specification to a numpy dtype.

        Where necessary, `O` is used.

        Raises
        ------
        ValueError
            If the format is illegal
        """
        fields = []
        if not fmt:
            raise ValueError('empty format')
        for code, length in fmt:
            if length == 0:
                raise ValueError('zero-length field (bug_compat mismatch?)')
            if ((code in ('u', 'i') and length in (8, 16, 32, 64)) or
                    (code == 'f' and length in (32, 64))):
                fields.append('>' + code + str(length // 8))
            elif code == 'b' and length == 8:
                fields.append('?')
            elif code == 'c' and length == 8:
                fields.append('S1')
            else:
                if code not in ['u', 'i', 'b']:
                    raise ValueError('illegal format ({}, {})'.format(code, length))
                fields.append('O')
        return _np.dtype(','.join(fields))

    @property
    def itemsize_bits(self):
        # Fixed: a stray @classmethod was stacked on this property.
        """Number of bits per element"""
        if self.dtype is not None:
            return self.dtype.itemsize * 8
        else:
            return sum(x[1] for x in self.format)

    def is_variable_size(self):
        """Determine whether any element of the size is dynamic"""
        return any([x is None for x in self.shape])

    def allow_immediate(self):
        """Called by the C++ interface to determine whether sufficiently small
        items should be encoded as immediates.

        Variable-size objects cannot be immediates because there is no way to
        determine the true payload size. Types with a non-integral number of
        bytes are banned because the protocol does not specify where the
        padding should go, and PySPEAD's encoder and decoder disagree, so it
        is best not to send them at all.
        """
        return not self.is_variable_size() and (
            self.dtype is not None or self.itemsize_bits % 8 == 0)

    def dynamic_shape(self, max_elements):
        """Determine the dynamic shape, given incoming data that is big enough
        to hold `max_elements` elements.
        """
        known = 1
        unknown_pos = -1
        for i, x in enumerate(self.shape):
            if x is not None:
                known *= x
            else:
                assert unknown_pos == -1, 'Shape has multiple unknown dimensions'
                unknown_pos = i
        if unknown_pos == -1:
            return self.shape
        else:
            shape = list(self.shape)
            if known == 0:
                shape[unknown_pos] = 0
            else:
                shape[unknown_pos] = max_elements // known
            return shape

    def compatible_shape(self, shape):
        """Determine whether `shape` is compatible with the (possibly
        variable-sized) shape for this descriptor"""
        if len(shape) != len(self.shape):
            return False
        for x, y in zip(self.shape, shape):
            if x is not None and x != y:
                return False
        return True

    @classmethod
    def from_raw(cls, raw_descriptor, flavour):
        """Construct a :class:`Descriptor` from a low-level raw descriptor."""
        dtype = None
        format = None
        if raw_descriptor.numpy_header:
            header = _bytes_to_str_ascii(raw_descriptor.numpy_header)
            shape, order, dtype = cls._parse_numpy_header(header)
            if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
                dtype = dtype.newbyteorder()
        else:
            shape = raw_descriptor.shape
            order = 'C'
            format = raw_descriptor.format
        return cls(
            raw_descriptor.id,
            _bytes_to_str_ascii(raw_descriptor.name),
            _bytes_to_str_ascii(raw_descriptor.description),
            shape, dtype, order, format)

    def to_raw(self, flavour):
        """Convert this descriptor to a low-level raw descriptor."""
        raw = spead2._spead2.RawDescriptor()
        raw.id = self.id
        raw.name = self.name.encode('ascii')
        raw.description = self.description.encode('ascii')
        raw.shape = self.shape
        if self.dtype is not None:
            if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
                dtype = self.dtype.newbyteorder()
            else:
                dtype = self.dtype
            raw.numpy_header = self._make_numpy_header(
                self.shape, dtype, self.order).encode('ascii')
        else:
            raw.format = self.format
        return raw
|
ska-sa/spead2
|
spead2/__init__.py
|
Descriptor.itemsize_bits
|
python
|
def itemsize_bits(self):
    """Number of bits occupied by a single element."""
    if self.dtype is None:
        # Format-based descriptor: total the per-field bit lengths.
        return sum(length for _, length in self.format)
    return self.dtype.itemsize * 8
|
Number of bits per element
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L238-L243
| null |
class Descriptor(object):
    """Metadata for a SPEAD item.

    There are a number of restrictions in the way the parameters combine,
    which will cause `ValueError` to be raised if violated:

    - At most one element of `shape` can be `None`.
    - Exactly one of `dtype` and `format` must be non-`None`.
    - If `dtype` is specified, `shape` cannot have any unknown dimensions.
    - If `format` is specified, `order` must be 'C'

    Parameters
    ----------
    id : int
        SPEAD item ID
    name : str
        Short item name, suitable for use as a key
    description : str
        Long item description
    shape : sequence
        Dimensions, with `None` indicating a variable-size dimension
    dtype : numpy data type, optional
        Data type, or `None` if `format` will be used instead
    order : {'C', 'F'}
        Indicates C-order or Fortran-order storage
    format : list of pairs, optional
        Structure fields for generic (non-numpy) type. Each element of the list
        is a tuple of field code and bit length.
    """

    def __init__(self, id, name, description, shape, dtype=None, order='C', format=None):
        shape = tuple(shape)
        unknowns = sum([x is None for x in shape])
        if unknowns > 1:
            raise ValueError('Cannot have multiple unknown dimensions')
        if dtype is not None:
            dtype = _np.dtype(dtype)
            if dtype.hasobject:
                raise TypeError('Cannot use dtype that has reference-counted objects')
            if format is not None:
                raise ValueError('Only one of dtype and format can be specified')
            if unknowns > 0:
                raise ValueError('Cannot have unknown dimensions when using numpy descriptor')
            self._internal_dtype = dtype
        else:
            if format is None:
                raise ValueError('One of dtype and format must be specified')
            if order != 'C':
                raise ValueError("When specifying format, order must be 'C'")
            self._internal_dtype = self._parse_format(format)
        if order not in ['C', 'F']:
            raise ValueError("Order must be 'C' or 'F'")
        self.id = id
        self.name = name
        self.description = description
        self.shape = shape
        self.dtype = dtype
        self.order = order
        self.format = format
        # Select the decode fast path: pure numpy view, single immediate
        # integer, or the generic bit-level path.
        if not self._internal_dtype.hasobject:
            self._fastpath = _FASTPATH_NUMPY
        elif (not shape and
                dtype is None and
                len(format) == 1 and
                format[0][0] in ('u', 'i') and
                self._internal_dtype.hasobject):
            self._fastpath = _FASTPATH_IMMEDIATE
        else:
            self._fastpath = _FASTPATH_NONE

    @classmethod
    def _parse_numpy_header(cls, header):
        """Parse a numpy .npy-style header into (shape, order, dtype).

        Raises `ValueError` if the header is malformed.
        """
        try:
            d = _np.lib.utils.safe_eval(header)
        except SyntaxError as e:
            msg = "Cannot parse descriptor: %r\nException: %r"
            raise ValueError(msg % (header, e))
        if not isinstance(d, dict):
            msg = "Descriptor is not a dictionary: %r"
            raise ValueError(msg % d)
        keys = list(d.keys())
        keys.sort()
        if keys != ['descr', 'fortran_order', 'shape']:
            msg = "Descriptor does not contain the correct keys: %r"
            raise ValueError(msg % (keys,))
        # Sanity-check the values.
        if (not isinstance(d['shape'], tuple) or
                not all([isinstance(x, _numbers.Integral) and x >= 0 for x in d['shape']])):
            msg = "shape is not valid: %r"
            raise ValueError(msg % (d['shape'],))
        if not isinstance(d['fortran_order'], bool):
            msg = "fortran_order is not a valid bool: %r"
            raise ValueError(msg % (d['fortran_order'],))
        try:
            dtype = _np.dtype(d['descr'])
        except TypeError:
            msg = "descr is not a valid dtype descriptor: %r"
            raise ValueError(msg % (d['descr'],))
        order = 'F' if d['fortran_order'] else 'C'
        return d['shape'], order, dtype

    @classmethod
    def _make_numpy_header(cls, shape, dtype, order):
        # Fixed: first parameter of this classmethod was misleadingly
        # named `self`.
        """Build a numpy .npy-style header string for shape/dtype/order."""
        return "{{'descr': {!r}, 'fortran_order': {!r}, 'shape': {!r}}}".format(
            _np.lib.format.dtype_to_descr(dtype), order == 'F',
            tuple(shape))

    @classmethod
    def _parse_format(cls, fmt):
        """Attempt to convert a SPEAD format specification to a numpy dtype.

        Where necessary, `O` is used.

        Raises
        ------
        ValueError
            If the format is illegal
        """
        fields = []
        if not fmt:
            raise ValueError('empty format')
        for code, length in fmt:
            if length == 0:
                raise ValueError('zero-length field (bug_compat mismatch?)')
            if ((code in ('u', 'i') and length in (8, 16, 32, 64)) or
                    (code == 'f' and length in (32, 64))):
                fields.append('>' + code + str(length // 8))
            elif code == 'b' and length == 8:
                fields.append('?')
            elif code == 'c' and length == 8:
                fields.append('S1')
            else:
                if code not in ['u', 'i', 'b']:
                    raise ValueError('illegal format ({}, {})'.format(code, length))
                fields.append('O')
        return _np.dtype(','.join(fields))

    @property
    def itemsize_bits(self):
        # Restored: allow_immediate reads self.itemsize_bits, but this
        # property was missing and its @property decorator had been
        # misattached to is_variable_size below.
        """Number of bits per element"""
        if self.dtype is not None:
            return self.dtype.itemsize * 8
        else:
            return sum(x[1] for x in self.format)

    def is_variable_size(self):
        # Fixed: a stray @property here broke the self.is_variable_size()
        # call in allow_immediate.
        """Determine whether any element of the size is dynamic"""
        return any([x is None for x in self.shape])

    def allow_immediate(self):
        """Called by the C++ interface to determine whether sufficiently small
        items should be encoded as immediates.

        Variable-size objects cannot be immediates because there is no way to
        determine the true payload size. Types with a non-integral number of
        bytes are banned because the protocol does not specify where the
        padding should go, and PySPEAD's encoder and decoder disagree, so it
        is best not to send them at all.
        """
        return not self.is_variable_size() and (
            self.dtype is not None or self.itemsize_bits % 8 == 0)

    def dynamic_shape(self, max_elements):
        """Determine the dynamic shape, given incoming data that is big enough
        to hold `max_elements` elements.
        """
        known = 1
        unknown_pos = -1
        for i, x in enumerate(self.shape):
            if x is not None:
                known *= x
            else:
                assert unknown_pos == -1, 'Shape has multiple unknown dimensions'
                unknown_pos = i
        if unknown_pos == -1:
            return self.shape
        else:
            shape = list(self.shape)
            if known == 0:
                shape[unknown_pos] = 0
            else:
                shape[unknown_pos] = max_elements // known
            return shape

    def compatible_shape(self, shape):
        """Determine whether `shape` is compatible with the (possibly
        variable-sized) shape for this descriptor"""
        if len(shape) != len(self.shape):
            return False
        for x, y in zip(self.shape, shape):
            if x is not None and x != y:
                return False
        return True

    @classmethod
    def from_raw(cls, raw_descriptor, flavour):
        """Construct a :class:`Descriptor` from a low-level raw descriptor."""
        dtype = None
        format = None
        if raw_descriptor.numpy_header:
            header = _bytes_to_str_ascii(raw_descriptor.numpy_header)
            shape, order, dtype = cls._parse_numpy_header(header)
            if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
                dtype = dtype.newbyteorder()
        else:
            shape = raw_descriptor.shape
            order = 'C'
            format = raw_descriptor.format
        return cls(
            raw_descriptor.id,
            _bytes_to_str_ascii(raw_descriptor.name),
            _bytes_to_str_ascii(raw_descriptor.description),
            shape, dtype, order, format)

    def to_raw(self, flavour):
        """Convert this descriptor to a low-level raw descriptor."""
        raw = spead2._spead2.RawDescriptor()
        raw.id = self.id
        raw.name = self.name.encode('ascii')
        raw.description = self.description.encode('ascii')
        raw.shape = self.shape
        if self.dtype is not None:
            if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
                dtype = self.dtype.newbyteorder()
            else:
                dtype = self.dtype
            raw.numpy_header = self._make_numpy_header(
                self.shape, dtype, self.order).encode('ascii')
        else:
            raw.format = self.format
        return raw
|
ska-sa/spead2
|
spead2/__init__.py
|
Descriptor.dynamic_shape
|
python
|
def dynamic_shape(self, max_elements):
    """Determine the dynamic shape, given incoming data that is big
    enough to hold `max_elements` elements.
    """
    unknown_pos = -1
    fixed_product = 1
    for axis, extent in enumerate(self.shape):
        if extent is None:
            assert unknown_pos == -1, 'Shape has multiple unknown dimensions'
            unknown_pos = axis
        else:
            fixed_product *= extent
    if unknown_pos == -1:
        # Fully static shape: return it unchanged.
        return self.shape
    result = list(self.shape)
    result[unknown_pos] = 0 if fixed_product == 0 else max_elements // fixed_product
    return result
|
Determine the dynamic shape, given incoming data that is big enough
to hold `max_elements` elements.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L262-L282
| null |
class Descriptor(object):
    """Metadata for a SPEAD item.

    There are a number of restrictions in the way the parameters combine,
    which will cause `ValueError` to be raised if violated:

    - At most one element of `shape` can be `None`.
    - Exactly one of `dtype` and `format` must be non-`None`.
    - If `dtype` is specified, `shape` cannot have any unknown dimensions.
    - If `format` is specified, `order` must be 'C'

    Parameters
    ----------
    id : int
        SPEAD item ID
    name : str
        Short item name, suitable for use as a key
    description : str
        Long item description
    shape : sequence
        Dimensions, with `None` indicating a variable-size dimension
    dtype : numpy data type, optional
        Data type, or `None` if `format` will be used instead
    order : {'C', 'F'}
        Indicates C-order or Fortran-order storage
    format : list of pairs, optional
        Structure fields for generic (non-numpy) type. Each element of the list
        is a tuple of field code and bit length.
    """

    def __init__(self, id, name, description, shape, dtype=None, order='C', format=None):
        shape = tuple(shape)
        unknowns = sum([x is None for x in shape])
        if unknowns > 1:
            raise ValueError('Cannot have multiple unknown dimensions')
        if dtype is not None:
            dtype = _np.dtype(dtype)
            if dtype.hasobject:
                raise TypeError('Cannot use dtype that has reference-counted objects')
            if format is not None:
                raise ValueError('Only one of dtype and format can be specified')
            if unknowns > 0:
                raise ValueError('Cannot have unknown dimensions when using numpy descriptor')
            self._internal_dtype = dtype
        else:
            if format is None:
                raise ValueError('One of dtype and format must be specified')
            if order != 'C':
                raise ValueError("When specifying format, order must be 'C'")
            self._internal_dtype = self._parse_format(format)
        if order not in ['C', 'F']:
            raise ValueError("Order must be 'C' or 'F'")
        self.id = id
        self.name = name
        self.description = description
        self.shape = shape
        self.dtype = dtype
        self.order = order
        self.format = format
        # Select the decode fast path: pure numpy view, single immediate
        # integer, or the generic bit-level path.
        if not self._internal_dtype.hasobject:
            self._fastpath = _FASTPATH_NUMPY
        elif (not shape and
                dtype is None and
                len(format) == 1 and
                format[0][0] in ('u', 'i') and
                self._internal_dtype.hasobject):
            self._fastpath = _FASTPATH_IMMEDIATE
        else:
            self._fastpath = _FASTPATH_NONE

    @classmethod
    def _parse_numpy_header(cls, header):
        """Parse a numpy .npy-style header into (shape, order, dtype).

        Raises `ValueError` if the header is malformed.
        """
        try:
            d = _np.lib.utils.safe_eval(header)
        except SyntaxError as e:
            msg = "Cannot parse descriptor: %r\nException: %r"
            raise ValueError(msg % (header, e))
        if not isinstance(d, dict):
            msg = "Descriptor is not a dictionary: %r"
            raise ValueError(msg % d)
        keys = list(d.keys())
        keys.sort()
        if keys != ['descr', 'fortran_order', 'shape']:
            msg = "Descriptor does not contain the correct keys: %r"
            raise ValueError(msg % (keys,))
        # Sanity-check the values.
        if (not isinstance(d['shape'], tuple) or
                not all([isinstance(x, _numbers.Integral) and x >= 0 for x in d['shape']])):
            msg = "shape is not valid: %r"
            raise ValueError(msg % (d['shape'],))
        if not isinstance(d['fortran_order'], bool):
            msg = "fortran_order is not a valid bool: %r"
            raise ValueError(msg % (d['fortran_order'],))
        try:
            dtype = _np.dtype(d['descr'])
        except TypeError:
            msg = "descr is not a valid dtype descriptor: %r"
            raise ValueError(msg % (d['descr'],))
        order = 'F' if d['fortran_order'] else 'C'
        return d['shape'], order, dtype

    @classmethod
    def _make_numpy_header(cls, shape, dtype, order):
        # Fixed: first parameter of this classmethod was misleadingly
        # named `self`.
        """Build a numpy .npy-style header string for shape/dtype/order."""
        return "{{'descr': {!r}, 'fortran_order': {!r}, 'shape': {!r}}}".format(
            _np.lib.format.dtype_to_descr(dtype), order == 'F',
            tuple(shape))

    @classmethod
    def _parse_format(cls, fmt):
        """Attempt to convert a SPEAD format specification to a numpy dtype.

        Where necessary, `O` is used.

        Raises
        ------
        ValueError
            If the format is illegal
        """
        fields = []
        if not fmt:
            raise ValueError('empty format')
        for code, length in fmt:
            if length == 0:
                raise ValueError('zero-length field (bug_compat mismatch?)')
            if ((code in ('u', 'i') and length in (8, 16, 32, 64)) or
                    (code == 'f' and length in (32, 64))):
                fields.append('>' + code + str(length // 8))
            elif code == 'b' and length == 8:
                fields.append('?')
            elif code == 'c' and length == 8:
                fields.append('S1')
            else:
                if code not in ['u', 'i', 'b']:
                    raise ValueError('illegal format ({}, {})'.format(code, length))
                fields.append('O')
        return _np.dtype(','.join(fields))

    @property
    def itemsize_bits(self):
        """Number of bits per element"""
        if self.dtype is not None:
            return self.dtype.itemsize * 8
        else:
            return sum(x[1] for x in self.format)

    def is_variable_size(self):
        """Determine whether any element of the size is dynamic"""
        return any([x is None for x in self.shape])

    def allow_immediate(self):
        """Called by the C++ interface to determine whether sufficiently small
        items should be encoded as immediates.

        Variable-size objects cannot be immediates because there is no way to
        determine the true payload size. Types with a non-integral number of
        bytes are banned because the protocol does not specify where the
        padding should go, and PySPEAD's encoder and decoder disagree, so it
        is best not to send them at all.
        """
        return not self.is_variable_size() and (
            self.dtype is not None or self.itemsize_bits % 8 == 0)

    def dynamic_shape(self, max_elements):
        # Restored: this method was missing from this copy of the class;
        # it is part of the public API (used by Item.set_from_raw).
        """Determine the dynamic shape, given incoming data that is big enough
        to hold `max_elements` elements.
        """
        known = 1
        unknown_pos = -1
        for i, x in enumerate(self.shape):
            if x is not None:
                known *= x
            else:
                assert unknown_pos == -1, 'Shape has multiple unknown dimensions'
                unknown_pos = i
        if unknown_pos == -1:
            return self.shape
        else:
            shape = list(self.shape)
            if known == 0:
                shape[unknown_pos] = 0
            else:
                shape[unknown_pos] = max_elements // known
            return shape

    def compatible_shape(self, shape):
        """Determine whether `shape` is compatible with the (possibly
        variable-sized) shape for this descriptor"""
        if len(shape) != len(self.shape):
            return False
        for x, y in zip(self.shape, shape):
            if x is not None and x != y:
                return False
        return True

    @classmethod
    def from_raw(cls, raw_descriptor, flavour):
        """Construct a :class:`Descriptor` from a low-level raw descriptor."""
        dtype = None
        format = None
        if raw_descriptor.numpy_header:
            header = _bytes_to_str_ascii(raw_descriptor.numpy_header)
            shape, order, dtype = cls._parse_numpy_header(header)
            if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
                dtype = dtype.newbyteorder()
        else:
            shape = raw_descriptor.shape
            order = 'C'
            format = raw_descriptor.format
        return cls(
            raw_descriptor.id,
            _bytes_to_str_ascii(raw_descriptor.name),
            _bytes_to_str_ascii(raw_descriptor.description),
            shape, dtype, order, format)

    def to_raw(self, flavour):
        """Convert this descriptor to a low-level raw descriptor."""
        raw = spead2._spead2.RawDescriptor()
        raw.id = self.id
        raw.name = self.name.encode('ascii')
        raw.description = self.description.encode('ascii')
        raw.shape = self.shape
        if self.dtype is not None:
            if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
                dtype = self.dtype.newbyteorder()
            else:
                dtype = self.dtype
            raw.numpy_header = self._make_numpy_header(
                self.shape, dtype, self.order).encode('ascii')
        else:
            raw.format = self.format
        return raw
ska-sa/spead2
|
spead2/__init__.py
|
Descriptor.compatible_shape
|
python
|
def compatible_shape(self, shape):
    """Determine whether `shape` is compatible with the (possibly
    variable-sized) shape for this descriptor."""
    if len(self.shape) != len(shape):
        return False
    # `None` dimensions match any extent; fixed ones must match exactly.
    return all(expected is None or expected == actual
               for expected, actual in zip(self.shape, shape))
|
Determine whether `shape` is compatible with the (possibly
variable-sized) shape for this descriptor
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L284-L292
| null |
class Descriptor(object):
    """Metadata for a SPEAD item.

    There are a number of restrictions in the way the parameters combine,
    which will cause `ValueError` to be raised if violated:

    - At most one element of `shape` can be `None`.
    - Exactly one of `dtype` and `format` must be non-`None`.
    - If `dtype` is specified, `shape` cannot have any unknown dimensions.
    - If `format` is specified, `order` must be 'C'

    Parameters
    ----------
    id : int
        SPEAD item ID
    name : str
        Short item name, suitable for use as a key
    description : str
        Long item description
    shape : sequence
        Dimensions, with `None` indicating a variable-size dimension
    dtype : numpy data type, optional
        Data type, or `None` if `format` will be used instead
    order : {'C', 'F'}
        Indicates C-order or Fortran-order storage
    format : list of pairs, optional
        Structure fields for generic (non-numpy) type. Each element of the list
        is a tuple of field code and bit length.
    """

    def __init__(self, id, name, description, shape, dtype=None, order='C', format=None):
        shape = tuple(shape)
        unknowns = sum([x is None for x in shape])
        if unknowns > 1:
            raise ValueError('Cannot have multiple unknown dimensions')
        if dtype is not None:
            dtype = _np.dtype(dtype)
            if dtype.hasobject:
                raise TypeError('Cannot use dtype that has reference-counted objects')
            if format is not None:
                raise ValueError('Only one of dtype and format can be specified')
            if unknowns > 0:
                raise ValueError('Cannot have unknown dimensions when using numpy descriptor')
            self._internal_dtype = dtype
        else:
            if format is None:
                raise ValueError('One of dtype and format must be specified')
            if order != 'C':
                raise ValueError("When specifying format, order must be 'C'")
            self._internal_dtype = self._parse_format(format)
        if order not in ['C', 'F']:
            raise ValueError("Order must be 'C' or 'F'")
        self.id = id
        self.name = name
        self.description = description
        self.shape = shape
        self.dtype = dtype
        self.order = order
        self.format = format
        # Select the decode fast path: pure numpy view, single immediate
        # integer, or the generic bit-level path.
        if not self._internal_dtype.hasobject:
            self._fastpath = _FASTPATH_NUMPY
        elif (not shape and
                dtype is None and
                len(format) == 1 and
                format[0][0] in ('u', 'i') and
                self._internal_dtype.hasobject):
            self._fastpath = _FASTPATH_IMMEDIATE
        else:
            self._fastpath = _FASTPATH_NONE

    @classmethod
    def _parse_numpy_header(cls, header):
        """Parse a numpy .npy-style header into (shape, order, dtype).

        Raises `ValueError` if the header is malformed.
        """
        try:
            d = _np.lib.utils.safe_eval(header)
        except SyntaxError as e:
            msg = "Cannot parse descriptor: %r\nException: %r"
            raise ValueError(msg % (header, e))
        if not isinstance(d, dict):
            msg = "Descriptor is not a dictionary: %r"
            raise ValueError(msg % d)
        keys = list(d.keys())
        keys.sort()
        if keys != ['descr', 'fortran_order', 'shape']:
            msg = "Descriptor does not contain the correct keys: %r"
            raise ValueError(msg % (keys,))
        # Sanity-check the values.
        if (not isinstance(d['shape'], tuple) or
                not all([isinstance(x, _numbers.Integral) and x >= 0 for x in d['shape']])):
            msg = "shape is not valid: %r"
            raise ValueError(msg % (d['shape'],))
        if not isinstance(d['fortran_order'], bool):
            msg = "fortran_order is not a valid bool: %r"
            raise ValueError(msg % (d['fortran_order'],))
        try:
            dtype = _np.dtype(d['descr'])
        except TypeError:
            msg = "descr is not a valid dtype descriptor: %r"
            raise ValueError(msg % (d['descr'],))
        order = 'F' if d['fortran_order'] else 'C'
        return d['shape'], order, dtype

    @classmethod
    def _make_numpy_header(cls, shape, dtype, order):
        # Fixed: first parameter of this classmethod was misleadingly
        # named `self`.
        """Build a numpy .npy-style header string for shape/dtype/order."""
        return "{{'descr': {!r}, 'fortran_order': {!r}, 'shape': {!r}}}".format(
            _np.lib.format.dtype_to_descr(dtype), order == 'F',
            tuple(shape))

    @classmethod
    def _parse_format(cls, fmt):
        """Attempt to convert a SPEAD format specification to a numpy dtype.

        Where necessary, `O` is used.

        Raises
        ------
        ValueError
            If the format is illegal
        """
        fields = []
        if not fmt:
            raise ValueError('empty format')
        for code, length in fmt:
            if length == 0:
                raise ValueError('zero-length field (bug_compat mismatch?)')
            if ((code in ('u', 'i') and length in (8, 16, 32, 64)) or
                    (code == 'f' and length in (32, 64))):
                fields.append('>' + code + str(length // 8))
            elif code == 'b' and length == 8:
                fields.append('?')
            elif code == 'c' and length == 8:
                fields.append('S1')
            else:
                if code not in ['u', 'i', 'b']:
                    raise ValueError('illegal format ({}, {})'.format(code, length))
                fields.append('O')
        return _np.dtype(','.join(fields))

    @property
    def itemsize_bits(self):
        """Number of bits per element"""
        if self.dtype is not None:
            return self.dtype.itemsize * 8
        else:
            return sum(x[1] for x in self.format)

    def is_variable_size(self):
        """Determine whether any element of the size is dynamic"""
        return any([x is None for x in self.shape])

    def allow_immediate(self):
        """Called by the C++ interface to determine whether sufficiently small
        items should be encoded as immediates.

        Variable-size objects cannot be immediates because there is no way to
        determine the true payload size. Types with a non-integral number of
        bytes are banned because the protocol does not specify where the
        padding should go, and PySPEAD's encoder and decoder disagree, so it
        is best not to send them at all.
        """
        return not self.is_variable_size() and (
            self.dtype is not None or self.itemsize_bits % 8 == 0)

    def dynamic_shape(self, max_elements):
        """Determine the dynamic shape, given incoming data that is big enough
        to hold `max_elements` elements.
        """
        known = 1
        unknown_pos = -1
        for i, x in enumerate(self.shape):
            if x is not None:
                known *= x
            else:
                assert unknown_pos == -1, 'Shape has multiple unknown dimensions'
                unknown_pos = i
        if unknown_pos == -1:
            return self.shape
        else:
            shape = list(self.shape)
            if known == 0:
                shape[unknown_pos] = 0
            else:
                shape[unknown_pos] = max_elements // known
            return shape

    def compatible_shape(self, shape):
        # Restored: this method was missing from this copy of the class;
        # it is part of the public API documented for the descriptor.
        """Determine whether `shape` is compatible with the (possibly
        variable-sized) shape for this descriptor"""
        if len(shape) != len(self.shape):
            return False
        for x, y in zip(self.shape, shape):
            if x is not None and x != y:
                return False
        return True

    @classmethod
    def from_raw(cls, raw_descriptor, flavour):
        """Construct a :class:`Descriptor` from a low-level raw descriptor."""
        dtype = None
        format = None
        if raw_descriptor.numpy_header:
            header = _bytes_to_str_ascii(raw_descriptor.numpy_header)
            shape, order, dtype = cls._parse_numpy_header(header)
            if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
                dtype = dtype.newbyteorder()
        else:
            shape = raw_descriptor.shape
            order = 'C'
            format = raw_descriptor.format
        return cls(
            raw_descriptor.id,
            _bytes_to_str_ascii(raw_descriptor.name),
            _bytes_to_str_ascii(raw_descriptor.description),
            shape, dtype, order, format)

    def to_raw(self, flavour):
        """Convert this descriptor to a low-level raw descriptor."""
        raw = spead2._spead2.RawDescriptor()
        raw.id = self.id
        raw.name = self.name.encode('ascii')
        raw.description = self.description.encode('ascii')
        raw.shape = self.shape
        if self.dtype is not None:
            if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:
                dtype = self.dtype.newbyteorder()
            else:
                dtype = self.dtype
            raw.numpy_header = self._make_numpy_header(
                self.shape, dtype, self.order).encode('ascii')
        else:
            raw.format = self.format
        return raw
|
ska-sa/spead2
|
spead2/__init__.py
|
Item._read_bits
|
python
|
def _read_bits(cls, raw_value):
    """Generator that takes a byte sequence and provides bitfields from it.

    After creating the generator, call `send(None)` to initialise it, and
    thereafter call `send(need_bits)` to obtain that many bits.
    """
    have_bits = 0  # number of valid bits currently buffered in `bits`
    bits = 0       # buffered bits, right-aligned
    byte_source = iter(raw_value)
    result = 0
    while True:
        need_bits = yield result
        while have_bits < need_bits:
            try:
                # Pull in one more byte, most-significant-first.
                bits = (bits << 8) | int(next(byte_source))
                have_bits += 8
            except StopIteration:
                return  # input exhausted: terminate the generator
        # Peel off the top `need_bits` bits and keep the remainder.
        result = int(bits >> (have_bits - need_bits))
        bits &= (1 << (have_bits - need_bits)) - 1
        have_bits -= need_bits
|
Generator that takes a memory view and provides bitfields from it.
After creating the generator, call `send(None)` to initialise it, and
thereafter call `send(need_bits)` to obtain that many bits.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L367-L386
| null |
class Item(Descriptor):
"""A SPEAD item with a value and a version number.
Parameters
----------
value : object, optional
Initial value
"""
def __init__(self, *args, **kw):
    """Initialise the item, forwarding descriptor arguments to the base
    class and capturing an optional initial ``value``.
    """
    initial_value = kw.pop('value', None)
    super(Item, self).__init__(*args, **kw)
    self._value = initial_value
    self.version = 1  #: Version number
@property
def value(self):
    """Current value. Assigning to this will increment the version number.

    Assigning `None` will raise `ValueError` because there is no way to
    encode this using SPEAD.

    .. warning:: If you modify a mutable value in-place, the change will
       not be detected, and the new value will not be transmitted. In this
       case, either manually increment the version number, or reassign the
       value.
    """
    return self._value

@value.setter
def value(self, new_value):
    # Reject None up front: SPEAD has no encoding for "no value".
    if new_value is None:
        raise ValueError("Item value cannot be set to None")
    self._value = new_value
    # Bump the version so change-tracking sees the assignment.
    self.version += 1
@classmethod
def _read_bits(cls, raw_value):
    # Restored: one of the two stacked @classmethod decorators here was
    # the orphaned decorator of this missing method; without it,
    # _write_bits was wrapped in classmethod twice.
    """Generator that takes a byte sequence and provides bitfields from it.

    After creating the generator, call `send(None)` to initialise it, and
    thereafter call `send(need_bits)` to obtain that many bits.
    """
    have_bits = 0
    bits = 0
    byte_source = iter(raw_value)
    result = 0
    while True:
        need_bits = yield result
        while have_bits < need_bits:
            try:
                bits = (bits << 8) | int(next(byte_source))
                have_bits += 8
            except StopIteration:
                return
        result = int(bits >> (have_bits - need_bits))
        bits &= (1 << (have_bits - need_bits)) - 1
        have_bits -= need_bits

@classmethod
def _write_bits(cls, array):
    """Generator that fills a `bytearray` with provided bits. After
    creating the generator, call `send(None)` to initialise it, and
    thereafter call `send((value, bits))` to add that many bits into
    the array. You must call `close()` to flush any partial bytes."""
    pos = 0
    current = 0  # bits not yet written into array
    current_bits = 0
    try:
        while True:
            (value, bits) = yield
            if value < 0 or value >= (1 << bits):
                raise ValueError('Value is out of range for number of bits')
            current = (current << bits) | value
            current_bits += bits
            # Flush whole bytes as they become available.
            while current_bits >= 8:
                array[pos] = current >> (current_bits - 8)
                current &= (1 << (current_bits - 8)) - 1
                current_bits -= 8
                pos += 1
    except GeneratorExit:
        # Pad the final partial byte with zero bits on the right.
        if current_bits > 0:
            current <<= (8 - current_bits)
            array[pos] = current
def _load_recursive(self, shape, gen):
    """Recursively create a multidimensional array (as lists of lists)
    from a bit generator.

    `gen` is a bit-source generator (presumably created by `_read_bits`
    — confirm) that returns `length` bits per `send(length)` call.
    """
    if len(shape) > 0:
        # Peel off the leading dimension and recurse for each element.
        ans = []
        for i in range(shape[0]):
            ans.append(self._load_recursive(shape[1:], gen))
    else:
        # Scalar case: decode one field per (code, length) entry in
        # self.format.
        fields = []
        for code, length in self.format:
            field = None
            raw = gen.send(length)
            if code == 'u':
                field = raw
            elif code == 'i':
                field = raw
                # Interpret as 2's complement
                if field >= 1 << (length - 1):
                    field -= 1 << length
            elif code == 'b':
                field = bool(raw)
            elif code == 'c':
                field = six.int2byte(raw)
            elif code == 'f':
                # Reinterpret the raw bit pattern as an IEEE float.
                if length == 32:
                    field = _np.uint32(raw).view(_np.float32)
                elif length == 64:
                    field = _np.uint64(raw).view(_np.float64)
                else:
                    raise ValueError('unhandled float length {0}'.format((code, length)))
            else:
                raise ValueError('unhandled format {0}'.format((code, length)))
            fields.append(field)
        # A single-field format decodes to a bare value, not a 1-tuple.
        if len(fields) == 1:
            ans = fields[0]
        else:
            ans = tuple(fields)
    return ans
def _store_recursive(self, dims, value, gen):
if dims > 0:
for sub in value:
self._store_recursive(dims - 1, sub, gen)
else:
if len(self.format) == 1:
value = (value,)
for (code, length), field in zip(self.format, value):
raw = None
if code == 'u':
raw = int(field)
if raw < 0 or raw >= (1 << length):
raise ValueError('{} is out of range for u{}'.format(raw, length))
elif code == 'i':
top_bit = 1 << (length - 1)
raw = int(field)
if raw < -top_bit or raw >= top_bit:
raise ValueError('{} is out of range for i{}'.format(field, length))
# convert to 2's complement
if raw < 0:
raw += 2 * top_bit
elif code == 'b':
raw = 1 if field else 0
elif code == 'c':
raw = ord(field)
elif code == 'f':
if length == 32:
raw = _np.float32(field).view(_np.uint32)
elif length == 64:
raw = _np.float64(field).view(_np.uint64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
gen.send((raw, length))
def set_from_raw(self, raw_item):
raw_value = _np.array(raw_item, _np.uint8, copy=False)
if self._fastpath == _FASTPATH_NUMPY:
max_elements = raw_value.shape[0] // self._internal_dtype.itemsize
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
size_bytes = elements * self._internal_dtype.itemsize
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
# For some reason, np.frombuffer doesn't work on memoryview, but np.array does
array1d = raw_value[-size_bytes:]
else:
array1d = raw_value[:size_bytes]
array1d = array1d.view(dtype=self._internal_dtype)
# Force to native endian
array1d = array1d.astype(self._internal_dtype.newbyteorder('='),
casting='equiv', copy=False)
value = _np.reshape(array1d, shape, self.order)
elif (self._fastpath == _FASTPATH_IMMEDIATE and
raw_item.is_immediate and
raw_value.shape[0] * 8 == self.format[0][1]):
value = raw_item.immediate_value
if self.format[0][0] == 'i':
top = 1 << (self.format[0][1] - 1)
if value >= top:
value -= 2 * top
else:
itemsize_bits = self.itemsize_bits
max_elements = raw_value.shape[0] * 8 // itemsize_bits
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
bits = elements * itemsize_bits
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
size_bytes = (bits + 7) // 8
raw_value = raw_value[-size_bytes:]
gen = self._read_bits(raw_value)
gen.send(None) # Initialisation of the generator
value = _np.array(self._load_recursive(shape, gen), self._internal_dtype)
if len(self.shape) == 0 and isinstance(value, _np.ndarray):
# Convert zero-dimensional array to scalar
value = value[()]
elif len(self.shape) == 1 and self.format == [('c', 8)]:
# Convert array of characters to a string
value = _bytes_to_str_ascii(b''.join(value))
self.value = value
def _num_elements(self):
if isinstance(self.value, _np.ndarray):
return self.value.size
cur = self.value
ans = 1
for size in self.shape:
ans *= len(cur)
if ans == 0:
return ans # Prevents IndexError below
cur = cur[0]
return ans
def _transform_value(self):
"""Mangle the value into a numpy array. This does several things:
- If it is stringlike (bytes or unicode) and the expected shape is
1D, it is split into an array of characters.
- It is coerced to a numpy array, enforcing the dtype and order. Where
possible, no copy is made.
- The shape is checked against the expected shape.
Returns
-------
value : :py:class:`numpy.ndarray`
The transformed value
Raises
------
ValueError
if the value is `None`
ValueError
if the value has the wrong shape
TypeError
if numpy raised it when trying to convert the value
"""
value = self.value
if value is None:
raise ValueError('Cannot send a value of None')
if (isinstance(value, (six.binary_type, six.text_type)) and
len(self.shape) == 1):
# This is complicated by Python 3 not providing a simple way to
# turn a bytes object into a list of one-byte objects, the way
# list(str) does.
value = [self.value[i : i + 1] for i in range(len(self.value))]
value = _np.array(value, dtype=self._internal_dtype, order=self.order, copy=False)
if not self.compatible_shape(value.shape):
raise ValueError('Value has shape {}, expected {}'.format(value.shape, self.shape))
return value
def to_buffer(self):
"""Returns an object that implements the buffer protocol for the value.
It can be either the original value (on the numpy fast path), or a new
temporary object.
"""
value = self._transform_value()
if self._fastpath != _FASTPATH_NUMPY:
bit_length = self.itemsize_bits * self._num_elements()
out = bytearray((bit_length + 7) // 8)
gen = self._write_bits(out)
gen.send(None) # Initialise the generator
# If it's a scalar, unpack it. That way, the input to the
# final level of recursion in _store_recursive is always
# the scalar rather than the 0D array.
if len(self.shape) == 0:
value = value[()]
self._store_recursive(len(self.shape), value, gen)
gen.close()
return out
else:
if self.order == 'F':
# numpy doesn't allow buffer protocol to be used on arrays that
# aren't C-contiguous, but transposition just fiddles the
# strides of the view without creating a new array.
value = value.transpose()
return value
|
ska-sa/spead2
|
spead2/__init__.py
|
Item._write_bits
|
python
|
def _write_bits(cls, array):
pos = 0
current = 0 # bits not yet written into array
current_bits = 0
try:
while True:
(value, bits) = yield
if value < 0 or value >= (1 << bits):
raise ValueError('Value is out of range for number of bits')
current = (current << bits) | value
current_bits += bits
while current_bits >= 8:
array[pos] = current >> (current_bits - 8)
current &= (1 << (current_bits - 8)) - 1
current_bits -= 8
pos += 1
except GeneratorExit:
if current_bits > 0:
current <<= (8 - current_bits)
array[pos] = current
|
Generator that fills a `bytearray` with provided bits. After
creating the generator, call `send(None)` to initialise it, and
thereafter call `send((value, bits))` to add that many bits into
the array. You must call `close()` to flush any partial bytes.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L389-L412
| null |
class Item(Descriptor):
"""A SPEAD item with a value and a version number.
Parameters
----------
value : object, optional
Initial value
"""
def __init__(self, *args, **kw):
value = kw.pop('value', None)
super(Item, self).__init__(*args, **kw)
self._value = value
self.version = 1 #: Version number
@property
def value(self):
"""Current value. Assigning to this will increment the version number.
Assigning `None` will raise `ValueError` because there is no way to
encode this using SPEAD.
.. warning:: If you modify a mutable value in-place, the change will
not be detected, and the new value will not be transmitted. In this
case, either manually increment the version number, or reassign the
value.
"""
return self._value
@value.setter
def value(self, new_value):
if new_value is None:
raise ValueError("Item value cannot be set to None")
self._value = new_value
self.version += 1
@classmethod
def _read_bits(cls, raw_value):
"""Generator that takes a memory view and provides bitfields from it.
After creating the generator, call `send(None)` to initialise it, and
thereafter call `send(need_bits)` to obtain that many bits.
"""
have_bits = 0
bits = 0
byte_source = iter(raw_value)
result = 0
while True:
need_bits = yield result
while have_bits < need_bits:
try:
bits = (bits << 8) | int(next(byte_source))
have_bits += 8
except StopIteration:
return
result = int(bits >> (have_bits - need_bits))
bits &= (1 << (have_bits - need_bits)) - 1
have_bits -= need_bits
@classmethod
def _load_recursive(self, shape, gen):
"""Recursively create a multidimensional array (as lists of lists)
from a bit generator.
"""
if len(shape) > 0:
ans = []
for i in range(shape[0]):
ans.append(self._load_recursive(shape[1:], gen))
else:
fields = []
for code, length in self.format:
field = None
raw = gen.send(length)
if code == 'u':
field = raw
elif code == 'i':
field = raw
# Interpret as 2's complement
if field >= 1 << (length - 1):
field -= 1 << length
elif code == 'b':
field = bool(raw)
elif code == 'c':
field = six.int2byte(raw)
elif code == 'f':
if length == 32:
field = _np.uint32(raw).view(_np.float32)
elif length == 64:
field = _np.uint64(raw).view(_np.float64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
fields.append(field)
if len(fields) == 1:
ans = fields[0]
else:
ans = tuple(fields)
return ans
def _store_recursive(self, dims, value, gen):
if dims > 0:
for sub in value:
self._store_recursive(dims - 1, sub, gen)
else:
if len(self.format) == 1:
value = (value,)
for (code, length), field in zip(self.format, value):
raw = None
if code == 'u':
raw = int(field)
if raw < 0 or raw >= (1 << length):
raise ValueError('{} is out of range for u{}'.format(raw, length))
elif code == 'i':
top_bit = 1 << (length - 1)
raw = int(field)
if raw < -top_bit or raw >= top_bit:
raise ValueError('{} is out of range for i{}'.format(field, length))
# convert to 2's complement
if raw < 0:
raw += 2 * top_bit
elif code == 'b':
raw = 1 if field else 0
elif code == 'c':
raw = ord(field)
elif code == 'f':
if length == 32:
raw = _np.float32(field).view(_np.uint32)
elif length == 64:
raw = _np.float64(field).view(_np.uint64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
gen.send((raw, length))
def set_from_raw(self, raw_item):
raw_value = _np.array(raw_item, _np.uint8, copy=False)
if self._fastpath == _FASTPATH_NUMPY:
max_elements = raw_value.shape[0] // self._internal_dtype.itemsize
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
size_bytes = elements * self._internal_dtype.itemsize
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
# For some reason, np.frombuffer doesn't work on memoryview, but np.array does
array1d = raw_value[-size_bytes:]
else:
array1d = raw_value[:size_bytes]
array1d = array1d.view(dtype=self._internal_dtype)
# Force to native endian
array1d = array1d.astype(self._internal_dtype.newbyteorder('='),
casting='equiv', copy=False)
value = _np.reshape(array1d, shape, self.order)
elif (self._fastpath == _FASTPATH_IMMEDIATE and
raw_item.is_immediate and
raw_value.shape[0] * 8 == self.format[0][1]):
value = raw_item.immediate_value
if self.format[0][0] == 'i':
top = 1 << (self.format[0][1] - 1)
if value >= top:
value -= 2 * top
else:
itemsize_bits = self.itemsize_bits
max_elements = raw_value.shape[0] * 8 // itemsize_bits
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
bits = elements * itemsize_bits
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
size_bytes = (bits + 7) // 8
raw_value = raw_value[-size_bytes:]
gen = self._read_bits(raw_value)
gen.send(None) # Initialisation of the generator
value = _np.array(self._load_recursive(shape, gen), self._internal_dtype)
if len(self.shape) == 0 and isinstance(value, _np.ndarray):
# Convert zero-dimensional array to scalar
value = value[()]
elif len(self.shape) == 1 and self.format == [('c', 8)]:
# Convert array of characters to a string
value = _bytes_to_str_ascii(b''.join(value))
self.value = value
def _num_elements(self):
if isinstance(self.value, _np.ndarray):
return self.value.size
cur = self.value
ans = 1
for size in self.shape:
ans *= len(cur)
if ans == 0:
return ans # Prevents IndexError below
cur = cur[0]
return ans
def _transform_value(self):
"""Mangle the value into a numpy array. This does several things:
- If it is stringlike (bytes or unicode) and the expected shape is
1D, it is split into an array of characters.
- It is coerced to a numpy array, enforcing the dtype and order. Where
possible, no copy is made.
- The shape is checked against the expected shape.
Returns
-------
value : :py:class:`numpy.ndarray`
The transformed value
Raises
------
ValueError
if the value is `None`
ValueError
if the value has the wrong shape
TypeError
if numpy raised it when trying to convert the value
"""
value = self.value
if value is None:
raise ValueError('Cannot send a value of None')
if (isinstance(value, (six.binary_type, six.text_type)) and
len(self.shape) == 1):
# This is complicated by Python 3 not providing a simple way to
# turn a bytes object into a list of one-byte objects, the way
# list(str) does.
value = [self.value[i : i + 1] for i in range(len(self.value))]
value = _np.array(value, dtype=self._internal_dtype, order=self.order, copy=False)
if not self.compatible_shape(value.shape):
raise ValueError('Value has shape {}, expected {}'.format(value.shape, self.shape))
return value
def to_buffer(self):
"""Returns an object that implements the buffer protocol for the value.
It can be either the original value (on the numpy fast path), or a new
temporary object.
"""
value = self._transform_value()
if self._fastpath != _FASTPATH_NUMPY:
bit_length = self.itemsize_bits * self._num_elements()
out = bytearray((bit_length + 7) // 8)
gen = self._write_bits(out)
gen.send(None) # Initialise the generator
# If it's a scalar, unpack it. That way, the input to the
# final level of recursion in _store_recursive is always
# the scalar rather than the 0D array.
if len(self.shape) == 0:
value = value[()]
self._store_recursive(len(self.shape), value, gen)
gen.close()
return out
else:
if self.order == 'F':
# numpy doesn't allow buffer protocol to be used on arrays that
# aren't C-contiguous, but transposition just fiddles the
# strides of the view without creating a new array.
value = value.transpose()
return value
|
ska-sa/spead2
|
spead2/__init__.py
|
Item._load_recursive
|
python
|
def _load_recursive(self, shape, gen):
if len(shape) > 0:
ans = []
for i in range(shape[0]):
ans.append(self._load_recursive(shape[1:], gen))
else:
fields = []
for code, length in self.format:
field = None
raw = gen.send(length)
if code == 'u':
field = raw
elif code == 'i':
field = raw
# Interpret as 2's complement
if field >= 1 << (length - 1):
field -= 1 << length
elif code == 'b':
field = bool(raw)
elif code == 'c':
field = six.int2byte(raw)
elif code == 'f':
if length == 32:
field = _np.uint32(raw).view(_np.float32)
elif length == 64:
field = _np.uint64(raw).view(_np.float64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
fields.append(field)
if len(fields) == 1:
ans = fields[0]
else:
ans = tuple(fields)
return ans
|
Recursively create a multidimensional array (as lists of lists)
from a bit generator.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L414-L452
|
[
"def _load_recursive(self, shape, gen):\n \"\"\"Recursively create a multidimensional array (as lists of lists)\n from a bit generator.\n \"\"\"\n if len(shape) > 0:\n ans = []\n for i in range(shape[0]):\n ans.append(self._load_recursive(shape[1:], gen))\n else:\n fields = []\n for code, length in self.format:\n field = None\n raw = gen.send(length)\n if code == 'u':\n field = raw\n elif code == 'i':\n field = raw\n # Interpret as 2's complement\n if field >= 1 << (length - 1):\n field -= 1 << length\n elif code == 'b':\n field = bool(raw)\n elif code == 'c':\n field = six.int2byte(raw)\n elif code == 'f':\n if length == 32:\n field = _np.uint32(raw).view(_np.float32)\n elif length == 64:\n field = _np.uint64(raw).view(_np.float64)\n else:\n raise ValueError('unhandled float length {0}'.format((code, length)))\n else:\n raise ValueError('unhandled format {0}'.format((code, length)))\n fields.append(field)\n if len(fields) == 1:\n ans = fields[0]\n else:\n ans = tuple(fields)\n return ans\n"
] |
class Item(Descriptor):
"""A SPEAD item with a value and a version number.
Parameters
----------
value : object, optional
Initial value
"""
def __init__(self, *args, **kw):
value = kw.pop('value', None)
super(Item, self).__init__(*args, **kw)
self._value = value
self.version = 1 #: Version number
@property
def value(self):
"""Current value. Assigning to this will increment the version number.
Assigning `None` will raise `ValueError` because there is no way to
encode this using SPEAD.
.. warning:: If you modify a mutable value in-place, the change will
not be detected, and the new value will not be transmitted. In this
case, either manually increment the version number, or reassign the
value.
"""
return self._value
@value.setter
def value(self, new_value):
if new_value is None:
raise ValueError("Item value cannot be set to None")
self._value = new_value
self.version += 1
@classmethod
def _read_bits(cls, raw_value):
"""Generator that takes a memory view and provides bitfields from it.
After creating the generator, call `send(None)` to initialise it, and
thereafter call `send(need_bits)` to obtain that many bits.
"""
have_bits = 0
bits = 0
byte_source = iter(raw_value)
result = 0
while True:
need_bits = yield result
while have_bits < need_bits:
try:
bits = (bits << 8) | int(next(byte_source))
have_bits += 8
except StopIteration:
return
result = int(bits >> (have_bits - need_bits))
bits &= (1 << (have_bits - need_bits)) - 1
have_bits -= need_bits
@classmethod
def _write_bits(cls, array):
"""Generator that fills a `bytearray` with provided bits. After
creating the generator, call `send(None)` to initialise it, and
thereafter call `send((value, bits))` to add that many bits into
the array. You must call `close()` to flush any partial bytes."""
pos = 0
current = 0 # bits not yet written into array
current_bits = 0
try:
while True:
(value, bits) = yield
if value < 0 or value >= (1 << bits):
raise ValueError('Value is out of range for number of bits')
current = (current << bits) | value
current_bits += bits
while current_bits >= 8:
array[pos] = current >> (current_bits - 8)
current &= (1 << (current_bits - 8)) - 1
current_bits -= 8
pos += 1
except GeneratorExit:
if current_bits > 0:
current <<= (8 - current_bits)
array[pos] = current
def _store_recursive(self, dims, value, gen):
if dims > 0:
for sub in value:
self._store_recursive(dims - 1, sub, gen)
else:
if len(self.format) == 1:
value = (value,)
for (code, length), field in zip(self.format, value):
raw = None
if code == 'u':
raw = int(field)
if raw < 0 or raw >= (1 << length):
raise ValueError('{} is out of range for u{}'.format(raw, length))
elif code == 'i':
top_bit = 1 << (length - 1)
raw = int(field)
if raw < -top_bit or raw >= top_bit:
raise ValueError('{} is out of range for i{}'.format(field, length))
# convert to 2's complement
if raw < 0:
raw += 2 * top_bit
elif code == 'b':
raw = 1 if field else 0
elif code == 'c':
raw = ord(field)
elif code == 'f':
if length == 32:
raw = _np.float32(field).view(_np.uint32)
elif length == 64:
raw = _np.float64(field).view(_np.uint64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
gen.send((raw, length))
def set_from_raw(self, raw_item):
raw_value = _np.array(raw_item, _np.uint8, copy=False)
if self._fastpath == _FASTPATH_NUMPY:
max_elements = raw_value.shape[0] // self._internal_dtype.itemsize
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
size_bytes = elements * self._internal_dtype.itemsize
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
# For some reason, np.frombuffer doesn't work on memoryview, but np.array does
array1d = raw_value[-size_bytes:]
else:
array1d = raw_value[:size_bytes]
array1d = array1d.view(dtype=self._internal_dtype)
# Force to native endian
array1d = array1d.astype(self._internal_dtype.newbyteorder('='),
casting='equiv', copy=False)
value = _np.reshape(array1d, shape, self.order)
elif (self._fastpath == _FASTPATH_IMMEDIATE and
raw_item.is_immediate and
raw_value.shape[0] * 8 == self.format[0][1]):
value = raw_item.immediate_value
if self.format[0][0] == 'i':
top = 1 << (self.format[0][1] - 1)
if value >= top:
value -= 2 * top
else:
itemsize_bits = self.itemsize_bits
max_elements = raw_value.shape[0] * 8 // itemsize_bits
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
bits = elements * itemsize_bits
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
size_bytes = (bits + 7) // 8
raw_value = raw_value[-size_bytes:]
gen = self._read_bits(raw_value)
gen.send(None) # Initialisation of the generator
value = _np.array(self._load_recursive(shape, gen), self._internal_dtype)
if len(self.shape) == 0 and isinstance(value, _np.ndarray):
# Convert zero-dimensional array to scalar
value = value[()]
elif len(self.shape) == 1 and self.format == [('c', 8)]:
# Convert array of characters to a string
value = _bytes_to_str_ascii(b''.join(value))
self.value = value
def _num_elements(self):
if isinstance(self.value, _np.ndarray):
return self.value.size
cur = self.value
ans = 1
for size in self.shape:
ans *= len(cur)
if ans == 0:
return ans # Prevents IndexError below
cur = cur[0]
return ans
def _transform_value(self):
"""Mangle the value into a numpy array. This does several things:
- If it is stringlike (bytes or unicode) and the expected shape is
1D, it is split into an array of characters.
- It is coerced to a numpy array, enforcing the dtype and order. Where
possible, no copy is made.
- The shape is checked against the expected shape.
Returns
-------
value : :py:class:`numpy.ndarray`
The transformed value
Raises
------
ValueError
if the value is `None`
ValueError
if the value has the wrong shape
TypeError
if numpy raised it when trying to convert the value
"""
value = self.value
if value is None:
raise ValueError('Cannot send a value of None')
if (isinstance(value, (six.binary_type, six.text_type)) and
len(self.shape) == 1):
# This is complicated by Python 3 not providing a simple way to
# turn a bytes object into a list of one-byte objects, the way
# list(str) does.
value = [self.value[i : i + 1] for i in range(len(self.value))]
value = _np.array(value, dtype=self._internal_dtype, order=self.order, copy=False)
if not self.compatible_shape(value.shape):
raise ValueError('Value has shape {}, expected {}'.format(value.shape, self.shape))
return value
def to_buffer(self):
"""Returns an object that implements the buffer protocol for the value.
It can be either the original value (on the numpy fast path), or a new
temporary object.
"""
value = self._transform_value()
if self._fastpath != _FASTPATH_NUMPY:
bit_length = self.itemsize_bits * self._num_elements()
out = bytearray((bit_length + 7) // 8)
gen = self._write_bits(out)
gen.send(None) # Initialise the generator
# If it's a scalar, unpack it. That way, the input to the
# final level of recursion in _store_recursive is always
# the scalar rather than the 0D array.
if len(self.shape) == 0:
value = value[()]
self._store_recursive(len(self.shape), value, gen)
gen.close()
return out
else:
if self.order == 'F':
# numpy doesn't allow buffer protocol to be used on arrays that
# aren't C-contiguous, but transposition just fiddles the
# strides of the view without creating a new array.
value = value.transpose()
return value
|
ska-sa/spead2
|
spead2/__init__.py
|
Item._transform_value
|
python
|
def _transform_value(self):
value = self.value
if value is None:
raise ValueError('Cannot send a value of None')
if (isinstance(value, (six.binary_type, six.text_type)) and
len(self.shape) == 1):
# This is complicated by Python 3 not providing a simple way to
# turn a bytes object into a list of one-byte objects, the way
# list(str) does.
value = [self.value[i : i + 1] for i in range(len(self.value))]
value = _np.array(value, dtype=self._internal_dtype, order=self.order, copy=False)
if not self.compatible_shape(value.shape):
raise ValueError('Value has shape {}, expected {}'.format(value.shape, self.shape))
return value
|
Mangle the value into a numpy array. This does several things:
- If it is stringlike (bytes or unicode) and the expected shape is
1D, it is split into an array of characters.
- It is coerced to a numpy array, enforcing the dtype and order. Where
possible, no copy is made.
- The shape is checked against the expected shape.
Returns
-------
value : :py:class:`numpy.ndarray`
The transformed value
Raises
------
ValueError
if the value is `None`
ValueError
if the value has the wrong shape
TypeError
if numpy raised it when trying to convert the value
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L557-L592
|
[
"def compatible_shape(self, shape):\n \"\"\"Determine whether `shape` is compatible with the (possibly\n variable-sized) shape for this descriptor\"\"\"\n if len(shape) != len(self.shape):\n return False\n for x, y in zip(self.shape, shape):\n if x is not None and x != y:\n return False\n return True\n"
] |
class Item(Descriptor):
"""A SPEAD item with a value and a version number.
Parameters
----------
value : object, optional
Initial value
"""
def __init__(self, *args, **kw):
value = kw.pop('value', None)
super(Item, self).__init__(*args, **kw)
self._value = value
self.version = 1 #: Version number
@property
def value(self):
"""Current value. Assigning to this will increment the version number.
Assigning `None` will raise `ValueError` because there is no way to
encode this using SPEAD.
.. warning:: If you modify a mutable value in-place, the change will
not be detected, and the new value will not be transmitted. In this
case, either manually increment the version number, or reassign the
value.
"""
return self._value
@value.setter
def value(self, new_value):
if new_value is None:
raise ValueError("Item value cannot be set to None")
self._value = new_value
self.version += 1
@classmethod
def _read_bits(cls, raw_value):
"""Generator that takes a memory view and provides bitfields from it.
After creating the generator, call `send(None)` to initialise it, and
thereafter call `send(need_bits)` to obtain that many bits.
"""
have_bits = 0
bits = 0
byte_source = iter(raw_value)
result = 0
while True:
need_bits = yield result
while have_bits < need_bits:
try:
bits = (bits << 8) | int(next(byte_source))
have_bits += 8
except StopIteration:
return
result = int(bits >> (have_bits - need_bits))
bits &= (1 << (have_bits - need_bits)) - 1
have_bits -= need_bits
@classmethod
def _write_bits(cls, array):
"""Generator that fills a `bytearray` with provided bits. After
creating the generator, call `send(None)` to initialise it, and
thereafter call `send((value, bits))` to add that many bits into
the array. You must call `close()` to flush any partial bytes."""
pos = 0
current = 0 # bits not yet written into array
current_bits = 0
try:
while True:
(value, bits) = yield
if value < 0 or value >= (1 << bits):
raise ValueError('Value is out of range for number of bits')
current = (current << bits) | value
current_bits += bits
while current_bits >= 8:
array[pos] = current >> (current_bits - 8)
current &= (1 << (current_bits - 8)) - 1
current_bits -= 8
pos += 1
except GeneratorExit:
if current_bits > 0:
current <<= (8 - current_bits)
array[pos] = current
def _load_recursive(self, shape, gen):
"""Recursively create a multidimensional array (as lists of lists)
from a bit generator.
"""
if len(shape) > 0:
ans = []
for i in range(shape[0]):
ans.append(self._load_recursive(shape[1:], gen))
else:
fields = []
for code, length in self.format:
field = None
raw = gen.send(length)
if code == 'u':
field = raw
elif code == 'i':
field = raw
# Interpret as 2's complement
if field >= 1 << (length - 1):
field -= 1 << length
elif code == 'b':
field = bool(raw)
elif code == 'c':
field = six.int2byte(raw)
elif code == 'f':
if length == 32:
field = _np.uint32(raw).view(_np.float32)
elif length == 64:
field = _np.uint64(raw).view(_np.float64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
fields.append(field)
if len(fields) == 1:
ans = fields[0]
else:
ans = tuple(fields)
return ans
def _store_recursive(self, dims, value, gen):
if dims > 0:
for sub in value:
self._store_recursive(dims - 1, sub, gen)
else:
if len(self.format) == 1:
value = (value,)
for (code, length), field in zip(self.format, value):
raw = None
if code == 'u':
raw = int(field)
if raw < 0 or raw >= (1 << length):
raise ValueError('{} is out of range for u{}'.format(raw, length))
elif code == 'i':
top_bit = 1 << (length - 1)
raw = int(field)
if raw < -top_bit or raw >= top_bit:
raise ValueError('{} is out of range for i{}'.format(field, length))
# convert to 2's complement
if raw < 0:
raw += 2 * top_bit
elif code == 'b':
raw = 1 if field else 0
elif code == 'c':
raw = ord(field)
elif code == 'f':
if length == 32:
raw = _np.float32(field).view(_np.uint32)
elif length == 64:
raw = _np.float64(field).view(_np.uint64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
gen.send((raw, length))
def set_from_raw(self, raw_item):
raw_value = _np.array(raw_item, _np.uint8, copy=False)
if self._fastpath == _FASTPATH_NUMPY:
max_elements = raw_value.shape[0] // self._internal_dtype.itemsize
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
size_bytes = elements * self._internal_dtype.itemsize
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
# For some reason, np.frombuffer doesn't work on memoryview, but np.array does
array1d = raw_value[-size_bytes:]
else:
array1d = raw_value[:size_bytes]
array1d = array1d.view(dtype=self._internal_dtype)
# Force to native endian
array1d = array1d.astype(self._internal_dtype.newbyteorder('='),
casting='equiv', copy=False)
value = _np.reshape(array1d, shape, self.order)
elif (self._fastpath == _FASTPATH_IMMEDIATE and
raw_item.is_immediate and
raw_value.shape[0] * 8 == self.format[0][1]):
value = raw_item.immediate_value
if self.format[0][0] == 'i':
top = 1 << (self.format[0][1] - 1)
if value >= top:
value -= 2 * top
else:
itemsize_bits = self.itemsize_bits
max_elements = raw_value.shape[0] * 8 // itemsize_bits
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
bits = elements * itemsize_bits
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
size_bytes = (bits + 7) // 8
raw_value = raw_value[-size_bytes:]
gen = self._read_bits(raw_value)
gen.send(None) # Initialisation of the generator
value = _np.array(self._load_recursive(shape, gen), self._internal_dtype)
if len(self.shape) == 0 and isinstance(value, _np.ndarray):
# Convert zero-dimensional array to scalar
value = value[()]
elif len(self.shape) == 1 and self.format == [('c', 8)]:
# Convert array of characters to a string
value = _bytes_to_str_ascii(b''.join(value))
self.value = value
def _num_elements(self):
if isinstance(self.value, _np.ndarray):
return self.value.size
cur = self.value
ans = 1
for size in self.shape:
ans *= len(cur)
if ans == 0:
return ans # Prevents IndexError below
cur = cur[0]
return ans
def to_buffer(self):
"""Returns an object that implements the buffer protocol for the value.
It can be either the original value (on the numpy fast path), or a new
temporary object.
"""
value = self._transform_value()
if self._fastpath != _FASTPATH_NUMPY:
bit_length = self.itemsize_bits * self._num_elements()
out = bytearray((bit_length + 7) // 8)
gen = self._write_bits(out)
gen.send(None) # Initialise the generator
# If it's a scalar, unpack it. That way, the input to the
# final level of recursion in _store_recursive is always
# the scalar rather than the 0D array.
if len(self.shape) == 0:
value = value[()]
self._store_recursive(len(self.shape), value, gen)
gen.close()
return out
else:
if self.order == 'F':
# numpy doesn't allow buffer protocol to be used on arrays that
# aren't C-contiguous, but transposition just fiddles the
# strides of the view without creating a new array.
value = value.transpose()
return value
|
ska-sa/spead2
|
spead2/__init__.py
|
Item.to_buffer
|
python
|
def to_buffer(self):
    """Return an object implementing the buffer protocol for the value.

    On the numpy fast path this is the (transformed) value array itself,
    possibly a transposed view; otherwise a new temporary ``bytearray``
    is built by packing each element bit-by-bit via ``_write_bits``.
    """
    value = self._transform_value()
    if self._fastpath != _FASTPATH_NUMPY:
        # Slow path: pack the nested value into a fresh bytearray,
        # one bitfield at a time, rounding the total up to whole bytes.
        bit_length = self.itemsize_bits * self._num_elements()
        out = bytearray((bit_length + 7) // 8)
        gen = self._write_bits(out)
        gen.send(None)  # Initialise the generator
        # If it's a scalar, unpack it. That way, the input to the
        # final level of recursion in _store_recursive is always
        # the scalar rather than the 0D array.
        if len(self.shape) == 0:
            value = value[()]
        self._store_recursive(len(self.shape), value, gen)
        gen.close()  # flush any partial trailing byte
        return out
    else:
        if self.order == 'F':
            # numpy doesn't allow buffer protocol to be used on arrays that
            # aren't C-contiguous, but transposition just fiddles the
            # strides of the view without creating a new array.
            value = value.transpose()
        return value
|
Returns an object that implements the buffer protocol for the value.
It can be either the original value (on the numpy fast path), or a new
temporary object.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L594-L619
|
[
"def _write_bits(cls, array):\n \"\"\"Generator that fills a `bytearray` with provided bits. After\n creating the generator, call `send(None)` to initialise it, and\n thereafter call `send((value, bits))` to add that many bits into\n the array. You must call `close()` to flush any partial bytes.\"\"\"\n pos = 0\n current = 0 # bits not yet written into array\n current_bits = 0\n try:\n while True:\n (value, bits) = yield\n if value < 0 or value >= (1 << bits):\n raise ValueError('Value is out of range for number of bits')\n current = (current << bits) | value\n current_bits += bits\n while current_bits >= 8:\n array[pos] = current >> (current_bits - 8)\n current &= (1 << (current_bits - 8)) - 1\n current_bits -= 8\n pos += 1\n except GeneratorExit:\n if current_bits > 0:\n current <<= (8 - current_bits)\n array[pos] = current\n",
"def _store_recursive(self, dims, value, gen):\n if dims > 0:\n for sub in value:\n self._store_recursive(dims - 1, sub, gen)\n else:\n if len(self.format) == 1:\n value = (value,)\n for (code, length), field in zip(self.format, value):\n raw = None\n if code == 'u':\n raw = int(field)\n if raw < 0 or raw >= (1 << length):\n raise ValueError('{} is out of range for u{}'.format(raw, length))\n elif code == 'i':\n top_bit = 1 << (length - 1)\n raw = int(field)\n if raw < -top_bit or raw >= top_bit:\n raise ValueError('{} is out of range for i{}'.format(field, length))\n # convert to 2's complement\n if raw < 0:\n raw += 2 * top_bit\n elif code == 'b':\n raw = 1 if field else 0\n elif code == 'c':\n raw = ord(field)\n elif code == 'f':\n if length == 32:\n raw = _np.float32(field).view(_np.uint32)\n elif length == 64:\n raw = _np.float64(field).view(_np.uint64)\n else:\n raise ValueError('unhandled float length {0}'.format((code, length)))\n else:\n raise ValueError('unhandled format {0}'.format((code, length)))\n gen.send((raw, length))\n",
"def _num_elements(self):\n if isinstance(self.value, _np.ndarray):\n return self.value.size\n cur = self.value\n ans = 1\n for size in self.shape:\n ans *= len(cur)\n if ans == 0:\n return ans # Prevents IndexError below\n cur = cur[0]\n return ans\n",
"def _transform_value(self):\n \"\"\"Mangle the value into a numpy array. This does several things:\n\n - If it is stringlike (bytes or unicode) and the expected shape is\n 1D, it is split into an array of characters.\n - It is coerced to a numpy array, enforcing the dtype and order. Where\n possible, no copy is made.\n - The shape is checked against the expected shape.\n\n Returns\n -------\n value : :py:class:`numpy.ndarray`\n The transformed value\n\n Raises\n ------\n ValueError\n if the value is `None`\n ValueError\n if the value has the wrong shape\n TypeError\n if numpy raised it when trying to convert the value\n \"\"\"\n value = self.value\n if value is None:\n raise ValueError('Cannot send a value of None')\n if (isinstance(value, (six.binary_type, six.text_type)) and\n len(self.shape) == 1):\n # This is complicated by Python 3 not providing a simple way to\n # turn a bytes object into a list of one-byte objects, the way\n # list(str) does.\n value = [self.value[i : i + 1] for i in range(len(self.value))]\n value = _np.array(value, dtype=self._internal_dtype, order=self.order, copy=False)\n if not self.compatible_shape(value.shape):\n raise ValueError('Value has shape {}, expected {}'.format(value.shape, self.shape))\n return value\n"
] |
class Item(Descriptor):
"""A SPEAD item with a value and a version number.
Parameters
----------
value : object, optional
Initial value
"""
def __init__(self, *args, **kw):
value = kw.pop('value', None)
super(Item, self).__init__(*args, **kw)
self._value = value
self.version = 1 #: Version number
@property
def value(self):
"""Current value. Assigning to this will increment the version number.
Assigning `None` will raise `ValueError` because there is no way to
encode this using SPEAD.
.. warning:: If you modify a mutable value in-place, the change will
not be detected, and the new value will not be transmitted. In this
case, either manually increment the version number, or reassign the
value.
"""
return self._value
@value.setter
def value(self, new_value):
if new_value is None:
raise ValueError("Item value cannot be set to None")
self._value = new_value
self.version += 1
@classmethod
def _read_bits(cls, raw_value):
"""Generator that takes a memory view and provides bitfields from it.
After creating the generator, call `send(None)` to initialise it, and
thereafter call `send(need_bits)` to obtain that many bits.
"""
have_bits = 0
bits = 0
byte_source = iter(raw_value)
result = 0
while True:
need_bits = yield result
while have_bits < need_bits:
try:
bits = (bits << 8) | int(next(byte_source))
have_bits += 8
except StopIteration:
return
result = int(bits >> (have_bits - need_bits))
bits &= (1 << (have_bits - need_bits)) - 1
have_bits -= need_bits
@classmethod
def _write_bits(cls, array):
"""Generator that fills a `bytearray` with provided bits. After
creating the generator, call `send(None)` to initialise it, and
thereafter call `send((value, bits))` to add that many bits into
the array. You must call `close()` to flush any partial bytes."""
pos = 0
current = 0 # bits not yet written into array
current_bits = 0
try:
while True:
(value, bits) = yield
if value < 0 or value >= (1 << bits):
raise ValueError('Value is out of range for number of bits')
current = (current << bits) | value
current_bits += bits
while current_bits >= 8:
array[pos] = current >> (current_bits - 8)
current &= (1 << (current_bits - 8)) - 1
current_bits -= 8
pos += 1
except GeneratorExit:
if current_bits > 0:
current <<= (8 - current_bits)
array[pos] = current
def _load_recursive(self, shape, gen):
"""Recursively create a multidimensional array (as lists of lists)
from a bit generator.
"""
if len(shape) > 0:
ans = []
for i in range(shape[0]):
ans.append(self._load_recursive(shape[1:], gen))
else:
fields = []
for code, length in self.format:
field = None
raw = gen.send(length)
if code == 'u':
field = raw
elif code == 'i':
field = raw
# Interpret as 2's complement
if field >= 1 << (length - 1):
field -= 1 << length
elif code == 'b':
field = bool(raw)
elif code == 'c':
field = six.int2byte(raw)
elif code == 'f':
if length == 32:
field = _np.uint32(raw).view(_np.float32)
elif length == 64:
field = _np.uint64(raw).view(_np.float64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
fields.append(field)
if len(fields) == 1:
ans = fields[0]
else:
ans = tuple(fields)
return ans
def _store_recursive(self, dims, value, gen):
if dims > 0:
for sub in value:
self._store_recursive(dims - 1, sub, gen)
else:
if len(self.format) == 1:
value = (value,)
for (code, length), field in zip(self.format, value):
raw = None
if code == 'u':
raw = int(field)
if raw < 0 or raw >= (1 << length):
raise ValueError('{} is out of range for u{}'.format(raw, length))
elif code == 'i':
top_bit = 1 << (length - 1)
raw = int(field)
if raw < -top_bit or raw >= top_bit:
raise ValueError('{} is out of range for i{}'.format(field, length))
# convert to 2's complement
if raw < 0:
raw += 2 * top_bit
elif code == 'b':
raw = 1 if field else 0
elif code == 'c':
raw = ord(field)
elif code == 'f':
if length == 32:
raw = _np.float32(field).view(_np.uint32)
elif length == 64:
raw = _np.float64(field).view(_np.uint64)
else:
raise ValueError('unhandled float length {0}'.format((code, length)))
else:
raise ValueError('unhandled format {0}'.format((code, length)))
gen.send((raw, length))
def set_from_raw(self, raw_item):
raw_value = _np.array(raw_item, _np.uint8, copy=False)
if self._fastpath == _FASTPATH_NUMPY:
max_elements = raw_value.shape[0] // self._internal_dtype.itemsize
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
size_bytes = elements * self._internal_dtype.itemsize
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
# For some reason, np.frombuffer doesn't work on memoryview, but np.array does
array1d = raw_value[-size_bytes:]
else:
array1d = raw_value[:size_bytes]
array1d = array1d.view(dtype=self._internal_dtype)
# Force to native endian
array1d = array1d.astype(self._internal_dtype.newbyteorder('='),
casting='equiv', copy=False)
value = _np.reshape(array1d, shape, self.order)
elif (self._fastpath == _FASTPATH_IMMEDIATE and
raw_item.is_immediate and
raw_value.shape[0] * 8 == self.format[0][1]):
value = raw_item.immediate_value
if self.format[0][0] == 'i':
top = 1 << (self.format[0][1] - 1)
if value >= top:
value -= 2 * top
else:
itemsize_bits = self.itemsize_bits
max_elements = raw_value.shape[0] * 8 // itemsize_bits
shape = self.dynamic_shape(max_elements)
elements = _shape_elements(shape)
bits = elements * itemsize_bits
if elements > max_elements:
raise ValueError('Item {} has too few elements for shape ({} < {})'.format(
self.name, max_elements, elements))
if raw_item.is_immediate:
# Immediates get head padding instead of tail padding
size_bytes = (bits + 7) // 8
raw_value = raw_value[-size_bytes:]
gen = self._read_bits(raw_value)
gen.send(None) # Initialisation of the generator
value = _np.array(self._load_recursive(shape, gen), self._internal_dtype)
if len(self.shape) == 0 and isinstance(value, _np.ndarray):
# Convert zero-dimensional array to scalar
value = value[()]
elif len(self.shape) == 1 and self.format == [('c', 8)]:
# Convert array of characters to a string
value = _bytes_to_str_ascii(b''.join(value))
self.value = value
def _num_elements(self):
if isinstance(self.value, _np.ndarray):
return self.value.size
cur = self.value
ans = 1
for size in self.shape:
ans *= len(cur)
if ans == 0:
return ans # Prevents IndexError below
cur = cur[0]
return ans
def _transform_value(self):
"""Mangle the value into a numpy array. This does several things:
- If it is stringlike (bytes or unicode) and the expected shape is
1D, it is split into an array of characters.
- It is coerced to a numpy array, enforcing the dtype and order. Where
possible, no copy is made.
- The shape is checked against the expected shape.
Returns
-------
value : :py:class:`numpy.ndarray`
The transformed value
Raises
------
ValueError
if the value is `None`
ValueError
if the value has the wrong shape
TypeError
if numpy raised it when trying to convert the value
"""
value = self.value
if value is None:
raise ValueError('Cannot send a value of None')
if (isinstance(value, (six.binary_type, six.text_type)) and
len(self.shape) == 1):
# This is complicated by Python 3 not providing a simple way to
# turn a bytes object into a list of one-byte objects, the way
# list(str) does.
value = [self.value[i : i + 1] for i in range(len(self.value))]
value = _np.array(value, dtype=self._internal_dtype, order=self.order, copy=False)
if not self.compatible_shape(value.shape):
raise ValueError('Value has shape {}, expected {}'.format(value.shape, self.shape))
return value
|
ska-sa/spead2
|
spead2/__init__.py
|
ItemGroup.add_item
|
python
|
def add_item(self, *args, **kwargs):
    """Construct an :py:class:`Item` from the given arguments and add it
    to the group, auto-assigning a free ID when none was supplied.

    Returns the newly created item.
    """
    new_item = Item(*args, **kwargs)
    if new_item.id is None:
        # Scan upward from the first unreserved ID until a free slot
        # is found.
        candidate = _UNRESERVED_ID
        while candidate in self._by_id:
            candidate += 1
        new_item.id = candidate
    self._add_item(new_item)
    return new_item
|
Add a new item to the group. The parameters are used to construct an
:py:class:`Item`. If `id` is `None`, it will be automatically populated
with an ID that is not already in use.
See the class documentation for the behaviour when the name or ID
collides with an existing one. In addition, if the item descriptor is
identical to an existing one and a value, this value is assigned to
the existing item.
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L692-L708
|
[
"def _add_item(self, item):\n try:\n old = self._by_id[item.id]\n except KeyError:\n old = None\n try:\n old_by_name = self._by_name[item.name]\n except KeyError:\n old_by_name = None\n\n # Check if this is just the same thing\n if (old is not None and\n old.name == item.name and\n old.description == item.description and\n old.shape == item.shape and\n old.dtype == item.dtype and\n old.order == item.order and\n old.format == item.format):\n # Descriptor is the same, so just transfer the value. If the value\n # is None, then we've only been given a descriptor to add.\n if item.value is not None:\n old.value = item.value\n return\n\n if old is not None or old_by_name is not None:\n _logger.info('Descriptor replacement for ID %#x, name %s', item.id, item.name)\n # Ensure the version number is seen to increment, regardless of\n # whether accessed by name or ID.\n new_version = item.version\n if old is not None:\n new_version = max(new_version, old.version + 1)\n if old_by_name is not None:\n new_version = max(new_version, old_by_name.version + 1)\n item.version = new_version\n\n # Remove previous items, under the same name of ID\n if old is not None:\n self._remove_item(old)\n if old_by_name is not None and old_by_name is not old:\n self._remove_item(old_by_name)\n\n # Install new item\n self._by_id[item.id] = item\n self._by_name[item.name] = item\n"
] |
class ItemGroup(object):
"""
Items are collected into sets called *item groups*, which can be indexed by
either item ID or item name.
There are some subtleties with respect to re-issued item descriptors. There are
two cases:
1. The item descriptor is identical to a previous seen one. In this case, no
action is taken.
2. Otherwise, any existing items with the same name or ID (which could be two
different items) are dropped, the new item is added, and its value
becomes ``None``. The version is set to be higher than version on an item
that was removed, so that consumers who only check the version will
detect the change.
"""
def __init__(self):
self._by_id = {}
self._by_name = {}
def _remove_item(self, item):
del self._by_id[item.id]
del self._by_name[item.name]
def _add_item(self, item):
try:
old = self._by_id[item.id]
except KeyError:
old = None
try:
old_by_name = self._by_name[item.name]
except KeyError:
old_by_name = None
# Check if this is just the same thing
if (old is not None and
old.name == item.name and
old.description == item.description and
old.shape == item.shape and
old.dtype == item.dtype and
old.order == item.order and
old.format == item.format):
# Descriptor is the same, so just transfer the value. If the value
# is None, then we've only been given a descriptor to add.
if item.value is not None:
old.value = item.value
return
if old is not None or old_by_name is not None:
_logger.info('Descriptor replacement for ID %#x, name %s', item.id, item.name)
# Ensure the version number is seen to increment, regardless of
# whether accessed by name or ID.
new_version = item.version
if old is not None:
new_version = max(new_version, old.version + 1)
if old_by_name is not None:
new_version = max(new_version, old_by_name.version + 1)
item.version = new_version
# Remove previous items, under the same name of ID
if old is not None:
self._remove_item(old)
if old_by_name is not None and old_by_name is not old:
self._remove_item(old_by_name)
# Install new item
self._by_id[item.id] = item
self._by_name[item.name] = item
def __getitem__(self, key):
"""Dictionary-style lookup by either ID or name"""
if isinstance(key, _numbers.Integral):
return self._by_id[key]
else:
return self._by_name[key]
def __contains__(self, key):
"""Dictionary-style membership test by either ID or name"""
if isinstance(key, _numbers.Integral):
return key in self._by_id
else:
return key in self._by_name
def keys(self):
"""Item names"""
return self._by_name.keys()
def ids(self):
"""Item IDs"""
return self._by_id.keys()
def values(self):
"""Item values"""
return self._by_name.values()
def items(self):
"""Dictionary style (name, value) pairs"""
return self._by_name.items()
def __len__(self):
"""Number of items"""
return len(self._by_name)
def update(self, heap):
"""Update the item descriptors and items from an incoming heap.
Parameters
----------
heap : :class:`spead2.recv.Heap`
Incoming heap
Returns
-------
dict
Items that have been updated from this heap, indexed by name
"""
for descriptor in heap.get_descriptors():
item = Item.from_raw(descriptor, flavour=heap.flavour)
self._add_item(item)
updated_items = {}
for raw_item in heap.get_items():
if raw_item.id <= STREAM_CTRL_ID:
continue # Special fields, not real items
try:
item = self._by_id[raw_item.id]
except KeyError:
_logger.warning('Item with ID %#x received but there is no descriptor', raw_item.id)
else:
item.set_from_raw(raw_item)
item.version += 1
updated_items[item.name] = item
return updated_items
|
ska-sa/spead2
|
spead2/__init__.py
|
ItemGroup.update
|
python
|
def update(self, heap):
    """Update the item descriptors and items from an incoming heap.

    First registers every descriptor carried by the heap, then decodes
    each raw item for which a descriptor is known, bumping that item's
    version so consumers can detect the change.

    Parameters
    ----------
    heap : :class:`spead2.recv.Heap`
        Incoming heap

    Returns
    -------
    dict
        Items that have been updated from this heap, indexed by name
    """
    for descriptor in heap.get_descriptors():
        item = Item.from_raw(descriptor, flavour=heap.flavour)
        self._add_item(item)
    updated_items = {}
    for raw_item in heap.get_items():
        if raw_item.id <= STREAM_CTRL_ID:
            continue # Special fields, not real items
        try:
            item = self._by_id[raw_item.id]
        except KeyError:
            # Can't decode a value without its descriptor; skip it.
            _logger.warning('Item with ID %#x received but there is no descriptor', raw_item.id)
        else:
            item.set_from_raw(raw_item)
            # Force the version forward even if set_from_raw already did,
            # so a pure version check sees the update.
            item.version += 1
            updated_items[item.name] = item
    return updated_items
|
Update the item descriptors and items from an incoming heap.
Parameters
----------
heap : :class:`spead2.recv.Heap`
Incoming heap
Returns
-------
dict
Items that have been updated from this heap, indexed by name
|
train
|
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L744-L772
|
[
"def from_raw(cls, raw_descriptor, flavour):\n dtype = None\n format = None\n if raw_descriptor.numpy_header:\n header = _bytes_to_str_ascii(raw_descriptor.numpy_header)\n shape, order, dtype = cls._parse_numpy_header(header)\n if flavour.bug_compat & BUG_COMPAT_SWAP_ENDIAN:\n dtype = dtype.newbyteorder()\n else:\n shape = raw_descriptor.shape\n order = 'C'\n format = raw_descriptor.format\n return cls(\n raw_descriptor.id,\n _bytes_to_str_ascii(raw_descriptor.name),\n _bytes_to_str_ascii(raw_descriptor.description),\n shape, dtype, order, format)\n",
"def _add_item(self, item):\n try:\n old = self._by_id[item.id]\n except KeyError:\n old = None\n try:\n old_by_name = self._by_name[item.name]\n except KeyError:\n old_by_name = None\n\n # Check if this is just the same thing\n if (old is not None and\n old.name == item.name and\n old.description == item.description and\n old.shape == item.shape and\n old.dtype == item.dtype and\n old.order == item.order and\n old.format == item.format):\n # Descriptor is the same, so just transfer the value. If the value\n # is None, then we've only been given a descriptor to add.\n if item.value is not None:\n old.value = item.value\n return\n\n if old is not None or old_by_name is not None:\n _logger.info('Descriptor replacement for ID %#x, name %s', item.id, item.name)\n # Ensure the version number is seen to increment, regardless of\n # whether accessed by name or ID.\n new_version = item.version\n if old is not None:\n new_version = max(new_version, old.version + 1)\n if old_by_name is not None:\n new_version = max(new_version, old_by_name.version + 1)\n item.version = new_version\n\n # Remove previous items, under the same name of ID\n if old is not None:\n self._remove_item(old)\n if old_by_name is not None and old_by_name is not old:\n self._remove_item(old_by_name)\n\n # Install new item\n self._by_id[item.id] = item\n self._by_name[item.name] = item\n"
] |
class ItemGroup(object):
"""
Items are collected into sets called *item groups*, which can be indexed by
either item ID or item name.
There are some subtleties with respect to re-issued item descriptors. There are
two cases:
1. The item descriptor is identical to a previous seen one. In this case, no
action is taken.
2. Otherwise, any existing items with the same name or ID (which could be two
different items) are dropped, the new item is added, and its value
becomes ``None``. The version is set to be higher than version on an item
that was removed, so that consumers who only check the version will
detect the change.
"""
def __init__(self):
self._by_id = {}
self._by_name = {}
def _remove_item(self, item):
del self._by_id[item.id]
del self._by_name[item.name]
def _add_item(self, item):
try:
old = self._by_id[item.id]
except KeyError:
old = None
try:
old_by_name = self._by_name[item.name]
except KeyError:
old_by_name = None
# Check if this is just the same thing
if (old is not None and
old.name == item.name and
old.description == item.description and
old.shape == item.shape and
old.dtype == item.dtype and
old.order == item.order and
old.format == item.format):
# Descriptor is the same, so just transfer the value. If the value
# is None, then we've only been given a descriptor to add.
if item.value is not None:
old.value = item.value
return
if old is not None or old_by_name is not None:
_logger.info('Descriptor replacement for ID %#x, name %s', item.id, item.name)
# Ensure the version number is seen to increment, regardless of
# whether accessed by name or ID.
new_version = item.version
if old is not None:
new_version = max(new_version, old.version + 1)
if old_by_name is not None:
new_version = max(new_version, old_by_name.version + 1)
item.version = new_version
# Remove previous items, under the same name of ID
if old is not None:
self._remove_item(old)
if old_by_name is not None and old_by_name is not old:
self._remove_item(old_by_name)
# Install new item
self._by_id[item.id] = item
self._by_name[item.name] = item
def add_item(self, *args, **kwargs):
"""Add a new item to the group. The parameters are used to construct an
:py:class:`Item`. If `id` is `None`, it will be automatically populated
with an ID that is not already in use.
See the class documentation for the behaviour when the name or ID
collides with an existing one. In addition, if the item descriptor is
identical to an existing one and a value, this value is assigned to
the existing item.
"""
item = Item(*args, **kwargs)
if item.id is None:
item.id = _UNRESERVED_ID
while item.id in self._by_id:
item.id += 1
self._add_item(item)
return item
def __getitem__(self, key):
"""Dictionary-style lookup by either ID or name"""
if isinstance(key, _numbers.Integral):
return self._by_id[key]
else:
return self._by_name[key]
def __contains__(self, key):
"""Dictionary-style membership test by either ID or name"""
if isinstance(key, _numbers.Integral):
return key in self._by_id
else:
return key in self._by_name
def keys(self):
"""Item names"""
return self._by_name.keys()
def ids(self):
"""Item IDs"""
return self._by_id.keys()
def values(self):
"""Item values"""
return self._by_name.values()
def items(self):
"""Dictionary style (name, value) pairs"""
return self._by_name.items()
def __len__(self):
"""Number of items"""
return len(self._by_name)
|
quantmind/ccy
|
ccy/core/country.py
|
countries
|
python
|
def countries():
    '''
    Get the country dictionary from pytz, lazily built and cached.

    Returns a dict mapping upper-cased country codes (as provided by
    ``pytz.country_names``) to country names. If pytz cannot be
    imported the cached dict is simply left empty (best effort).
    '''
    global _countries
    if not _countries:
        v = {}
        _countries = v
        try:
            from pytz import country_names
            for k, n in country_names.items():
                v[k.upper()] = n
        except Exception:
            # pytz is optional; without it the database stays empty.
            pass
    return _countries
|
get country dictionary from pytz and add some extra.
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/country.py#L41-L55
| null |
#
# Requires pytz 2008i or higher
#
from .currency import currencydb
# Eurozone countries (officially the euro area)
# see http://en.wikipedia.org/wiki/Eurozone
# using ISO 3166-1 alpha-2 country codes
# see http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
#
eurozone = tuple(('AT BE CY DE EE ES FI FR GR IE IT LU LV LT MT '
'NL PT SI SK').split(' '))
def print_eurozone():
for c in sorted(map(country, eurozone)):
print(c)
_countries = None
_country_ccys = None
_country_maps = {}
class CountryError(Exception):
pass
def country(code):
cdb = countries()
code = country_map(code)
return cdb.get(code, '')
def countryccy(code):
cdb = countryccys()
code = str(code).upper()
return cdb.get(code, None)
def countryccys():
'''
Create a dictionary with keys given by countries ISO codes and values
given by their currencies
'''
global _country_ccys
if not _country_ccys:
v = {}
_country_ccys = v
ccys = currencydb()
for c in eurozone:
v[c] = 'EUR'
for c in ccys.values():
if c.default_country:
v[c.default_country] = c.code
return _country_ccys
def set_country_map(cfrom, cto, name=None, replace=True):
'''
Set a mapping between a country code to another code
'''
global _country_maps
cdb = countries()
cfrom = str(cfrom).upper()
c = cdb.get(cfrom)
if c:
if name:
c = name
cto = str(cto).upper()
if cto in cdb:
raise CountryError('Country %s already in database' % cto)
cdb[cto] = c
_country_maps[cfrom] = cto
ccys = currencydb()
cccys = countryccys()
ccy = cccys[cfrom]
cccys[cto] = ccy
# If set, remove cfrom from database
if replace:
ccy = ccys.get(ccy)
ccy.default_country = cto
cdb.pop(cfrom)
cccys.pop(cfrom)
else:
raise CountryError('Country %s not in database' % c)
def set_new_country(code, ccy, name):
'''
Add new country code to database
'''
code = str(code).upper()
cdb = countries()
if code in cdb:
raise CountryError('Country %s already in database' % code)
ccys = currencydb()
ccy = str(ccy).upper()
if ccy not in ccys:
raise CountryError('Currency %s not in database' % ccy)
cdb[code] = str(name)
cccys = countryccys()
cccys[code] = ccy
def country_map(code):
'''
Country mapping
'''
code = str(code).upper()
global _country_maps
return _country_maps.get(code, code)
# Add eurozone to list of Countries
set_new_country('EU', 'EUR', 'Eurozone')
|
quantmind/ccy
|
ccy/core/country.py
|
countryccys
|
python
|
def countryccys():
    '''
    Build (once) and return the dictionary mapping country ISO codes to
    the ISO codes of their currencies.
    '''
    global _country_ccys
    if not _country_ccys:
        table = {}
        _country_ccys = table
        # Every eurozone member uses the euro.
        for code in eurozone:
            table[code] = 'EUR'
        # Each currency may declare a default country; map it back.
        for ccy in currencydb().values():
            if ccy.default_country:
                table[ccy.default_country] = ccy.code
    return _country_ccys
|
Create a dictionary with keys given by countries ISO codes and values
given by their currencies
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/country.py#L58-L73
| null |
#
# Requires pytz 2008i or higher
#
from .currency import currencydb
# Eurozone countries (officially the euro area)
# see http://en.wikipedia.org/wiki/Eurozone
# using ISO 3166-1 alpha-2 country codes
# see http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
#
eurozone = tuple(('AT BE CY DE EE ES FI FR GR IE IT LU LV LT MT '
'NL PT SI SK').split(' '))
def print_eurozone():
for c in sorted(map(country, eurozone)):
print(c)
_countries = None
_country_ccys = None
_country_maps = {}
class CountryError(Exception):
pass
def country(code):
cdb = countries()
code = country_map(code)
return cdb.get(code, '')
def countryccy(code):
cdb = countryccys()
code = str(code).upper()
return cdb.get(code, None)
def countries():
'''
get country dictionar from pytz and add some extra.
'''
global _countries
if not _countries:
v = {}
_countries = v
try:
from pytz import country_names
for k, n in country_names.items():
v[k.upper()] = n
except Exception:
pass
return _countries
def set_country_map(cfrom, cto, name=None, replace=True):
'''
Set a mapping between a country code to another code
'''
global _country_maps
cdb = countries()
cfrom = str(cfrom).upper()
c = cdb.get(cfrom)
if c:
if name:
c = name
cto = str(cto).upper()
if cto in cdb:
raise CountryError('Country %s already in database' % cto)
cdb[cto] = c
_country_maps[cfrom] = cto
ccys = currencydb()
cccys = countryccys()
ccy = cccys[cfrom]
cccys[cto] = ccy
# If set, remove cfrom from database
if replace:
ccy = ccys.get(ccy)
ccy.default_country = cto
cdb.pop(cfrom)
cccys.pop(cfrom)
else:
raise CountryError('Country %s not in database' % c)
def set_new_country(code, ccy, name):
'''
Add new country code to database
'''
code = str(code).upper()
cdb = countries()
if code in cdb:
raise CountryError('Country %s already in database' % code)
ccys = currencydb()
ccy = str(ccy).upper()
if ccy not in ccys:
raise CountryError('Currency %s not in database' % ccy)
cdb[code] = str(name)
cccys = countryccys()
cccys[code] = ccy
def country_map(code):
'''
Country mapping
'''
code = str(code).upper()
global _country_maps
return _country_maps.get(code, code)
# Add eurozone to list of Countries
set_new_country('EU', 'EUR', 'Eurozone')
|
quantmind/ccy
|
ccy/core/country.py
|
set_country_map
|
python
|
def set_country_map(cfrom, cto, name=None, replace=True):
    '''
    Set a mapping between a country code and another code.

    Parameters:
        cfrom: existing country code to map from.
        cto: new country code to register.
        name: optional display name for the new code (defaults to the
            name of ``cfrom``).
        replace: when True, ``cfrom`` is removed from the country and
            country-currency databases and the currency's default
            country is repointed to ``cto``.

    Raises:
        CountryError: if ``cto`` is already registered or ``cfrom`` is
            not in the database.
    '''
    global _country_maps
    cdb = countries()
    cfrom = str(cfrom).upper()
    c = cdb.get(cfrom)
    if c:
        if name:
            c = name
        cto = str(cto).upper()
        if cto in cdb:
            raise CountryError('Country %s already in database' % cto)
        cdb[cto] = c
        _country_maps[cfrom] = cto
        ccys = currencydb()
        cccys = countryccys()
        ccy = cccys[cfrom]
        cccys[cto] = ccy
        # If set, remove cfrom from database
        if replace:
            ccy = ccys.get(ccy)
            ccy.default_country = cto
            cdb.pop(cfrom)
            cccys.pop(cfrom)
    else:
        # BUG FIX: `c` is None/falsy here; report the code that was
        # actually looked up instead of "Country None not in database".
        raise CountryError('Country %s not in database' % cfrom)
|
Set a mapping between a country code to another code
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/country.py#L76-L104
|
[
"def countries():\n '''\n get country dictionar from pytz and add some extra.\n '''\n global _countries\n if not _countries:\n v = {}\n _countries = v\n try:\n from pytz import country_names\n for k, n in country_names.items():\n v[k.upper()] = n\n except Exception:\n pass\n return _countries\n"
] |
#
# Requires pytz 2008i or higher
#
from .currency import currencydb
# Eurozone countries (officially the euro area)
# see http://en.wikipedia.org/wiki/Eurozone
# using ISO 3166-1 alpha-2 country codes
# see http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
#
eurozone = tuple(('AT BE CY DE EE ES FI FR GR IE IT LU LV LT MT '
'NL PT SI SK').split(' '))
def print_eurozone():
for c in sorted(map(country, eurozone)):
print(c)
_countries = None
_country_ccys = None
_country_maps = {}
class CountryError(Exception):
pass
def country(code):
cdb = countries()
code = country_map(code)
return cdb.get(code, '')
def countryccy(code):
cdb = countryccys()
code = str(code).upper()
return cdb.get(code, None)
def countries():
'''
get country dictionar from pytz and add some extra.
'''
global _countries
if not _countries:
v = {}
_countries = v
try:
from pytz import country_names
for k, n in country_names.items():
v[k.upper()] = n
except Exception:
pass
return _countries
def countryccys():
'''
Create a dictionary with keys given by countries ISO codes and values
given by their currencies
'''
global _country_ccys
if not _country_ccys:
v = {}
_country_ccys = v
ccys = currencydb()
for c in eurozone:
v[c] = 'EUR'
for c in ccys.values():
if c.default_country:
v[c.default_country] = c.code
return _country_ccys
def set_new_country(code, ccy, name):
'''
Add new country code to database
'''
code = str(code).upper()
cdb = countries()
if code in cdb:
raise CountryError('Country %s already in database' % code)
ccys = currencydb()
ccy = str(ccy).upper()
if ccy not in ccys:
raise CountryError('Currency %s not in database' % ccy)
cdb[code] = str(name)
cccys = countryccys()
cccys[code] = ccy
def country_map(code):
'''
Country mapping
'''
code = str(code).upper()
global _country_maps
return _country_maps.get(code, code)
# Add eurozone to list of Countries
set_new_country('EU', 'EUR', 'Eurozone')
|
quantmind/ccy
|
ccy/core/country.py
|
set_new_country
|
python
|
def set_new_country(code, ccy, name):
'''
Add new country code to database
'''
code = str(code).upper()
cdb = countries()
if code in cdb:
raise CountryError('Country %s already in database' % code)
ccys = currencydb()
ccy = str(ccy).upper()
if ccy not in ccys:
raise CountryError('Currency %s not in database' % ccy)
cdb[code] = str(name)
cccys = countryccys()
cccys[code] = ccy
|
Add new country code to database
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/country.py#L107-L121
|
[
"def currencydb():\n global _ccys\n if not _ccys:\n _ccys = ccydb()\n make_ccys(_ccys)\n return _ccys\n",
"def countries():\n '''\n get country dictionar from pytz and add some extra.\n '''\n global _countries\n if not _countries:\n v = {}\n _countries = v\n try:\n from pytz import country_names\n for k, n in country_names.items():\n v[k.upper()] = n\n except Exception:\n pass\n return _countries\n",
"def countryccys():\n '''\n Create a dictionary with keys given by countries ISO codes and values\n given by their currencies\n '''\n global _country_ccys\n if not _country_ccys:\n v = {}\n _country_ccys = v\n ccys = currencydb()\n for c in eurozone:\n v[c] = 'EUR'\n for c in ccys.values():\n if c.default_country:\n v[c.default_country] = c.code\n return _country_ccys\n"
] |
#
# Requires pytz 2008i or higher
#
from .currency import currencydb
# Eurozone countries (officially the euro area)
# see http://en.wikipedia.org/wiki/Eurozone
# using ISO 3166-1 alpha-2 country codes
# see http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
#
eurozone = tuple(('AT BE CY DE EE ES FI FR GR IE IT LU LV LT MT '
'NL PT SI SK').split(' '))
def print_eurozone():
for c in sorted(map(country, eurozone)):
print(c)
_countries = None
_country_ccys = None
_country_maps = {}
class CountryError(Exception):
pass
def country(code):
cdb = countries()
code = country_map(code)
return cdb.get(code, '')
def countryccy(code):
cdb = countryccys()
code = str(code).upper()
return cdb.get(code, None)
def countries():
'''
get country dictionar from pytz and add some extra.
'''
global _countries
if not _countries:
v = {}
_countries = v
try:
from pytz import country_names
for k, n in country_names.items():
v[k.upper()] = n
except Exception:
pass
return _countries
def countryccys():
'''
Create a dictionary with keys given by countries ISO codes and values
given by their currencies
'''
global _country_ccys
if not _country_ccys:
v = {}
_country_ccys = v
ccys = currencydb()
for c in eurozone:
v[c] = 'EUR'
for c in ccys.values():
if c.default_country:
v[c.default_country] = c.code
return _country_ccys
def set_country_map(cfrom, cto, name=None, replace=True):
'''
Set a mapping between a country code to another code
'''
global _country_maps
cdb = countries()
cfrom = str(cfrom).upper()
c = cdb.get(cfrom)
if c:
if name:
c = name
cto = str(cto).upper()
if cto in cdb:
raise CountryError('Country %s already in database' % cto)
cdb[cto] = c
_country_maps[cfrom] = cto
ccys = currencydb()
cccys = countryccys()
ccy = cccys[cfrom]
cccys[cto] = ccy
# If set, remove cfrom from database
if replace:
ccy = ccys.get(ccy)
ccy.default_country = cto
cdb.pop(cfrom)
cccys.pop(cfrom)
else:
raise CountryError('Country %s not in database' % c)
def country_map(code):
'''
Country mapping
'''
code = str(code).upper()
global _country_maps
return _country_maps.get(code, code)
# Add eurozone to list of Countries
set_new_country('EU', 'EUR', 'Eurozone')
|
quantmind/ccy
|
ccy/core/country.py
|
country_map
|
python
|
def country_map(code):
'''
Country mapping
'''
code = str(code).upper()
global _country_maps
return _country_maps.get(code, code)
|
Country mapping
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/country.py#L124-L130
| null |
#
# Requires pytz 2008i or higher
#
from .currency import currencydb
# Eurozone countries (officially the euro area)
# see http://en.wikipedia.org/wiki/Eurozone
# using ISO 3166-1 alpha-2 country codes
# see http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
#
eurozone = tuple(('AT BE CY DE EE ES FI FR GR IE IT LU LV LT MT '
'NL PT SI SK').split(' '))
def print_eurozone():
for c in sorted(map(country, eurozone)):
print(c)
_countries = None
_country_ccys = None
_country_maps = {}
class CountryError(Exception):
pass
def country(code):
cdb = countries()
code = country_map(code)
return cdb.get(code, '')
def countryccy(code):
cdb = countryccys()
code = str(code).upper()
return cdb.get(code, None)
def countries():
'''
get country dictionar from pytz and add some extra.
'''
global _countries
if not _countries:
v = {}
_countries = v
try:
from pytz import country_names
for k, n in country_names.items():
v[k.upper()] = n
except Exception:
pass
return _countries
def countryccys():
'''
Create a dictionary with keys given by countries ISO codes and values
given by their currencies
'''
global _country_ccys
if not _country_ccys:
v = {}
_country_ccys = v
ccys = currencydb()
for c in eurozone:
v[c] = 'EUR'
for c in ccys.values():
if c.default_country:
v[c.default_country] = c.code
return _country_ccys
def set_country_map(cfrom, cto, name=None, replace=True):
'''
Set a mapping between a country code to another code
'''
global _country_maps
cdb = countries()
cfrom = str(cfrom).upper()
c = cdb.get(cfrom)
if c:
if name:
c = name
cto = str(cto).upper()
if cto in cdb:
raise CountryError('Country %s already in database' % cto)
cdb[cto] = c
_country_maps[cfrom] = cto
ccys = currencydb()
cccys = countryccys()
ccy = cccys[cfrom]
cccys[cto] = ccy
# If set, remove cfrom from database
if replace:
ccy = ccys.get(ccy)
ccy.default_country = cto
cdb.pop(cfrom)
cccys.pop(cfrom)
else:
raise CountryError('Country %s not in database' % c)
def set_new_country(code, ccy, name):
'''
Add new country code to database
'''
code = str(code).upper()
cdb = countries()
if code in cdb:
raise CountryError('Country %s already in database' % code)
ccys = currencydb()
ccy = str(ccy).upper()
if ccy not in ccys:
raise CountryError('Currency %s not in database' % ccy)
cdb[code] = str(name)
cccys = countryccys()
cccys[code] = ccy
# Add eurozone to list of Countries
set_new_country('EU', 'EUR', 'Eurozone')
|
quantmind/ccy
|
docs/_ext/table.py
|
TableDirective.run
|
python
|
def run(self):
# Get content and options
data_path = self.arguments[0]
header = self.options.get('header', True)
bits = data_path.split('.')
name = bits[-1]
path = '.'.join(bits[:-1])
node = table_node()
code = None
try:
module = import_module(path)
except Exception:
code = '<p>Could not import %s</p>' % path
try:
callable = getattr(module, name)
except Exception:
code = 'Could not import %s from %s' % (name, path)
if not code:
data = callable()
table = ['<table>']
if header:
headers, data = data[0], data[1:]
table.append('<thead>')
tr = ['<tr>']
for head in headers:
tr.append('<th>%s</th>' % head)
tr.append('</tr>')
table.append(''.join(tr))
table.append('</thead>')
table.append('</tbody>')
for row in data:
tr = ['<tr>']
for c in row:
tr.append('<td>%s</td>' % c)
tr.append('</tr>')
table.append(''.join(tr))
table.append('</tbody>')
table.append('</table>')
code = '\n'.join(table)
node['code'] = code
return [node]
|
Implements the directive
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/docs/_ext/table.py#L35-L78
| null |
class TableDirective(Directive):
"""
ExcelTableDirective implements the directive.
Directive allows to create RST tables from the contents
of the Excel sheet. The functionality is very similar to
csv-table (docutils) and xmltable (:mod:`sphinxcontrib.xmltable`).
Example of the directive:
.. code-block:: rest
.. table::
:datafunction: path.to.my.data.function
"""
has_content = False
required_arguments = 1
option_spec = {'class': directives.class_option}
|
quantmind/ccy
|
ccy/dates/converters.py
|
todate
|
python
|
def todate(val):
'''Convert val to a datetime.date instance by trying several
conversion algorithm.
If it fails it raise a ValueError exception.
'''
if not val:
raise ValueError("Value not provided")
if isinstance(val, datetime):
return val.date()
elif isinstance(val, date):
return val
else:
try:
ival = int(val)
sval = str(ival)
if len(sval) == 8:
return yyyymmdd2date(val)
elif len(sval) == 5:
return juldate2date(val)
else:
raise ValueError
except Exception:
# Try to convert using the parsing algorithm
try:
return date_from_string(val).date()
except Exception:
raise ValueError("Could not convert %s to date" % val)
|
Convert val to a datetime.date instance by trying several
conversion algorithm.
If it fails it raise a ValueError exception.
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/dates/converters.py#L12-L38
| null |
import time
from datetime import datetime, date
try:
from dateutil.parser import parse as date_from_string
except ImportError: # noqa
def date_from_string(dte):
raise NotImplementedError
def date2timestamp(dte):
return time.mktime(dte.timetuple())
def jstimestamp(dte):
'''Convert a date to a javascript timestamp.
A Javascript timestamp is the number of milliseconds since
January 1, 1970 00:00:00 UTC.'''
return 1000*date2timestamp(dte)
def timestamp2date(tstamp):
"Converts a unix timestamp to a Python datetime object"
dt = datetime.fromtimestamp(tstamp)
if not dt.hour+dt.minute+dt.second+dt.microsecond:
return dt.date()
else:
return dt
def yyyymmdd2date(dte):
try:
y = dte // 10000
md = dte % 10000
m = md // 100
d = md % 100
return date(y, m, d)
except Exception:
raise ValueError('Could not convert %s to date' % dte)
def date2yyyymmdd(dte):
return dte.day + 100*(dte.month + 100*dte.year)
def juldate2date(val):
'''Convert from a Julian date/datetime to python date or datetime'''
ival = int(val)
dec = val - ival
try:
val4 = 4*ival
yd = val4 % 1461
st = 1899
if yd >= 4:
st = 1900
yd1 = yd - 241
y = val4 // 1461 + st
if yd1 >= 0:
q = yd1 // 4 * 5 + 308
qq = q // 153
qr = q % 153
else:
q = yd // 4 * 5 + 1833
qq = q // 153
qr = q % 153
m = qq % 12 + 1
d = qr // 5 + 1
except Exception:
raise ValueError('Could not convert %s to date' % val)
if dec:
dec24 = 24*dec
hours = int(dec24)
minutes = int(60*(dec24 - hours))
tot_seconds = 60*(60*(dec24 - hours) - minutes)
seconds = int(tot_seconds)
microseconds = int(1000000*(tot_seconds-seconds))
return datetime(y, m, d, hours, minutes, seconds, microseconds)
else:
return date(y, m, d)
def date2juldate(val):
'''Convert from a python date/datetime to a Julian date & time'''
f = 12*val.year + val.month - 22803
fq = f // 12
fr = f % 12
dt = (fr*153 + 302)//5 + val.day + fq*1461//4
if isinstance(val, datetime):
return dt + (val.hour + (val.minute + (
val.second + 0.000001*val.microsecond)/60.)/60.)/24.
else:
return dt
|
quantmind/ccy
|
ccy/dates/converters.py
|
timestamp2date
|
python
|
def timestamp2date(tstamp):
"Converts a unix timestamp to a Python datetime object"
dt = datetime.fromtimestamp(tstamp)
if not dt.hour+dt.minute+dt.second+dt.microsecond:
return dt.date()
else:
return dt
|
Converts a unix timestamp to a Python datetime object
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/dates/converters.py#L53-L59
| null |
import time
from datetime import datetime, date
try:
from dateutil.parser import parse as date_from_string
except ImportError: # noqa
def date_from_string(dte):
raise NotImplementedError
def todate(val):
'''Convert val to a datetime.date instance by trying several
conversion algorithm.
If it fails it raise a ValueError exception.
'''
if not val:
raise ValueError("Value not provided")
if isinstance(val, datetime):
return val.date()
elif isinstance(val, date):
return val
else:
try:
ival = int(val)
sval = str(ival)
if len(sval) == 8:
return yyyymmdd2date(val)
elif len(sval) == 5:
return juldate2date(val)
else:
raise ValueError
except Exception:
# Try to convert using the parsing algorithm
try:
return date_from_string(val).date()
except Exception:
raise ValueError("Could not convert %s to date" % val)
def date2timestamp(dte):
return time.mktime(dte.timetuple())
def jstimestamp(dte):
'''Convert a date to a javascript timestamp.
A Javascript timestamp is the number of milliseconds since
January 1, 1970 00:00:00 UTC.'''
return 1000*date2timestamp(dte)
def yyyymmdd2date(dte):
try:
y = dte // 10000
md = dte % 10000
m = md // 100
d = md % 100
return date(y, m, d)
except Exception:
raise ValueError('Could not convert %s to date' % dte)
def date2yyyymmdd(dte):
return dte.day + 100*(dte.month + 100*dte.year)
def juldate2date(val):
'''Convert from a Julian date/datetime to python date or datetime'''
ival = int(val)
dec = val - ival
try:
val4 = 4*ival
yd = val4 % 1461
st = 1899
if yd >= 4:
st = 1900
yd1 = yd - 241
y = val4 // 1461 + st
if yd1 >= 0:
q = yd1 // 4 * 5 + 308
qq = q // 153
qr = q % 153
else:
q = yd // 4 * 5 + 1833
qq = q // 153
qr = q % 153
m = qq % 12 + 1
d = qr // 5 + 1
except Exception:
raise ValueError('Could not convert %s to date' % val)
if dec:
dec24 = 24*dec
hours = int(dec24)
minutes = int(60*(dec24 - hours))
tot_seconds = 60*(60*(dec24 - hours) - minutes)
seconds = int(tot_seconds)
microseconds = int(1000000*(tot_seconds-seconds))
return datetime(y, m, d, hours, minutes, seconds, microseconds)
else:
return date(y, m, d)
def date2juldate(val):
'''Convert from a python date/datetime to a Julian date & time'''
f = 12*val.year + val.month - 22803
fq = f // 12
fr = f % 12
dt = (fr*153 + 302)//5 + val.day + fq*1461//4
if isinstance(val, datetime):
return dt + (val.hour + (val.minute + (
val.second + 0.000001*val.microsecond)/60.)/60.)/24.
else:
return dt
|
quantmind/ccy
|
ccy/dates/converters.py
|
juldate2date
|
python
|
def juldate2date(val):
'''Convert from a Julian date/datetime to python date or datetime'''
ival = int(val)
dec = val - ival
try:
val4 = 4*ival
yd = val4 % 1461
st = 1899
if yd >= 4:
st = 1900
yd1 = yd - 241
y = val4 // 1461 + st
if yd1 >= 0:
q = yd1 // 4 * 5 + 308
qq = q // 153
qr = q % 153
else:
q = yd // 4 * 5 + 1833
qq = q // 153
qr = q % 153
m = qq % 12 + 1
d = qr // 5 + 1
except Exception:
raise ValueError('Could not convert %s to date' % val)
if dec:
dec24 = 24*dec
hours = int(dec24)
minutes = int(60*(dec24 - hours))
tot_seconds = 60*(60*(dec24 - hours) - minutes)
seconds = int(tot_seconds)
microseconds = int(1000000*(tot_seconds-seconds))
return datetime(y, m, d, hours, minutes, seconds, microseconds)
else:
return date(y, m, d)
|
Convert from a Julian date/datetime to python date or datetime
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/dates/converters.py#L77-L110
| null |
import time
from datetime import datetime, date
try:
from dateutil.parser import parse as date_from_string
except ImportError: # noqa
def date_from_string(dte):
raise NotImplementedError
def todate(val):
'''Convert val to a datetime.date instance by trying several
conversion algorithm.
If it fails it raise a ValueError exception.
'''
if not val:
raise ValueError("Value not provided")
if isinstance(val, datetime):
return val.date()
elif isinstance(val, date):
return val
else:
try:
ival = int(val)
sval = str(ival)
if len(sval) == 8:
return yyyymmdd2date(val)
elif len(sval) == 5:
return juldate2date(val)
else:
raise ValueError
except Exception:
# Try to convert using the parsing algorithm
try:
return date_from_string(val).date()
except Exception:
raise ValueError("Could not convert %s to date" % val)
def date2timestamp(dte):
return time.mktime(dte.timetuple())
def jstimestamp(dte):
'''Convert a date to a javascript timestamp.
A Javascript timestamp is the number of milliseconds since
January 1, 1970 00:00:00 UTC.'''
return 1000*date2timestamp(dte)
def timestamp2date(tstamp):
"Converts a unix timestamp to a Python datetime object"
dt = datetime.fromtimestamp(tstamp)
if not dt.hour+dt.minute+dt.second+dt.microsecond:
return dt.date()
else:
return dt
def yyyymmdd2date(dte):
try:
y = dte // 10000
md = dte % 10000
m = md // 100
d = md % 100
return date(y, m, d)
except Exception:
raise ValueError('Could not convert %s to date' % dte)
def date2yyyymmdd(dte):
return dte.day + 100*(dte.month + 100*dte.year)
def date2juldate(val):
'''Convert from a python date/datetime to a Julian date & time'''
f = 12*val.year + val.month - 22803
fq = f // 12
fr = f % 12
dt = (fr*153 + 302)//5 + val.day + fq*1461//4
if isinstance(val, datetime):
return dt + (val.hour + (val.minute + (
val.second + 0.000001*val.microsecond)/60.)/60.)/24.
else:
return dt
|
quantmind/ccy
|
ccy/dates/converters.py
|
date2juldate
|
python
|
def date2juldate(val):
'''Convert from a python date/datetime to a Julian date & time'''
f = 12*val.year + val.month - 22803
fq = f // 12
fr = f % 12
dt = (fr*153 + 302)//5 + val.day + fq*1461//4
if isinstance(val, datetime):
return dt + (val.hour + (val.minute + (
val.second + 0.000001*val.microsecond)/60.)/60.)/24.
else:
return dt
|
Convert from a python date/datetime to a Julian date & time
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/dates/converters.py#L113-L123
| null |
import time
from datetime import datetime, date
try:
from dateutil.parser import parse as date_from_string
except ImportError: # noqa
def date_from_string(dte):
raise NotImplementedError
def todate(val):
'''Convert val to a datetime.date instance by trying several
conversion algorithm.
If it fails it raise a ValueError exception.
'''
if not val:
raise ValueError("Value not provided")
if isinstance(val, datetime):
return val.date()
elif isinstance(val, date):
return val
else:
try:
ival = int(val)
sval = str(ival)
if len(sval) == 8:
return yyyymmdd2date(val)
elif len(sval) == 5:
return juldate2date(val)
else:
raise ValueError
except Exception:
# Try to convert using the parsing algorithm
try:
return date_from_string(val).date()
except Exception:
raise ValueError("Could not convert %s to date" % val)
def date2timestamp(dte):
return time.mktime(dte.timetuple())
def jstimestamp(dte):
'''Convert a date to a javascript timestamp.
A Javascript timestamp is the number of milliseconds since
January 1, 1970 00:00:00 UTC.'''
return 1000*date2timestamp(dte)
def timestamp2date(tstamp):
"Converts a unix timestamp to a Python datetime object"
dt = datetime.fromtimestamp(tstamp)
if not dt.hour+dt.minute+dt.second+dt.microsecond:
return dt.date()
else:
return dt
def yyyymmdd2date(dte):
try:
y = dte // 10000
md = dte % 10000
m = md // 100
d = md % 100
return date(y, m, d)
except Exception:
raise ValueError('Could not convert %s to date' % dte)
def date2yyyymmdd(dte):
return dte.day + 100*(dte.month + 100*dte.year)
def juldate2date(val):
'''Convert from a Julian date/datetime to python date or datetime'''
ival = int(val)
dec = val - ival
try:
val4 = 4*ival
yd = val4 % 1461
st = 1899
if yd >= 4:
st = 1900
yd1 = yd - 241
y = val4 // 1461 + st
if yd1 >= 0:
q = yd1 // 4 * 5 + 308
qq = q // 153
qr = q % 153
else:
q = yd // 4 * 5 + 1833
qq = q // 153
qr = q % 153
m = qq % 12 + 1
d = qr // 5 + 1
except Exception:
raise ValueError('Could not convert %s to date' % val)
if dec:
dec24 = 24*dec
hours = int(dec24)
minutes = int(60*(dec24 - hours))
tot_seconds = 60*(60*(dec24 - hours) - minutes)
seconds = int(tot_seconds)
microseconds = int(1000000*(tot_seconds-seconds))
return datetime(y, m, d, hours, minutes, seconds, microseconds)
else:
return date(y, m, d)
|
quantmind/ccy
|
ccy/dates/period.py
|
Period.components
|
python
|
def components(self):
'''The period string'''
p = ''
neg = self.totaldays < 0
y = self.years
m = self.months
w = self.weeks
d = self.days
if y:
p = '%sY' % abs(y)
if m:
p = '%s%sM' % (p, abs(m))
if w:
p = '%s%sW' % (p, abs(w))
if d:
p = '%s%sD' % (p, abs(d))
return '-'+p if neg else p
|
The period string
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/dates/period.py#L79-L95
| null |
class Period:
def __init__(self, months=0, days=0):
self._months = months
self._days = days
@classmethod
def make(cls, pstr=''):
if isinstance(pstr, cls):
return pstr
else:
return cls().add_tenure(pstr)
def isempty(self):
return self._months == 0 and self._days == 0
def add_days(self, days):
self._days += days
def add_weeks(self, weeks):
self._days += int(7*weeks)
def add_months(self, months):
self._months += months
def add_years(self, years):
self._months += int(12*years)
@property
def years(self):
return safediv(self._months, 12)
@property
def months(self):
return safemod(self._months, 12)
@property
def weeks(self):
return safediv(self._days, 7)
@property
def days(self):
return safemod(self._days, 7)
@property
def totaldays(self):
return 30*self._months + self._days
def __repr__(self):
'''The period string'''
return self.components()
def __str__(self):
return self.__repr__()
def simple(self):
'''A string representation with only one period delimiter.'''
if self._days:
return '%sD' % self.totaldays
elif self.months:
return '%sM' % self._months
elif self.years:
return '%sY' % self.years
else:
return ''
def add_tenure(self, pstr):
if isinstance(pstr, self.__class__):
self._months += pstr._months
self._days += pstr._days
return self
st = str(pstr).upper()
done = False
sign = 1
while not done:
if not st:
done = True
else:
ip = find_first_of(st, 'DWMY')
if ip == -1:
raise ValueError("Unknown period %s" % pstr)
p = st[ip]
v = int(st[:ip])
sign = sign if v > 0 else -sign
v = sign*abs(v)
if p == 'D':
self.add_days(v)
elif p == 'W':
self.add_weeks(v)
elif p == 'M':
self.add_months(v)
elif p == 'Y':
self.add_years(v)
st = st[ip+1:]
return self
def __add__(self, other):
other = self.make(other)
return self.__class__(self._months+other._months,
self._days+other._days)
def __radd__(self, other):
return self + other
def __sub__(self, other):
other = self.make(other)
return self.__class__(self._months-other._months,
self._days-other._days)
def __rsub__(self, other):
return self.make(other) - self
def __gt__(self, other):
return self.totaldays > self.make(other).totaldays
def __lt__(self, other):
return self.totaldays < self.make(other).totaldays
def __ge__(self, other):
return self.totaldays >= self.make(other).totaldays
def __le__(self, other):
return self.totaldays <= self.make(other).totaldays
def __eq__(self, other):
return self.totaldays == self.make(other).totaldays
|
quantmind/ccy
|
ccy/dates/period.py
|
Period.simple
|
python
|
def simple(self):
'''A string representation with only one period delimiter.'''
if self._days:
return '%sD' % self.totaldays
elif self.months:
return '%sM' % self._months
elif self.years:
return '%sY' % self.years
else:
return ''
|
A string representation with only one period delimiter.
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/dates/period.py#L97-L106
| null |
class Period:
def __init__(self, months=0, days=0):
self._months = months
self._days = days
@classmethod
def make(cls, pstr=''):
if isinstance(pstr, cls):
return pstr
else:
return cls().add_tenure(pstr)
def isempty(self):
return self._months == 0 and self._days == 0
def add_days(self, days):
self._days += days
def add_weeks(self, weeks):
self._days += int(7*weeks)
def add_months(self, months):
self._months += months
def add_years(self, years):
self._months += int(12*years)
@property
def years(self):
return safediv(self._months, 12)
@property
def months(self):
return safemod(self._months, 12)
@property
def weeks(self):
return safediv(self._days, 7)
@property
def days(self):
return safemod(self._days, 7)
@property
def totaldays(self):
return 30*self._months + self._days
def __repr__(self):
'''The period string'''
return self.components()
def __str__(self):
return self.__repr__()
def components(self):
'''The period string'''
p = ''
neg = self.totaldays < 0
y = self.years
m = self.months
w = self.weeks
d = self.days
if y:
p = '%sY' % abs(y)
if m:
p = '%s%sM' % (p, abs(m))
if w:
p = '%s%sW' % (p, abs(w))
if d:
p = '%s%sD' % (p, abs(d))
return '-'+p if neg else p
def add_tenure(self, pstr):
if isinstance(pstr, self.__class__):
self._months += pstr._months
self._days += pstr._days
return self
st = str(pstr).upper()
done = False
sign = 1
while not done:
if not st:
done = True
else:
ip = find_first_of(st, 'DWMY')
if ip == -1:
raise ValueError("Unknown period %s" % pstr)
p = st[ip]
v = int(st[:ip])
sign = sign if v > 0 else -sign
v = sign*abs(v)
if p == 'D':
self.add_days(v)
elif p == 'W':
self.add_weeks(v)
elif p == 'M':
self.add_months(v)
elif p == 'Y':
self.add_years(v)
st = st[ip+1:]
return self
def __add__(self, other):
other = self.make(other)
return self.__class__(self._months+other._months,
self._days+other._days)
def __radd__(self, other):
return self + other
def __sub__(self, other):
other = self.make(other)
return self.__class__(self._months-other._months,
self._days-other._days)
def __rsub__(self, other):
return self.make(other) - self
def __gt__(self, other):
return self.totaldays > self.make(other).totaldays
def __lt__(self, other):
return self.totaldays < self.make(other).totaldays
def __ge__(self, other):
return self.totaldays >= self.make(other).totaldays
def __le__(self, other):
return self.totaldays <= self.make(other).totaldays
def __eq__(self, other):
return self.totaldays == self.make(other).totaldays
|
quantmind/ccy
|
ccy/core/data.py
|
make_ccys
|
python
|
def make_ccys(db):
'''
Create the currency dictionary
'''
dfr = 4
dollar = r'\u0024'
peso = r'\u20b1'
kr = r'kr'
insert = db.insert
# G10 & SCANDI
insert('EUR', '978', 'EU', 1,
'Euro', dfr, 'EU', '30/360', 'ACT/360',
future='FE', symbol=r'\u20ac', html='€')
insert('GBP', '826', 'BP', 2,
'British Pound', dfr, 'GB', 'ACT/365', 'ACT/365',
symbol=r'\u00a3', html='£')
insert('AUD', '036', 'AD', 3,
'Australian Dollar', dfr, 'AU', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('NZD', '554', 'ND', 4,
'New-Zealand Dollar', dfr, 'NZ', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('USD', '840', 'UD', 5,
'US Dollar', 0, 'US', '30/360', 'ACT/360',
future='ED', symbol=dollar, html='$')
insert('CAD', '124', 'CD', 6,
'Canadian Dollar', dfr, 'CA', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('CHF', '756', 'SF', 7,
'Swiss Franc', dfr, 'CH', '30/360', 'ACT/360',
symbol=r'Fr', html='₣')
insert('NOK', '578', 'NK', 8,
'Norwegian Krona', dfr, 'NO', '30/360', 'ACT/360',
symbol=kr, html=kr)
insert('SEK', '752', 'SK', 9,
'Swedish Krona', dfr, 'SE', '30/360', 'ACT/360',
symbol=kr, html=kr)
insert('DKK', '208', 'DK', 10,
'Danish Krona', dfr, 'DK', '30/360', 'ACT/360',
symbol=kr, html=kr)
insert('JPY', '392', 'JY', 10000,
'Japanese Yen', 2, 'JP', 'ACT/365', 'ACT/360',
symbol=r'\u00a5', html='¥')
# ASIA
insert('CNY', '156', 'CY', 680,
'Chinese Renminbi', dfr, 'CN', 'ACT/365', 'ACT/365',
symbol=r'\u00a5', html='¥')
insert('KRW', '410', 'KW', 110000,
'South Korean won', 2, 'KR', 'ACT/365', 'ACT/365',
symbol=r'\u20a9', html='₩')
insert('SGD', '702', 'SD', 15,
'Singapore Dollar', dfr, 'SG', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('IDR', '360', 'IH', 970000,
'Indonesian Rupiah', 0, 'ID', 'ACT/360', 'ACT/360',
symbol=r'Rp', html='Rp')
insert('THB', '764', 'TB', 3300,
'Thai Baht', 2, 'TH', 'ACT/365', 'ACT/365',
symbol=r'\u0e3f', html='฿')
insert('TWD', '901', 'TD', 18,
'Taiwan Dollar', dfr, 'TW', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('HKD', '344', 'HD', 19,
'Hong Kong Dollar', dfr, 'HK', 'ACT/365', 'ACT/365',
symbol=r'\u5713', html='HK$')
insert('PHP', '608', 'PP', 4770,
'Philippines Peso', dfr, 'PH', 'ACT/360', 'ACT/360',
symbol=peso, html='₱')
insert('INR', '356', 'IR', 4500,
'Indian Rupee', dfr, 'IN', 'ACT/365', 'ACT/365',
symbol=r'\u20a8', html='₨')
insert('MYR', '458', 'MR', 345,
'Malaysian Ringgit', dfr, 'MY', 'ACT/365', 'ACT/365')
insert('VND', '704', 'VD', 1700000,
'Vietnamese Dong', 0, 'VN', 'ACT/365', 'ACT/365',
symbol=r'\u20ab', html='₫')
# LATIN AMERICA
insert('BRL', '986', 'BC', 200,
'Brazilian Real', dfr, 'BR', 'BUS/252', 'BUS/252',
symbol=r'R$')
insert('PEN', '604', 'PS', 220,
'Peruvian New Sol', dfr, 'PE', 'ACT/360', 'ACT/360',
symbol=r'S/.')
insert('ARS', '032', 'AP', 301,
'Argentine Peso', dfr, 'AR', '30/360', 'ACT/360',
symbol=dollar, html='$')
insert('MXN', '484', 'MP', 1330,
'Mexican Peso', dfr, 'MX', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
insert('CLP', '152', 'CH', 54500,
'Chilean Peso', 2, 'CL', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
insert('COP', '170', 'CL', 190000,
'Colombian Peso', 2, 'CO', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
# TODO: Check towletters code and position
insert('JMD', '388', 'JD', 410,
'Jamaican Dollar', dfr, 'JM', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
# TODO: Check towletters code and position
insert('TTD', '780', 'TT', 410,
'Trinidad and Tobago Dollar', dfr, 'TT', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
# TODO: Check towletters code and position
insert('BMD', '060', 'BD', 410,
'Bermudian Dollar', dfr, 'BM',
symbol=dollar, html='$')
# EASTERN EUROPE
insert('CZK', '203', 'CK', 28,
'Czech Koruna', dfr, 'CZ', 'ACT/360', 'ACT/360',
symbol=r'\u004b\u010d')
insert('PLN', '985', 'PZ', 29,
'Polish Zloty', dfr, 'PL', 'ACT/ACT', 'ACT/365',
symbol=r'\u0050\u0142')
insert('TRY', '949', 'TY', 30,
'Turkish Lira', dfr, 'TR', 'ACT/360', 'ACT/360',
symbol=r'\u0054\u004c')
insert('HUF', '348', 'HF', 32,
'Hungarian Forint', dfr, 'HU', 'ACT/365', 'ACT/360',
symbol=r'Ft', html='Ft')
insert('RON', '946', 'RN', 34,
'Romanian Leu', dfr, 'RO', 'ACT/360', 'ACT/360')
insert('RUB', '643', 'RR', 36,
'Russian Ruble', dfr, 'RU', 'ACT/ACT', 'ACT/ACT',
symbol=r'\u0440\u0443\u0431')
# TODO: Check towletters code and position
insert('HRK', '191', 'HK', 410,
'Croatian kuna', dfr, 'HR',
symbol=r'kn')
# TODO: Check towletters code and position
insert('KZT', '398', 'KT', 410,
'Tenge', dfr, 'KZ',
symbol=r'\u20b8', html='₸')
# TODO: Check towletters code and position
insert('BGN', '975', 'BN', 410,
'Bulgarian Lev', dfr, 'BG',
symbol=r'\u043b\u0432.', html='лв')
# MIDDLE EAST & AFRICA
insert('ILS', '376', 'IS', 410,
'Israeli Shekel', dfr, 'IL', 'ACT/365', 'ACT/365',
symbol=r'\u20aa', html='₪')
# TODO: Check towletters code and position
insert('AED', '784', 'AE', 410,
'United Arab Emirates Dirham', dfr, 'AE')
# TODO: Check towletters code and position
insert('QAR', '634', 'QA', 410,
'Qatari Riyal', dfr, 'QA',
symbol=r'\ufdfc', html='﷼')
# TODO: Check towletters code and position
insert('SAR', '682', 'SR', 410,
'Saudi Riyal', dfr, 'SA',
symbol=r'\ufdfc', html='﷼')
insert('EGP', '818', 'EP', 550,
'Egyptian Pound', dfr, 'EG',
symbol=r'\u00a3', html='£')
insert('ZAR', '710', 'SA', 750,
'South African Rand', dfr, 'ZA', 'ACT/365', 'ACT/365',
symbol=r'R', html='R')
# BITCOIN
insert('XBT', '000', 'BT', -1,
'Bitcoin', 8, 'WW',
symbol=r'\u0e3f', html='฿')
|
Create the currency dictionary
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/data.py#L2-L169
|
[
"def insert(self, *args, **kwargs):\n c = ccy(*args, **kwargs)\n self[c.code] = c\n"
] | |
quantmind/ccy
|
ccy/core/currency.py
|
currency_pair
|
python
|
def currency_pair(code):
'''Construct a :class:`ccy_pair` from a six letter string.'''
c = str(code)
c1 = currency(c[:3])
c2 = currency(c[3:])
return ccy_pair(c1, c2)
|
Construct a :class:`ccy_pair` from a six letter string.
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/currency.py#L211-L216
|
[
"def currency(code):\n c = currencydb()\n return c.get(str(code).upper())\n"
] |
import sys
from .data import make_ccys
usd_order = 5
def to_string(v):
if isinstance(v, bytes):
return v.decode('utf-8')
else:
return '%s' % v
def overusdfun(v1):
return v1
def overusdfuni(v1):
return 1./v1
class ccy(object):
'''
Currency object
'''
def __init__(self, code, isonumber, twoletterscode, order, name,
roundoff=4,
default_country=None,
fixeddc=None,
floatdc=None,
fixedfreq=None,
floatfreq=None,
future=None,
symbol=r'\00a4',
html=''):
self.code = to_string(code)
self.id = self.code
self.isonumber = isonumber
self.twoletterscode = to_string(twoletterscode)
self.order = int(order)
self.name = to_string(name)
self.rounding = roundoff
self.default_country = default_country
self.symbol_raw = symbol
self.symbol = symbol.encode('utf-8').decode('unicode_escape')
self.html = html or self.symbol
self.fixeddc = fixeddc
self.floatdc = floatdc
self.future = ''
if future:
self.future = str(future)
def __getstate__(self):
return {'code': self.code}
def __setstate__(self, dict):
c = currency(dict['code'])
self.__dict__.update(c.__dict__)
def __eq__(self, other):
if isinstance(other, ccy):
return other.code == self.code
return False
def description(self):
if self.order > usd_order:
v = 'USD / %s' % self.code
else:
v = '%s / USD' % self.code
if self.order != usd_order:
return '%s Spot Exchange Rate' % v
else:
return 'Dollar'
def info(self):
return {'code': self.code,
'isonumber': self.isonumber,
'twoletterscode': self.twoletterscode,
'symbol': self.symbol,
'order': self.order,
'name': self.name,
'rounding': self.rounding,
'default_country': self.default_country,
'unicode symbol': self.symbol_raw}
def printinfo(self, stream=None):
info = self.info()
stream = stream or sys.stdout
for k, v in info.items():
stream.write(to_string('%s: %s\n' % (k, v)))
def __repr__(self):
return '%s: %s' % (self.__class__.__name__, self.code)
def __str__(self):
return self.code
def swap(self, c2):
'''
put the order of currencies as market standard
'''
inv = False
c1 = self
if c1.order > c2.order:
ct = c1
c1 = c2
c2 = ct
inv = True
return inv, c1, c2
def overusdfunc(self):
if self.order > usd_order:
return overusdfuni
else:
return overusdfun
def usdoverfunc(self):
if self.order > usd_order:
return overusdfun
else:
return overusdfuni
def as_cross(self, delimiter=''):
'''
Return a cross rate representation with respect USD.
@param delimiter: could be '' or '/' normally
'''
if self.order > usd_order:
return 'USD%s%s' % (delimiter, self.code)
else:
return '%s%sUSD' % (self.code, delimiter)
def spot(self, c2, v1, v2):
if self.order > c2.order:
vt = v1
v1 = v2
v2 = vt
return v1/v2
class ccy_pair(object):
'''
Currency pair such as EURUSD, USDCHF
XXXYYY - XXX is the foreign currency, while YYY is the base currency
XXXYYY means 1 unit of of XXX cost XXXYYY units of YYY
'''
def __init__(self, c1, c2):
self.ccy1 = c1
self.ccy2 = c2
self.code = '%s%s' % (c1, c2)
self.id = self.code
def __repr__(self):
return '%s: %s' % (self.__class__.__name__, self.code)
def __str__(self):
return self.code
def mkt(self):
if self.ccy1.order > self.ccy2.order:
return ccy_pair(self.ccy2, self.ccy1)
else:
return self
def over(self, name='usd'):
'''Returns a new currency pair with the *over* currency as
second part of the pair (Foreign currency).'''
name = name.upper()
if self.ccy1.code == name.upper():
return ccy_pair(self.ccy2, self.ccy1)
else:
return self
class ccydb(dict):
def insert(self, *args, **kwargs):
c = ccy(*args, **kwargs)
self[c.code] = c
def currencydb():
global _ccys
if not _ccys:
_ccys = ccydb()
make_ccys(_ccys)
return _ccys
def ccypairsdb():
global _ccypairs
if not _ccypairs:
_ccypairs = make_ccypairs()
return _ccypairs
def currency(code):
c = currencydb()
return c.get(str(code).upper())
def ccypair(code):
c = ccypairsdb()
return c.get(str(code).upper())
def make_ccypairs():
ccys = currencydb()
db = {}
for ccy1 in ccys.values():
od = ccy1.order
for ccy2 in ccys.values():
if ccy2.order <= od:
continue
p = ccy_pair(ccy1, ccy2)
db[p.code] = p
return db
def dump_currency_table():
headers = ['code',
'name',
('isonumber', 'iso'),
('html', 'symbol'),
('default_country', 'country'),
'order',
'rounding']
all = []
data = []
all.append(data)
for h in headers:
if isinstance(h, tuple):
h = h[1]
data.append(h)
for c in sorted(currencydb().values(), key=lambda x: x.order):
data = []
all.append(data)
for h in headers:
if isinstance(h, tuple):
h = h[0]
data.append(getattr(c, h))
return all
_ccys = None
_ccypairs = None
|
quantmind/ccy
|
ccy/core/currency.py
|
ccy.swap
|
python
|
def swap(self, c2):
'''
put the order of currencies as market standard
'''
inv = False
c1 = self
if c1.order > c2.order:
ct = c1
c1 = c2
c2 = ct
inv = True
return inv, c1, c2
|
put the order of currencies as market standard
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/currency.py#L100-L111
| null |
class ccy(object):
'''
Currency object
'''
def __init__(self, code, isonumber, twoletterscode, order, name,
roundoff=4,
default_country=None,
fixeddc=None,
floatdc=None,
fixedfreq=None,
floatfreq=None,
future=None,
symbol=r'\00a4',
html=''):
self.code = to_string(code)
self.id = self.code
self.isonumber = isonumber
self.twoletterscode = to_string(twoletterscode)
self.order = int(order)
self.name = to_string(name)
self.rounding = roundoff
self.default_country = default_country
self.symbol_raw = symbol
self.symbol = symbol.encode('utf-8').decode('unicode_escape')
self.html = html or self.symbol
self.fixeddc = fixeddc
self.floatdc = floatdc
self.future = ''
if future:
self.future = str(future)
def __getstate__(self):
return {'code': self.code}
def __setstate__(self, dict):
c = currency(dict['code'])
self.__dict__.update(c.__dict__)
def __eq__(self, other):
if isinstance(other, ccy):
return other.code == self.code
return False
def description(self):
if self.order > usd_order:
v = 'USD / %s' % self.code
else:
v = '%s / USD' % self.code
if self.order != usd_order:
return '%s Spot Exchange Rate' % v
else:
return 'Dollar'
def info(self):
return {'code': self.code,
'isonumber': self.isonumber,
'twoletterscode': self.twoletterscode,
'symbol': self.symbol,
'order': self.order,
'name': self.name,
'rounding': self.rounding,
'default_country': self.default_country,
'unicode symbol': self.symbol_raw}
def printinfo(self, stream=None):
info = self.info()
stream = stream or sys.stdout
for k, v in info.items():
stream.write(to_string('%s: %s\n' % (k, v)))
def __repr__(self):
return '%s: %s' % (self.__class__.__name__, self.code)
def __str__(self):
return self.code
def overusdfunc(self):
if self.order > usd_order:
return overusdfuni
else:
return overusdfun
def usdoverfunc(self):
if self.order > usd_order:
return overusdfun
else:
return overusdfuni
def as_cross(self, delimiter=''):
'''
Return a cross rate representation with respect USD.
@param delimiter: could be '' or '/' normally
'''
if self.order > usd_order:
return 'USD%s%s' % (delimiter, self.code)
else:
return '%s%sUSD' % (self.code, delimiter)
def spot(self, c2, v1, v2):
if self.order > c2.order:
vt = v1
v1 = v2
v2 = vt
return v1/v2
|
quantmind/ccy
|
ccy/core/currency.py
|
ccy.as_cross
|
python
|
def as_cross(self, delimiter=''):
'''
Return a cross rate representation with respect USD.
@param delimiter: could be '' or '/' normally
'''
if self.order > usd_order:
return 'USD%s%s' % (delimiter, self.code)
else:
return '%s%sUSD' % (self.code, delimiter)
|
Return a cross rate representation with respect USD.
@param delimiter: could be '' or '/' normally
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/currency.py#L125-L133
| null |
class ccy(object):
'''
Currency object
'''
def __init__(self, code, isonumber, twoletterscode, order, name,
roundoff=4,
default_country=None,
fixeddc=None,
floatdc=None,
fixedfreq=None,
floatfreq=None,
future=None,
symbol=r'\00a4',
html=''):
self.code = to_string(code)
self.id = self.code
self.isonumber = isonumber
self.twoletterscode = to_string(twoletterscode)
self.order = int(order)
self.name = to_string(name)
self.rounding = roundoff
self.default_country = default_country
self.symbol_raw = symbol
self.symbol = symbol.encode('utf-8').decode('unicode_escape')
self.html = html or self.symbol
self.fixeddc = fixeddc
self.floatdc = floatdc
self.future = ''
if future:
self.future = str(future)
def __getstate__(self):
return {'code': self.code}
def __setstate__(self, dict):
c = currency(dict['code'])
self.__dict__.update(c.__dict__)
def __eq__(self, other):
if isinstance(other, ccy):
return other.code == self.code
return False
def description(self):
if self.order > usd_order:
v = 'USD / %s' % self.code
else:
v = '%s / USD' % self.code
if self.order != usd_order:
return '%s Spot Exchange Rate' % v
else:
return 'Dollar'
def info(self):
return {'code': self.code,
'isonumber': self.isonumber,
'twoletterscode': self.twoletterscode,
'symbol': self.symbol,
'order': self.order,
'name': self.name,
'rounding': self.rounding,
'default_country': self.default_country,
'unicode symbol': self.symbol_raw}
def printinfo(self, stream=None):
info = self.info()
stream = stream or sys.stdout
for k, v in info.items():
stream.write(to_string('%s: %s\n' % (k, v)))
def __repr__(self):
return '%s: %s' % (self.__class__.__name__, self.code)
def __str__(self):
return self.code
def swap(self, c2):
'''
put the order of currencies as market standard
'''
inv = False
c1 = self
if c1.order > c2.order:
ct = c1
c1 = c2
c2 = ct
inv = True
return inv, c1, c2
def overusdfunc(self):
if self.order > usd_order:
return overusdfuni
else:
return overusdfun
def usdoverfunc(self):
if self.order > usd_order:
return overusdfun
else:
return overusdfuni
def spot(self, c2, v1, v2):
if self.order > c2.order:
vt = v1
v1 = v2
v2 = vt
return v1/v2
|
quantmind/ccy
|
ccy/core/currency.py
|
ccy_pair.over
|
python
|
def over(self, name='usd'):
'''Returns a new currency pair with the *over* currency as
second part of the pair (Foreign currency).'''
name = name.upper()
if self.ccy1.code == name.upper():
return ccy_pair(self.ccy2, self.ccy1)
else:
return self
|
Returns a new currency pair with the *over* currency as
second part of the pair (Foreign currency).
|
train
|
https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/currency.py#L169-L176
| null |
class ccy_pair(object):
'''
Currency pair such as EURUSD, USDCHF
XXXYYY - XXX is the foreign currency, while YYY is the base currency
XXXYYY means 1 unit of of XXX cost XXXYYY units of YYY
'''
def __init__(self, c1, c2):
self.ccy1 = c1
self.ccy2 = c2
self.code = '%s%s' % (c1, c2)
self.id = self.code
def __repr__(self):
return '%s: %s' % (self.__class__.__name__, self.code)
def __str__(self):
return self.code
def mkt(self):
if self.ccy1.order > self.ccy2.order:
return ccy_pair(self.ccy2, self.ccy1)
else:
return self
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
coroutine
|
python
|
def coroutine(func):
def __start(*args, **kwargs):
"""Automatically calls next() on the internal generator function."""
__cr = func(*args, **kwargs)
next(__cr)
return __cr
return __start
|
Basic decorator to implement the coroutine pattern.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L25-L32
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filters Python code for use with Doxygen, using a syntax-aware approach.
Rather than implementing a partial Python parser with regular expressions, this
script uses Python's own abstract syntax tree walker to isolate meaningful
constructs. It passes along namespace information so Doxygen can construct a
proper tree for nested functions, classes, and methods. It understands bed lump
variables are by convention private. It groks Zope-style Python interfaces.
It can automatically turn PEP 257 compliant that follow the more restrictive
Google style guide into appropriate Doxygen tags, and is even aware of
doctests.
"""
from ast import NodeVisitor, parse, iter_fields, AST, Name, get_docstring
from re import compile as regexpCompile, IGNORECASE, MULTILINE
from types import GeneratorType
from sys import stderr
from os import linesep
from string import whitespace
from codeop import compile_command
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
@coroutine
def _checkIfCode(self, inCodeBlockObj):
"""Checks whether or not a given line appears to be Python code."""
while True:
line, lines, lineNum = (yield)
testLineNum = 1
currentLineNum = 0
testLine = line.strip()
lineOfCode = None
while lineOfCode is None:
match = AstWalker.__errorLineRE.match(testLine)
if not testLine or testLine == '...' or match:
# These are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
elif testLine.startswith('>>>'):
# This is definitely code.
lineOfCode = True
else:
try:
compLine = compile_command(testLine)
if compLine and lines[currentLineNum].strip().startswith('#'):
lineOfCode = True
else:
line, lines, lineNum = (yield)
line = line.strip()
if line.startswith('>>>'):
# Definitely code, don't compile further.
lineOfCode = True
else:
testLine += linesep + line
testLine = testLine.strip()
testLineNum += 1
except (SyntaxError, RuntimeError):
# This is definitely not code.
lineOfCode = False
except Exception:
# Other errors are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
currentLineNum = lineNum - testLineNum
if not inCodeBlockObj[0] and lineOfCode:
inCodeBlockObj[0] = True
lines[currentLineNum] = '{0}{1}# @code{1}'.format(
lines[currentLineNum],
linesep
)
elif inCodeBlockObj[0] and lineOfCode is False:
# None is ambiguous, so strict checking
# against False is necessary.
inCodeBlockObj[0] = False
lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
lines[currentLineNum],
linesep
)
@coroutine
def __alterDocstring(self, tail='', writer=None):
"""
Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing.
"""
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
# Don't bother doing extra work if it's a sentinel.
if line is not None:
# Also limit work if we're not parsing the docstring.
if self.options.autobrief:
for doxyTag, tagRE in AstWalker.__singleLineREs.items():
match = tagRE.search(line)
if match:
# We've got a simple one-line Doxygen command
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum - 1, lines))
lines = []
firstLineNum = lineNum
line = line.replace(match.group(1), doxyTag)
timeToSend = True
if inSection:
# The last line belonged to a section.
# Does this one too? (Ignoring empty lines.)
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
# If the last line was empty, but we're still in a section
# then we need to start a new paragraph.
lines[-1] = '# @par'
match = AstWalker.__returnsStartRE.match(line)
if match:
# We've got a "returns" section
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
# We've got an "arguments" section
line = line.replace(match.group(0), '').rstrip()
if 'attr' in match.group(0).lower():
prefix = '@property\t'
else:
prefix = '@param\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__argsRE.match(line)
if match and not inCodeBlock:
# We've got something that looks like an item /
# description pair.
if 'property' in prefix:
line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
prefix, match.groupdict(), linesep)
else:
line = ' {0}\t{1[name]}\t{1[desc]}'.format(
prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
# We've got a "see also" section
prefix = '@sa\t'
else:
# We've got an "exceptions" section
prefix = '@exception\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
# We've got a list of something or another
itemList = []
for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
match.group(0))):
itemList.append('# {0}\t{1}{2}'.format(
prefix, itemMatch, linesep))
line = ''.join(itemList)[1:]
else:
match = AstWalker.__examplesStartRE.match(line)
if match and lines[-1].strip() == '#' \
and self.options.autocode:
# We've got an "example" section
inCodeBlock = True
inCodeBlockObj[0] = True
line = line.replace(match.group(0),
' @b Examples{0}# @code'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
# We've got an arbitrary section
prefix = ''
inSection = True
# What's the indentation of the section heading?
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
line = line.replace(
match.group(0),
' @par {0}'.format(match.group(1))
)
if lines[-1] == '# @par':
lines[-1] = '#'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
# Probably a single list item
line = ' {0}\t{1}'.format(
prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
# If we were passed a tail, append it to the docstring.
# Note that this means that we need a docstring for this
# item to get documented.
if tail and lineNum == len(self.docLines) - 1:
line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add comment marker for every line.
line = '#{0}'.format(line.rstrip())
# Ensure the first line has the Doxygen double comment.
if lineNum == 0:
line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
# If we get our sentinel value, send out what we've got.
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False
@coroutine
def __writeDocstring(self):
"""
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
"""
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
def _processDocstring(self, node, tail='', **kwargs):
"""
Handles a docstring for functions, classes, and modules.
Basically just figures out the bounds of the docstring and sends it
off to the parser to do the actual work.
"""
typeName = type(node).__name__
# Modules don't have lineno defined, but it's always 0 for them.
curLineNum = startLineNum = 0
if typeName != 'Module':
startLineNum = curLineNum = node.lineno - 1
# Figure out where both our enclosing object and our docstring start.
line = ''
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
match = AstWalker.__docstrMarkerRE.match(line)
if match:
break
curLineNum += 1
docstringStart = curLineNum
# Figure out where our docstring ends.
if not AstWalker.__docstrOneLineRE.match(line):
# Skip for the special case of a single-line docstring.
curLineNum += 1
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
if line.find(match.group(2)) >= 0:
break
curLineNum += 1
endLineNum = curLineNum + 1
# Isolate our enclosing object's declaration.
defLines = self.lines[startLineNum: docstringStart]
# Isolate our docstring.
self.docLines = self.lines[docstringStart: endLineNum]
# If we have a docstring, extract information from it.
if self.docLines:
# Get rid of the docstring delineators.
self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[0])
self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[-1])
# Handle special strings within the docstring.
docstringConverter = self.__alterDocstring(
tail, self.__writeDocstring())
for lineInfo in enumerate(self.docLines):
docstringConverter.send(lineInfo)
docstringConverter.send((len(self.docLines) - 1, None))
# Add a Doxygen @brief tag to any single-line description.
if self.options.autobrief:
safetyCounter = 0
while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
del self.docLines[0]
self.docLines.append('')
safetyCounter += 1
if safetyCounter >= len(self.docLines):
# Escape the effectively empty docstring.
break
if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
self.docLines[1].strip(whitespace + '#') == '' or
self.docLines[1].strip(whitespace + '#').startswith('@'))):
self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
if len(self.docLines) > 1 and self.docLines[1] == '# @par':
self.docLines[1] = '#'
if defLines:
match = AstWalker.__indentRE.match(defLines[0])
indentStr = match and match.group(1) or ''
self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
for docLine in self.docLines]
# Taking away a docstring from an interface method definition sometimes
# leaves broken code as the docstring may be the only code in it.
# Here we manually insert a pass statement to rectify this problem.
if typeName != 'Module':
if docstringStart < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[docstringStart])
indentStr = match and match.group(1) or ''
else:
indentStr = ''
containingNodes = kwargs.get('containingNodes', []) or []
fullPathNamespace = self._getFullPathName(containingNodes)
parentType = fullPathNamespace[-2][1]
if parentType == 'interface' and typeName == 'FunctionDef' \
or fullPathNamespace[-1][1] == 'interface':
defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
linesep, indentStr)
elif self.options.autobrief and typeName == 'ClassDef':
# If we're parsing docstrings separate out class attribute
# definitions to get better Doxygen output.
for firstVarLineNum, firstVarLine in enumerate(self.docLines):
if '@property\t' in firstVarLine:
break
lastVarLineNum = len(self.docLines)
if lastVarLineNum > 0 and '@property\t' in firstVarLine:
while lastVarLineNum > firstVarLineNum:
lastVarLineNum -= 1
if '@property\t' in self.docLines[lastVarLineNum]:
break
lastVarLineNum += 1
if firstVarLineNum < len(self.docLines):
indentLineNum = endLineNum
indentStr = ''
while not indentStr and indentLineNum < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[indentLineNum])
indentStr = match and match.group(1) or ''
indentLineNum += 1
varLines = ['{0}{1}'.format(linesep, docLine).replace(
linesep, linesep + indentStr)
for docLine in self.docLines[
firstVarLineNum: lastVarLineNum]]
defLines.extend(varLines)
self.docLines[firstVarLineNum: lastVarLineNum] = []
# After the property shuffling we will need to relocate
# any existing namespace information.
namespaceLoc = defLines[-1].find('\n# @namespace')
if namespaceLoc >= 0:
self.docLines[-1] += defLines[-1][namespaceLoc:]
defLines[-1] = defLines[-1][:namespaceLoc]
# For classes and functions, apply our changes and reverse the
# order of the declaration and docstring, and for modules just
# apply our changes.
if typeName != 'Module':
self.lines[startLineNum: endLineNum] = self.docLines + defLines
else:
self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def _processMembers(self, node, contextTag):
"""
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
"""
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
def visit_Assign(self, node, **kwargs):
    """
    Handles assignments within code.

    Variable assignments in Python are used to represent interface
    attributes in addition to basic variables.  If an assignment appears
    to be an attribute, it gets labeled as such for Doxygen.  If a variable
    name uses Python mangling or is just a bed lump, it is labeled as
    private for Doxygen.
    """
    lineNum = node.lineno - 1
    # Assignments have one Doxygen-significant special case:
    # interface attributes (zope.interface.Attribute calls).
    match = AstWalker.__attributeRE.match(self.lines[lineNum])
    if match:
        # Rewrite the source line in place so Doxygen sees a @property
        # block instead of the raw Attribute(...) call.
        self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
            '{0}# @hideinitializer{2}{4}{2}'.format(
                match.group(1),
                match.group(2),
                linesep,
                match.group(3),
                self.lines[lineNum].rstrip()
            )
        if self.options.debug:
            stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                        linesep))
    if isinstance(node.targets[0], Name):
        match = AstWalker.__indentRE.match(self.lines[lineNum])
        indentStr = match and match.group(1) or ''
        restrictionLevel = self._checkMemberName(node.targets[0].id)
        if restrictionLevel:
            # Mangled/underscored names get an explicit visibility tag.
            self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                    indentStr,
                    node.targets[0].id,
                    linesep,
                    restrictionLevel,
                    self.lines[lineNum].rstrip()
                )
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
    """
    Handles function calls within code.

    Function calls in Python are used to represent interface implementations
    in addition to their normal use.  If a call appears to mark an
    implementation, it gets labeled as such for Doxygen.
    """
    lineNum = node.lineno - 1
    # Function calls have one Doxygen-significant special case: interface
    # implementations (zope-style implements/provides declarations).
    match = AstWalker.__implementsRE.match(self.lines[lineNum])
    if match:
        self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
            match.group(1), match.group(2), linesep,
            self.lines[lineNum].rstrip())
        if self.options.debug:
            stderr.write("# Implements {0}{1}".format(match.group(1),
                                                      linesep))
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
    """
    Handles function definitions within code.

    Process a function's docstring, keeping well aware of the function's
    context and whether or not it's part of an interface definition.
    """
    if self.options.debug:
        stderr.write("# Function {0.name}{1}".format(node, linesep))
    # Push onto our containing nodes hierarchy so we can keep track of
    # context.  This will let us tell if a function is nested within
    # another function or even if a class is nested within a function.
    # (For functions the pushed tag is always 'function'.)
    containingNodes = kwargs.get('containingNodes') or []
    containingNodes.append((node.name, 'function'))
    if self.options.topLevelNamespace:
        fullPathNamespace = self._getFullPathName(containingNodes)
        contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
        modifiedContextTag = self._processMembers(node, contextTag)
        tail = '@namespace {0}'.format(modifiedContextTag)
    else:
        tail = self._processMembers(node, '')
    if get_docstring(node):
        self._processDocstring(node, tail,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Remove the item we pushed onto the containing nodes hierarchy.
    containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
    """
    Handles class definitions within code.

    Process the docstring.  Note though that in Python class definitions
    are used to define interfaces in addition to classes.  If a class
    definition appears to be an interface definition tag it as an interface
    definition for Doxygen.  Otherwise tag it as a class definition for
    Doxygen.
    """
    lineNum = node.lineno - 1
    # Push either 'interface' or 'class' onto our containing nodes
    # hierarchy so we can keep track of context.  This will let us tell
    # if a function is a method or an interface method definition or if
    # a class is fully contained within another class.
    containingNodes = kwargs.get('containingNodes') or []
    if not self.options.object_respect:
        # Remove the object class from the inherited class list to avoid
        # having every new-style class inherit from object in the
        # generated hierarchy.
        line = self.lines[lineNum]
        match = AstWalker.__classRE.match(line)
        if match:
            if match.group(2) == 'object':
                self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
    match = AstWalker.__interfaceRE.match(self.lines[lineNum])
    if match:
        if self.options.debug:
            stderr.write("# Interface {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'interface'))
    else:
        if self.options.debug:
            stderr.write("# Class {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'class'))
    if self.options.topLevelNamespace:
        fullPathNamespace = self._getFullPathName(containingNodes)
        contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
        tail = '@namespace {0}'.format(contextTag)
    else:
        tail = ''
    # Class definitions have one Doxygen-significant special case:
    # interface definitions.
    if match:
        contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                     linesep,
                                                     match.group(1))
    else:
        contextTag = tail
    contextTag = self._processMembers(node, contextTag)
    if get_docstring(node):
        self._processDocstring(node, contextTag,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Remove the item we pushed onto the containing nodes hierarchy.
    containingNodes.pop()
def parseLines(self):
    """Form an AST for the code and produce a new version of the source."""
    # Parse the whole buffered source at once, then walk it; the visitors
    # rewrite self.lines in place as they go.
    source = ''.join(self.lines)
    self.visit(parse(source, self.inFilename))
def getLines(self):
    """Return the modified file once processing has been completed."""
    stripped = (line.rstrip() for line in self.lines)
    return linesep.join(stripped)
def main():
    """
    Starts the parser on the file given by the filename as the first
    argument on the command line.
    """
    from optparse import OptionParser, OptionGroup
    from os import sep
    from os.path import basename, getsize
    from sys import argv, exit as sysExit
    from chardet import detect
    from codecs import BOM_UTF8, open as codecsOpen

    def optParse():
        """
        Parses command line options.

        Generally we're supporting all the command line options that doxypy.py
        supports in an analogous way to make it easy to switch back and forth.
        We additionally support a top-level namespace argument that is used
        to trim away excess path information.
        """
        parser = OptionParser(prog=basename(argv[0]))
        parser.set_usage("%prog [options] filename")
        parser.add_option(
            "-a", "--autobrief",
            action="store_true", dest="autobrief",
            help="parse the docstring for @brief description and other information"
        )
        parser.add_option(
            "-c", "--autocode",
            action="store_true", dest="autocode",
            help="parse the docstring for code samples"
        )
        parser.add_option(
            "-n", "--ns",
            action="store", type="string", dest="topLevelNamespace",
            help="specify a top-level namespace that will be used to trim paths"
        )
        parser.add_option(
            "-t", "--tablength",
            action="store", type="int", dest="tablength", default=4,
            help="specify a tab length in spaces; only needed if tabs are used"
        )
        parser.add_option(
            "-s", "--stripinit",
            action="store_true", dest="stripinit",
            help="strip __init__ from namespace"
        )
        parser.add_option(
            "-O", "--object-respect",
            action="store_true", dest="object_respect",
            help="By default, doxypypy hides object class from class dependencies even if class inherits explictilty from objects (new-style class), this option disable this."
        )
        group = OptionGroup(parser, "Debug Options")
        group.add_option(
            "-d", "--debug",
            action="store_true", dest="debug",
            help="enable debug output on stderr"
        )
        parser.add_option_group(group)
        ## Parse options based on our definition.
        (options, filename) = parser.parse_args()
        # Just abort immediately if we don't have an input file.
        if not filename:
            stderr.write("No filename given." + linesep)
            sysExit(-1)
        # Turn the full path filename into a full path module location.
        # NOTE(review): the [:-3] slice assumes a '.py' suffix — confirm
        # callers never pass other extensions.
        fullPathNamespace = filename[0].replace(sep, '.')[:-3]
        # Use any provided top-level namespace argument to trim off excess.
        realNamespace = fullPathNamespace
        if options.topLevelNamespace:
            namespaceStart = fullPathNamespace.find(options.topLevelNamespace)
            if namespaceStart >= 0:
                realNamespace = fullPathNamespace[namespaceStart:]
        if options.stripinit:
            realNamespace = realNamespace.replace('.__init__', '')
        options.fullPathNamespace = realNamespace
        return options, filename[0]

    # Figure out what is being requested.
    (options, inFilename) = optParse()
    # Figure out the encoding of the input file from a small sample.
    # Use a context manager so the sample handle is closed promptly
    # instead of being leaked to the garbage collector.
    numOfSampleBytes = min(getsize(inFilename), 32)
    with open(inFilename, 'rb') as sampleFile:
        sampleBytes = sampleFile.read(numOfSampleBytes)
    sampleByteAnalysis = detect(sampleBytes)
    encoding = sampleByteAnalysis['encoding'] or 'ascii'
    # Switch to generic versions to strip the BOM automatically.
    if sampleBytes.startswith(BOM_UTF8):
        encoding = 'UTF-8-SIG'
    if encoding.startswith("UTF-16"):
        encoding = "UTF-16"
    elif encoding.startswith("UTF-32"):
        encoding = "UTF-32"
    # Read contents of input file; 'with' guarantees the handle is
    # closed even if readlines() raises.
    if encoding == 'ascii':
        inFile = open(inFilename)
    else:
        inFile = codecsOpen(inFilename, encoding=encoding)
    with inFile:
        lines = inFile.readlines()
    # Create the abstract syntax tree for the input file.
    astWalker = AstWalker(lines, options, inFilename)
    astWalker.parseLines()
    # Output the modified source.
    print(astWalker.getLines())
# See if we're running as a script.
if __name__ == "__main__":
main()
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
main
|
python
|
def main():
from optparse import OptionParser, OptionGroup
from os import sep
from os.path import basename, getsize
from sys import argv, exit as sysExit
from chardet import detect
from codecs import BOM_UTF8, open as codecsOpen
def optParse():
"""
Parses command line options.
Generally we're supporting all the command line options that doxypy.py
supports in an analogous way to make it easy to switch back and forth.
We additionally support a top-level namespace argument that is used
to trim away excess path information.
"""
parser = OptionParser(prog=basename(argv[0]))
parser.set_usage("%prog [options] filename")
parser.add_option(
"-a", "--autobrief",
action="store_true", dest="autobrief",
help="parse the docstring for @brief description and other information"
)
parser.add_option(
"-c", "--autocode",
action="store_true", dest="autocode",
help="parse the docstring for code samples"
)
parser.add_option(
"-n", "--ns",
action="store", type="string", dest="topLevelNamespace",
help="specify a top-level namespace that will be used to trim paths"
)
parser.add_option(
"-t", "--tablength",
action="store", type="int", dest="tablength", default=4,
help="specify a tab length in spaces; only needed if tabs are used"
)
parser.add_option(
"-s", "--stripinit",
action="store_true", dest="stripinit",
help="strip __init__ from namespace"
)
parser.add_option(
"-O", "--object-respect",
action="store_true", dest="object_respect",
help="By default, doxypypy hides object class from class dependencies even if class inherits explictilty from objects (new-style class), this option disable this."
)
group = OptionGroup(parser, "Debug Options")
group.add_option(
"-d", "--debug",
action="store_true", dest="debug",
help="enable debug output on stderr"
)
parser.add_option_group(group)
## Parse options based on our definition.
(options, filename) = parser.parse_args()
# Just abort immediately if we are don't have an input file.
if not filename:
stderr.write("No filename given." + linesep)
sysExit(-1)
# Turn the full path filename into a full path module location.
fullPathNamespace = filename[0].replace(sep, '.')[:-3]
# Use any provided top-level namespace argument to trim off excess.
realNamespace = fullPathNamespace
if options.topLevelNamespace:
namespaceStart = fullPathNamespace.find(options.topLevelNamespace)
if namespaceStart >= 0:
realNamespace = fullPathNamespace[namespaceStart:]
if options.stripinit:
realNamespace = realNamespace.replace('.__init__', '')
options.fullPathNamespace = realNamespace
return options, filename[0]
# Figure out what is being requested.
(options, inFilename) = optParse()
# Figure out encoding of input file.
numOfSampleBytes = min(getsize(inFilename), 32)
sampleBytes = open(inFilename, 'rb').read(numOfSampleBytes)
sampleByteAnalysis = detect(sampleBytes)
encoding = sampleByteAnalysis['encoding'] or 'ascii'
# Switch to generic versions to strip the BOM automatically.
if sampleBytes.startswith(BOM_UTF8):
encoding = 'UTF-8-SIG'
if encoding.startswith("UTF-16"):
encoding = "UTF-16"
elif encoding.startswith("UTF-32"):
encoding = "UTF-32"
# Read contents of input file.
if encoding == 'ascii':
inFile = open(inFilename)
else:
inFile = codecsOpen(inFilename, encoding=encoding)
lines = inFile.readlines()
inFile.close()
# Create the abstract syntax tree for the input file.
astWalker = AstWalker(lines, options, inFilename)
astWalker.parseLines()
# Output the modified source.
print(astWalker.getLines())
|
Starts the parser on the file given by the filename as the first
argument on the command line.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L779-L892
|
[
"def parseLines(self):\n \"\"\"Form an AST for the code and produce a new version of the source.\"\"\"\n inAst = parse(''.join(self.lines), self.inFilename)\n # Visit all the nodes in our tree and apply Doxygen tags to the source.\n self.visit(inAst)\n",
"def getLines(self):\n \"\"\"Return the modified file once processing has been completed.\"\"\"\n return linesep.join(line.rstrip() for line in self.lines)\n",
"def optParse():\n \"\"\"\n Parses command line options.\n\n Generally we're supporting all the command line options that doxypy.py\n supports in an analogous way to make it easy to switch back and forth.\n We additionally support a top-level namespace argument that is used\n to trim away excess path information.\n \"\"\"\n\n parser = OptionParser(prog=basename(argv[0]))\n\n parser.set_usage(\"%prog [options] filename\")\n parser.add_option(\n \"-a\", \"--autobrief\",\n action=\"store_true\", dest=\"autobrief\",\n help=\"parse the docstring for @brief description and other information\"\n )\n parser.add_option(\n \"-c\", \"--autocode\",\n action=\"store_true\", dest=\"autocode\",\n help=\"parse the docstring for code samples\"\n )\n parser.add_option(\n \"-n\", \"--ns\",\n action=\"store\", type=\"string\", dest=\"topLevelNamespace\",\n help=\"specify a top-level namespace that will be used to trim paths\"\n )\n parser.add_option(\n \"-t\", \"--tablength\",\n action=\"store\", type=\"int\", dest=\"tablength\", default=4,\n help=\"specify a tab length in spaces; only needed if tabs are used\"\n )\n parser.add_option(\n \"-s\", \"--stripinit\",\n action=\"store_true\", dest=\"stripinit\",\n help=\"strip __init__ from namespace\"\n )\n parser.add_option(\n \"-O\", \"--object-respect\",\n action=\"store_true\", dest=\"object_respect\",\n help=\"By default, doxypypy hides object class from class dependencies even if class inherits explictilty from objects (new-style class), this option disable this.\"\n )\n group = OptionGroup(parser, \"Debug Options\")\n group.add_option(\n \"-d\", \"--debug\",\n action=\"store_true\", dest=\"debug\",\n help=\"enable debug output on stderr\"\n )\n parser.add_option_group(group)\n\n ## Parse options based on our definition.\n (options, filename) = parser.parse_args()\n\n # Just abort immediately if we are don't have an input file.\n if not filename:\n stderr.write(\"No filename given.\" + linesep)\n sysExit(-1)\n\n # Turn the full path 
filename into a full path module location.\n fullPathNamespace = filename[0].replace(sep, '.')[:-3]\n # Use any provided top-level namespace argument to trim off excess.\n realNamespace = fullPathNamespace\n if options.topLevelNamespace:\n namespaceStart = fullPathNamespace.find(options.topLevelNamespace)\n if namespaceStart >= 0:\n realNamespace = fullPathNamespace[namespaceStart:]\n if options.stripinit:\n realNamespace = realNamespace.replace('.__init__', '')\n options.fullPathNamespace = realNamespace\n\n return options, filename[0]\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filters Python code for use with Doxygen, using a syntax-aware approach.
Rather than implementing a partial Python parser with regular expressions, this
script uses Python's own abstract syntax tree walker to isolate meaningful
constructs. It passes along namespace information so Doxygen can construct a
proper tree for nested functions, classes, and methods. It understands bed lump
variables are by convention private. It groks Zope-style Python interfaces.
It can automatically turn PEP 257 compliant docstrings that follow the more
restrictive Google style guide into appropriate Doxygen tags, and is even
aware of doctests.
"""
from ast import NodeVisitor, parse, iter_fields, AST, Name, get_docstring
from re import compile as regexpCompile, IGNORECASE, MULTILINE
from types import GeneratorType
from sys import stderr
from os import linesep
from string import whitespace
from codeop import compile_command
def coroutine(func):
    """
    Basic decorator to implement the coroutine pattern.

    Wraps a generator function so that the returned generator is
    automatically advanced to its first yield, ready to receive values
    via send().  functools.wraps preserves the wrapped generator's
    name and docstring (the original decorator discarded them).
    """
    from functools import wraps

    @wraps(func)
    def __start(*args, **kwargs):
        """Automatically calls next() on the internal generator function."""
        __cr = func(*args, **kwargs)
        next(__cr)
        return __cr
    return __start
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
def __init__(self, lines, options, inFilename):
    """Initialize a few class variables in preparation for our walk."""
    # The raw source lines; the visitor methods rewrite entries in place.
    self.lines = lines
    # Parsed command-line options (autobrief, autocode, tablength, ...).
    self.options = options
    # Name of the file being processed; used for AST error reporting.
    self.inFilename = inFilename
    # Scratch buffer holding the docstring currently being transformed.
    self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
    """Return the given string without ' and ' or ' & ' joiners."""
    assert isinstance(inStr, str)
    result = inStr
    for joiner in (' and ', ' & '):
        result = result.replace(joiner, ' ')
    return result
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
    """Append an end-of-code marker before the line if one is open."""
    assert isinstance(line, str)
    if not inCodeBlock:
        return line, inCodeBlock
    # Close the open Doxygen @code block just before this line.
    return '# @endcode{0}{1}'.format(linesep, line.rstrip()), False
@coroutine
def _checkIfCode(self, inCodeBlockObj):
    """
    Checks whether or not a given line appears to be Python code.

    Runs as a coroutine: each send() delivers (line, lines, lineNum).
    Ambiguous lines cause additional sends to be consumed until the
    accumulated text either compiles (code) or fails to (prose).
    inCodeBlockObj is a single-element list used as a mutable flag so the
    caller and this coroutine share the in-code-block state.
    """
    while True:
        line, lines, lineNum = (yield)
        testLineNum = 1
        currentLineNum = 0
        testLine = line.strip()
        lineOfCode = None
        while lineOfCode is None:
            match = AstWalker.__errorLineRE.match(testLine)
            if not testLine or testLine == '...' or match:
                # These are ambiguous.
                line, lines, lineNum = (yield)
                testLine = line.strip()
                #testLineNum = 1
            elif testLine.startswith('>>>'):
                # This is definitely code.
                lineOfCode = True
            else:
                try:
                    # compile_command returns None for incomplete but
                    # syntactically valid input, so keep accumulating.
                    compLine = compile_command(testLine)
                    if compLine and lines[currentLineNum].strip().startswith('#'):
                        lineOfCode = True
                    else:
                        line, lines, lineNum = (yield)
                        line = line.strip()
                        if line.startswith('>>>'):
                            # Definitely code, don't compile further.
                            lineOfCode = True
                        else:
                            testLine += linesep + line
                            testLine = testLine.strip()
                            testLineNum += 1
                except (SyntaxError, RuntimeError):
                    # This is definitely not code.
                    lineOfCode = False
                except Exception:
                    # Other errors are ambiguous.
                    line, lines, lineNum = (yield)
                    testLine = line.strip()
                    #testLineNum = 1
            currentLineNum = lineNum - testLineNum
        if not inCodeBlockObj[0] and lineOfCode:
            # Transition into a code block: open a Doxygen @code marker.
            inCodeBlockObj[0] = True
            lines[currentLineNum] = '{0}{1}# @code{1}'.format(
                lines[currentLineNum],
                linesep
            )
        elif inCodeBlockObj[0] and lineOfCode is False:
            # None is ambiguous, so strict checking
            # against False is necessary.
            inCodeBlockObj[0] = False
            lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
                lines[currentLineNum],
                linesep
            )
@coroutine
def __alterDocstring(self, tail='', writer=None):
    """
    Runs eternally, processing docstring lines.

    Parses docstring lines as they get fed in via send, applies appropriate
    Doxygen tags, and passes them along in batches for writing.

    Each send() delivers a (lineNum, line) pair; a (lineNum, None) pair is
    the sentinel that flushes the final batch to the writer coroutine.
    """
    assert isinstance(tail, str) and isinstance(writer, GeneratorType)
    lines = []
    timeToSend = False
    inCodeBlock = False
    # Single-element list shared with the _checkIfCode coroutine so both
    # sides see the current in-code-block state.
    inCodeBlockObj = [False]
    inSection = False
    prefix = ''
    firstLineNum = -1
    sectionHeadingIndent = 0
    codeChecker = self._checkIfCode(inCodeBlockObj)
    while True:
        lineNum, line = (yield)
        if firstLineNum < 0:
            firstLineNum = lineNum
        # Don't bother doing extra work if it's a sentinel.
        if line is not None:
            # Also limit work if we're not parsing the docstring.
            if self.options.autobrief:
                for doxyTag, tagRE in AstWalker.__singleLineREs.items():
                    match = tagRE.search(line)
                    if match:
                        # We've got a simple one-line Doxygen command
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        writer.send((firstLineNum, lineNum - 1, lines))
                        lines = []
                        firstLineNum = lineNum
                        line = line.replace(match.group(1), doxyTag)
                        timeToSend = True
                if inSection:
                    # The last line belonged to a section.
                    # Does this one too? (Ignoring empty lines.)
                    match = AstWalker.__blanklineRE.match(line)
                    if not match:
                        indent = len(line.expandtabs(self.options.tablength)) - \
                            len(line.expandtabs(self.options.tablength).lstrip())
                        if indent <= sectionHeadingIndent:
                            inSection = False
                        else:
                            if lines[-1] == '#':
                                # If the last line was empty, but we're still
                                # in a section then we need to start a new
                                # paragraph.
                                lines[-1] = '# @par'
                match = AstWalker.__returnsStartRE.match(line)
                if match:
                    # We've got a "returns" section
                    lines[-1], inCodeBlock = self._endCodeIfNeeded(
                        lines[-1], inCodeBlock)
                    inCodeBlockObj[0] = inCodeBlock
                    line = line.replace(match.group(0), ' @return\t').rstrip()
                    prefix = '@return\t'
                else:
                    match = AstWalker.__argsStartRE.match(line)
                    if match:
                        # We've got an "arguments" section
                        line = line.replace(match.group(0), '').rstrip()
                        if 'attr' in match.group(0).lower():
                            prefix = '@property\t'
                        else:
                            prefix = '@param\t'
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        lines.append('#' + line)
                        continue
                    else:
                        match = AstWalker.__argsRE.match(line)
                        if match and not inCodeBlock:
                            # We've got something that looks like an item /
                            # description pair.
                            if 'property' in prefix:
                                line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
                                    prefix, match.groupdict(), linesep)
                            else:
                                line = ' {0}\t{1[name]}\t{1[desc]}'.format(
                                    prefix, match.groupdict())
                        else:
                            match = AstWalker.__raisesStartRE.match(line)
                            if match:
                                line = line.replace(match.group(0), '').rstrip()
                                if 'see' in match.group(1).lower():
                                    # We've got a "see also" section
                                    prefix = '@sa\t'
                                else:
                                    # We've got an "exceptions" section
                                    prefix = '@exception\t'
                                lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                    lines[-1], inCodeBlock)
                                inCodeBlockObj[0] = inCodeBlock
                                lines.append('#' + line)
                                continue
                            else:
                                match = AstWalker.__listRE.match(line)
                                if match and not inCodeBlock:
                                    # We've got a list of something or another
                                    itemList = []
                                    for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
                                            match.group(0))):
                                        itemList.append('# {0}\t{1}{2}'.format(
                                            prefix, itemMatch, linesep))
                                    line = ''.join(itemList)[1:]
                                else:
                                    match = AstWalker.__examplesStartRE.match(line)
                                    if match and lines[-1].strip() == '#' \
                                            and self.options.autocode:
                                        # We've got an "example" section
                                        inCodeBlock = True
                                        inCodeBlockObj[0] = True
                                        line = line.replace(match.group(0),
                                                            ' @b Examples{0}# @code'.format(linesep))
                                    else:
                                        match = AstWalker.__sectionStartRE.match(line)
                                        if match:
                                            # We've got an arbitrary section
                                            prefix = ''
                                            inSection = True
                                            # What's the indentation of the
                                            # section heading?
                                            sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
                                                - len(line.expandtabs(self.options.tablength).lstrip())
                                            line = line.replace(
                                                match.group(0),
                                                ' @par {0}'.format(match.group(1))
                                            )
                                            if lines[-1] == '# @par':
                                                lines[-1] = '#'
                                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                                lines[-1], inCodeBlock)
                                            inCodeBlockObj[0] = inCodeBlock
                                            lines.append('#' + line)
                                            continue
                                        elif prefix:
                                            match = AstWalker.__singleListItemRE.match(line)
                                            if match and not inCodeBlock:
                                                # Probably a single list item
                                                line = ' {0}\t{1}'.format(
                                                    prefix, match.group(0))
                                            elif self.options.autocode:
                                                codeChecker.send(
                                                    (
                                                        line, lines,
                                                        lineNum - firstLineNum
                                                    )
                                                )
                                                inCodeBlock = inCodeBlockObj[0]
            else:
                if self.options.autocode:
                    codeChecker.send(
                        (
                            line, lines,
                            lineNum - firstLineNum
                        )
                    )
                    inCodeBlock = inCodeBlockObj[0]
            # If we were passed a tail, append it to the docstring.
            # Note that this means that we need a docstring for this
            # item to get documented.
            if tail and lineNum == len(self.docLines) - 1:
                line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
            # Add comment marker for every line.
            line = '#{0}'.format(line.rstrip())
            # Ensure the first line has the Doxygen double comment.
            if lineNum == 0:
                line = '#' + line
            lines.append(line.replace(' ' + linesep, linesep))
        else:
            # If we get our sentinel value, send out what we've got.
            timeToSend = True
        if timeToSend:
            lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
                                                           inCodeBlock)
            inCodeBlockObj[0] = inCodeBlock
            writer.send((firstLineNum, lineNum, lines))
            lines = []
            firstLineNum = -1
            timeToSend = False
@coroutine
def __writeDocstring(self):
    """
    Runs eternally, dumping out docstring line batches as they get fed in.

    Replaces original batches of docstring lines with modified versions
    fed in via send.
    """
    while True:
        firstLineNum, lastLineNum, lines = (yield)
        newDocstringLen = lastLineNum - firstLineNum + 1
        # Pad so the replacement spans exactly the original line count;
        # this keeps subsequent batch line numbers valid.
        while len(lines) < newDocstringLen:
            lines.append('')
        # Substitute the new block of lines for the original block of lines.
        self.docLines[firstLineNum: lastLineNum + 1] = lines
def _processDocstring(self, node, tail='', **kwargs):
    """
    Handles a docstring for functions, classes, and modules.

    Basically just figures out the bounds of the docstring and sends it
    off to the parser to do the actual work.
    """
    typeName = type(node).__name__
    # Modules don't have lineno defined, but it's always 0 for them.
    curLineNum = startLineNum = 0
    if typeName != 'Module':
        startLineNum = curLineNum = node.lineno - 1
    # Figure out where both our enclosing object and our docstring start.
    line = ''
    while curLineNum < len(self.lines):
        line = self.lines[curLineNum]
        match = AstWalker.__docstrMarkerRE.match(line)
        if match:
            break
        curLineNum += 1
    docstringStart = curLineNum
    # Figure out where our docstring ends.
    if not AstWalker.__docstrOneLineRE.match(line):
        # Skip for the special case of a single-line docstring.
        curLineNum += 1
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            # match.group(2) is the quote style that opened the docstring.
            if line.find(match.group(2)) >= 0:
                break
            curLineNum += 1
    endLineNum = curLineNum + 1
    # Isolate our enclosing object's declaration.
    defLines = self.lines[startLineNum: docstringStart]
    # Isolate our docstring.
    self.docLines = self.lines[docstringStart: endLineNum]
    # If we have a docstring, extract information from it.
    if self.docLines:
        # Get rid of the docstring delineators.
        self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                          self.docLines[0])
        self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                           self.docLines[-1])
        # Handle special strings within the docstring.
        docstringConverter = self.__alterDocstring(
            tail, self.__writeDocstring())
        for lineInfo in enumerate(self.docLines):
            docstringConverter.send(lineInfo)
        # Sentinel pair flushes the converter's final batch.
        docstringConverter.send((len(self.docLines) - 1, None))
        # Add a Doxygen @brief tag to any single-line description.
        if self.options.autobrief:
            safetyCounter = 0
            while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
                del self.docLines[0]
                self.docLines.append('')
                safetyCounter += 1
                if safetyCounter >= len(self.docLines):
                    # Escape the effectively empty docstring.
                    break
            if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                    self.docLines[1].strip(whitespace + '#') == '' or
                    self.docLines[1].strip(whitespace + '#').startswith('@'))):
                self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
                if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                    self.docLines[1] = '#'
        if defLines:
            match = AstWalker.__indentRE.match(defLines[0])
            indentStr = match and match.group(1) or ''
            # Re-indent every rewritten comment line to the declaration's
            # indentation level.
            self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                             for docLine in self.docLines]
    # Taking away a docstring from an interface method definition sometimes
    # leaves broken code as the docstring may be the only code in it.
    # Here we manually insert a pass statement to rectify this problem.
    if typeName != 'Module':
        if docstringStart < len(self.lines):
            match = AstWalker.__indentRE.match(self.lines[docstringStart])
            indentStr = match and match.group(1) or ''
        else:
            indentStr = ''
        containingNodes = kwargs.get('containingNodes', []) or []
        fullPathNamespace = self._getFullPathName(containingNodes)
        parentType = fullPathNamespace[-2][1]
        if parentType == 'interface' and typeName == 'FunctionDef' \
                or fullPathNamespace[-1][1] == 'interface':
            defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                  linesep, indentStr)
        elif self.options.autobrief and typeName == 'ClassDef':
            # If we're parsing docstrings separate out class attribute
            # definitions to get better Doxygen output.
            for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                if '@property\t' in firstVarLine:
                    break
            lastVarLineNum = len(self.docLines)
            if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                while lastVarLineNum > firstVarLineNum:
                    lastVarLineNum -= 1
                    if '@property\t' in self.docLines[lastVarLineNum]:
                        break
                lastVarLineNum += 1
            if firstVarLineNum < len(self.docLines):
                indentLineNum = endLineNum
                indentStr = ''
                while not indentStr and indentLineNum < len(self.lines):
                    match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                    indentStr = match and match.group(1) or ''
                    indentLineNum += 1
                varLines = ['{0}{1}'.format(linesep, docLine).replace(
                    linesep, linesep + indentStr)
                    for docLine in self.docLines[
                        firstVarLineNum: lastVarLineNum]]
                defLines.extend(varLines)
                self.docLines[firstVarLineNum: lastVarLineNum] = []
                # After the property shuffling we will need to relocate
                # any existing namespace information.
                namespaceLoc = defLines[-1].find('\n# @namespace')
                if namespaceLoc >= 0:
                    self.docLines[-1] += defLines[-1][namespaceLoc:]
                    defLines[-1] = defLines[-1][:namespaceLoc]
    # For classes and functions, apply our changes and reverse the
    # order of the declaration and docstring, and for modules just
    # apply our changes.
    if typeName != 'Module':
        self.lines[startLineNum: endLineNum] = self.docLines + defLines
    else:
        self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
    """
    Determine whether a member name implies restricted visibility.

    Names with a leading double underscore (and no trailing double
    underscore, so dunders are excluded) are treated as private by
    Python name mangling; a single leading underscore marks a
    conventionally protected member.  Anything else carries no
    restriction.

    Returns 'private', 'protected', or None.
    """
    assert isinstance(name, str)
    if name.endswith('__'):
        # Dunder-style names are public by convention.
        return None
    if name.startswith('__'):
        return 'private'
    if name.startswith('_'):
        return 'protected'
    return None
def _processMembers(self, node, contextTag):
    """
    Append a Doxygen visibility tag to a context tag when needed.

    If the node's name marks it private or protected by Python
    underscore convention, a matching Doxygen directive is appended
    to the supplied context tag on a fresh comment line; otherwise
    the tag is returned untouched.
    """
    level = self._checkMemberName(node.name)
    if not level:
        return contextTag
    return '{0}{1}# @{2}'.format(contextTag, linesep, level)
def generic_visit(self, node, **kwargs):
    """
    Recurse into a node's children, propagating context.

    Mirrors ast.NodeVisitor.generic_visit but threads the
    ``containingNodes`` hierarchy (the stack of enclosing
    module/class/function entries) through every child visit, since
    the stock visitor has no way to carry that state.

    Args:
        node: The AST node whose children should be visited.
        **kwargs: May contain ``containingNodes``, the context stack.
            Defaults to an empty list when absent.
    """
    # Default to an empty context stack instead of raising KeyError,
    # keeping the contract consistent with visit().
    containingNodes = kwargs.get('containingNodes', [])
    for field, value in iter_fields(node):
        if isinstance(value, list):
            for item in value:
                if isinstance(item, AST):
                    self.visit(item, containingNodes=containingNodes)
        elif isinstance(value, AST):
            self.visit(value, containingNodes=containingNodes)
def visit(self, node, **kwargs):
    """
    Dispatch a node to its type-specific handler.

    Works like ast.NodeVisitor.visit but forwards the
    ``containingNodes`` context stack to the chosen handler so
    nesting information survives the dispatch.  Falls back to
    generic_visit when no visit_<Type> handler exists.
    """
    containingNodes = kwargs.get('containingNodes', [])
    handlerName = 'visit_' + node.__class__.__name__
    handler = getattr(self, handlerName, self.generic_visit)
    return handler(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
    """
    Prefix the containment hierarchy with the module entry.

    Returns the list of (name, type) pairs describing the full path
    from the module root down through the supplied containing nodes.
    """
    assert isinstance(containingNodes, list)
    moduleEntry = (self.options.fullPathNamespace, 'module')
    return [moduleEntry] + containingNodes
def visit_Module(self, node, **kwargs):
    """
    Process the module-level docstring.

    When the module carries a docstring, optionally append a Doxygen
    @namespace tag (if a top-level namespace was requested) and hand
    the docstring off for rewriting, then visit every contained node.
    """
    containingNodes = kwargs.get('containingNodes', [])
    if self.options.debug:
        stderr.write("# Module {0}{1}".format(
            self.options.fullPathNamespace, linesep))
    if get_docstring(node):
        tail = ''
        if self.options.topLevelNamespace:
            # Build the dotted namespace from the containment path.
            fullPath = self._getFullPathName(containingNodes)
            namespace = '.'.join(entry[0] for entry in fullPath)
            tail = '@namespace {0}'.format(namespace)
        self._processDocstring(node, tail)
    # Visit any contained nodes (in this case pretty much everything).
    self.generic_visit(node, containingNodes=containingNodes)
def visit_Assign(self, node, **kwargs):
    """
    Handles assignments within code.

    Variable assignments in Python are used to represent interface
    attributes in addition to basic variables. If an assignment appears
    to be an attribute, it gets labeled as such for Doxygen. If a variable
    name uses Python mangling or is just a bed lump, it is labeled as
    private for Doxygen.
    """
    # AST line numbers are 1-based; self.lines is 0-based.
    lineNum = node.lineno - 1
    # Assignments have one Doxygen-significant special case:
    # interface attributes (zope-style Attribute(...) declarations).
    match = AstWalker.__attributeRE.match(self.lines[lineNum])
    if match:
        # Rewrite the line in place as a Doxygen @property block:
        # indent, attribute name, its description, then the original
        # (hidden) initializer line.
        self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                              '{0}# @hideinitializer{2}{4}{2}'.format(
            match.group(1),
            match.group(2),
            linesep,
            match.group(3),
            self.lines[lineNum].rstrip()
        )
        if self.options.debug:
            stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                        linesep))
    if isinstance(node.targets[0], Name):
        # Plain name assignment: tag it private/protected for Doxygen
        # when the name's underscore convention calls for it.
        match = AstWalker.__indentRE.match(self.lines[lineNum])
        indentStr = match and match.group(1) or ''
        restrictionLevel = self._checkMemberName(node.targets[0].id)
        if restrictionLevel:
            self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                                  '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                indentStr,
                node.targets[0].id,
                linesep,
                restrictionLevel,
                self.lines[lineNum].rstrip()
            )
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
    """
    Handles function calls within code.

    Function calls in Python are used to represent interface
    implementations in addition to their normal use. If a call appears
    to mark an implementation (zope-style Provides/Implements), the
    source line is tagged with a Doxygen @implements directive.
    """
    # AST line numbers are 1-based; self.lines is 0-based.
    lineNum = node.lineno - 1
    # Function calls have one Doxygen-significant special case: interface
    # implementations.
    match = AstWalker.__implementsRE.match(self.lines[lineNum])
    if match:
        self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
            match.group(1), match.group(2), linesep,
            self.lines[lineNum].rstrip())
        if self.options.debug:
            # BUGFIX: report the implemented interface name (group 2);
            # group 1 is only the captured leading whitespace, so the
            # old debug line printed the indent instead of the name.
            stderr.write("# Implements {0}{1}".format(match.group(2),
                                                      linesep))
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
    """
    Handles function definitions within code.

    Process a function's docstring, keeping well aware of the function's
    context and whether or not it's part of an interface definition.
    """
    if self.options.debug:
        stderr.write("# Function {0.name}{1}".format(node, linesep))
    # Push either 'interface' or 'class' onto our containing nodes
    # hierarchy so we can keep track of context. This will let us tell
    # if a function is nested within another function or even if a class
    # is nested within a function.
    containingNodes = kwargs.get('containingNodes') or []
    containingNodes.append((node.name, 'function'))
    if self.options.topLevelNamespace:
        # Build the fully qualified Doxygen namespace for the function,
        # marking it private/protected if its name calls for it.
        fullPathNamespace = self._getFullPathName(containingNodes)
        contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
        modifiedContextTag = self._processMembers(node, contextTag)
        tail = '@namespace {0}'.format(modifiedContextTag)
    else:
        # No namespace requested; still apply visibility tags.
        tail = self._processMembers(node, '')
    if get_docstring(node):
        self._processDocstring(node, tail,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Remove the item we pushed onto the containing nodes hierarchy.
    containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
    """
    Handles class definitions within code.

    Process the docstring. Note though that in Python Class definitions
    are used to define interfaces in addition to classes.
    If a class definition appears to be an interface definition tag it as an
    interface definition for Doxygen. Otherwise tag it as a class
    definition for Doxygen.
    """
    # AST line numbers are 1-based; self.lines is 0-based.
    lineNum = node.lineno - 1
    # Push either 'interface' or 'class' onto our containing nodes
    # hierarchy so we can keep track of context. This will let us tell
    # if a function is a method or an interface method definition or if
    # a class is fully contained within another class.
    containingNodes = kwargs.get('containingNodes') or []
    if not self.options.object_respect:
        # Remove object class of the inherited class list to avoid that all
        # new-style class inherits from object in the hierarchy class
        line = self.lines[lineNum]
        match = AstWalker.__classRE.match(line)
        if match:
            if match.group(2) == 'object':
                # Splice 'object' out of the source line in place.
                self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
    match = AstWalker.__interfaceRE.match(self.lines[lineNum])
    if match:
        if self.options.debug:
            stderr.write("# Interface {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'interface'))
    else:
        if self.options.debug:
            stderr.write("# Class {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'class'))
    if self.options.topLevelNamespace:
        # Build the fully qualified namespace tag for Doxygen.
        fullPathNamespace = self._getFullPathName(containingNodes)
        contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
        tail = '@namespace {0}'.format(contextTag)
    else:
        tail = ''
    # Class definitions have one Doxygen-significant special case:
    # interface definitions.
    if match:
        contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                     linesep,
                                                     match.group(1))
    else:
        contextTag = tail
    contextTag = self._processMembers(node, contextTag)
    if get_docstring(node):
        self._processDocstring(node, contextTag,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Remove the item we pushed onto the containing nodes hierarchy.
    containingNodes.pop()
def parseLines(self):
    """Build an AST from the source and annotate it with Doxygen tags."""
    tree = parse(''.join(self.lines), self.inFilename)
    # Walking the tree rewrites self.lines in place.
    self.visit(tree)
def getLines(self):
    """Return the processed source as one linesep-joined string."""
    stripped = [line.rstrip() for line in self.lines]
    return linesep.join(stripped)
# See if we're running as a script.
if __name__ == "__main__":
    # Delegate to the command-line entry point defined earlier in
    # this module.
    main()
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker._endCodeIfNeeded
|
python
|
def _endCodeIfNeeded(line, inCodeBlock):
    """
    Simple routine to append an end-of-code marker if needed.

    When currently inside a Doxygen code block, prepend a '# @endcode'
    marker to the (right-stripped) line and clear the in-code flag.
    Returns the possibly rewritten line and the updated flag.
    """
    assert isinstance(line, str)
    if inCodeBlock:
        # 'linesep' is expected to come from the enclosing module's
        # `from os import linesep` import.
        line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
        inCodeBlock = False
    return line, inCodeBlock
|
Simple routine to append an end-of-code marker if needed.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L112-L118
| null |
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
@coroutine
def _checkIfCode(self, inCodeBlockObj):
"""Checks whether or not a given line appears to be Python code."""
while True:
line, lines, lineNum = (yield)
testLineNum = 1
currentLineNum = 0
testLine = line.strip()
lineOfCode = None
while lineOfCode is None:
match = AstWalker.__errorLineRE.match(testLine)
if not testLine or testLine == '...' or match:
# These are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
elif testLine.startswith('>>>'):
# This is definitely code.
lineOfCode = True
else:
try:
compLine = compile_command(testLine)
if compLine and lines[currentLineNum].strip().startswith('#'):
lineOfCode = True
else:
line, lines, lineNum = (yield)
line = line.strip()
if line.startswith('>>>'):
# Definitely code, don't compile further.
lineOfCode = True
else:
testLine += linesep + line
testLine = testLine.strip()
testLineNum += 1
except (SyntaxError, RuntimeError):
# This is definitely not code.
lineOfCode = False
except Exception:
# Other errors are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
currentLineNum = lineNum - testLineNum
if not inCodeBlockObj[0] and lineOfCode:
inCodeBlockObj[0] = True
lines[currentLineNum] = '{0}{1}# @code{1}'.format(
lines[currentLineNum],
linesep
)
elif inCodeBlockObj[0] and lineOfCode is False:
# None is ambiguous, so strict checking
# against False is necessary.
inCodeBlockObj[0] = False
lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
lines[currentLineNum],
linesep
)
@coroutine
def __alterDocstring(self, tail='', writer=None):
"""
Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing.
"""
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
# Don't bother doing extra work if it's a sentinel.
if line is not None:
# Also limit work if we're not parsing the docstring.
if self.options.autobrief:
for doxyTag, tagRE in AstWalker.__singleLineREs.items():
match = tagRE.search(line)
if match:
# We've got a simple one-line Doxygen command
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum - 1, lines))
lines = []
firstLineNum = lineNum
line = line.replace(match.group(1), doxyTag)
timeToSend = True
if inSection:
# The last line belonged to a section.
# Does this one too? (Ignoring empty lines.)
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
# If the last line was empty, but we're still in a section
# then we need to start a new paragraph.
lines[-1] = '# @par'
match = AstWalker.__returnsStartRE.match(line)
if match:
# We've got a "returns" section
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
# We've got an "arguments" section
line = line.replace(match.group(0), '').rstrip()
if 'attr' in match.group(0).lower():
prefix = '@property\t'
else:
prefix = '@param\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__argsRE.match(line)
if match and not inCodeBlock:
# We've got something that looks like an item /
# description pair.
if 'property' in prefix:
line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
prefix, match.groupdict(), linesep)
else:
line = ' {0}\t{1[name]}\t{1[desc]}'.format(
prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
# We've got a "see also" section
prefix = '@sa\t'
else:
# We've got an "exceptions" section
prefix = '@exception\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
# We've got a list of something or another
itemList = []
for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
match.group(0))):
itemList.append('# {0}\t{1}{2}'.format(
prefix, itemMatch, linesep))
line = ''.join(itemList)[1:]
else:
match = AstWalker.__examplesStartRE.match(line)
if match and lines[-1].strip() == '#' \
and self.options.autocode:
# We've got an "example" section
inCodeBlock = True
inCodeBlockObj[0] = True
line = line.replace(match.group(0),
' @b Examples{0}# @code'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
# We've got an arbitrary section
prefix = ''
inSection = True
# What's the indentation of the section heading?
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
line = line.replace(
match.group(0),
' @par {0}'.format(match.group(1))
)
if lines[-1] == '# @par':
lines[-1] = '#'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
# Probably a single list item
line = ' {0}\t{1}'.format(
prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
# If we were passed a tail, append it to the docstring.
# Note that this means that we need a docstring for this
# item to get documented.
if tail and lineNum == len(self.docLines) - 1:
line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add comment marker for every line.
line = '#{0}'.format(line.rstrip())
# Ensure the first line has the Doxygen double comment.
if lineNum == 0:
line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
# If we get our sentinel value, send out what we've got.
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False
@coroutine
def __writeDocstring(self):
"""
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
"""
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
def _processDocstring(self, node, tail='', **kwargs):
"""
Handles a docstring for functions, classes, and modules.
Basically just figures out the bounds of the docstring and sends it
off to the parser to do the actual work.
"""
typeName = type(node).__name__
# Modules don't have lineno defined, but it's always 0 for them.
curLineNum = startLineNum = 0
if typeName != 'Module':
startLineNum = curLineNum = node.lineno - 1
# Figure out where both our enclosing object and our docstring start.
line = ''
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
match = AstWalker.__docstrMarkerRE.match(line)
if match:
break
curLineNum += 1
docstringStart = curLineNum
# Figure out where our docstring ends.
if not AstWalker.__docstrOneLineRE.match(line):
# Skip for the special case of a single-line docstring.
curLineNum += 1
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
if line.find(match.group(2)) >= 0:
break
curLineNum += 1
endLineNum = curLineNum + 1
# Isolate our enclosing object's declaration.
defLines = self.lines[startLineNum: docstringStart]
# Isolate our docstring.
self.docLines = self.lines[docstringStart: endLineNum]
# If we have a docstring, extract information from it.
if self.docLines:
# Get rid of the docstring delineators.
self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[0])
self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[-1])
# Handle special strings within the docstring.
docstringConverter = self.__alterDocstring(
tail, self.__writeDocstring())
for lineInfo in enumerate(self.docLines):
docstringConverter.send(lineInfo)
docstringConverter.send((len(self.docLines) - 1, None))
# Add a Doxygen @brief tag to any single-line description.
if self.options.autobrief:
safetyCounter = 0
while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
del self.docLines[0]
self.docLines.append('')
safetyCounter += 1
if safetyCounter >= len(self.docLines):
# Escape the effectively empty docstring.
break
if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
self.docLines[1].strip(whitespace + '#') == '' or
self.docLines[1].strip(whitespace + '#').startswith('@'))):
self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
if len(self.docLines) > 1 and self.docLines[1] == '# @par':
self.docLines[1] = '#'
if defLines:
match = AstWalker.__indentRE.match(defLines[0])
indentStr = match and match.group(1) or ''
self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
for docLine in self.docLines]
# Taking away a docstring from an interface method definition sometimes
# leaves broken code as the docstring may be the only code in it.
# Here we manually insert a pass statement to rectify this problem.
if typeName != 'Module':
if docstringStart < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[docstringStart])
indentStr = match and match.group(1) or ''
else:
indentStr = ''
containingNodes = kwargs.get('containingNodes', []) or []
fullPathNamespace = self._getFullPathName(containingNodes)
parentType = fullPathNamespace[-2][1]
if parentType == 'interface' and typeName == 'FunctionDef' \
or fullPathNamespace[-1][1] == 'interface':
defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
linesep, indentStr)
elif self.options.autobrief and typeName == 'ClassDef':
# If we're parsing docstrings separate out class attribute
# definitions to get better Doxygen output.
for firstVarLineNum, firstVarLine in enumerate(self.docLines):
if '@property\t' in firstVarLine:
break
lastVarLineNum = len(self.docLines)
if lastVarLineNum > 0 and '@property\t' in firstVarLine:
while lastVarLineNum > firstVarLineNum:
lastVarLineNum -= 1
if '@property\t' in self.docLines[lastVarLineNum]:
break
lastVarLineNum += 1
if firstVarLineNum < len(self.docLines):
indentLineNum = endLineNum
indentStr = ''
while not indentStr and indentLineNum < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[indentLineNum])
indentStr = match and match.group(1) or ''
indentLineNum += 1
varLines = ['{0}{1}'.format(linesep, docLine).replace(
linesep, linesep + indentStr)
for docLine in self.docLines[
firstVarLineNum: lastVarLineNum]]
defLines.extend(varLines)
self.docLines[firstVarLineNum: lastVarLineNum] = []
# After the property shuffling we will need to relocate
# any existing namespace information.
namespaceLoc = defLines[-1].find('\n# @namespace')
if namespaceLoc >= 0:
self.docLines[-1] += defLines[-1][namespaceLoc:]
defLines[-1] = defLines[-1][:namespaceLoc]
# For classes and functions, apply our changes and reverse the
# order of the declaration and docstring, and for modules just
# apply our changes.
if typeName != 'Module':
self.lines[startLineNum: endLineNum] = self.docLines + defLines
else:
self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def _processMembers(self, node, contextTag):
"""
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
"""
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
def visit_Assign(self, node, **kwargs):
"""
Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen.
"""
lineNum = node.lineno - 1
# Assignments have one Doxygen-significant special case:
# interface attributes.
match = AstWalker.__attributeRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
'{0}# @hideinitializer{2}{4}{2}'.format(
match.group(1),
match.group(2),
linesep,
match.group(3),
self.lines[lineNum].rstrip()
)
if self.options.debug:
stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
linesep))
if isinstance(node.targets[0], Name):
match = AstWalker.__indentRE.match(self.lines[lineNum])
indentStr = match and match.group(1) or ''
restrictionLevel = self._checkMemberName(node.targets[0].id)
if restrictionLevel:
self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
indentStr,
node.targets[0].id,
linesep,
restrictionLevel,
self.lines[lineNum].rstrip()
)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
"""
Handles class definitions within code.
Process the docstring. Note though that in Python Class definitions
are used to define interfaces in addition to classes.
If a class definition appears to be an interface definition tag it as an
interface definition for Doxygen. Otherwise tag it as a class
definition for Doxygen.
"""
lineNum = node.lineno - 1
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is a method or an interface method definition or if
# a class is fully contained within another class.
containingNodes = kwargs.get('containingNodes') or []
if not self.options.object_respect:
# Remove object class of the inherited class list to avoid that all
# new-style class inherits from object in the hierarchy class
line = self.lines[lineNum]
match = AstWalker.__classRE.match(line)
if match:
if match.group(2) == 'object':
self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
match = AstWalker.__interfaceRE.match(self.lines[lineNum])
if match:
if self.options.debug:
stderr.write("# Interface {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'interface'))
else:
if self.options.debug:
stderr.write("# Class {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'class'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
# Class definitions have one Doxygen-significant special case:
# interface definitions.
if match:
contextTag = '{0}{1}# @interface {2}'.format(tail,
linesep,
match.group(1))
else:
contextTag = tail
contextTag = self._processMembers(node, contextTag)
if get_docstring(node):
self._processDocstring(node, contextTag,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker._checkIfCode
|
python
|
def _checkIfCode(self, inCodeBlockObj):
while True:
line, lines, lineNum = (yield)
testLineNum = 1
currentLineNum = 0
testLine = line.strip()
lineOfCode = None
while lineOfCode is None:
match = AstWalker.__errorLineRE.match(testLine)
if not testLine or testLine == '...' or match:
# These are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
elif testLine.startswith('>>>'):
# This is definitely code.
lineOfCode = True
else:
try:
compLine = compile_command(testLine)
if compLine and lines[currentLineNum].strip().startswith('#'):
lineOfCode = True
else:
line, lines, lineNum = (yield)
line = line.strip()
if line.startswith('>>>'):
# Definitely code, don't compile further.
lineOfCode = True
else:
testLine += linesep + line
testLine = testLine.strip()
testLineNum += 1
except (SyntaxError, RuntimeError):
# This is definitely not code.
lineOfCode = False
except Exception:
# Other errors are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
currentLineNum = lineNum - testLineNum
if not inCodeBlockObj[0] and lineOfCode:
inCodeBlockObj[0] = True
lines[currentLineNum] = '{0}{1}# @code{1}'.format(
lines[currentLineNum],
linesep
)
elif inCodeBlockObj[0] and lineOfCode is False:
# None is ambiguous, so strict checking
# against False is necessary.
inCodeBlockObj[0] = False
lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
lines[currentLineNum],
linesep
)
|
Checks whether or not a given line appears to be Python code.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L121-L176
| null |
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
@coroutine
@coroutine
def __alterDocstring(self, tail='', writer=None):
"""
Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing.
"""
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
# Don't bother doing extra work if it's a sentinel.
if line is not None:
# Also limit work if we're not parsing the docstring.
if self.options.autobrief:
for doxyTag, tagRE in AstWalker.__singleLineREs.items():
match = tagRE.search(line)
if match:
# We've got a simple one-line Doxygen command
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum - 1, lines))
lines = []
firstLineNum = lineNum
line = line.replace(match.group(1), doxyTag)
timeToSend = True
if inSection:
# The last line belonged to a section.
# Does this one too? (Ignoring empty lines.)
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
# If the last line was empty, but we're still in a section
# then we need to start a new paragraph.
lines[-1] = '# @par'
match = AstWalker.__returnsStartRE.match(line)
if match:
# We've got a "returns" section
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
# We've got an "arguments" section
line = line.replace(match.group(0), '').rstrip()
if 'attr' in match.group(0).lower():
prefix = '@property\t'
else:
prefix = '@param\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__argsRE.match(line)
if match and not inCodeBlock:
# We've got something that looks like an item /
# description pair.
if 'property' in prefix:
line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
prefix, match.groupdict(), linesep)
else:
line = ' {0}\t{1[name]}\t{1[desc]}'.format(
prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
# We've got a "see also" section
prefix = '@sa\t'
else:
# We've got an "exceptions" section
prefix = '@exception\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
# We've got a list of something or another
itemList = []
for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
match.group(0))):
itemList.append('# {0}\t{1}{2}'.format(
prefix, itemMatch, linesep))
line = ''.join(itemList)[1:]
else:
match = AstWalker.__examplesStartRE.match(line)
if match and lines[-1].strip() == '#' \
and self.options.autocode:
# We've got an "example" section
inCodeBlock = True
inCodeBlockObj[0] = True
line = line.replace(match.group(0),
' @b Examples{0}# @code'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
# We've got an arbitrary section
prefix = ''
inSection = True
# What's the indentation of the section heading?
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
line = line.replace(
match.group(0),
' @par {0}'.format(match.group(1))
)
if lines[-1] == '# @par':
lines[-1] = '#'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
# Probably a single list item
line = ' {0}\t{1}'.format(
prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
# If we were passed a tail, append it to the docstring.
# Note that this means that we need a docstring for this
# item to get documented.
if tail and lineNum == len(self.docLines) - 1:
line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add comment marker for every line.
line = '#{0}'.format(line.rstrip())
# Ensure the first line has the Doxygen double comment.
if lineNum == 0:
line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
# If we get our sentinel value, send out what we've got.
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False
@coroutine
def __writeDocstring(self):
"""
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
"""
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
def _processDocstring(self, node, tail='', **kwargs):
"""
Handles a docstring for functions, classes, and modules.
Basically just figures out the bounds of the docstring and sends it
off to the parser to do the actual work.
"""
typeName = type(node).__name__
# Modules don't have lineno defined, but it's always 0 for them.
curLineNum = startLineNum = 0
if typeName != 'Module':
startLineNum = curLineNum = node.lineno - 1
# Figure out where both our enclosing object and our docstring start.
line = ''
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
match = AstWalker.__docstrMarkerRE.match(line)
if match:
break
curLineNum += 1
docstringStart = curLineNum
# Figure out where our docstring ends.
if not AstWalker.__docstrOneLineRE.match(line):
# Skip for the special case of a single-line docstring.
curLineNum += 1
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
if line.find(match.group(2)) >= 0:
break
curLineNum += 1
endLineNum = curLineNum + 1
# Isolate our enclosing object's declaration.
defLines = self.lines[startLineNum: docstringStart]
# Isolate our docstring.
self.docLines = self.lines[docstringStart: endLineNum]
# If we have a docstring, extract information from it.
if self.docLines:
# Get rid of the docstring delineators.
self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[0])
self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[-1])
# Handle special strings within the docstring.
docstringConverter = self.__alterDocstring(
tail, self.__writeDocstring())
for lineInfo in enumerate(self.docLines):
docstringConverter.send(lineInfo)
docstringConverter.send((len(self.docLines) - 1, None))
# Add a Doxygen @brief tag to any single-line description.
if self.options.autobrief:
safetyCounter = 0
while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
del self.docLines[0]
self.docLines.append('')
safetyCounter += 1
if safetyCounter >= len(self.docLines):
# Escape the effectively empty docstring.
break
if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
self.docLines[1].strip(whitespace + '#') == '' or
self.docLines[1].strip(whitespace + '#').startswith('@'))):
self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
if len(self.docLines) > 1 and self.docLines[1] == '# @par':
self.docLines[1] = '#'
if defLines:
match = AstWalker.__indentRE.match(defLines[0])
indentStr = match and match.group(1) or ''
self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
for docLine in self.docLines]
# Taking away a docstring from an interface method definition sometimes
# leaves broken code as the docstring may be the only code in it.
# Here we manually insert a pass statement to rectify this problem.
if typeName != 'Module':
if docstringStart < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[docstringStart])
indentStr = match and match.group(1) or ''
else:
indentStr = ''
containingNodes = kwargs.get('containingNodes', []) or []
fullPathNamespace = self._getFullPathName(containingNodes)
parentType = fullPathNamespace[-2][1]
if parentType == 'interface' and typeName == 'FunctionDef' \
or fullPathNamespace[-1][1] == 'interface':
defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
linesep, indentStr)
elif self.options.autobrief and typeName == 'ClassDef':
# If we're parsing docstrings separate out class attribute
# definitions to get better Doxygen output.
for firstVarLineNum, firstVarLine in enumerate(self.docLines):
if '@property\t' in firstVarLine:
break
lastVarLineNum = len(self.docLines)
if lastVarLineNum > 0 and '@property\t' in firstVarLine:
while lastVarLineNum > firstVarLineNum:
lastVarLineNum -= 1
if '@property\t' in self.docLines[lastVarLineNum]:
break
lastVarLineNum += 1
if firstVarLineNum < len(self.docLines):
indentLineNum = endLineNum
indentStr = ''
while not indentStr and indentLineNum < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[indentLineNum])
indentStr = match and match.group(1) or ''
indentLineNum += 1
varLines = ['{0}{1}'.format(linesep, docLine).replace(
linesep, linesep + indentStr)
for docLine in self.docLines[
firstVarLineNum: lastVarLineNum]]
defLines.extend(varLines)
self.docLines[firstVarLineNum: lastVarLineNum] = []
# After the property shuffling we will need to relocate
# any existing namespace information.
namespaceLoc = defLines[-1].find('\n# @namespace')
if namespaceLoc >= 0:
self.docLines[-1] += defLines[-1][namespaceLoc:]
defLines[-1] = defLines[-1][:namespaceLoc]
# For classes and functions, apply our changes and reverse the
# order of the declaration and docstring, and for modules just
# apply our changes.
if typeName != 'Module':
self.lines[startLineNum: endLineNum] = self.docLines + defLines
else:
self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def _processMembers(self, node, contextTag):
"""
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
"""
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
def visit_Assign(self, node, **kwargs):
"""
Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen.
"""
lineNum = node.lineno - 1
# Assignments have one Doxygen-significant special case:
# interface attributes.
match = AstWalker.__attributeRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
'{0}# @hideinitializer{2}{4}{2}'.format(
match.group(1),
match.group(2),
linesep,
match.group(3),
self.lines[lineNum].rstrip()
)
if self.options.debug:
stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
linesep))
if isinstance(node.targets[0], Name):
match = AstWalker.__indentRE.match(self.lines[lineNum])
indentStr = match and match.group(1) or ''
restrictionLevel = self._checkMemberName(node.targets[0].id)
if restrictionLevel:
self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
indentStr,
node.targets[0].id,
linesep,
restrictionLevel,
self.lines[lineNum].rstrip()
)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
"""
Handles class definitions within code.
Process the docstring. Note though that in Python Class definitions
are used to define interfaces in addition to classes.
If a class definition appears to be an interface definition tag it as an
interface definition for Doxygen. Otherwise tag it as a class
definition for Doxygen.
"""
lineNum = node.lineno - 1
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is a method or an interface method definition or if
# a class is fully contained within another class.
containingNodes = kwargs.get('containingNodes') or []
if not self.options.object_respect:
# Remove object class of the inherited class list to avoid that all
# new-style class inherits from object in the hierarchy class
line = self.lines[lineNum]
match = AstWalker.__classRE.match(line)
if match:
if match.group(2) == 'object':
self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
match = AstWalker.__interfaceRE.match(self.lines[lineNum])
if match:
if self.options.debug:
stderr.write("# Interface {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'interface'))
else:
if self.options.debug:
stderr.write("# Class {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'class'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
# Class definitions have one Doxygen-significant special case:
# interface definitions.
if match:
contextTag = '{0}{1}# @interface {2}'.format(tail,
linesep,
match.group(1))
else:
contextTag = tail
contextTag = self._processMembers(node, contextTag)
if get_docstring(node):
self._processDocstring(node, contextTag,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker.__alterDocstring
|
python
|
def __alterDocstring(self, tail='', writer=None):
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
# Don't bother doing extra work if it's a sentinel.
if line is not None:
# Also limit work if we're not parsing the docstring.
if self.options.autobrief:
for doxyTag, tagRE in AstWalker.__singleLineREs.items():
match = tagRE.search(line)
if match:
# We've got a simple one-line Doxygen command
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum - 1, lines))
lines = []
firstLineNum = lineNum
line = line.replace(match.group(1), doxyTag)
timeToSend = True
if inSection:
# The last line belonged to a section.
# Does this one too? (Ignoring empty lines.)
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
# If the last line was empty, but we're still in a section
# then we need to start a new paragraph.
lines[-1] = '# @par'
match = AstWalker.__returnsStartRE.match(line)
if match:
# We've got a "returns" section
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
# We've got an "arguments" section
line = line.replace(match.group(0), '').rstrip()
if 'attr' in match.group(0).lower():
prefix = '@property\t'
else:
prefix = '@param\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__argsRE.match(line)
if match and not inCodeBlock:
# We've got something that looks like an item /
# description pair.
if 'property' in prefix:
line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
prefix, match.groupdict(), linesep)
else:
line = ' {0}\t{1[name]}\t{1[desc]}'.format(
prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
# We've got a "see also" section
prefix = '@sa\t'
else:
# We've got an "exceptions" section
prefix = '@exception\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
# We've got a list of something or another
itemList = []
for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
match.group(0))):
itemList.append('# {0}\t{1}{2}'.format(
prefix, itemMatch, linesep))
line = ''.join(itemList)[1:]
else:
match = AstWalker.__examplesStartRE.match(line)
if match and lines[-1].strip() == '#' \
and self.options.autocode:
# We've got an "example" section
inCodeBlock = True
inCodeBlockObj[0] = True
line = line.replace(match.group(0),
' @b Examples{0}# @code'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
# We've got an arbitrary section
prefix = ''
inSection = True
# What's the indentation of the section heading?
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
line = line.replace(
match.group(0),
' @par {0}'.format(match.group(1))
)
if lines[-1] == '# @par':
lines[-1] = '#'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
# Probably a single list item
line = ' {0}\t{1}'.format(
prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
# If we were passed a tail, append it to the docstring.
# Note that this means that we need a docstring for this
# item to get documented.
if tail and lineNum == len(self.docLines) - 1:
line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add comment marker for every line.
line = '#{0}'.format(line.rstrip())
# Ensure the first line has the Doxygen double comment.
if lineNum == 0:
line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
# If we get our sentinel value, send out what we've got.
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False
|
Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L179-L368
| null |
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
@coroutine
def _checkIfCode(self, inCodeBlockObj):
"""Checks whether or not a given line appears to be Python code."""
while True:
line, lines, lineNum = (yield)
testLineNum = 1
currentLineNum = 0
testLine = line.strip()
lineOfCode = None
while lineOfCode is None:
match = AstWalker.__errorLineRE.match(testLine)
if not testLine or testLine == '...' or match:
# These are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
elif testLine.startswith('>>>'):
# This is definitely code.
lineOfCode = True
else:
try:
compLine = compile_command(testLine)
if compLine and lines[currentLineNum].strip().startswith('#'):
lineOfCode = True
else:
line, lines, lineNum = (yield)
line = line.strip()
if line.startswith('>>>'):
# Definitely code, don't compile further.
lineOfCode = True
else:
testLine += linesep + line
testLine = testLine.strip()
testLineNum += 1
except (SyntaxError, RuntimeError):
# This is definitely not code.
lineOfCode = False
except Exception:
# Other errors are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
currentLineNum = lineNum - testLineNum
if not inCodeBlockObj[0] and lineOfCode:
inCodeBlockObj[0] = True
lines[currentLineNum] = '{0}{1}# @code{1}'.format(
lines[currentLineNum],
linesep
)
elif inCodeBlockObj[0] and lineOfCode is False:
# None is ambiguous, so strict checking
# against False is necessary.
inCodeBlockObj[0] = False
lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
lines[currentLineNum],
linesep
)
@coroutine
@coroutine
def __writeDocstring(self):
"""
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
"""
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
def _processDocstring(self, node, tail='', **kwargs):
"""
Handles a docstring for functions, classes, and modules.
Basically just figures out the bounds of the docstring and sends it
off to the parser to do the actual work.
"""
typeName = type(node).__name__
# Modules don't have lineno defined, but it's always 0 for them.
curLineNum = startLineNum = 0
if typeName != 'Module':
startLineNum = curLineNum = node.lineno - 1
# Figure out where both our enclosing object and our docstring start.
line = ''
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
match = AstWalker.__docstrMarkerRE.match(line)
if match:
break
curLineNum += 1
docstringStart = curLineNum
# Figure out where our docstring ends.
if not AstWalker.__docstrOneLineRE.match(line):
# Skip for the special case of a single-line docstring.
curLineNum += 1
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
if line.find(match.group(2)) >= 0:
break
curLineNum += 1
endLineNum = curLineNum + 1
# Isolate our enclosing object's declaration.
defLines = self.lines[startLineNum: docstringStart]
# Isolate our docstring.
self.docLines = self.lines[docstringStart: endLineNum]
# If we have a docstring, extract information from it.
if self.docLines:
# Get rid of the docstring delineators.
self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[0])
self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[-1])
# Handle special strings within the docstring.
docstringConverter = self.__alterDocstring(
tail, self.__writeDocstring())
for lineInfo in enumerate(self.docLines):
docstringConverter.send(lineInfo)
docstringConverter.send((len(self.docLines) - 1, None))
# Add a Doxygen @brief tag to any single-line description.
if self.options.autobrief:
safetyCounter = 0
while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
del self.docLines[0]
self.docLines.append('')
safetyCounter += 1
if safetyCounter >= len(self.docLines):
# Escape the effectively empty docstring.
break
if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
self.docLines[1].strip(whitespace + '#') == '' or
self.docLines[1].strip(whitespace + '#').startswith('@'))):
self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
if len(self.docLines) > 1 and self.docLines[1] == '# @par':
self.docLines[1] = '#'
if defLines:
match = AstWalker.__indentRE.match(defLines[0])
indentStr = match and match.group(1) or ''
self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
for docLine in self.docLines]
# Taking away a docstring from an interface method definition sometimes
# leaves broken code as the docstring may be the only code in it.
# Here we manually insert a pass statement to rectify this problem.
if typeName != 'Module':
if docstringStart < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[docstringStart])
indentStr = match and match.group(1) or ''
else:
indentStr = ''
containingNodes = kwargs.get('containingNodes', []) or []
fullPathNamespace = self._getFullPathName(containingNodes)
parentType = fullPathNamespace[-2][1]
if parentType == 'interface' and typeName == 'FunctionDef' \
or fullPathNamespace[-1][1] == 'interface':
defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
linesep, indentStr)
elif self.options.autobrief and typeName == 'ClassDef':
# If we're parsing docstrings separate out class attribute
# definitions to get better Doxygen output.
for firstVarLineNum, firstVarLine in enumerate(self.docLines):
if '@property\t' in firstVarLine:
break
lastVarLineNum = len(self.docLines)
if lastVarLineNum > 0 and '@property\t' in firstVarLine:
while lastVarLineNum > firstVarLineNum:
lastVarLineNum -= 1
if '@property\t' in self.docLines[lastVarLineNum]:
break
lastVarLineNum += 1
if firstVarLineNum < len(self.docLines):
indentLineNum = endLineNum
indentStr = ''
while not indentStr and indentLineNum < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[indentLineNum])
indentStr = match and match.group(1) or ''
indentLineNum += 1
varLines = ['{0}{1}'.format(linesep, docLine).replace(
linesep, linesep + indentStr)
for docLine in self.docLines[
firstVarLineNum: lastVarLineNum]]
defLines.extend(varLines)
self.docLines[firstVarLineNum: lastVarLineNum] = []
# After the property shuffling we will need to relocate
# any existing namespace information.
namespaceLoc = defLines[-1].find('\n# @namespace')
if namespaceLoc >= 0:
self.docLines[-1] += defLines[-1][namespaceLoc:]
defLines[-1] = defLines[-1][:namespaceLoc]
# For classes and functions, apply our changes and reverse the
# order of the declaration and docstring, and for modules just
# apply our changes.
if typeName != 'Module':
self.lines[startLineNum: endLineNum] = self.docLines + defLines
else:
self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def _processMembers(self, node, contextTag):
"""
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
"""
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
def visit_Assign(self, node, **kwargs):
"""
Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen.
"""
lineNum = node.lineno - 1
# Assignments have one Doxygen-significant special case:
# interface attributes.
match = AstWalker.__attributeRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
'{0}# @hideinitializer{2}{4}{2}'.format(
match.group(1),
match.group(2),
linesep,
match.group(3),
self.lines[lineNum].rstrip()
)
if self.options.debug:
stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
linesep))
if isinstance(node.targets[0], Name):
match = AstWalker.__indentRE.match(self.lines[lineNum])
indentStr = match and match.group(1) or ''
restrictionLevel = self._checkMemberName(node.targets[0].id)
if restrictionLevel:
self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
indentStr,
node.targets[0].id,
linesep,
restrictionLevel,
self.lines[lineNum].rstrip()
)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
"""
Handles class definitions within code.
Process the docstring. Note though that in Python Class definitions
are used to define interfaces in addition to classes.
If a class definition appears to be an interface definition tag it as an
interface definition for Doxygen. Otherwise tag it as a class
definition for Doxygen.
"""
lineNum = node.lineno - 1
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is a method or an interface method definition or if
# a class is fully contained within another class.
containingNodes = kwargs.get('containingNodes') or []
if not self.options.object_respect:
# Remove object class of the inherited class list to avoid that all
# new-style class inherits from object in the hierarchy class
line = self.lines[lineNum]
match = AstWalker.__classRE.match(line)
if match:
if match.group(2) == 'object':
self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
match = AstWalker.__interfaceRE.match(self.lines[lineNum])
if match:
if self.options.debug:
stderr.write("# Interface {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'interface'))
else:
if self.options.debug:
stderr.write("# Class {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'class'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
# Class definitions have one Doxygen-significant special case:
# interface definitions.
if match:
contextTag = '{0}{1}# @interface {2}'.format(tail,
linesep,
match.group(1))
else:
contextTag = tail
contextTag = self._processMembers(node, contextTag)
if get_docstring(node):
self._processDocstring(node, contextTag,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker.__writeDocstring
|
python
|
def __writeDocstring(self):
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
|
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L371-L384
| null |
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
@coroutine
def _checkIfCode(self, inCodeBlockObj):
"""Checks whether or not a given line appears to be Python code."""
while True:
line, lines, lineNum = (yield)
testLineNum = 1
currentLineNum = 0
testLine = line.strip()
lineOfCode = None
while lineOfCode is None:
match = AstWalker.__errorLineRE.match(testLine)
if not testLine or testLine == '...' or match:
# These are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
elif testLine.startswith('>>>'):
# This is definitely code.
lineOfCode = True
else:
try:
compLine = compile_command(testLine)
if compLine and lines[currentLineNum].strip().startswith('#'):
lineOfCode = True
else:
line, lines, lineNum = (yield)
line = line.strip()
if line.startswith('>>>'):
# Definitely code, don't compile further.
lineOfCode = True
else:
testLine += linesep + line
testLine = testLine.strip()
testLineNum += 1
except (SyntaxError, RuntimeError):
# This is definitely not code.
lineOfCode = False
except Exception:
# Other errors are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
currentLineNum = lineNum - testLineNum
if not inCodeBlockObj[0] and lineOfCode:
inCodeBlockObj[0] = True
lines[currentLineNum] = '{0}{1}# @code{1}'.format(
lines[currentLineNum],
linesep
)
elif inCodeBlockObj[0] and lineOfCode is False:
# None is ambiguous, so strict checking
# against False is necessary.
inCodeBlockObj[0] = False
lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
lines[currentLineNum],
linesep
)
@coroutine
def __alterDocstring(self, tail='', writer=None):
"""
Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing.
"""
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
# Don't bother doing extra work if it's a sentinel.
if line is not None:
# Also limit work if we're not parsing the docstring.
if self.options.autobrief:
for doxyTag, tagRE in AstWalker.__singleLineREs.items():
match = tagRE.search(line)
if match:
# We've got a simple one-line Doxygen command
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum - 1, lines))
lines = []
firstLineNum = lineNum
line = line.replace(match.group(1), doxyTag)
timeToSend = True
if inSection:
# The last line belonged to a section.
# Does this one too? (Ignoring empty lines.)
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
# If the last line was empty, but we're still in a section
# then we need to start a new paragraph.
lines[-1] = '# @par'
match = AstWalker.__returnsStartRE.match(line)
if match:
# We've got a "returns" section
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
# We've got an "arguments" section
line = line.replace(match.group(0), '').rstrip()
if 'attr' in match.group(0).lower():
prefix = '@property\t'
else:
prefix = '@param\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__argsRE.match(line)
if match and not inCodeBlock:
# We've got something that looks like an item /
# description pair.
if 'property' in prefix:
line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
prefix, match.groupdict(), linesep)
else:
line = ' {0}\t{1[name]}\t{1[desc]}'.format(
prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
# We've got a "see also" section
prefix = '@sa\t'
else:
# We've got an "exceptions" section
prefix = '@exception\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
# We've got a list of something or another
itemList = []
for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
match.group(0))):
itemList.append('# {0}\t{1}{2}'.format(
prefix, itemMatch, linesep))
line = ''.join(itemList)[1:]
else:
match = AstWalker.__examplesStartRE.match(line)
if match and lines[-1].strip() == '#' \
and self.options.autocode:
# We've got an "example" section
inCodeBlock = True
inCodeBlockObj[0] = True
line = line.replace(match.group(0),
' @b Examples{0}# @code'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
# We've got an arbitrary section
prefix = ''
inSection = True
# What's the indentation of the section heading?
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
line = line.replace(
match.group(0),
' @par {0}'.format(match.group(1))
)
if lines[-1] == '# @par':
lines[-1] = '#'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
# Probably a single list item
line = ' {0}\t{1}'.format(
prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
# If we were passed a tail, append it to the docstring.
# Note that this means that we need a docstring for this
# item to get documented.
if tail and lineNum == len(self.docLines) - 1:
line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add comment marker for every line.
line = '#{0}'.format(line.rstrip())
# Ensure the first line has the Doxygen double comment.
if lineNum == 0:
line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
# If we get our sentinel value, send out what we've got.
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False
@coroutine
def _processDocstring(self, node, tail='', **kwargs):
    """
    Handles a docstring for functions, classes, and modules.
    Basically just figures out the bounds of the docstring and sends it
    off to the parser to do the actual work.
    """
    # NOTE(review): the @coroutine decorator looks wrong here -- this
    # method never yields, so wrapping it as a coroutine would break the
    # direct call sites; confirm against the upstream source.
    typeName = type(node).__name__
    # Modules don't have lineno defined, but it's always 0 for them.
    curLineNum = startLineNum = 0
    if typeName != 'Module':
        startLineNum = curLineNum = node.lineno - 1
    # Figure out where both our enclosing object and our docstring start.
    line = ''
    while curLineNum < len(self.lines):
        line = self.lines[curLineNum]
        match = AstWalker.__docstrMarkerRE.match(line)
        if match:
            break
        curLineNum += 1
    docstringStart = curLineNum
    # Figure out where our docstring ends.
    if not AstWalker.__docstrOneLineRE.match(line):
        # Skip for the special case of a single-line docstring.
        curLineNum += 1
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            # group(2) is the triple-quote marker that opened the
            # docstring; the docstring ends where it is repeated.
            if line.find(match.group(2)) >= 0:
                break
            curLineNum += 1
    endLineNum = curLineNum + 1
    # Isolate our enclosing object's declaration.
    defLines = self.lines[startLineNum: docstringStart]
    # Isolate our docstring.
    self.docLines = self.lines[docstringStart: endLineNum]
    # If we have a docstring, extract information from it.
    if self.docLines:
        # Get rid of the docstring delineators.
        self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                          self.docLines[0])
        self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                           self.docLines[-1])
        # Handle special strings within the docstring.
        docstringConverter = self.__alterDocstring(
            tail, self.__writeDocstring())
        for lineInfo in enumerate(self.docLines):
            docstringConverter.send(lineInfo)
        # A (lineNum, None) pair is the sentinel that flushes the converter.
        docstringConverter.send((len(self.docLines) - 1, None))
    # Add a Doxygen @brief tag to any single-line description.
    if self.options.autobrief:
        # safetyCounter guards against spinning forever on a docstring
        # that consists entirely of blank comment lines.
        safetyCounter = 0
        while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
            del self.docLines[0]
            self.docLines.append('')
            safetyCounter += 1
            if safetyCounter >= len(self.docLines):
                # Escape the effectively empty docstring.
                break
        if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                self.docLines[1].strip(whitespace + '#') == '' or
                self.docLines[1].strip(whitespace + '#').startswith('@'))):
            self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
            if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                self.docLines[1] = '#'
    if defLines:
        # Re-indent the docstring comments to match the declaration.
        match = AstWalker.__indentRE.match(defLines[0])
        indentStr = match and match.group(1) or ''
        self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                         for docLine in self.docLines]
    # Taking away a docstring from an interface method definition sometimes
    # leaves broken code as the docstring may be the only code in it.
    # Here we manually insert a pass statement to rectify this problem.
    if typeName != 'Module':
        if docstringStart < len(self.lines):
            match = AstWalker.__indentRE.match(self.lines[docstringStart])
            indentStr = match and match.group(1) or ''
        else:
            indentStr = ''
        containingNodes = kwargs.get('containingNodes', []) or []
        fullPathNamespace = self._getFullPathName(containingNodes)
        parentType = fullPathNamespace[-2][1]
        if parentType == 'interface' and typeName == 'FunctionDef' \
                or fullPathNamespace[-1][1] == 'interface':
            defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                  linesep, indentStr)
        elif self.options.autobrief and typeName == 'ClassDef':
            # If we're parsing docstrings separate out class attribute
            # definitions to get better Doxygen output.
            for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                if '@property\t' in firstVarLine:
                    break
            lastVarLineNum = len(self.docLines)
            if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                while lastVarLineNum > firstVarLineNum:
                    lastVarLineNum -= 1
                    if '@property\t' in self.docLines[lastVarLineNum]:
                        break
                lastVarLineNum += 1
                if firstVarLineNum < len(self.docLines):
                    indentLineNum = endLineNum
                    indentStr = ''
                    while not indentStr and indentLineNum < len(self.lines):
                        match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                        indentStr = match and match.group(1) or ''
                        indentLineNum += 1
                    varLines = ['{0}{1}'.format(linesep, docLine).replace(
                        linesep, linesep + indentStr)
                        for docLine in self.docLines[
                            firstVarLineNum: lastVarLineNum]]
                    defLines.extend(varLines)
                    self.docLines[firstVarLineNum: lastVarLineNum] = []
                    # After the property shuffling we will need to relocate
                    # any existing namespace information.
                    namespaceLoc = defLines[-1].find('\n# @namespace')
                    if namespaceLoc >= 0:
                        self.docLines[-1] += defLines[-1][namespaceLoc:]
                        defLines[-1] = defLines[-1][:namespaceLoc]
    # For classes and functions, apply our changes and reverse the
    # order of the declaration and docstring, and for modules just
    # apply our changes.
    if typeName != 'Module':
        self.lines[startLineNum: endLineNum] = self.docLines + defLines
    else:
        self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
    """
    See if a member name indicates that it should be private.
    A leading double underscore (without a trailing one) marks a
    Python-private name; a single leading underscore marks the
    conventional "protected" level.  Dunder names get no restriction.
    """
    assert isinstance(name, str)
    if name.endswith('__'):
        return None
    if name.startswith('__'):
        return 'private'
    if name.startswith('_'):
        return 'protected'
    return None
def _processMembers(self, node, contextTag):
    """
    Mark up members if they should be private.
    Appends the matching Doxygen access tag to *contextTag* when the
    member's name implies a private or protected level.
    """
    level = self._checkMemberName(node.name)
    if not level:
        return contextTag
    return '{0}{1}# @{2}'.format(contextTag, linesep, level)
def generic_visit(self, node, **kwargs):
    """
    Walk every child AST node, forwarding the containing-node hierarchy.
    Mirrors NodeVisitor.generic_visit, except that the chain of
    containing nodes is threaded through every visit call.
    """
    hierarchy = kwargs['containingNodes']
    for _, value in iter_fields(node):
        children = value if isinstance(value, list) else [value]
        for child in children:
            if isinstance(child, AST):
                self.visit(child, containingNodes=hierarchy)
def visit(self, node, **kwargs):
    """
    Dispatch *node* to its type-specific visitor (or generic_visit).
    Mirrors NodeVisitor.visit, except that the chain of containing
    nodes is forwarded so context is preserved during the walk.
    """
    handler = getattr(self, 'visit_' + type(node).__name__,
                      self.generic_visit)
    return handler(node, containingNodes=kwargs.get('containingNodes', []))
def _getFullPathName(self, containingNodes):
    """
    Return the full node hierarchy rooted at the module name.
    The module entry is prepended to the supplied containing-node list.
    """
    assert isinstance(containingNodes, list)
    moduleEntry = (self.options.fullPathNamespace, 'module')
    return [moduleEntry] + containingNodes
def visit_Module(self, node, **kwargs):
    """
    Handles the module-level docstring.
    Processes the module docstring (when one exists) and, if a top-level
    namespace was requested, appends a Doxygen @namespace tag to it.
    """
    containingNodes = kwargs.get('containingNodes', [])
    if self.options.debug:
        stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
                                              linesep))
    if get_docstring(node):
        tail = ''
        if self.options.topLevelNamespace:
            fullPath = self._getFullPathName(containingNodes)
            tail = '@namespace {0}'.format(
                '.'.join(entry[0] for entry in fullPath))
        self._processDocstring(node, tail)
    # Walk everything contained in the module.
    self.generic_visit(node, containingNodes=containingNodes)
def visit_Assign(self, node, **kwargs):
    """
    Handles assignments within code.
    Variable assignments in Python are used to represent interface
    attributes in addition to basic variables. If an assignment appears
    to be an attribute, it gets labeled as such for Doxygen. If a variable
    name uses Python mangling or is just a bed lump, it is labeled as
    private for Doxygen.
    """
    lineNum = node.lineno - 1
    # Assignments have one Doxygen-significant special case:
    # interface attributes.
    match = AstWalker.__attributeRE.match(self.lines[lineNum])
    if match:
        # Rewrite the source line in place as a Doxygen @property block;
        # group(1) is the indent, group(2) the attribute name, and
        # group(3) the attribute's descriptive text.
        self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                              '{0}# @hideinitializer{2}{4}{2}'.format(
                                  match.group(1),
                                  match.group(2),
                                  linesep,
                                  match.group(3),
                                  self.lines[lineNum].rstrip()
                              )
        if self.options.debug:
            stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                        linesep))
    if isinstance(node.targets[0], Name):
        match = AstWalker.__indentRE.match(self.lines[lineNum])
        indentStr = match and match.group(1) or ''
        restrictionLevel = self._checkMemberName(node.targets[0].id)
        if restrictionLevel:
            # Underscore-prefixed names get a @var block carrying the
            # matching Doxygen access level (@private / @protected).
            self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                                  '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                                      indentStr,
                                      node.targets[0].id,
                                      linesep,
                                      restrictionLevel,
                                      self.lines[lineNum].rstrip()
                                  )
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
    """
    Handles function calls within code.
    Function calls in Python are used to represent interface implementations
    in addition to their normal use. If a call appears to mark an
    implementation, it gets labeled as such for Doxygen.
    """
    lineNum = node.lineno - 1
    # Function calls have one Doxygen-significant special case: interface
    # implementations.
    match = AstWalker.__implementsRE.match(self.lines[lineNum])
    if match:
        # Prepend an @implements tag; group(2) is the implemented
        # interface and group(1) the original indentation.
        self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
            match.group(1), match.group(2), linesep,
            self.lines[lineNum].rstrip())
        if self.options.debug:
            stderr.write("# Implements {0}{1}".format(match.group(1),
                                                      linesep))
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
    """
    Handles function definitions within code.
    Process a function's docstring, keeping well aware of the function's
    context and whether or not it's part of an interface definition.
    """
    if self.options.debug:
        stderr.write("# Function {0.name}{1}".format(node, linesep))
    # Push either 'interface' or 'class' onto our containing nodes
    # hierarchy so we can keep track of context. This will let us tell
    # if a function is nested within another function or even if a class
    # is nested within a function.
    containingNodes = kwargs.get('containingNodes') or []
    containingNodes.append((node.name, 'function'))
    if self.options.topLevelNamespace:
        fullPathNamespace = self._getFullPathName(containingNodes)
        contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
        modifiedContextTag = self._processMembers(node, contextTag)
        tail = '@namespace {0}'.format(modifiedContextTag)
    else:
        tail = self._processMembers(node, '')
    if get_docstring(node):
        self._processDocstring(node, tail,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Remove the item we pushed onto the containing nodes hierarchy.
    # The pop must happen after the recursive walk so nested definitions
    # see this function as their parent.
    containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
    """
    Handles class definitions within code.
    Process the docstring. Note though that in Python Class definitions
    are used to define interfaces in addition to classes.
    If a class definition appears to be an interface definition tag it as an
    interface definition for Doxygen. Otherwise tag it as a class
    definition for Doxygen.
    """
    lineNum = node.lineno - 1
    # Push either 'interface' or 'class' onto our containing nodes
    # hierarchy so we can keep track of context. This will let us tell
    # if a function is a method or an interface method definition or if
    # a class is fully contained within another class.
    containingNodes = kwargs.get('containingNodes') or []
    if not self.options.object_respect:
        # Remove object class of the inherited class list to avoid that all
        # new-style class inherits from object in the hierarchy class
        line = self.lines[lineNum]
        match = AstWalker.__classRE.match(line)
        if match:
            if match.group(2) == 'object':
                self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
    match = AstWalker.__interfaceRE.match(self.lines[lineNum])
    if match:
        if self.options.debug:
            stderr.write("# Interface {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'interface'))
    else:
        if self.options.debug:
            stderr.write("# Class {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'class'))
    if self.options.topLevelNamespace:
        fullPathNamespace = self._getFullPathName(containingNodes)
        contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
        tail = '@namespace {0}'.format(contextTag)
    else:
        tail = ''
    # Class definitions have one Doxygen-significant special case:
    # interface definitions.
    if match:
        contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                     linesep,
                                                     match.group(1))
    else:
        contextTag = tail
    contextTag = self._processMembers(node, contextTag)
    if get_docstring(node):
        self._processDocstring(node, contextTag,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Remove the item we pushed onto the containing nodes hierarchy.
    containingNodes.pop()
def parseLines(self):
    """Form an AST for the code and produce a new version of the source."""
    tree = parse(''.join(self.lines), self.inFilename)
    # Walk the tree, applying Doxygen tags back onto self.lines.
    self.visit(tree)
def getLines(self):
    """Return the modified file once processing has been completed."""
    trimmed = [entry.rstrip() for entry in self.lines]
    return linesep.join(trimmed)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker._processDocstring
|
python
|
def _processDocstring(self, node, tail='', **kwargs):
    """
    Handles a docstring for functions, classes, and modules.
    Figures out the bounds of the docstring within self.lines, sends the
    docstring lines through the Doxygen-tag converter, and splices the
    result back into the source.  (Duplicate of
    AstWalker._processDocstring.)
    """
    typeName = type(node).__name__
    # Modules don't have lineno defined, but it's always 0 for them.
    curLineNum = startLineNum = 0
    if typeName != 'Module':
        startLineNum = curLineNum = node.lineno - 1
    # Figure out where both our enclosing object and our docstring start.
    line = ''
    while curLineNum < len(self.lines):
        line = self.lines[curLineNum]
        match = AstWalker.__docstrMarkerRE.match(line)
        if match:
            break
        curLineNum += 1
    docstringStart = curLineNum
    # Figure out where our docstring ends.
    if not AstWalker.__docstrOneLineRE.match(line):
        # Skip for the special case of a single-line docstring.
        curLineNum += 1
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            if line.find(match.group(2)) >= 0:
                break
            curLineNum += 1
    endLineNum = curLineNum + 1
    # Isolate our enclosing object's declaration.
    defLines = self.lines[startLineNum: docstringStart]
    # Isolate our docstring.
    self.docLines = self.lines[docstringStart: endLineNum]
    # If we have a docstring, extract information from it.
    if self.docLines:
        # Get rid of the docstring delineators.
        self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                          self.docLines[0])
        self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                           self.docLines[-1])
        # Handle special strings within the docstring.
        docstringConverter = self.__alterDocstring(
            tail, self.__writeDocstring())
        for lineInfo in enumerate(self.docLines):
            docstringConverter.send(lineInfo)
        docstringConverter.send((len(self.docLines) - 1, None))
    # Add a Doxygen @brief tag to any single-line description.
    if self.options.autobrief:
        safetyCounter = 0
        while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
            del self.docLines[0]
            self.docLines.append('')
            safetyCounter += 1
            if safetyCounter >= len(self.docLines):
                # Escape the effectively empty docstring.
                break
        if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                self.docLines[1].strip(whitespace + '#') == '' or
                self.docLines[1].strip(whitespace + '#').startswith('@'))):
            self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
            if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                self.docLines[1] = '#'
    if defLines:
        match = AstWalker.__indentRE.match(defLines[0])
        indentStr = match and match.group(1) or ''
        self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                         for docLine in self.docLines]
    # Taking away a docstring from an interface method definition sometimes
    # leaves broken code as the docstring may be the only code in it.
    # Here we manually insert a pass statement to rectify this problem.
    if typeName != 'Module':
        if docstringStart < len(self.lines):
            match = AstWalker.__indentRE.match(self.lines[docstringStart])
            indentStr = match and match.group(1) or ''
        else:
            indentStr = ''
        containingNodes = kwargs.get('containingNodes', []) or []
        fullPathNamespace = self._getFullPathName(containingNodes)
        parentType = fullPathNamespace[-2][1]
        if parentType == 'interface' and typeName == 'FunctionDef' \
                or fullPathNamespace[-1][1] == 'interface':
            defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                  linesep, indentStr)
        elif self.options.autobrief and typeName == 'ClassDef':
            # If we're parsing docstrings separate out class attribute
            # definitions to get better Doxygen output.
            for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                if '@property\t' in firstVarLine:
                    break
            lastVarLineNum = len(self.docLines)
            if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                while lastVarLineNum > firstVarLineNum:
                    lastVarLineNum -= 1
                    if '@property\t' in self.docLines[lastVarLineNum]:
                        break
                lastVarLineNum += 1
                if firstVarLineNum < len(self.docLines):
                    indentLineNum = endLineNum
                    indentStr = ''
                    while not indentStr and indentLineNum < len(self.lines):
                        match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                        indentStr = match and match.group(1) or ''
                        indentLineNum += 1
                    varLines = ['{0}{1}'.format(linesep, docLine).replace(
                        linesep, linesep + indentStr)
                        for docLine in self.docLines[
                            firstVarLineNum: lastVarLineNum]]
                    defLines.extend(varLines)
                    self.docLines[firstVarLineNum: lastVarLineNum] = []
                    # After the property shuffling we will need to relocate
                    # any existing namespace information.
                    namespaceLoc = defLines[-1].find('\n# @namespace')
                    if namespaceLoc >= 0:
                        self.docLines[-1] += defLines[-1][namespaceLoc:]
                        defLines[-1] = defLines[-1][:namespaceLoc]
    # For classes and functions, apply our changes and reverse the
    # order of the declaration and docstring, and for modules just
    # apply our changes.
    if typeName != 'Module':
        self.lines[startLineNum: endLineNum] = self.docLines + defLines
    else:
        self.lines[startLineNum: endLineNum] = defLines + self.docLines
|
Handles a docstring for functions, classes, and modules.
Basically just figures out the bounds of the docstring and sends it
off to the parser to do the actual work.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L386-L515
| null |
class AstWalker(NodeVisitor):
    """
    A walker that'll recursively progress through an AST.
    Given an abstract syntax tree for Python code, walk through all the
    nodes looking for significant types (for our purposes we only care
    about module starts, class definitions, function definitions, variable
    assignments, and function calls, as all the information we want to pass
    to Doxygen is found within these constructs). If the autobrief option
    is set, it further attempts to parse docstrings to create appropriate
    Doxygen tags.
    """
    # We have a number of regular expressions that we use. They don't
    # vary across instances and so are compiled directly in the class
    # definition.
    # Captures the leading whitespace of a line (for re-indenting).
    __indentRE = regexpCompile(r'^(\s*)\S')
    __newlineRE = regexpCompile(r'^#', MULTILINE)
    __blanklineRE = regexpCompile(r'^\s*$')
    # Opening triple-quote with optional u/b/r prefixes; group(2) is the
    # quote marker itself, used later to find the matching close.
    __docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
    __docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
    # zope.interface implementation declarations.
    __implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
                                   r"(?:module|class|directly)?"
                                   r"(?:Provides|Implements)\(\s*(.+)\s*\)",
                                   IGNORECASE)
    __classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
    __interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Interface\s*\)\s*:", IGNORECASE)
    __attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
                                  IGNORECASE)
    # One-line docstring fields (Author:, Copyright:, ...) mapped to the
    # Doxygen tag that replaces the field label.
    __singleLineREs = {
        ' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
        ' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
        ' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
        ' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
        ' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
        ' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
        ' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
    }
    # Docstring section headers ("Args:", "Returns:", "Raises:", ...).
    __argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
                                  r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
                                  r"\s*:\s*)$", IGNORECASE)
    __argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
                             r"(?:-|:)+\s+(?P<desc>.+)$")
    __returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
    __raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
                                    IGNORECASE)
    __listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
    __singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
    __listItemRE = regexpCompile(r'([\w\.]+),?\s*')
    __examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
                                      IGNORECASE)
    __sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
    # The error line should match traceback lines, error exception lines, and
    # (due to a weird behavior of codeop) single word lines.
    __errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
                                  IGNORECASE)
def __init__(self, lines, options, inFilename):
    """Set up the walker state for a single input file."""
    self.lines, self.options, self.inFilename = lines, options, inFilename
    # Scratch buffer holding the docstring currently being rewritten.
    self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
    """Return *inStr* with every ' and ' / ' & ' conjunction removed."""
    assert isinstance(inStr, str)
    for conjunction in (' and ', ' & '):
        inStr = inStr.replace(conjunction, ' ')
    return inStr
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
    """Prepend an @endcode marker to *line* when a code block is open."""
    assert isinstance(line, str)
    if inCodeBlock:
        return '# @endcode{0}{1}'.format(linesep, line.rstrip()), False
    return line, inCodeBlock
@coroutine
def _checkIfCode(self, inCodeBlockObj):
    """
    Checks whether or not a given line appears to be Python code.

    Lines arrive via ``send`` as (line, lines, lineNum) tuples.
    *inCodeBlockObj* is a one-element list used as a mutable flag shared
    with the caller; when the verdict flips, a Doxygen @code/@endcode
    marker is appended to the affected output line in *lines*.
    """
    while True:
        line, lines, lineNum = (yield)
        testLineNum = 1
        currentLineNum = 0
        testLine = line.strip()
        lineOfCode = None
        # Accumulate lines until codeop can render a definite verdict.
        while lineOfCode is None:
            match = AstWalker.__errorLineRE.match(testLine)
            if not testLine or testLine == '...' or match:
                # These are ambiguous.
                line, lines, lineNum = (yield)
                testLine = line.strip()
                #testLineNum = 1
            elif testLine.startswith('>>>'):
                # This is definitely code.
                lineOfCode = True
            else:
                try:
                    compLine = compile_command(testLine)
                    if compLine and lines[currentLineNum].strip().startswith('#'):
                        lineOfCode = True
                    else:
                        line, lines, lineNum = (yield)
                        line = line.strip()
                        if line.startswith('>>>'):
                            # Definitely code, don't compile further.
                            lineOfCode = True
                        else:
                            testLine += linesep + line
                            testLine = testLine.strip()
                            testLineNum += 1
                except (SyntaxError, RuntimeError):
                    # This is definitely not code.
                    lineOfCode = False
                except Exception:
                    # Other errors are ambiguous.
                    line, lines, lineNum = (yield)
                    testLine = line.strip()
                    #testLineNum = 1
            currentLineNum = lineNum - testLineNum
        if not inCodeBlockObj[0] and lineOfCode:
            inCodeBlockObj[0] = True
            lines[currentLineNum] = '{0}{1}# @code{1}'.format(
                lines[currentLineNum],
                linesep
            )
        elif inCodeBlockObj[0] and lineOfCode is False:
            # None is ambiguous, so strict checking
            # against False is necessary.
            inCodeBlockObj[0] = False
            lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
                lines[currentLineNum],
                linesep
            )
@coroutine
def __alterDocstring(self, tail='', writer=None):
    """
    Runs eternally, processing docstring lines.
    Parses docstring lines as they get fed in via send, applies appropriate
    Doxygen tags, and passes them along in batches for writing.

    Input arrives as (lineNum, line) tuples; a line of None is the
    sentinel that flushes the current batch to *writer*.
    """
    assert isinstance(tail, str) and isinstance(writer, GeneratorType)
    lines = []
    timeToSend = False
    inCodeBlock = False
    # Single-element list so _checkIfCode can mutate the flag in place.
    inCodeBlockObj = [False]
    inSection = False
    prefix = ''
    firstLineNum = -1
    sectionHeadingIndent = 0
    codeChecker = self._checkIfCode(inCodeBlockObj)
    while True:
        lineNum, line = (yield)
        if firstLineNum < 0:
            firstLineNum = lineNum
        # Don't bother doing extra work if it's a sentinel.
        if line is not None:
            # Also limit work if we're not parsing the docstring.
            if self.options.autobrief:
                for doxyTag, tagRE in AstWalker.__singleLineREs.items():
                    match = tagRE.search(line)
                    if match:
                        # We've got a simple one-line Doxygen command
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        writer.send((firstLineNum, lineNum - 1, lines))
                        lines = []
                        firstLineNum = lineNum
                        line = line.replace(match.group(1), doxyTag)
                        timeToSend = True
                if inSection:
                    # The last line belonged to a section.
                    # Does this one too? (Ignoring empty lines.)
                    match = AstWalker.__blanklineRE.match(line)
                    if not match:
                        indent = len(line.expandtabs(self.options.tablength)) - \
                            len(line.expandtabs(self.options.tablength).lstrip())
                        if indent <= sectionHeadingIndent:
                            inSection = False
                        else:
                            if lines[-1] == '#':
                                # If the last line was empty, but we're still in a section
                                # then we need to start a new paragraph.
                                lines[-1] = '# @par'
                match = AstWalker.__returnsStartRE.match(line)
                if match:
                    # We've got a "returns" section
                    lines[-1], inCodeBlock = self._endCodeIfNeeded(
                        lines[-1], inCodeBlock)
                    inCodeBlockObj[0] = inCodeBlock
                    line = line.replace(match.group(0), ' @return\t').rstrip()
                    prefix = '@return\t'
                else:
                    match = AstWalker.__argsStartRE.match(line)
                    if match:
                        # We've got an "arguments" section
                        line = line.replace(match.group(0), '').rstrip()
                        if 'attr' in match.group(0).lower():
                            prefix = '@property\t'
                        else:
                            prefix = '@param\t'
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        lines.append('#' + line)
                        continue
                    else:
                        match = AstWalker.__argsRE.match(line)
                        if match and not inCodeBlock:
                            # We've got something that looks like an item /
                            # description pair.
                            if 'property' in prefix:
                                line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
                                    prefix, match.groupdict(), linesep)
                            else:
                                line = ' {0}\t{1[name]}\t{1[desc]}'.format(
                                    prefix, match.groupdict())
                        else:
                            match = AstWalker.__raisesStartRE.match(line)
                            if match:
                                line = line.replace(match.group(0), '').rstrip()
                                if 'see' in match.group(1).lower():
                                    # We've got a "see also" section
                                    prefix = '@sa\t'
                                else:
                                    # We've got an "exceptions" section
                                    prefix = '@exception\t'
                                lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                    lines[-1], inCodeBlock)
                                inCodeBlockObj[0] = inCodeBlock
                                lines.append('#' + line)
                                continue
                            else:
                                match = AstWalker.__listRE.match(line)
                                if match and not inCodeBlock:
                                    # We've got a list of something or another
                                    itemList = []
                                    for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
                                            match.group(0))):
                                        itemList.append('# {0}\t{1}{2}'.format(
                                            prefix, itemMatch, linesep))
                                    line = ''.join(itemList)[1:]
                                else:
                                    match = AstWalker.__examplesStartRE.match(line)
                                    if match and lines[-1].strip() == '#' \
                                            and self.options.autocode:
                                        # We've got an "example" section
                                        inCodeBlock = True
                                        inCodeBlockObj[0] = True
                                        line = line.replace(match.group(0),
                                                            ' @b Examples{0}# @code'.format(linesep))
                                    else:
                                        match = AstWalker.__sectionStartRE.match(line)
                                        if match:
                                            # We've got an arbitrary section
                                            prefix = ''
                                            inSection = True
                                            # What's the indentation of the section heading?
                                            sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
                                                - len(line.expandtabs(self.options.tablength).lstrip())
                                            line = line.replace(
                                                match.group(0),
                                                ' @par {0}'.format(match.group(1))
                                            )
                                            if lines[-1] == '# @par':
                                                lines[-1] = '#'
                                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                                lines[-1], inCodeBlock)
                                            inCodeBlockObj[0] = inCodeBlock
                                            lines.append('#' + line)
                                            continue
                                        elif prefix:
                                            match = AstWalker.__singleListItemRE.match(line)
                                            if match and not inCodeBlock:
                                                # Probably a single list item
                                                line = ' {0}\t{1}'.format(
                                                    prefix, match.group(0))
                                            elif self.options.autocode:
                                                codeChecker.send(
                                                    (
                                                        line, lines,
                                                        lineNum - firstLineNum
                                                    )
                                                )
                                                inCodeBlock = inCodeBlockObj[0]
                                        else:
                                            if self.options.autocode:
                                                codeChecker.send(
                                                    (
                                                        line, lines,
                                                        lineNum - firstLineNum
                                                    )
                                                )
                                                inCodeBlock = inCodeBlockObj[0]
            # If we were passed a tail, append it to the docstring.
            # Note that this means that we need a docstring for this
            # item to get documented.
            if tail and lineNum == len(self.docLines) - 1:
                line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
            # Add comment marker for every line.
            line = '#{0}'.format(line.rstrip())
            # Ensure the first line has the Doxygen double comment.
            if lineNum == 0:
                line = '#' + line
            lines.append(line.replace(' ' + linesep, linesep))
        else:
            # If we get our sentinel value, send out what we've got.
            timeToSend = True
        if timeToSend:
            lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
                                                           inCodeBlock)
            inCodeBlockObj[0] = inCodeBlock
            writer.send((firstLineNum, lineNum, lines))
            lines = []
            firstLineNum = -1
            timeToSend = False
@coroutine
def __writeDocstring(self):
    """
    Runs eternally, dumping out docstring line batches as they get fed in.
    Each batch replaces the original span of docstring lines; it is padded
    with empty strings so the replacement is never shorter than the span.
    """
    while True:
        first, last, batch = (yield)
        span = last - first + 1
        # Pad so the slice assignment keeps line numbering stable.
        while len(batch) < span:
            batch.append('')
        self.docLines[first: last + 1] = batch
@staticmethod
def _checkMemberName(name):
    """
    See if a member name indicates that it should be private.
    A leading double underscore (without a trailing one) marks a
    Python-private name; a single leading underscore marks the
    conventional "protected" level.  Dunder names get no restriction.
    """
    assert isinstance(name, str)
    if name.endswith('__'):
        return None
    if name.startswith('__'):
        return 'private'
    if name.startswith('_'):
        return 'protected'
    return None
def _processMembers(self, node, contextTag):
    """
    Mark up members if they should be private.
    Appends the matching Doxygen access tag to *contextTag* when the
    member's name implies a private or protected level.
    """
    level = self._checkMemberName(node.name)
    if not level:
        return contextTag
    return '{0}{1}# @{2}'.format(contextTag, linesep, level)
def generic_visit(self, node, **kwargs):
    """
    Walk every child AST node, forwarding the containing-node hierarchy.
    Mirrors NodeVisitor.generic_visit, except that the chain of
    containing nodes is threaded through every visit call.
    """
    hierarchy = kwargs['containingNodes']
    for _, value in iter_fields(node):
        children = value if isinstance(value, list) else [value]
        for child in children:
            if isinstance(child, AST):
                self.visit(child, containingNodes=hierarchy)
def visit(self, node, **kwargs):
    """
    Dispatch *node* to its type-specific visitor (or generic_visit).
    Mirrors NodeVisitor.visit, except that the chain of containing
    nodes is forwarded so context is preserved during the walk.
    """
    handler = getattr(self, 'visit_' + type(node).__name__,
                      self.generic_visit)
    return handler(node, containingNodes=kwargs.get('containingNodes', []))
def _getFullPathName(self, containingNodes):
    """
    Return the full node hierarchy rooted at the module name.
    The module entry is prepended to the supplied containing-node list.
    """
    assert isinstance(containingNodes, list)
    moduleEntry = (self.options.fullPathNamespace, 'module')
    return [moduleEntry] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
def visit_Assign(self, node, **kwargs):
"""
Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen.
"""
lineNum = node.lineno - 1
# Assignments have one Doxygen-significant special case:
# interface attributes.
match = AstWalker.__attributeRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
'{0}# @hideinitializer{2}{4}{2}'.format(
match.group(1),
match.group(2),
linesep,
match.group(3),
self.lines[lineNum].rstrip()
)
if self.options.debug:
stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
linesep))
if isinstance(node.targets[0], Name):
match = AstWalker.__indentRE.match(self.lines[lineNum])
indentStr = match and match.group(1) or ''
restrictionLevel = self._checkMemberName(node.targets[0].id)
if restrictionLevel:
self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
indentStr,
node.targets[0].id,
linesep,
restrictionLevel,
self.lines[lineNum].rstrip()
)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
"""
Handles class definitions within code.
Process the docstring. Note though that in Python Class definitions
are used to define interfaces in addition to classes.
If a class definition appears to be an interface definition tag it as an
interface definition for Doxygen. Otherwise tag it as a class
definition for Doxygen.
"""
lineNum = node.lineno - 1
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is a method or an interface method definition or if
# a class is fully contained within another class.
containingNodes = kwargs.get('containingNodes') or []
if not self.options.object_respect:
# Remove object class of the inherited class list to avoid that all
# new-style class inherits from object in the hierarchy class
line = self.lines[lineNum]
match = AstWalker.__classRE.match(line)
if match:
if match.group(2) == 'object':
self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
match = AstWalker.__interfaceRE.match(self.lines[lineNum])
if match:
if self.options.debug:
stderr.write("# Interface {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'interface'))
else:
if self.options.debug:
stderr.write("# Class {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'class'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
# Class definitions have one Doxygen-significant special case:
# interface definitions.
if match:
contextTag = '{0}{1}# @interface {2}'.format(tail,
linesep,
match.group(1))
else:
contextTag = tail
contextTag = self._processMembers(node, contextTag)
if get_docstring(node):
self._processDocstring(node, contextTag,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker._checkMemberName
|
python
|
def _checkMemberName(name):
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
|
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L518-L535
| null |
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
@coroutine
def _checkIfCode(self, inCodeBlockObj):
"""Checks whether or not a given line appears to be Python code."""
while True:
line, lines, lineNum = (yield)
testLineNum = 1
currentLineNum = 0
testLine = line.strip()
lineOfCode = None
while lineOfCode is None:
match = AstWalker.__errorLineRE.match(testLine)
if not testLine or testLine == '...' or match:
# These are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
elif testLine.startswith('>>>'):
# This is definitely code.
lineOfCode = True
else:
try:
compLine = compile_command(testLine)
if compLine and lines[currentLineNum].strip().startswith('#'):
lineOfCode = True
else:
line, lines, lineNum = (yield)
line = line.strip()
if line.startswith('>>>'):
# Definitely code, don't compile further.
lineOfCode = True
else:
testLine += linesep + line
testLine = testLine.strip()
testLineNum += 1
except (SyntaxError, RuntimeError):
# This is definitely not code.
lineOfCode = False
except Exception:
# Other errors are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
currentLineNum = lineNum - testLineNum
if not inCodeBlockObj[0] and lineOfCode:
inCodeBlockObj[0] = True
lines[currentLineNum] = '{0}{1}# @code{1}'.format(
lines[currentLineNum],
linesep
)
elif inCodeBlockObj[0] and lineOfCode is False:
# None is ambiguous, so strict checking
# against False is necessary.
inCodeBlockObj[0] = False
lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
lines[currentLineNum],
linesep
)
@coroutine
def __alterDocstring(self, tail='', writer=None):
"""
Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing.
"""
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
# Don't bother doing extra work if it's a sentinel.
if line is not None:
# Also limit work if we're not parsing the docstring.
if self.options.autobrief:
for doxyTag, tagRE in AstWalker.__singleLineREs.items():
match = tagRE.search(line)
if match:
# We've got a simple one-line Doxygen command
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum - 1, lines))
lines = []
firstLineNum = lineNum
line = line.replace(match.group(1), doxyTag)
timeToSend = True
if inSection:
# The last line belonged to a section.
# Does this one too? (Ignoring empty lines.)
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
# If the last line was empty, but we're still in a section
# then we need to start a new paragraph.
lines[-1] = '# @par'
match = AstWalker.__returnsStartRE.match(line)
if match:
# We've got a "returns" section
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
# We've got an "arguments" section
line = line.replace(match.group(0), '').rstrip()
if 'attr' in match.group(0).lower():
prefix = '@property\t'
else:
prefix = '@param\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__argsRE.match(line)
if match and not inCodeBlock:
# We've got something that looks like an item /
# description pair.
if 'property' in prefix:
line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
prefix, match.groupdict(), linesep)
else:
line = ' {0}\t{1[name]}\t{1[desc]}'.format(
prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
# We've got a "see also" section
prefix = '@sa\t'
else:
# We've got an "exceptions" section
prefix = '@exception\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
# We've got a list of something or another
itemList = []
for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
match.group(0))):
itemList.append('# {0}\t{1}{2}'.format(
prefix, itemMatch, linesep))
line = ''.join(itemList)[1:]
else:
match = AstWalker.__examplesStartRE.match(line)
if match and lines[-1].strip() == '#' \
and self.options.autocode:
# We've got an "example" section
inCodeBlock = True
inCodeBlockObj[0] = True
line = line.replace(match.group(0),
' @b Examples{0}# @code'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
# We've got an arbitrary section
prefix = ''
inSection = True
# What's the indentation of the section heading?
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
line = line.replace(
match.group(0),
' @par {0}'.format(match.group(1))
)
if lines[-1] == '# @par':
lines[-1] = '#'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
# Probably a single list item
line = ' {0}\t{1}'.format(
prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
# If we were passed a tail, append it to the docstring.
# Note that this means that we need a docstring for this
# item to get documented.
if tail and lineNum == len(self.docLines) - 1:
line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add comment marker for every line.
line = '#{0}'.format(line.rstrip())
# Ensure the first line has the Doxygen double comment.
if lineNum == 0:
line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
# If we get our sentinel value, send out what we've got.
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False
@coroutine
def __writeDocstring(self):
"""
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
"""
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
def _processDocstring(self, node, tail='', **kwargs):
"""
Handles a docstring for functions, classes, and modules.
Basically just figures out the bounds of the docstring and sends it
off to the parser to do the actual work.
"""
typeName = type(node).__name__
# Modules don't have lineno defined, but it's always 0 for them.
curLineNum = startLineNum = 0
if typeName != 'Module':
startLineNum = curLineNum = node.lineno - 1
# Figure out where both our enclosing object and our docstring start.
line = ''
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
match = AstWalker.__docstrMarkerRE.match(line)
if match:
break
curLineNum += 1
docstringStart = curLineNum
# Figure out where our docstring ends.
if not AstWalker.__docstrOneLineRE.match(line):
# Skip for the special case of a single-line docstring.
curLineNum += 1
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
if line.find(match.group(2)) >= 0:
break
curLineNum += 1
endLineNum = curLineNum + 1
# Isolate our enclosing object's declaration.
defLines = self.lines[startLineNum: docstringStart]
# Isolate our docstring.
self.docLines = self.lines[docstringStart: endLineNum]
# If we have a docstring, extract information from it.
if self.docLines:
# Get rid of the docstring delineators.
self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[0])
self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[-1])
# Handle special strings within the docstring.
docstringConverter = self.__alterDocstring(
tail, self.__writeDocstring())
for lineInfo in enumerate(self.docLines):
docstringConverter.send(lineInfo)
docstringConverter.send((len(self.docLines) - 1, None))
# Add a Doxygen @brief tag to any single-line description.
if self.options.autobrief:
safetyCounter = 0
while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
del self.docLines[0]
self.docLines.append('')
safetyCounter += 1
if safetyCounter >= len(self.docLines):
# Escape the effectively empty docstring.
break
if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
self.docLines[1].strip(whitespace + '#') == '' or
self.docLines[1].strip(whitespace + '#').startswith('@'))):
self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
if len(self.docLines) > 1 and self.docLines[1] == '# @par':
self.docLines[1] = '#'
if defLines:
match = AstWalker.__indentRE.match(defLines[0])
indentStr = match and match.group(1) or ''
self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
for docLine in self.docLines]
# Taking away a docstring from an interface method definition sometimes
# leaves broken code as the docstring may be the only code in it.
# Here we manually insert a pass statement to rectify this problem.
if typeName != 'Module':
if docstringStart < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[docstringStart])
indentStr = match and match.group(1) or ''
else:
indentStr = ''
containingNodes = kwargs.get('containingNodes', []) or []
fullPathNamespace = self._getFullPathName(containingNodes)
parentType = fullPathNamespace[-2][1]
if parentType == 'interface' and typeName == 'FunctionDef' \
or fullPathNamespace[-1][1] == 'interface':
defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
linesep, indentStr)
elif self.options.autobrief and typeName == 'ClassDef':
# If we're parsing docstrings separate out class attribute
# definitions to get better Doxygen output.
for firstVarLineNum, firstVarLine in enumerate(self.docLines):
if '@property\t' in firstVarLine:
break
lastVarLineNum = len(self.docLines)
if lastVarLineNum > 0 and '@property\t' in firstVarLine:
while lastVarLineNum > firstVarLineNum:
lastVarLineNum -= 1
if '@property\t' in self.docLines[lastVarLineNum]:
break
lastVarLineNum += 1
if firstVarLineNum < len(self.docLines):
indentLineNum = endLineNum
indentStr = ''
while not indentStr and indentLineNum < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[indentLineNum])
indentStr = match and match.group(1) or ''
indentLineNum += 1
varLines = ['{0}{1}'.format(linesep, docLine).replace(
linesep, linesep + indentStr)
for docLine in self.docLines[
firstVarLineNum: lastVarLineNum]]
defLines.extend(varLines)
self.docLines[firstVarLineNum: lastVarLineNum] = []
# After the property shuffling we will need to relocate
# any existing namespace information.
namespaceLoc = defLines[-1].find('\n# @namespace')
if namespaceLoc >= 0:
self.docLines[-1] += defLines[-1][namespaceLoc:]
defLines[-1] = defLines[-1][:namespaceLoc]
# For classes and functions, apply our changes and reverse the
# order of the declaration and docstring, and for modules just
# apply our changes.
if typeName != 'Module':
self.lines[startLineNum: endLineNum] = self.docLines + defLines
else:
self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _processMembers(self, node, contextTag):
"""
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
"""
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
def visit_Assign(self, node, **kwargs):
"""
Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen.
"""
lineNum = node.lineno - 1
# Assignments have one Doxygen-significant special case:
# interface attributes.
match = AstWalker.__attributeRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
'{0}# @hideinitializer{2}{4}{2}'.format(
match.group(1),
match.group(2),
linesep,
match.group(3),
self.lines[lineNum].rstrip()
)
if self.options.debug:
stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
linesep))
if isinstance(node.targets[0], Name):
match = AstWalker.__indentRE.match(self.lines[lineNum])
indentStr = match and match.group(1) or ''
restrictionLevel = self._checkMemberName(node.targets[0].id)
if restrictionLevel:
self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
indentStr,
node.targets[0].id,
linesep,
restrictionLevel,
self.lines[lineNum].rstrip()
)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
"""
Handles class definitions within code.
Process the docstring. Note though that in Python Class definitions
are used to define interfaces in addition to classes.
If a class definition appears to be an interface definition tag it as an
interface definition for Doxygen. Otherwise tag it as a class
definition for Doxygen.
"""
lineNum = node.lineno - 1
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is a method or an interface method definition or if
# a class is fully contained within another class.
containingNodes = kwargs.get('containingNodes') or []
if not self.options.object_respect:
# Remove object class of the inherited class list to avoid that all
# new-style class inherits from object in the hierarchy class
line = self.lines[lineNum]
match = AstWalker.__classRE.match(line)
if match:
if match.group(2) == 'object':
self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
match = AstWalker.__interfaceRE.match(self.lines[lineNum])
if match:
if self.options.debug:
stderr.write("# Interface {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'interface'))
else:
if self.options.debug:
stderr.write("# Class {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'class'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
# Class definitions have one Doxygen-significant special case:
# interface definitions.
if match:
contextTag = '{0}{1}# @interface {2}'.format(tail,
linesep,
match.group(1))
else:
contextTag = tail
contextTag = self._processMembers(node, contextTag)
if get_docstring(node):
self._processDocstring(node, contextTag,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker._processMembers
|
python
|
def _processMembers(self, node, contextTag):
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
|
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L537-L551
|
[
"def _checkMemberName(name):\n \"\"\"\n See if a member name indicates that it should be private.\n\n Private variables in Python (starting with a double underscore but\n not ending in a double underscore) and bed lumps (variables that\n are not really private but are by common convention treated as\n protected because they begin with a single underscore) get Doxygen\n tags labeling them appropriately.\n \"\"\"\n assert isinstance(name, str)\n restrictionLevel = None\n if not name.endswith('__'):\n if name.startswith('__'):\n restrictionLevel = 'private'\n elif name.startswith('_'):\n restrictionLevel = 'protected'\n return restrictionLevel\n"
] |
class AstWalker(NodeVisitor):
    """
    A walker that'll recursively progress through an AST.
    Given an abstract syntax tree for Python code, walk through all the
    nodes looking for significant types (for our purposes we only care
    about module starts, class definitions, function definitions, variable
    assignments, and function calls, as all the information we want to pass
    to Doxygen is found within these constructs). If the autobrief option
    is set, it further attempts to parse docstrings to create appropriate
    Doxygen tags.
    """
    # We have a number of regular expressions that we use. They don't
    # vary across instances and so are compiled directly in the class
    # definition.
    # Leading whitespace up to the first non-space character of a line.
    __indentRE = regexpCompile(r'^(\s*)\S')
    # A comment marker at the start of a line (multi-line mode).
    __newlineRE = regexpCompile(r'^#', MULTILINE)
    # A completely blank (whitespace-only) line.
    __blanklineRE = regexpCompile(r'^\s*$')
    # An opening docstring delimiter, with optional string prefixes.
    __docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
    # A docstring that opens and closes on the same line.
    __docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
    # A zope-style interface implementation declaration.
    __implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
                                   r"(?:module|class|directly)?"
                                   r"(?:Provides|Implements)\(\s*(.+)\s*\)",
                                   IGNORECASE)
    # A class definition line, capturing the name and the first base.
    __classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
    # A class deriving from (zope.)interface.Interface.
    __interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Interface\s*\)\s*:", IGNORECASE)
    # A zope-style Attribute(...) assignment within an interface.
    __attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
                                  IGNORECASE)
    # Maps Doxygen tags to the one-line docstring headings they replace.
    __singleLineREs = {
        ' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
        ' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
        ' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
        ' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
        ' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
        ' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
        ' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
    }
    # The start of an arguments/attributes section heading.
    __argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
                                  r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
                                  r"\s*:\s*)$", IGNORECASE)
    # A "name (type) - description" argument line.
    __argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
                             r"(?:-|:)+\s+(?P<desc>.+)$")
    # The start of a returns/yields section heading.
    __returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
    # The start of a raises/exceptions/see-also section heading.
    __raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
                                    IGNORECASE)
    # A comma-separated list of names, optionally with a conjunction.
    __listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
    __singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
    __listItemRE = regexpCompile(r'([\w\.]+),?\s*')
    # The start of an examples/doctests section heading.
    __examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
                                      IGNORECASE)
    # A generic "Some Heading:" section start.
    __sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
    # The error line should match traceback lines, error exception lines, and
    # (due to a weird behavior of codeop) single word lines.
    __errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
                                  IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
    @coroutine
    def _checkIfCode(self, inCodeBlockObj):
        """
        Checks whether or not a given line appears to be Python code.
        Coroutine: each send supplies (line, lines, lineNum).  inCodeBlockObj
        is a single-element list used as a mutable flag shared with the
        caller; element 0 tracks whether we are currently inside a code
        block, and @code/@endcode markers are spliced into lines when the
        state flips.
        """
        while True:
            line, lines, lineNum = (yield)
            # How many input lines have been folded into the compile test.
            testLineNum = 1
            currentLineNum = 0
            testLine = line.strip()
            # Tri-state: None = undecided, True = code, False = not code.
            lineOfCode = None
            while lineOfCode is None:
                match = AstWalker.__errorLineRE.match(testLine)
                if not testLine or testLine == '...' or match:
                    # These are ambiguous.
                    line, lines, lineNum = (yield)
                    testLine = line.strip()
                    #testLineNum = 1
                elif testLine.startswith('>>>'):
                    # This is definitely code.
                    lineOfCode = True
                else:
                    try:
                        # compile_command returns None for an incomplete but
                        # plausible statement, a code object when complete.
                        compLine = compile_command(testLine)
                        if compLine and lines[currentLineNum].strip().startswith('#'):
                            lineOfCode = True
                        else:
                            line, lines, lineNum = (yield)
                            line = line.strip()
                            if line.startswith('>>>'):
                                # Definitely code, don't compile further.
                                lineOfCode = True
                            else:
                                testLine += linesep + line
                                testLine = testLine.strip()
                                testLineNum += 1
                    except (SyntaxError, RuntimeError):
                        # This is definitely not code.
                        lineOfCode = False
                    except Exception:
                        # Other errors are ambiguous.
                        line, lines, lineNum = (yield)
                        testLine = line.strip()
                        #testLineNum = 1
            # Index of the line where the decision applies.
            currentLineNum = lineNum - testLineNum
            if not inCodeBlockObj[0] and lineOfCode:
                # Transition into code: open a Doxygen code block.
                inCodeBlockObj[0] = True
                lines[currentLineNum] = '{0}{1}# @code{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
            elif inCodeBlockObj[0] and lineOfCode is False:
                # None is ambiguous, so strict checking
                # against False is necessary.
                inCodeBlockObj[0] = False
                lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
    @coroutine
    def __alterDocstring(self, tail='', writer=None):
        """
        Runs eternally, processing docstring lines.
        Parses docstring lines as they get fed in via send, applies appropriate
        Doxygen tags, and passes them along in batches for writing.
        When non-empty, *tail* (e.g. a @namespace tag) is appended after the
        final docstring line; *writer* is the coroutine that splices each
        converted batch back into self.docLines.  A (lineNum, None) send acts
        as the sentinel that flushes the final batch.
        """
        assert isinstance(tail, str) and isinstance(writer, GeneratorType)
        lines = []
        timeToSend = False
        inCodeBlock = False
        # Mutable flag box shared with the _checkIfCode coroutine.
        inCodeBlockObj = [False]
        inSection = False
        prefix = ''
        firstLineNum = -1
        sectionHeadingIndent = 0
        # Coroutine that decides whether example lines are Python code.
        codeChecker = self._checkIfCode(inCodeBlockObj)
        while True:
            lineNum, line = (yield)
            if firstLineNum < 0:
                firstLineNum = lineNum
            # Don't bother doing extra work if it's a sentinel.
            if line is not None:
                # Also limit work if we're not parsing the docstring.
                if self.options.autobrief:
                    for doxyTag, tagRE in AstWalker.__singleLineREs.items():
                        match = tagRE.search(line)
                        if match:
                            # We've got a simple one-line Doxygen command
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            writer.send((firstLineNum, lineNum - 1, lines))
                            lines = []
                            firstLineNum = lineNum
                            line = line.replace(match.group(1), doxyTag)
                            timeToSend = True
                    if inSection:
                        # The last line belonged to a section.
                        # Does this one too? (Ignoring empty lines.)
                        match = AstWalker.__blanklineRE.match(line)
                        if not match:
                            indent = len(line.expandtabs(self.options.tablength)) - \
                                len(line.expandtabs(self.options.tablength).lstrip())
                            if indent <= sectionHeadingIndent:
                                inSection = False
                            else:
                                if lines[-1] == '#':
                                    # If the last line was empty, but we're still in a section
                                    # then we need to start a new paragraph.
                                    lines[-1] = '# @par'
                    # The following chain tries each docstring construct in
                    # priority order; only the first match transforms the line.
                    match = AstWalker.__returnsStartRE.match(line)
                    if match:
                        # We've got a "returns" section
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        line = line.replace(match.group(0), ' @return\t').rstrip()
                        prefix = '@return\t'
                    else:
                        match = AstWalker.__argsStartRE.match(line)
                        if match:
                            # We've got an "arguments" section
                            line = line.replace(match.group(0), '').rstrip()
                            if 'attr' in match.group(0).lower():
                                prefix = '@property\t'
                            else:
                                prefix = '@param\t'
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            lines.append('#' + line)
                            continue
                        else:
                            match = AstWalker.__argsRE.match(line)
                            if match and not inCodeBlock:
                                # We've got something that looks like an item /
                                # description pair.
                                if 'property' in prefix:
                                    line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
                                        prefix, match.groupdict(), linesep)
                                else:
                                    line = ' {0}\t{1[name]}\t{1[desc]}'.format(
                                        prefix, match.groupdict())
                            else:
                                match = AstWalker.__raisesStartRE.match(line)
                                if match:
                                    line = line.replace(match.group(0), '').rstrip()
                                    if 'see' in match.group(1).lower():
                                        # We've got a "see also" section
                                        prefix = '@sa\t'
                                    else:
                                        # We've got an "exceptions" section
                                        prefix = '@exception\t'
                                    lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                        lines[-1], inCodeBlock)
                                    inCodeBlockObj[0] = inCodeBlock
                                    lines.append('#' + line)
                                    continue
                                else:
                                    match = AstWalker.__listRE.match(line)
                                    if match and not inCodeBlock:
                                        # We've got a list of something or another
                                        itemList = []
                                        for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
                                                match.group(0))):
                                            itemList.append('# {0}\t{1}{2}'.format(
                                                prefix, itemMatch, linesep))
                                        line = ''.join(itemList)[1:]
                                    else:
                                        match = AstWalker.__examplesStartRE.match(line)
                                        if match and lines[-1].strip() == '#' \
                                           and self.options.autocode:
                                            # We've got an "example" section
                                            inCodeBlock = True
                                            inCodeBlockObj[0] = True
                                            line = line.replace(match.group(0),
                                                                ' @b Examples{0}# @code'.format(linesep))
                                        else:
                                            match = AstWalker.__sectionStartRE.match(line)
                                            if match:
                                                # We've got an arbitrary section
                                                prefix = ''
                                                inSection = True
                                                # What's the indentation of the section heading?
                                                sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
                                                    - len(line.expandtabs(self.options.tablength).lstrip())
                                                line = line.replace(
                                                    match.group(0),
                                                    ' @par {0}'.format(match.group(1))
                                                )
                                                if lines[-1] == '# @par':
                                                    lines[-1] = '#'
                                                lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                                    lines[-1], inCodeBlock)
                                                inCodeBlockObj[0] = inCodeBlock
                                                lines.append('#' + line)
                                                continue
                                            elif prefix:
                                                match = AstWalker.__singleListItemRE.match(line)
                                                if match and not inCodeBlock:
                                                    # Probably a single list item
                                                    line = ' {0}\t{1}'.format(
                                                        prefix, match.group(0))
                                                elif self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                                            else:
                                                if self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                # If we were passed a tail, append it to the docstring.
                # Note that this means that we need a docstring for this
                # item to get documented.
                if tail and lineNum == len(self.docLines) - 1:
                    line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
                # Add comment marker for every line.
                line = '#{0}'.format(line.rstrip())
                # Ensure the first line has the Doxygen double comment.
                if lineNum == 0:
                    line = '#' + line
                lines.append(line.replace(' ' + linesep, linesep))
            else:
                # If we get our sentinel value, send out what we've got.
                timeToSend = True
            if timeToSend:
                lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
                                                               inCodeBlock)
                inCodeBlockObj[0] = inCodeBlock
                writer.send((firstLineNum, lineNum, lines))
                lines = []
                firstLineNum = -1
                timeToSend = False
@coroutine
def __writeDocstring(self):
"""
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
"""
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
    def _processDocstring(self, node, tail='', **kwargs):
        """
        Handles a docstring for functions, classes, and modules.
        Basically just figures out the bounds of the docstring and sends it
        off to the parser to do the actual work.  *tail* is appended after
        the last converted docstring line; kwargs may carry containingNodes,
        the (name, type) context stack built up by the visit_* methods.
        """
        typeName = type(node).__name__
        # Modules don't have lineno defined, but it's always 0 for them.
        curLineNum = startLineNum = 0
        if typeName != 'Module':
            startLineNum = curLineNum = node.lineno - 1
        # Figure out where both our enclosing object and our docstring start.
        line = ''
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            match = AstWalker.__docstrMarkerRE.match(line)
            if match:
                break
            curLineNum += 1
        docstringStart = curLineNum
        # Figure out where our docstring ends.
        if not AstWalker.__docstrOneLineRE.match(line):
            # Skip for the special case of a single-line docstring.
            curLineNum += 1
            while curLineNum < len(self.lines):
                line = self.lines[curLineNum]
                if line.find(match.group(2)) >= 0:
                    break
                curLineNum += 1
        endLineNum = curLineNum + 1
        # Isolate our enclosing object's declaration.
        defLines = self.lines[startLineNum: docstringStart]
        # Isolate our docstring.
        self.docLines = self.lines[docstringStart: endLineNum]
        # If we have a docstring, extract information from it.
        if self.docLines:
            # Get rid of the docstring delineators.
            self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                              self.docLines[0])
            self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                               self.docLines[-1])
            # Handle special strings within the docstring.
            docstringConverter = self.__alterDocstring(
                tail, self.__writeDocstring())
            for lineInfo in enumerate(self.docLines):
                docstringConverter.send(lineInfo)
            # Sentinel send: flushes the converter's final batch.
            docstringConverter.send((len(self.docLines) - 1, None))
            # Add a Doxygen @brief tag to any single-line description.
            if self.options.autobrief:
                # Rotate leading blank comment lines to the end, guarding
                # against an effectively empty docstring with the counter.
                safetyCounter = 0
                while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
                    del self.docLines[0]
                    self.docLines.append('')
                    safetyCounter += 1
                    if safetyCounter >= len(self.docLines):
                        # Escape the effectively empty docstring.
                        break
                if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                        self.docLines[1].strip(whitespace + '#') == '' or
                        self.docLines[1].strip(whitespace + '#').startswith('@'))):
                    self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
                    if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                        self.docLines[1] = '#'
            if defLines:
                # Re-indent the converted docstring to the declaration's level.
                match = AstWalker.__indentRE.match(defLines[0])
                indentStr = match and match.group(1) or ''
                self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                                 for docLine in self.docLines]
        # Taking away a docstring from an interface method definition sometimes
        # leaves broken code as the docstring may be the only code in it.
        # Here we manually insert a pass statement to rectify this problem.
        if typeName != 'Module':
            if docstringStart < len(self.lines):
                match = AstWalker.__indentRE.match(self.lines[docstringStart])
                indentStr = match and match.group(1) or ''
            else:
                indentStr = ''
            containingNodes = kwargs.get('containingNodes', []) or []
            fullPathNamespace = self._getFullPathName(containingNodes)
            parentType = fullPathNamespace[-2][1]
            if parentType == 'interface' and typeName == 'FunctionDef' \
               or fullPathNamespace[-1][1] == 'interface':
                defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                      linesep, indentStr)
            elif self.options.autobrief and typeName == 'ClassDef':
                # If we're parsing docstrings separate out class attribute
                # definitions to get better Doxygen output.
                for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                    if '@property\t' in firstVarLine:
                        break
                lastVarLineNum = len(self.docLines)
                if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                    while lastVarLineNum > firstVarLineNum:
                        lastVarLineNum -= 1
                        if '@property\t' in self.docLines[lastVarLineNum]:
                            break
                    lastVarLineNum += 1
                if firstVarLineNum < len(self.docLines):
                    # Find the indent of the first indented line after the
                    # docstring so the moved lines line up with the body.
                    indentLineNum = endLineNum
                    indentStr = ''
                    while not indentStr and indentLineNum < len(self.lines):
                        match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                        indentStr = match and match.group(1) or ''
                        indentLineNum += 1
                    varLines = ['{0}{1}'.format(linesep, docLine).replace(
                        linesep, linesep + indentStr)
                        for docLine in self.docLines[
                            firstVarLineNum: lastVarLineNum]]
                    defLines.extend(varLines)
                    self.docLines[firstVarLineNum: lastVarLineNum] = []
                    # After the property shuffling we will need to relocate
                    # any existing namespace information.
                    namespaceLoc = defLines[-1].find('\n# @namespace')
                    if namespaceLoc >= 0:
                        self.docLines[-1] += defLines[-1][namespaceLoc:]
                        defLines[-1] = defLines[-1][:namespaceLoc]
        # For classes and functions, apply our changes and reverse the
        # order of the declaration and docstring, and for modules just
        # apply our changes.
        if typeName != 'Module':
            self.lines[startLineNum: endLineNum] = self.docLines + defLines
        else:
            self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
    def visit_Assign(self, node, **kwargs):
        """
        Handles assignments within code.
        Variable assignments in Python are used to represent interface
        attributes in addition to basic variables. If an assignment appears
        to be an attribute, it gets labeled as such for Doxygen. If a variable
        name uses Python mangling or is just a bed lump, it is labeled as
        private for Doxygen.
        """
        lineNum = node.lineno - 1
        # Assignments have one Doxygen-significant special case:
        # interface attributes.
        match = AstWalker.__attributeRE.match(self.lines[lineNum])
        if match:
            # Rewrite the line as a @property with @hideinitializer so the
            # zope Attribute description shows up in the Doxygen output.
            self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                                  '{0}# @hideinitializer{2}{4}{2}'.format(
                match.group(1),
                match.group(2),
                linesep,
                match.group(3),
                self.lines[lineNum].rstrip()
            )
            if self.options.debug:
                stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                            linesep))
        if isinstance(node.targets[0], Name):
            match = AstWalker.__indentRE.match(self.lines[lineNum])
            indentStr = match and match.group(1) or ''
            restrictionLevel = self._checkMemberName(node.targets[0].id)
            if restrictionLevel:
                # Tag name-mangled / underscore-prefixed variables with the
                # appropriate Doxygen access level.
                self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                                      '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                    indentStr,
                    node.targets[0].id,
                    linesep,
                    restrictionLevel,
                    self.lines[lineNum].rstrip()
                )
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
    def visit_ClassDef(self, node, **kwargs):
        """
        Handles class definitions within code.
        Process the docstring. Note though that in Python Class definitions
        are used to define interfaces in addition to classes.
        If a class definition appears to be an interface definition tag it as an
        interface definition for Doxygen. Otherwise tag it as a class
        definition for Doxygen.
        """
        lineNum = node.lineno - 1
        # Push either 'interface' or 'class' onto our containing nodes
        # hierarchy so we can keep track of context. This will let us tell
        # if a function is a method or an interface method definition or if
        # a class is fully contained within another class.
        containingNodes = kwargs.get('containingNodes') or []
        if not self.options.object_respect:
            # Remove object class of the inherited class list to avoid that all
            # new-style class inherits from object in the hierarchy class
            line = self.lines[lineNum]
            match = AstWalker.__classRE.match(line)
            if match:
                if match.group(2) == 'object':
                    self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
        match = AstWalker.__interfaceRE.match(self.lines[lineNum])
        if match:
            if self.options.debug:
                stderr.write("# Interface {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'interface'))
        else:
            if self.options.debug:
                stderr.write("# Class {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'class'))
        if self.options.topLevelNamespace:
            fullPathNamespace = self._getFullPathName(containingNodes)
            contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
            tail = '@namespace {0}'.format(contextTag)
        else:
            tail = ''
        # Class definitions have one Doxygen-significant special case:
        # interface definitions.
        if match:
            contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                         linesep,
                                                         match.group(1))
        else:
            contextTag = tail
        contextTag = self._processMembers(node, contextTag)
        if get_docstring(node):
            self._processDocstring(node, contextTag,
                                   containingNodes=containingNodes)
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=containingNodes)
        # Remove the item we pushed onto the containing nodes hierarchy.
        containingNodes.pop()
def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.