id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 51 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
240,600 | jmcarpenter2/swifter | swifter/swifter.py | _SwifterObject.set_npartitions | def set_npartitions(self, npartitions=None):
"""
Set the number of partitions to use for dask
"""
if npartitions is None:
self._npartitions = cpu_count() * 2
else:
self._npartitions = npartitions
return self | python | def set_npartitions(self, npartitions=None):
if npartitions is None:
self._npartitions = cpu_count() * 2
else:
self._npartitions = npartitions
return self | [
"def",
"set_npartitions",
"(",
"self",
",",
"npartitions",
"=",
"None",
")",
":",
"if",
"npartitions",
"is",
"None",
":",
"self",
".",
"_npartitions",
"=",
"cpu_count",
"(",
")",
"*",
"2",
"else",
":",
"self",
".",
"_npartitions",
"=",
"npartitions",
"re... | Set the number of partitions to use for dask | [
"Set",
"the",
"number",
"of",
"partitions",
"to",
"use",
"for",
"dask"
] | ed5fc3235b43f981fa58ac9bc982c8209d4e3df3 | https://github.com/jmcarpenter2/swifter/blob/ed5fc3235b43f981fa58ac9bc982c8209d4e3df3/swifter/swifter.py#L39-L47 |
240,601 | jmcarpenter2/swifter | swifter/swifter.py | _SwifterObject.rolling | def rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None):
"""
Create a swifter rolling object
"""
kwds = {
"window": window,
"min_periods": min_periods,
"center": center,
"win_type": win_type,
"on": on,
"axis": axis,
"closed": closed,
}
return Rolling(self._obj, self._npartitions, self._dask_threshold, self._scheduler, self._progress_bar, **kwds) | python | def rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None):
kwds = {
"window": window,
"min_periods": min_periods,
"center": center,
"win_type": win_type,
"on": on,
"axis": axis,
"closed": closed,
}
return Rolling(self._obj, self._npartitions, self._dask_threshold, self._scheduler, self._progress_bar, **kwds) | [
"def",
"rolling",
"(",
"self",
",",
"window",
",",
"min_periods",
"=",
"None",
",",
"center",
"=",
"False",
",",
"win_type",
"=",
"None",
",",
"on",
"=",
"None",
",",
"axis",
"=",
"0",
",",
"closed",
"=",
"None",
")",
":",
"kwds",
"=",
"{",
"\"wi... | Create a swifter rolling object | [
"Create",
"a",
"swifter",
"rolling",
"object"
] | ed5fc3235b43f981fa58ac9bc982c8209d4e3df3 | https://github.com/jmcarpenter2/swifter/blob/ed5fc3235b43f981fa58ac9bc982c8209d4e3df3/swifter/swifter.py#L78-L91 |
240,602 | jmcarpenter2/swifter | swifter/swifter.py | Transformation.apply | def apply(self, func, *args, **kwds):
"""
Apply the function to the transformed swifter object
"""
# estimate time to pandas apply
wrapped = self._wrapped_apply(func, *args, **kwds)
n_repeats = 3
timed = timeit.timeit(wrapped, number=n_repeats)
samp_proc_est = timed / n_repeats
est_apply_duration = samp_proc_est / self._SAMP_SIZE * self._nrows
# if pandas apply takes too long, use dask
if est_apply_duration > self._dask_threshold:
return self._dask_apply(func, *args, **kwds)
else: # use pandas
if self._progress_bar:
tqdm.pandas(desc="Pandas Apply")
return self._obj_pd.progress_apply(func, *args, **kwds)
else:
return self._obj_pd.apply(func, *args, **kwds) | python | def apply(self, func, *args, **kwds):
# estimate time to pandas apply
wrapped = self._wrapped_apply(func, *args, **kwds)
n_repeats = 3
timed = timeit.timeit(wrapped, number=n_repeats)
samp_proc_est = timed / n_repeats
est_apply_duration = samp_proc_est / self._SAMP_SIZE * self._nrows
# if pandas apply takes too long, use dask
if est_apply_duration > self._dask_threshold:
return self._dask_apply(func, *args, **kwds)
else: # use pandas
if self._progress_bar:
tqdm.pandas(desc="Pandas Apply")
return self._obj_pd.progress_apply(func, *args, **kwds)
else:
return self._obj_pd.apply(func, *args, **kwds) | [
"def",
"apply",
"(",
"self",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"# estimate time to pandas apply",
"wrapped",
"=",
"self",
".",
"_wrapped_apply",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"n_repeats",
"=",
... | Apply the function to the transformed swifter object | [
"Apply",
"the",
"function",
"to",
"the",
"transformed",
"swifter",
"object"
] | ed5fc3235b43f981fa58ac9bc982c8209d4e3df3 | https://github.com/jmcarpenter2/swifter/blob/ed5fc3235b43f981fa58ac9bc982c8209d4e3df3/swifter/swifter.py#L338-L357 |
240,603 | summernote/django-summernote | django_summernote/utils.py | using_config | def using_config(_func=None):
"""
This allows a function to use Summernote configuration
as a global variable, temporarily.
"""
def decorator(func):
@wraps(func)
def inner_dec(*args, **kwargs):
g = func.__globals__
var_name = 'config'
sentinel = object()
oldvalue = g.get(var_name, sentinel)
g[var_name] = apps.get_app_config('django_summernote').config
try:
res = func(*args, **kwargs)
finally:
if oldvalue is sentinel:
del g[var_name]
else:
g[var_name] = oldvalue
return res
return inner_dec
if _func is None:
return decorator
else:
return decorator(_func) | python | def using_config(_func=None):
def decorator(func):
@wraps(func)
def inner_dec(*args, **kwargs):
g = func.__globals__
var_name = 'config'
sentinel = object()
oldvalue = g.get(var_name, sentinel)
g[var_name] = apps.get_app_config('django_summernote').config
try:
res = func(*args, **kwargs)
finally:
if oldvalue is sentinel:
del g[var_name]
else:
g[var_name] = oldvalue
return res
return inner_dec
if _func is None:
return decorator
else:
return decorator(_func) | [
"def",
"using_config",
"(",
"_func",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"inner_dec",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"g",
"=",
"func",
".",
"__globals__",
... | This allows a function to use Summernote configuration
as a global variable, temporarily. | [
"This",
"allows",
"a",
"function",
"to",
"use",
"Summernote",
"configuration",
"as",
"a",
"global",
"variable",
"temporarily",
"."
] | bc7fbbf065d88a909fe3e1533c84110e0dd132bc | https://github.com/summernote/django-summernote/blob/bc7fbbf065d88a909fe3e1533c84110e0dd132bc/django_summernote/utils.py#L117-L146 |
240,604 | summernote/django-summernote | django_summernote/utils.py | uploaded_filepath | def uploaded_filepath(instance, filename):
"""
Returns default filepath for uploaded files.
"""
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
today = datetime.now().strftime('%Y-%m-%d')
return os.path.join('django-summernote', today, filename) | python | def uploaded_filepath(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
today = datetime.now().strftime('%Y-%m-%d')
return os.path.join('django-summernote', today, filename) | [
"def",
"uploaded_filepath",
"(",
"instance",
",",
"filename",
")",
":",
"ext",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"filename",
"=",
"\"%s.%s\"",
"%",
"(",
"uuid",
".",
"uuid4",
"(",
")",
",",
"ext",
")",
"today",
"="... | Returns default filepath for uploaded files. | [
"Returns",
"default",
"filepath",
"for",
"uploaded",
"files",
"."
] | bc7fbbf065d88a909fe3e1533c84110e0dd132bc | https://github.com/summernote/django-summernote/blob/bc7fbbf065d88a909fe3e1533c84110e0dd132bc/django_summernote/utils.py#L149-L156 |
240,605 | summernote/django-summernote | django_summernote/utils.py | get_attachment_model | def get_attachment_model():
"""
Returns the Attachment model that is active in this project.
"""
try:
from .models import AbstractAttachment
klass = apps.get_model(config["attachment_model"])
if not issubclass(klass, AbstractAttachment):
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that is not "
"inherited from 'django_summernote.models.AbstractAttachment'" % config["attachment_model"]
)
return klass
except ValueError:
raise ImproperlyConfigured("SUMMERNOTE_CONFIG['attachment_model'] must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that has not been installed" % config["attachment_model"]
) | python | def get_attachment_model():
try:
from .models import AbstractAttachment
klass = apps.get_model(config["attachment_model"])
if not issubclass(klass, AbstractAttachment):
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that is not "
"inherited from 'django_summernote.models.AbstractAttachment'" % config["attachment_model"]
)
return klass
except ValueError:
raise ImproperlyConfigured("SUMMERNOTE_CONFIG['attachment_model'] must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that has not been installed" % config["attachment_model"]
) | [
"def",
"get_attachment_model",
"(",
")",
":",
"try",
":",
"from",
".",
"models",
"import",
"AbstractAttachment",
"klass",
"=",
"apps",
".",
"get_model",
"(",
"config",
"[",
"\"attachment_model\"",
"]",
")",
"if",
"not",
"issubclass",
"(",
"klass",
",",
"Abst... | Returns the Attachment model that is active in this project. | [
"Returns",
"the",
"Attachment",
"model",
"that",
"is",
"active",
"in",
"this",
"project",
"."
] | bc7fbbf065d88a909fe3e1533c84110e0dd132bc | https://github.com/summernote/django-summernote/blob/bc7fbbf065d88a909fe3e1533c84110e0dd132bc/django_summernote/utils.py#L180-L199 |
240,606 | PyMySQL/mysqlclient-python | MySQLdb/connections.py | numeric_part | def numeric_part(s):
"""Returns the leading numeric part of a string.
>>> numeric_part("20-alpha")
20
>>> numeric_part("foo")
>>> numeric_part("16b")
16
"""
m = re_numeric_part.match(s)
if m:
return int(m.group(1))
return None | python | def numeric_part(s):
m = re_numeric_part.match(s)
if m:
return int(m.group(1))
return None | [
"def",
"numeric_part",
"(",
"s",
")",
":",
"m",
"=",
"re_numeric_part",
".",
"match",
"(",
"s",
")",
"if",
"m",
":",
"return",
"int",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
"return",
"None"
] | Returns the leading numeric part of a string.
>>> numeric_part("20-alpha")
20
>>> numeric_part("foo")
>>> numeric_part("16b")
16 | [
"Returns",
"the",
"leading",
"numeric",
"part",
"of",
"a",
"string",
"."
] | b66971ee36be96b772ae7fdec79ccc1611376f3c | https://github.com/PyMySQL/mysqlclient-python/blob/b66971ee36be96b772ae7fdec79ccc1611376f3c/MySQLdb/connections.py#L21-L34 |
240,607 | PyMySQL/mysqlclient-python | MySQLdb/connections.py | Connection.literal | def literal(self, o):
"""If o is a single object, returns an SQL literal as a string.
If o is a non-string sequence, the items of the sequence are
converted and returned as a sequence.
Non-standard. For internal use; do not use this in your
applications.
"""
if isinstance(o, unicode):
s = self.string_literal(o.encode(self.encoding))
elif isinstance(o, bytearray):
s = self._bytes_literal(o)
elif isinstance(o, bytes):
if PY2:
s = self.string_literal(o)
else:
s = self._bytes_literal(o)
elif isinstance(o, (tuple, list)):
s = self._tuple_literal(o)
else:
s = self.escape(o, self.encoders)
if isinstance(s, unicode):
s = s.encode(self.encoding)
assert isinstance(s, bytes)
return s | python | def literal(self, o):
if isinstance(o, unicode):
s = self.string_literal(o.encode(self.encoding))
elif isinstance(o, bytearray):
s = self._bytes_literal(o)
elif isinstance(o, bytes):
if PY2:
s = self.string_literal(o)
else:
s = self._bytes_literal(o)
elif isinstance(o, (tuple, list)):
s = self._tuple_literal(o)
else:
s = self.escape(o, self.encoders)
if isinstance(s, unicode):
s = s.encode(self.encoding)
assert isinstance(s, bytes)
return s | [
"def",
"literal",
"(",
"self",
",",
"o",
")",
":",
"if",
"isinstance",
"(",
"o",
",",
"unicode",
")",
":",
"s",
"=",
"self",
".",
"string_literal",
"(",
"o",
".",
"encode",
"(",
"self",
".",
"encoding",
")",
")",
"elif",
"isinstance",
"(",
"o",
"... | If o is a single object, returns an SQL literal as a string.
If o is a non-string sequence, the items of the sequence are
converted and returned as a sequence.
Non-standard. For internal use; do not use this in your
applications. | [
"If",
"o",
"is",
"a",
"single",
"object",
"returns",
"an",
"SQL",
"literal",
"as",
"a",
"string",
".",
"If",
"o",
"is",
"a",
"non",
"-",
"string",
"sequence",
"the",
"items",
"of",
"the",
"sequence",
"are",
"converted",
"and",
"returned",
"as",
"a",
... | b66971ee36be96b772ae7fdec79ccc1611376f3c | https://github.com/PyMySQL/mysqlclient-python/blob/b66971ee36be96b772ae7fdec79ccc1611376f3c/MySQLdb/connections.py#L238-L262 |
240,608 | PyMySQL/mysqlclient-python | MySQLdb/connections.py | Connection.set_character_set | def set_character_set(self, charset):
"""Set the connection character set to charset. The character
set can only be changed in MySQL-4.1 and newer. If you try
to change the character set from the current value in an
older version, NotSupportedError will be raised."""
if charset in ("utf8mb4", "utf8mb3"):
py_charset = "utf8"
else:
py_charset = charset
if self.character_set_name() != charset:
try:
super(Connection, self).set_character_set(charset)
except AttributeError:
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set charset")
self.query('SET NAMES %s' % charset)
self.store_result()
self.encoding = py_charset | python | def set_character_set(self, charset):
if charset in ("utf8mb4", "utf8mb3"):
py_charset = "utf8"
else:
py_charset = charset
if self.character_set_name() != charset:
try:
super(Connection, self).set_character_set(charset)
except AttributeError:
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set charset")
self.query('SET NAMES %s' % charset)
self.store_result()
self.encoding = py_charset | [
"def",
"set_character_set",
"(",
"self",
",",
"charset",
")",
":",
"if",
"charset",
"in",
"(",
"\"utf8mb4\"",
",",
"\"utf8mb3\"",
")",
":",
"py_charset",
"=",
"\"utf8\"",
"else",
":",
"py_charset",
"=",
"charset",
"if",
"self",
".",
"character_set_name",
"("... | Set the connection character set to charset. The character
set can only be changed in MySQL-4.1 and newer. If you try
to change the character set from the current value in an
older version, NotSupportedError will be raised. | [
"Set",
"the",
"connection",
"character",
"set",
"to",
"charset",
".",
"The",
"character",
"set",
"can",
"only",
"be",
"changed",
"in",
"MySQL",
"-",
"4",
".",
"1",
"and",
"newer",
".",
"If",
"you",
"try",
"to",
"change",
"the",
"character",
"set",
"fro... | b66971ee36be96b772ae7fdec79ccc1611376f3c | https://github.com/PyMySQL/mysqlclient-python/blob/b66971ee36be96b772ae7fdec79ccc1611376f3c/MySQLdb/connections.py#L282-L299 |
240,609 | PyMySQL/mysqlclient-python | MySQLdb/connections.py | Connection.set_sql_mode | def set_sql_mode(self, sql_mode):
"""Set the connection sql_mode. See MySQL documentation for
legal values."""
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set sql_mode")
self.query("SET SESSION sql_mode='%s'" % sql_mode)
self.store_result() | python | def set_sql_mode(self, sql_mode):
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set sql_mode")
self.query("SET SESSION sql_mode='%s'" % sql_mode)
self.store_result() | [
"def",
"set_sql_mode",
"(",
"self",
",",
"sql_mode",
")",
":",
"if",
"self",
".",
"_server_version",
"<",
"(",
"4",
",",
"1",
")",
":",
"raise",
"NotSupportedError",
"(",
"\"server is too old to set sql_mode\"",
")",
"self",
".",
"query",
"(",
"\"SET SESSION s... | Set the connection sql_mode. See MySQL documentation for
legal values. | [
"Set",
"the",
"connection",
"sql_mode",
".",
"See",
"MySQL",
"documentation",
"for",
"legal",
"values",
"."
] | b66971ee36be96b772ae7fdec79ccc1611376f3c | https://github.com/PyMySQL/mysqlclient-python/blob/b66971ee36be96b772ae7fdec79ccc1611376f3c/MySQLdb/connections.py#L301-L307 |
240,610 | PyMySQL/mysqlclient-python | MySQLdb/cursors.py | BaseCursor.close | def close(self):
"""Close the cursor. No further queries will be possible."""
try:
if self.connection is None:
return
while self.nextset():
pass
finally:
self.connection = None
self._result = None | python | def close(self):
try:
if self.connection is None:
return
while self.nextset():
pass
finally:
self.connection = None
self._result = None | [
"def",
"close",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"connection",
"is",
"None",
":",
"return",
"while",
"self",
".",
"nextset",
"(",
")",
":",
"pass",
"finally",
":",
"self",
".",
"connection",
"=",
"None",
"self",
".",
"_result",
... | Close the cursor. No further queries will be possible. | [
"Close",
"the",
"cursor",
".",
"No",
"further",
"queries",
"will",
"be",
"possible",
"."
] | b66971ee36be96b772ae7fdec79ccc1611376f3c | https://github.com/PyMySQL/mysqlclient-python/blob/b66971ee36be96b772ae7fdec79ccc1611376f3c/MySQLdb/cursors.py#L81-L90 |
240,611 | MagicStack/asyncpg | asyncpg/cursor.py | Cursor.fetchrow | async def fetchrow(self, *, timeout=None):
r"""Return the next row.
:param float timeout: Optional timeout value in seconds.
:return: A :class:`Record` instance.
"""
self._check_ready()
if self._exhausted:
return None
recs = await self._exec(1, timeout)
if len(recs) < 1:
self._exhausted = True
return None
return recs[0] | python | async def fetchrow(self, *, timeout=None):
r"""Return the next row.
:param float timeout: Optional timeout value in seconds.
:return: A :class:`Record` instance.
"""
self._check_ready()
if self._exhausted:
return None
recs = await self._exec(1, timeout)
if len(recs) < 1:
self._exhausted = True
return None
return recs[0] | [
"async",
"def",
"fetchrow",
"(",
"self",
",",
"*",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"_check_ready",
"(",
")",
"if",
"self",
".",
"_exhausted",
":",
"return",
"None",
"recs",
"=",
"await",
"self",
".",
"_exec",
"(",
"1",
",",
"tim... | r"""Return the next row.
:param float timeout: Optional timeout value in seconds.
:return: A :class:`Record` instance. | [
"r",
"Return",
"the",
"next",
"row",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/cursor.py#L224-L238 |
240,612 | MagicStack/asyncpg | asyncpg/connresource.py | guarded | def guarded(meth):
"""A decorator to add a sanity check to ConnectionResource methods."""
@functools.wraps(meth)
def _check(self, *args, **kwargs):
self._check_conn_validity(meth.__name__)
return meth(self, *args, **kwargs)
return _check | python | def guarded(meth):
@functools.wraps(meth)
def _check(self, *args, **kwargs):
self._check_conn_validity(meth.__name__)
return meth(self, *args, **kwargs)
return _check | [
"def",
"guarded",
"(",
"meth",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"meth",
")",
"def",
"_check",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_conn_validity",
"(",
"meth",
".",
"__name__",
")",
"r... | A decorator to add a sanity check to ConnectionResource methods. | [
"A",
"decorator",
"to",
"add",
"a",
"sanity",
"check",
"to",
"ConnectionResource",
"methods",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connresource.py#L14-L22 |
240,613 | MagicStack/asyncpg | asyncpg/cluster.py | Cluster.start | def start(self, wait=60, *, server_settings={}, **opts):
"""Start the cluster."""
status = self.get_status()
if status == 'running':
return
elif status == 'not-initialized':
raise ClusterError(
'cluster in {!r} has not been initialized'.format(
self._data_dir))
port = opts.pop('port', None)
if port == 'dynamic':
port = find_available_port()
extra_args = ['--{}={}'.format(k, v) for k, v in opts.items()]
extra_args.append('--port={}'.format(port))
sockdir = server_settings.get('unix_socket_directories')
if sockdir is None:
sockdir = server_settings.get('unix_socket_directory')
if sockdir is None:
sockdir = '/tmp'
ssl_key = server_settings.get('ssl_key_file')
if ssl_key:
# Make sure server certificate key file has correct permissions.
keyfile = os.path.join(self._data_dir, 'srvkey.pem')
shutil.copy(ssl_key, keyfile)
os.chmod(keyfile, 0o600)
server_settings = server_settings.copy()
server_settings['ssl_key_file'] = keyfile
if self._pg_version < (9, 3):
sockdir_opt = 'unix_socket_directory'
else:
sockdir_opt = 'unix_socket_directories'
server_settings[sockdir_opt] = sockdir
for k, v in server_settings.items():
extra_args.extend(['-c', '{}={}'.format(k, v)])
if _system == 'Windows':
# On Windows we have to use pg_ctl as direct execution
# of postgres daemon under an Administrative account
# is not permitted and there is no easy way to drop
# privileges.
if os.getenv('ASYNCPG_DEBUG_SERVER'):
stdout = sys.stdout
else:
stdout = subprocess.DEVNULL
process = subprocess.run(
[self._pg_ctl, 'start', '-D', self._data_dir,
'-o', ' '.join(extra_args)],
stdout=stdout, stderr=subprocess.STDOUT)
if process.returncode != 0:
if process.stderr:
stderr = ':\n{}'.format(process.stderr.decode())
else:
stderr = ''
raise ClusterError(
'pg_ctl start exited with status {:d}{}'.format(
process.returncode, stderr))
else:
if os.getenv('ASYNCPG_DEBUG_SERVER'):
stdout = sys.stdout
else:
stdout = subprocess.DEVNULL
self._daemon_process = \
subprocess.Popen(
[self._postgres, '-D', self._data_dir, *extra_args],
stdout=stdout, stderr=subprocess.STDOUT)
self._daemon_pid = self._daemon_process.pid
self._test_connection(timeout=wait) | python | def start(self, wait=60, *, server_settings={}, **opts):
status = self.get_status()
if status == 'running':
return
elif status == 'not-initialized':
raise ClusterError(
'cluster in {!r} has not been initialized'.format(
self._data_dir))
port = opts.pop('port', None)
if port == 'dynamic':
port = find_available_port()
extra_args = ['--{}={}'.format(k, v) for k, v in opts.items()]
extra_args.append('--port={}'.format(port))
sockdir = server_settings.get('unix_socket_directories')
if sockdir is None:
sockdir = server_settings.get('unix_socket_directory')
if sockdir is None:
sockdir = '/tmp'
ssl_key = server_settings.get('ssl_key_file')
if ssl_key:
# Make sure server certificate key file has correct permissions.
keyfile = os.path.join(self._data_dir, 'srvkey.pem')
shutil.copy(ssl_key, keyfile)
os.chmod(keyfile, 0o600)
server_settings = server_settings.copy()
server_settings['ssl_key_file'] = keyfile
if self._pg_version < (9, 3):
sockdir_opt = 'unix_socket_directory'
else:
sockdir_opt = 'unix_socket_directories'
server_settings[sockdir_opt] = sockdir
for k, v in server_settings.items():
extra_args.extend(['-c', '{}={}'.format(k, v)])
if _system == 'Windows':
# On Windows we have to use pg_ctl as direct execution
# of postgres daemon under an Administrative account
# is not permitted and there is no easy way to drop
# privileges.
if os.getenv('ASYNCPG_DEBUG_SERVER'):
stdout = sys.stdout
else:
stdout = subprocess.DEVNULL
process = subprocess.run(
[self._pg_ctl, 'start', '-D', self._data_dir,
'-o', ' '.join(extra_args)],
stdout=stdout, stderr=subprocess.STDOUT)
if process.returncode != 0:
if process.stderr:
stderr = ':\n{}'.format(process.stderr.decode())
else:
stderr = ''
raise ClusterError(
'pg_ctl start exited with status {:d}{}'.format(
process.returncode, stderr))
else:
if os.getenv('ASYNCPG_DEBUG_SERVER'):
stdout = sys.stdout
else:
stdout = subprocess.DEVNULL
self._daemon_process = \
subprocess.Popen(
[self._postgres, '-D', self._data_dir, *extra_args],
stdout=stdout, stderr=subprocess.STDOUT)
self._daemon_pid = self._daemon_process.pid
self._test_connection(timeout=wait) | [
"def",
"start",
"(",
"self",
",",
"wait",
"=",
"60",
",",
"*",
",",
"server_settings",
"=",
"{",
"}",
",",
"*",
"*",
"opts",
")",
":",
"status",
"=",
"self",
".",
"get_status",
"(",
")",
"if",
"status",
"==",
"'running'",
":",
"return",
"elif",
"... | Start the cluster. | [
"Start",
"the",
"cluster",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/cluster.py#L144-L222 |
240,614 | MagicStack/asyncpg | asyncpg/cluster.py | Cluster.reload | def reload(self):
"""Reload server configuration."""
status = self.get_status()
if status != 'running':
raise ClusterError('cannot reload: cluster is not running')
process = subprocess.run(
[self._pg_ctl, 'reload', '-D', self._data_dir],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = process.stderr
if process.returncode != 0:
raise ClusterError(
'pg_ctl stop exited with status {:d}: {}'.format(
process.returncode, stderr.decode())) | python | def reload(self):
status = self.get_status()
if status != 'running':
raise ClusterError('cannot reload: cluster is not running')
process = subprocess.run(
[self._pg_ctl, 'reload', '-D', self._data_dir],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = process.stderr
if process.returncode != 0:
raise ClusterError(
'pg_ctl stop exited with status {:d}: {}'.format(
process.returncode, stderr.decode())) | [
"def",
"reload",
"(",
"self",
")",
":",
"status",
"=",
"self",
".",
"get_status",
"(",
")",
"if",
"status",
"!=",
"'running'",
":",
"raise",
"ClusterError",
"(",
"'cannot reload: cluster is not running'",
")",
"process",
"=",
"subprocess",
".",
"run",
"(",
"... | Reload server configuration. | [
"Reload",
"server",
"configuration",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/cluster.py#L224-L239 |
240,615 | MagicStack/asyncpg | asyncpg/cluster.py | Cluster.reset_hba | def reset_hba(self):
"""Remove all records from pg_hba.conf."""
status = self.get_status()
if status == 'not-initialized':
raise ClusterError(
'cannot modify HBA records: cluster is not initialized')
pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')
try:
with open(pg_hba, 'w'):
pass
except IOError as e:
raise ClusterError(
'cannot modify HBA records: {}'.format(e)) from e | python | def reset_hba(self):
status = self.get_status()
if status == 'not-initialized':
raise ClusterError(
'cannot modify HBA records: cluster is not initialized')
pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')
try:
with open(pg_hba, 'w'):
pass
except IOError as e:
raise ClusterError(
'cannot modify HBA records: {}'.format(e)) from e | [
"def",
"reset_hba",
"(",
"self",
")",
":",
"status",
"=",
"self",
".",
"get_status",
"(",
")",
"if",
"status",
"==",
"'not-initialized'",
":",
"raise",
"ClusterError",
"(",
"'cannot modify HBA records: cluster is not initialized'",
")",
"pg_hba",
"=",
"os",
".",
... | Remove all records from pg_hba.conf. | [
"Remove",
"all",
"records",
"from",
"pg_hba",
".",
"conf",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/cluster.py#L323-L337 |
240,616 | MagicStack/asyncpg | asyncpg/cluster.py | Cluster.add_hba_entry | def add_hba_entry(self, *, type='host', database, user, address=None,
auth_method, auth_options=None):
"""Add a record to pg_hba.conf."""
status = self.get_status()
if status == 'not-initialized':
raise ClusterError(
'cannot modify HBA records: cluster is not initialized')
if type not in {'local', 'host', 'hostssl', 'hostnossl'}:
raise ValueError('invalid HBA record type: {!r}'.format(type))
pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')
record = '{} {} {}'.format(type, database, user)
if type != 'local':
if address is None:
raise ValueError(
'{!r} entry requires a valid address'.format(type))
else:
record += ' {}'.format(address)
record += ' {}'.format(auth_method)
if auth_options is not None:
record += ' ' + ' '.join(
'{}={}'.format(k, v) for k, v in auth_options)
try:
with open(pg_hba, 'a') as f:
print(record, file=f)
except IOError as e:
raise ClusterError(
'cannot modify HBA records: {}'.format(e)) from e | python | def add_hba_entry(self, *, type='host', database, user, address=None,
auth_method, auth_options=None):
status = self.get_status()
if status == 'not-initialized':
raise ClusterError(
'cannot modify HBA records: cluster is not initialized')
if type not in {'local', 'host', 'hostssl', 'hostnossl'}:
raise ValueError('invalid HBA record type: {!r}'.format(type))
pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')
record = '{} {} {}'.format(type, database, user)
if type != 'local':
if address is None:
raise ValueError(
'{!r} entry requires a valid address'.format(type))
else:
record += ' {}'.format(address)
record += ' {}'.format(auth_method)
if auth_options is not None:
record += ' ' + ' '.join(
'{}={}'.format(k, v) for k, v in auth_options)
try:
with open(pg_hba, 'a') as f:
print(record, file=f)
except IOError as e:
raise ClusterError(
'cannot modify HBA records: {}'.format(e)) from e | [
"def",
"add_hba_entry",
"(",
"self",
",",
"*",
",",
"type",
"=",
"'host'",
",",
"database",
",",
"user",
",",
"address",
"=",
"None",
",",
"auth_method",
",",
"auth_options",
"=",
"None",
")",
":",
"status",
"=",
"self",
".",
"get_status",
"(",
")",
... | Add a record to pg_hba.conf. | [
"Add",
"a",
"record",
"to",
"pg_hba",
".",
"conf",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/cluster.py#L339-L372 |
240,617 | MagicStack/asyncpg | asyncpg/connection.py | connect | async def connect(dsn=None, *,
host=None, port=None,
user=None, password=None, passfile=None,
database=None,
loop=None,
timeout=60,
statement_cache_size=100,
max_cached_statement_lifetime=300,
max_cacheable_statement_size=1024 * 15,
command_timeout=None,
ssl=None,
connection_class=Connection,
server_settings=None):
r"""A coroutine to establish a connection to a PostgreSQL server.
The connection parameters may be specified either as a connection
URI in *dsn*, or as specific keyword arguments, or both.
If both *dsn* and keyword arguments are specified, the latter
override the corresponding values parsed from the connection URI.
The default values for the majority of arguments can be specified
using `environment variables <postgres envvars>`_.
Returns a new :class:`~asyncpg.connection.Connection` object.
:param dsn:
Connection arguments specified using as a single string in the
`libpq connection URI format`_:
``postgres://user:password@host:port/database?option=value``.
The following options are recognized by asyncpg: host, port,
user, database (or dbname), password, passfile, sslmode.
Unlike libpq, asyncpg will treat unrecognized options
as `server settings`_ to be used for the connection.
:param host:
Database host address as one of the following:
- an IP address or a domain name;
- an absolute path to the directory containing the database
server Unix-domain socket (not supported on Windows);
- a sequence of any of the above, in which case the addresses
will be tried in order, and the first successful connection
will be returned.
If not specified, asyncpg will try the following, in order:
- host address(es) parsed from the *dsn* argument,
- the value of the ``PGHOST`` environment variable,
- on Unix, common directories used for PostgreSQL Unix-domain
sockets: ``"/run/postgresql"``, ``"/var/run/postgresl"``,
``"/var/pgsql_socket"``, ``"/private/tmp"``, and ``"/tmp"``,
- ``"localhost"``.
:param port:
Port number to connect to at the server host
(or Unix-domain socket file extension). If multiple host
addresses were specified, this parameter may specify a
sequence of port numbers of the same length as the host sequence,
or it may specify a single port number to be used for all host
addresses.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGPORT`` environment variable, or ``5432`` if
neither is specified.
:param user:
The name of the database role used for authentication.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGUSER`` environment variable, or the
operating system name of the user running the application.
:param database:
The name of the database to connect to.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGDATABASE`` environment variable, or the
operating system name of the user running the application.
:param password:
Password to be used for authentication, if the server requires
one. If not specified, the value parsed from the *dsn* argument
is used, or the value of the ``PGPASSWORD`` environment variable.
Note that the use of the environment variable is discouraged as
other users and applications may be able to read it without needing
specific privileges. It is recommended to use *passfile* instead.
:param passfile:
The name of the file used to store passwords
(defaults to ``~/.pgpass``, or ``%APPDATA%\postgresql\pgpass.conf``
on Windows).
:param loop:
An asyncio event loop instance. If ``None``, the default
event loop will be used.
:param float timeout:
Connection timeout in seconds.
:param int statement_cache_size:
The size of prepared statement LRU cache. Pass ``0`` to
disable the cache.
:param int max_cached_statement_lifetime:
The maximum time in seconds a prepared statement will stay
in the cache. Pass ``0`` to allow statements be cached
indefinitely.
:param int max_cacheable_statement_size:
The maximum size of a statement that can be cached (15KiB by
default). Pass ``0`` to allow all statements to be cached
regardless of their size.
:param float command_timeout:
The default timeout for operations on this connection
(the default is ``None``: no timeout).
:param ssl:
Pass ``True`` or an `ssl.SSLContext <SSLContext_>`_ instance to
require an SSL connection. If ``True``, a default SSL context
returned by `ssl.create_default_context() <create_default_context_>`_
will be used.
:param dict server_settings:
An optional dict of server runtime parameters. Refer to
PostgreSQL documentation for
a `list of supported options <server settings>`_.
:param Connection connection_class:
Class of the returned connection object. Must be a subclass of
:class:`~asyncpg.connection.Connection`.
:return: A :class:`~asyncpg.connection.Connection` instance.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... types = await con.fetch('SELECT * FROM pg_type')
... print(types)
...
>>> asyncio.get_event_loop().run_until_complete(run())
[<Record typname='bool' typnamespace=11 ...
.. versionadded:: 0.10.0
Added ``max_cached_statement_use_count`` parameter.
.. versionchanged:: 0.11.0
Removed ability to pass arbitrary keyword arguments to set
server settings. Added a dedicated parameter ``server_settings``
for that.
.. versionadded:: 0.11.0
Added ``connection_class`` parameter.
.. versionadded:: 0.16.0
Added ``passfile`` parameter
(and support for password files in general).
.. versionadded:: 0.18.0
Added ability to specify multiple hosts in the *dsn*
and *host* arguments.
.. _SSLContext: https://docs.python.org/3/library/ssl.html#ssl.SSLContext
.. _create_default_context:
https://docs.python.org/3/library/ssl.html#ssl.create_default_context
.. _server settings:
https://www.postgresql.org/docs/current/static/runtime-config.html
.. _postgres envvars:
https://www.postgresql.org/docs/current/static/libpq-envars.html
.. _libpq connection URI format:
https://www.postgresql.org/docs/current/static/\
libpq-connect.html#LIBPQ-CONNSTRING
"""
if not issubclass(connection_class, Connection):
raise TypeError(
'connection_class is expected to be a subclass of '
'asyncpg.Connection, got {!r}'.format(connection_class))
if loop is None:
loop = asyncio.get_event_loop()
return await connect_utils._connect(
loop=loop, timeout=timeout, connection_class=connection_class,
dsn=dsn, host=host, port=port, user=user,
password=password, passfile=passfile,
ssl=ssl, database=database,
server_settings=server_settings,
command_timeout=command_timeout,
statement_cache_size=statement_cache_size,
max_cached_statement_lifetime=max_cached_statement_lifetime,
max_cacheable_statement_size=max_cacheable_statement_size) | python | async def connect(dsn=None, *,
host=None, port=None,
user=None, password=None, passfile=None,
database=None,
loop=None,
timeout=60,
statement_cache_size=100,
max_cached_statement_lifetime=300,
max_cacheable_statement_size=1024 * 15,
command_timeout=None,
ssl=None,
connection_class=Connection,
server_settings=None):
r"""A coroutine to establish a connection to a PostgreSQL server.
The connection parameters may be specified either as a connection
URI in *dsn*, or as specific keyword arguments, or both.
If both *dsn* and keyword arguments are specified, the latter
override the corresponding values parsed from the connection URI.
The default values for the majority of arguments can be specified
using `environment variables <postgres envvars>`_.
Returns a new :class:`~asyncpg.connection.Connection` object.
:param dsn:
Connection arguments specified using as a single string in the
`libpq connection URI format`_:
``postgres://user:password@host:port/database?option=value``.
The following options are recognized by asyncpg: host, port,
user, database (or dbname), password, passfile, sslmode.
Unlike libpq, asyncpg will treat unrecognized options
as `server settings`_ to be used for the connection.
:param host:
Database host address as one of the following:
- an IP address or a domain name;
- an absolute path to the directory containing the database
server Unix-domain socket (not supported on Windows);
- a sequence of any of the above, in which case the addresses
will be tried in order, and the first successful connection
will be returned.
If not specified, asyncpg will try the following, in order:
- host address(es) parsed from the *dsn* argument,
- the value of the ``PGHOST`` environment variable,
- on Unix, common directories used for PostgreSQL Unix-domain
sockets: ``"/run/postgresql"``, ``"/var/run/postgresl"``,
``"/var/pgsql_socket"``, ``"/private/tmp"``, and ``"/tmp"``,
- ``"localhost"``.
:param port:
Port number to connect to at the server host
(or Unix-domain socket file extension). If multiple host
addresses were specified, this parameter may specify a
sequence of port numbers of the same length as the host sequence,
or it may specify a single port number to be used for all host
addresses.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGPORT`` environment variable, or ``5432`` if
neither is specified.
:param user:
The name of the database role used for authentication.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGUSER`` environment variable, or the
operating system name of the user running the application.
:param database:
The name of the database to connect to.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGDATABASE`` environment variable, or the
operating system name of the user running the application.
:param password:
Password to be used for authentication, if the server requires
one. If not specified, the value parsed from the *dsn* argument
is used, or the value of the ``PGPASSWORD`` environment variable.
Note that the use of the environment variable is discouraged as
other users and applications may be able to read it without needing
specific privileges. It is recommended to use *passfile* instead.
:param passfile:
The name of the file used to store passwords
(defaults to ``~/.pgpass``, or ``%APPDATA%\postgresql\pgpass.conf``
on Windows).
:param loop:
An asyncio event loop instance. If ``None``, the default
event loop will be used.
:param float timeout:
Connection timeout in seconds.
:param int statement_cache_size:
The size of prepared statement LRU cache. Pass ``0`` to
disable the cache.
:param int max_cached_statement_lifetime:
The maximum time in seconds a prepared statement will stay
in the cache. Pass ``0`` to allow statements be cached
indefinitely.
:param int max_cacheable_statement_size:
The maximum size of a statement that can be cached (15KiB by
default). Pass ``0`` to allow all statements to be cached
regardless of their size.
:param float command_timeout:
The default timeout for operations on this connection
(the default is ``None``: no timeout).
:param ssl:
Pass ``True`` or an `ssl.SSLContext <SSLContext_>`_ instance to
require an SSL connection. If ``True``, a default SSL context
returned by `ssl.create_default_context() <create_default_context_>`_
will be used.
:param dict server_settings:
An optional dict of server runtime parameters. Refer to
PostgreSQL documentation for
a `list of supported options <server settings>`_.
:param Connection connection_class:
Class of the returned connection object. Must be a subclass of
:class:`~asyncpg.connection.Connection`.
:return: A :class:`~asyncpg.connection.Connection` instance.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... types = await con.fetch('SELECT * FROM pg_type')
... print(types)
...
>>> asyncio.get_event_loop().run_until_complete(run())
[<Record typname='bool' typnamespace=11 ...
.. versionadded:: 0.10.0
Added ``max_cached_statement_use_count`` parameter.
.. versionchanged:: 0.11.0
Removed ability to pass arbitrary keyword arguments to set
server settings. Added a dedicated parameter ``server_settings``
for that.
.. versionadded:: 0.11.0
Added ``connection_class`` parameter.
.. versionadded:: 0.16.0
Added ``passfile`` parameter
(and support for password files in general).
.. versionadded:: 0.18.0
Added ability to specify multiple hosts in the *dsn*
and *host* arguments.
.. _SSLContext: https://docs.python.org/3/library/ssl.html#ssl.SSLContext
.. _create_default_context:
https://docs.python.org/3/library/ssl.html#ssl.create_default_context
.. _server settings:
https://www.postgresql.org/docs/current/static/runtime-config.html
.. _postgres envvars:
https://www.postgresql.org/docs/current/static/libpq-envars.html
.. _libpq connection URI format:
https://www.postgresql.org/docs/current/static/\
libpq-connect.html#LIBPQ-CONNSTRING
"""
if not issubclass(connection_class, Connection):
raise TypeError(
'connection_class is expected to be a subclass of '
'asyncpg.Connection, got {!r}'.format(connection_class))
if loop is None:
loop = asyncio.get_event_loop()
return await connect_utils._connect(
loop=loop, timeout=timeout, connection_class=connection_class,
dsn=dsn, host=host, port=port, user=user,
password=password, passfile=passfile,
ssl=ssl, database=database,
server_settings=server_settings,
command_timeout=command_timeout,
statement_cache_size=statement_cache_size,
max_cached_statement_lifetime=max_cached_statement_lifetime,
max_cacheable_statement_size=max_cacheable_statement_size) | [
"async",
"def",
"connect",
"(",
"dsn",
"=",
"None",
",",
"*",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"passfile",
"=",
"None",
",",
"database",
"=",
"None",
",",
"loop",
"="... | r"""A coroutine to establish a connection to a PostgreSQL server.
The connection parameters may be specified either as a connection
URI in *dsn*, or as specific keyword arguments, or both.
If both *dsn* and keyword arguments are specified, the latter
override the corresponding values parsed from the connection URI.
The default values for the majority of arguments can be specified
using `environment variables <postgres envvars>`_.
Returns a new :class:`~asyncpg.connection.Connection` object.
:param dsn:
Connection arguments specified using as a single string in the
`libpq connection URI format`_:
``postgres://user:password@host:port/database?option=value``.
The following options are recognized by asyncpg: host, port,
user, database (or dbname), password, passfile, sslmode.
Unlike libpq, asyncpg will treat unrecognized options
as `server settings`_ to be used for the connection.
:param host:
Database host address as one of the following:
- an IP address or a domain name;
- an absolute path to the directory containing the database
server Unix-domain socket (not supported on Windows);
- a sequence of any of the above, in which case the addresses
will be tried in order, and the first successful connection
will be returned.
If not specified, asyncpg will try the following, in order:
- host address(es) parsed from the *dsn* argument,
- the value of the ``PGHOST`` environment variable,
- on Unix, common directories used for PostgreSQL Unix-domain
sockets: ``"/run/postgresql"``, ``"/var/run/postgresl"``,
``"/var/pgsql_socket"``, ``"/private/tmp"``, and ``"/tmp"``,
- ``"localhost"``.
:param port:
Port number to connect to at the server host
(or Unix-domain socket file extension). If multiple host
addresses were specified, this parameter may specify a
sequence of port numbers of the same length as the host sequence,
or it may specify a single port number to be used for all host
addresses.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGPORT`` environment variable, or ``5432`` if
neither is specified.
:param user:
The name of the database role used for authentication.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGUSER`` environment variable, or the
operating system name of the user running the application.
:param database:
The name of the database to connect to.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGDATABASE`` environment variable, or the
operating system name of the user running the application.
:param password:
Password to be used for authentication, if the server requires
one. If not specified, the value parsed from the *dsn* argument
is used, or the value of the ``PGPASSWORD`` environment variable.
Note that the use of the environment variable is discouraged as
other users and applications may be able to read it without needing
specific privileges. It is recommended to use *passfile* instead.
:param passfile:
The name of the file used to store passwords
(defaults to ``~/.pgpass``, or ``%APPDATA%\postgresql\pgpass.conf``
on Windows).
:param loop:
An asyncio event loop instance. If ``None``, the default
event loop will be used.
:param float timeout:
Connection timeout in seconds.
:param int statement_cache_size:
The size of prepared statement LRU cache. Pass ``0`` to
disable the cache.
:param int max_cached_statement_lifetime:
The maximum time in seconds a prepared statement will stay
in the cache. Pass ``0`` to allow statements be cached
indefinitely.
:param int max_cacheable_statement_size:
The maximum size of a statement that can be cached (15KiB by
default). Pass ``0`` to allow all statements to be cached
regardless of their size.
:param float command_timeout:
The default timeout for operations on this connection
(the default is ``None``: no timeout).
:param ssl:
Pass ``True`` or an `ssl.SSLContext <SSLContext_>`_ instance to
require an SSL connection. If ``True``, a default SSL context
returned by `ssl.create_default_context() <create_default_context_>`_
will be used.
:param dict server_settings:
An optional dict of server runtime parameters. Refer to
PostgreSQL documentation for
a `list of supported options <server settings>`_.
:param Connection connection_class:
Class of the returned connection object. Must be a subclass of
:class:`~asyncpg.connection.Connection`.
:return: A :class:`~asyncpg.connection.Connection` instance.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... types = await con.fetch('SELECT * FROM pg_type')
... print(types)
...
>>> asyncio.get_event_loop().run_until_complete(run())
[<Record typname='bool' typnamespace=11 ...
.. versionadded:: 0.10.0
Added ``max_cached_statement_use_count`` parameter.
.. versionchanged:: 0.11.0
Removed ability to pass arbitrary keyword arguments to set
server settings. Added a dedicated parameter ``server_settings``
for that.
.. versionadded:: 0.11.0
Added ``connection_class`` parameter.
.. versionadded:: 0.16.0
Added ``passfile`` parameter
(and support for password files in general).
.. versionadded:: 0.18.0
Added ability to specify multiple hosts in the *dsn*
and *host* arguments.
.. _SSLContext: https://docs.python.org/3/library/ssl.html#ssl.SSLContext
.. _create_default_context:
https://docs.python.org/3/library/ssl.html#ssl.create_default_context
.. _server settings:
https://www.postgresql.org/docs/current/static/runtime-config.html
.. _postgres envvars:
https://www.postgresql.org/docs/current/static/libpq-envars.html
.. _libpq connection URI format:
https://www.postgresql.org/docs/current/static/\
libpq-connect.html#LIBPQ-CONNSTRING | [
"r",
"A",
"coroutine",
"to",
"establish",
"a",
"connection",
"to",
"a",
"PostgreSQL",
"server",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L1494-L1688 |
240,618 | MagicStack/asyncpg | asyncpg/connection.py | Connection.add_listener | async def add_listener(self, channel, callback):
"""Add a listener for Postgres notifications.
:param str channel: Channel to listen on.
:param callable callback:
A callable receiving the following arguments:
**connection**: a Connection the callback is registered with;
**pid**: PID of the Postgres server that sent the notification;
**channel**: name of the channel the notification was sent to;
**payload**: the payload.
"""
self._check_open()
if channel not in self._listeners:
await self.fetch('LISTEN {}'.format(utils._quote_ident(channel)))
self._listeners[channel] = set()
self._listeners[channel].add(callback) | python | async def add_listener(self, channel, callback):
self._check_open()
if channel not in self._listeners:
await self.fetch('LISTEN {}'.format(utils._quote_ident(channel)))
self._listeners[channel] = set()
self._listeners[channel].add(callback) | [
"async",
"def",
"add_listener",
"(",
"self",
",",
"channel",
",",
"callback",
")",
":",
"self",
".",
"_check_open",
"(",
")",
"if",
"channel",
"not",
"in",
"self",
".",
"_listeners",
":",
"await",
"self",
".",
"fetch",
"(",
"'LISTEN {}'",
".",
"format",
... | Add a listener for Postgres notifications.
:param str channel: Channel to listen on.
:param callable callback:
A callable receiving the following arguments:
**connection**: a Connection the callback is registered with;
**pid**: PID of the Postgres server that sent the notification;
**channel**: name of the channel the notification was sent to;
**payload**: the payload. | [
"Add",
"a",
"listener",
"for",
"Postgres",
"notifications",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L126-L142 |
240,619 | MagicStack/asyncpg | asyncpg/connection.py | Connection.remove_listener | async def remove_listener(self, channel, callback):
"""Remove a listening callback on the specified channel."""
if self.is_closed():
return
if channel not in self._listeners:
return
if callback not in self._listeners[channel]:
return
self._listeners[channel].remove(callback)
if not self._listeners[channel]:
del self._listeners[channel]
await self.fetch('UNLISTEN {}'.format(utils._quote_ident(channel))) | python | async def remove_listener(self, channel, callback):
if self.is_closed():
return
if channel not in self._listeners:
return
if callback not in self._listeners[channel]:
return
self._listeners[channel].remove(callback)
if not self._listeners[channel]:
del self._listeners[channel]
await self.fetch('UNLISTEN {}'.format(utils._quote_ident(channel))) | [
"async",
"def",
"remove_listener",
"(",
"self",
",",
"channel",
",",
"callback",
")",
":",
"if",
"self",
".",
"is_closed",
"(",
")",
":",
"return",
"if",
"channel",
"not",
"in",
"self",
".",
"_listeners",
":",
"return",
"if",
"callback",
"not",
"in",
"... | Remove a listening callback on the specified channel. | [
"Remove",
"a",
"listening",
"callback",
"on",
"the",
"specified",
"channel",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L144-L155 |
240,620 | MagicStack/asyncpg | asyncpg/connection.py | Connection.add_log_listener | def add_log_listener(self, callback):
"""Add a listener for Postgres log messages.
It will be called when asyncronous NoticeResponse is received
from the connection. Possible message types are: WARNING, NOTICE,
DEBUG, INFO, or LOG.
:param callable callback:
A callable receiving the following arguments:
**connection**: a Connection the callback is registered with;
**message**: the `exceptions.PostgresLogMessage` message.
.. versionadded:: 0.12.0
"""
if self.is_closed():
raise exceptions.InterfaceError('connection is closed')
self._log_listeners.add(callback) | python | def add_log_listener(self, callback):
if self.is_closed():
raise exceptions.InterfaceError('connection is closed')
self._log_listeners.add(callback) | [
"def",
"add_log_listener",
"(",
"self",
",",
"callback",
")",
":",
"if",
"self",
".",
"is_closed",
"(",
")",
":",
"raise",
"exceptions",
".",
"InterfaceError",
"(",
"'connection is closed'",
")",
"self",
".",
"_log_listeners",
".",
"add",
"(",
"callback",
")... | Add a listener for Postgres log messages.
It will be called when asyncronous NoticeResponse is received
from the connection. Possible message types are: WARNING, NOTICE,
DEBUG, INFO, or LOG.
:param callable callback:
A callable receiving the following arguments:
**connection**: a Connection the callback is registered with;
**message**: the `exceptions.PostgresLogMessage` message.
.. versionadded:: 0.12.0 | [
"Add",
"a",
"listener",
"for",
"Postgres",
"log",
"messages",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L157-L173 |
240,621 | MagicStack/asyncpg | asyncpg/connection.py | Connection.copy_from_table | async def copy_from_table(self, table_name, *, output,
columns=None, schema_name=None, timeout=None,
format=None, oids=None, delimiter=None,
null=None, header=None, quote=None,
escape=None, force_quote=None, encoding=None):
"""Copy table contents to a file or file-like object.
:param str table_name:
The name of the table to copy data from.
:param output:
A :term:`path-like object <python:path-like object>`,
or a :term:`file-like object <python:file-like object>`, or
a :term:`coroutine function <python:coroutine function>`
that takes a ``bytes`` instance as a sole argument.
:param list columns:
An optional list of column names to copy.
:param str schema_name:
An optional schema name to qualify the table.
:param float timeout:
Optional timeout value in seconds.
The remaining keyword arguments are ``COPY`` statement options,
see `COPY statement documentation`_ for details.
:return: The status string of the COPY command.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... result = await con.copy_from_table(
... 'mytable', columns=('foo', 'bar'),
... output='file.csv', format='csv')
... print(result)
...
>>> asyncio.get_event_loop().run_until_complete(run())
'COPY 100'
.. _`COPY statement documentation`:
https://www.postgresql.org/docs/current/static/sql-copy.html
.. versionadded:: 0.11.0
"""
tabname = utils._quote_ident(table_name)
if schema_name:
tabname = utils._quote_ident(schema_name) + '.' + tabname
if columns:
cols = '({})'.format(
', '.join(utils._quote_ident(c) for c in columns))
else:
cols = ''
opts = self._format_copy_opts(
format=format, oids=oids, delimiter=delimiter,
null=null, header=header, quote=quote, escape=escape,
force_quote=force_quote, encoding=encoding
)
copy_stmt = 'COPY {tab}{cols} TO STDOUT {opts}'.format(
tab=tabname, cols=cols, opts=opts)
return await self._copy_out(copy_stmt, output, timeout) | python | async def copy_from_table(self, table_name, *, output,
columns=None, schema_name=None, timeout=None,
format=None, oids=None, delimiter=None,
null=None, header=None, quote=None,
escape=None, force_quote=None, encoding=None):
tabname = utils._quote_ident(table_name)
if schema_name:
tabname = utils._quote_ident(schema_name) + '.' + tabname
if columns:
cols = '({})'.format(
', '.join(utils._quote_ident(c) for c in columns))
else:
cols = ''
opts = self._format_copy_opts(
format=format, oids=oids, delimiter=delimiter,
null=null, header=header, quote=quote, escape=escape,
force_quote=force_quote, encoding=encoding
)
copy_stmt = 'COPY {tab}{cols} TO STDOUT {opts}'.format(
tab=tabname, cols=cols, opts=opts)
return await self._copy_out(copy_stmt, output, timeout) | [
"async",
"def",
"copy_from_table",
"(",
"self",
",",
"table_name",
",",
"*",
",",
"output",
",",
"columns",
"=",
"None",
",",
"schema_name",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"format",
"=",
"None",
",",
"oids",
"=",
"None",
",",
"delimiter... | Copy table contents to a file or file-like object.
:param str table_name:
The name of the table to copy data from.
:param output:
A :term:`path-like object <python:path-like object>`,
or a :term:`file-like object <python:file-like object>`, or
a :term:`coroutine function <python:coroutine function>`
that takes a ``bytes`` instance as a sole argument.
:param list columns:
An optional list of column names to copy.
:param str schema_name:
An optional schema name to qualify the table.
:param float timeout:
Optional timeout value in seconds.
The remaining keyword arguments are ``COPY`` statement options,
see `COPY statement documentation`_ for details.
:return: The status string of the COPY command.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... result = await con.copy_from_table(
... 'mytable', columns=('foo', 'bar'),
... output='file.csv', format='csv')
... print(result)
...
>>> asyncio.get_event_loop().run_until_complete(run())
'COPY 100'
.. _`COPY statement documentation`:
https://www.postgresql.org/docs/current/static/sql-copy.html
.. versionadded:: 0.11.0 | [
"Copy",
"table",
"contents",
"to",
"a",
"file",
"or",
"file",
"-",
"like",
"object",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L460-L530 |
240,622 | MagicStack/asyncpg | asyncpg/connection.py | Connection.copy_from_query | async def copy_from_query(self, query, *args, output,
timeout=None, format=None, oids=None,
delimiter=None, null=None, header=None,
quote=None, escape=None, force_quote=None,
encoding=None):
"""Copy the results of a query to a file or file-like object.
:param str query:
The query to copy the results of.
:param args:
Query arguments.
:param output:
A :term:`path-like object <python:path-like object>`,
or a :term:`file-like object <python:file-like object>`, or
a :term:`coroutine function <python:coroutine function>`
that takes a ``bytes`` instance as a sole argument.
:param float timeout:
Optional timeout value in seconds.
The remaining keyword arguments are ``COPY`` statement options,
see `COPY statement documentation`_ for details.
:return: The status string of the COPY command.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... result = await con.copy_from_query(
... 'SELECT foo, bar FROM mytable WHERE foo > $1', 10,
... output='file.csv', format='csv')
... print(result)
...
>>> asyncio.get_event_loop().run_until_complete(run())
'COPY 10'
.. _`COPY statement documentation`:
https://www.postgresql.org/docs/current/static/sql-copy.html
.. versionadded:: 0.11.0
"""
opts = self._format_copy_opts(
format=format, oids=oids, delimiter=delimiter,
null=null, header=header, quote=quote, escape=escape,
force_quote=force_quote, encoding=encoding
)
if args:
query = await utils._mogrify(self, query, args)
copy_stmt = 'COPY ({query}) TO STDOUT {opts}'.format(
query=query, opts=opts)
return await self._copy_out(copy_stmt, output, timeout) | python | async def copy_from_query(self, query, *args, output,
timeout=None, format=None, oids=None,
delimiter=None, null=None, header=None,
quote=None, escape=None, force_quote=None,
encoding=None):
opts = self._format_copy_opts(
format=format, oids=oids, delimiter=delimiter,
null=null, header=header, quote=quote, escape=escape,
force_quote=force_quote, encoding=encoding
)
if args:
query = await utils._mogrify(self, query, args)
copy_stmt = 'COPY ({query}) TO STDOUT {opts}'.format(
query=query, opts=opts)
return await self._copy_out(copy_stmt, output, timeout) | [
"async",
"def",
"copy_from_query",
"(",
"self",
",",
"query",
",",
"*",
"args",
",",
"output",
",",
"timeout",
"=",
"None",
",",
"format",
"=",
"None",
",",
"oids",
"=",
"None",
",",
"delimiter",
"=",
"None",
",",
"null",
"=",
"None",
",",
"header",
... | Copy the results of a query to a file or file-like object.
:param str query:
The query to copy the results of.
:param args:
Query arguments.
:param output:
A :term:`path-like object <python:path-like object>`,
or a :term:`file-like object <python:file-like object>`, or
a :term:`coroutine function <python:coroutine function>`
that takes a ``bytes`` instance as a sole argument.
:param float timeout:
Optional timeout value in seconds.
The remaining keyword arguments are ``COPY`` statement options,
see `COPY statement documentation`_ for details.
:return: The status string of the COPY command.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... result = await con.copy_from_query(
... 'SELECT foo, bar FROM mytable WHERE foo > $1', 10,
... output='file.csv', format='csv')
... print(result)
...
>>> asyncio.get_event_loop().run_until_complete(run())
'COPY 10'
.. _`COPY statement documentation`:
https://www.postgresql.org/docs/current/static/sql-copy.html
.. versionadded:: 0.11.0 | [
"Copy",
"the",
"results",
"of",
"a",
"query",
"to",
"a",
"file",
"or",
"file",
"-",
"like",
"object",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L532-L592 |
async def copy_to_table(self, table_name, *, source,
                        columns=None, schema_name=None, timeout=None,
                        format=None, oids=None, freeze=None,
                        delimiter=None, null=None, header=None,
                        quote=None, escape=None, force_quote=None,
                        force_not_null=None, force_null=None,
                        encoding=None):
    """Bulk-load *source* into *table_name* via ``COPY ... FROM STDIN``.

    :param str table_name: the target table.
    :param source: a path, a file-like object, an asynchronous iterable
        of ``bytes``, or a buffer-protocol object.
    :param list columns: optional subset of columns to fill.
    :param str schema_name: optional schema qualifier for the table.
    :param float timeout: optional timeout in seconds.

    The remaining keyword arguments are ``COPY`` statement options.
    :return: the status string of the COPY command.
    """
    qualified = utils._quote_ident(table_name)
    if schema_name:
        qualified = utils._quote_ident(schema_name) + '.' + qualified

    # An empty/omitted column list means "all columns" and is left out
    # of the statement entirely.
    cols = (
        '({})'.format(', '.join(utils._quote_ident(c) for c in columns))
        if columns else ''
    )

    # NOTE(review): *force_quote* is accepted but not forwarded below;
    # FORCE_QUOTE is a COPY TO option in PostgreSQL -- confirm this is
    # intentional.
    copy_opts = self._format_copy_opts(
        format=format, oids=oids, freeze=freeze, delimiter=delimiter,
        null=null, header=header, quote=quote, escape=escape,
        force_not_null=force_not_null, force_null=force_null,
        encoding=encoding
    )
    copy_stmt = 'COPY {tab}{cols} FROM STDIN {opts}'.format(
        tab=qualified, cols=cols, opts=copy_opts)
    return await self._copy_in(copy_stmt, source, timeout)
"async",
"def",
"copy_to_table",
"(",
"self",
",",
"table_name",
",",
"*",
",",
"source",
",",
"columns",
"=",
"None",
",",
"schema_name",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"format",
"=",
"None",
",",
"oids",
"=",
"None",
",",
"freeze",
... | Copy data to the specified table.
:param str table_name:
The name of the table to copy data to.
:param source:
A :term:`path-like object <python:path-like object>`,
or a :term:`file-like object <python:file-like object>`, or
an :term:`asynchronous iterable <python:asynchronous iterable>`
that returns ``bytes``, or an object supporting the
:ref:`buffer protocol <python:bufferobjects>`.
:param list columns:
An optional list of column names to copy.
:param str schema_name:
An optional schema name to qualify the table.
:param float timeout:
Optional timeout value in seconds.
The remaining keyword arguments are ``COPY`` statement options,
see `COPY statement documentation`_ for details.
:return: The status string of the COPY command.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... result = await con.copy_to_table(
... 'mytable', source='datafile.tbl')
... print(result)
...
>>> asyncio.get_event_loop().run_until_complete(run())
'COPY 140000'
.. _`COPY statement documentation`:
https://www.postgresql.org/docs/current/static/sql-copy.html
.. versionadded:: 0.11.0 | [
"Copy",
"data",
"to",
"the",
"specified",
"table",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L594-L667 |
async def copy_records_to_table(self, table_name, *, records,
                                columns=None, schema_name=None,
                                timeout=None):
    """Copy an iterable of row tuples into *table_name* using binary COPY.

    :param str table_name: the target table.
    :param records: an iterable of row tuples to insert.
    :param list columns: optional subset of columns to fill.
    :param str schema_name: optional schema qualifier for the table.
    :param float timeout: optional timeout in seconds.
    :return: the status string of the COPY command.
    """
    qualified = utils._quote_ident(table_name)
    if schema_name:
        qualified = utils._quote_ident(schema_name) + '.' + qualified

    if columns:
        col_list = ', '.join(utils._quote_ident(c) for c in columns)
        cols = '({})'.format(col_list)
    else:
        col_list = '*'
        cols = ''

    # Prepare (but never execute past LIMIT 1) an introspection query so
    # the protocol layer can learn the column types before encoding the
    # records in binary form.
    intro_query = 'SELECT {cols} FROM {tab} LIMIT 1'.format(
        tab=qualified, cols=col_list)
    intro_ps = await self._prepare(intro_query, use_cache=True)

    copy_stmt = 'COPY {tab}{cols} FROM STDIN {opts}'.format(
        tab=qualified, cols=cols, opts='(FORMAT binary)')
    return await self._copy_in_records(
        copy_stmt, records, intro_ps._state, timeout)
"async",
"def",
"copy_records_to_table",
"(",
"self",
",",
"table_name",
",",
"*",
",",
"records",
",",
"columns",
"=",
"None",
",",
"schema_name",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"tabname",
"=",
"utils",
".",
"_quote_ident",
"(",
"ta... | Copy a list of records to the specified table using binary COPY.
:param str table_name:
The name of the table to copy data to.
:param records:
An iterable returning row tuples to copy into the table.
:param list columns:
An optional list of column names to copy.
:param str schema_name:
An optional schema name to qualify the table.
:param float timeout:
Optional timeout value in seconds.
:return: The status string of the COPY command.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... result = await con.copy_records_to_table(
... 'mytable', records=[
... (1, 'foo', 'bar'),
... (2, 'ham', 'spam')])
... print(result)
...
>>> asyncio.get_event_loop().run_until_complete(run())
'COPY 2'
.. versionadded:: 0.11.0 | [
"Copy",
"a",
"list",
"of",
"records",
"to",
"the",
"specified",
"table",
"using",
"binary",
"COPY",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L669-L732 |
async def set_builtin_type_codec(self, typename, *,
                                 schema='public', codec_name,
                                 format=None):
    """Alias the scalar type *schema*.*typename* to a built-in codec.

    :param typename: name of the data type the codec is for.
    :param schema: schema of the data type (defaults to ``'public'``).
    :param codec_name: the name of the built-in codec to use, either a
        known core type name or a supported extension type name.
    :param format: restrict the declaration to ``'text'`` or
        ``'binary'``, or ``None`` for every format the codec supports.
    :raises InterfaceError: if the type is unknown or not scalar.
    """
    self._check_open()

    type_record = await self.fetchrow(
        introspection.TYPE_BY_NAME, typename, schema)
    if not type_record:
        raise exceptions.InterfaceError(
            'unknown type: {}.{}'.format(schema, typename))
    if not introspection.is_scalar_type(type_record):
        raise exceptions.InterfaceError(
            'cannot alias non-scalar type {}.{}'.format(
                schema, typename))

    settings = self._protocol.get_settings()
    settings.set_builtin_type_codec(
        type_record['oid'], typename, schema, 'scalar',
        codec_name, format)

    # Cached statements may carry stale codec information -- invalidate.
    self._drop_local_statement_cache()
"async",
"def",
"set_builtin_type_codec",
"(",
"self",
",",
"typename",
",",
"*",
",",
"schema",
"=",
"'public'",
",",
"codec_name",
",",
"format",
"=",
"None",
")",
":",
"self",
".",
"_check_open",
"(",
")",
"typeinfo",
"=",
"await",
"self",
".",
"fetch... | Set a builtin codec for the specified scalar data type.
This method has two uses. The first is to register a builtin
codec for an extension type without a stable OID, such as 'hstore'.
The second use is to declare that an extension type or a
user-defined type is wire-compatible with a certain builtin
data type and should be exchanged as such.
:param typename:
Name of the data type the codec is for.
:param schema:
Schema name of the data type the codec is for
(defaults to ``'public'``).
:param codec_name:
The name of the builtin codec to use for the type.
This should be either the name of a known core type
(such as ``"int"``), or the name of a supported extension
type. Currently, the only supported extension type is
``"pg_contrib.hstore"``.
:param format:
If *format* is ``None`` (the default), all formats supported
by the target codec are declared to be supported for *typename*.
If *format* is ``'text'`` or ``'binary'``, then only the
specified format is declared to be supported for *typename*.
.. versionchanged:: 0.18.0
The *codec_name* argument can be the name of any known
core data type. Added the *format* keyword argument. | [
"Set",
"a",
"builtin",
"codec",
"for",
"the",
"specified",
"scalar",
"data",
"type",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L1007-L1061 |
async def close(self, *, timeout=None):
    """Close the connection gracefully.

    :param float timeout: optional timeout in seconds.

    If the graceful shutdown fails for any reason the connection is
    aborted instead and the error is re-raised; cleanup always runs.
    """
    try:
        if self.is_closed():
            return
        await self._protocol.close(timeout)
    except Exception:
        # Graceful close failed -- fall back to a hard abort.
        self._abort()
        raise
    finally:
        self._cleanup()
"async",
"def",
"close",
"(",
"self",
",",
"*",
",",
"timeout",
"=",
"None",
")",
":",
"try",
":",
"if",
"not",
"self",
".",
"is_closed",
"(",
")",
":",
"await",
"self",
".",
"_protocol",
".",
"close",
"(",
"timeout",
")",
"except",
"Exception",
":... | Close the connection gracefully.
:param float timeout:
Optional timeout value in seconds.
.. versionchanged:: 0.14.0
Added the *timeout* parameter. | [
"Close",
"the",
"connection",
"gracefully",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L1071-L1088 |
def create_pool(dsn=None, *,
                min_size=10,
                max_size=10,
                max_queries=50000,
                max_inactive_connection_lifetime=300.0,
                setup=None,
                init=None,
                loop=None,
                connection_class=connection.Connection,
                **connect_kwargs):
    r"""Create a connection pool.

    The returned object may be used either as an ``async with`` context
    manager or awaited directly, after which connections are obtained
    with :meth:`Pool.acquire() <pool.Pool.acquire>` and returned with
    :meth:`Pool.release() <pool.Pool.release>`.

    :param str dsn:
        Connection arguments specified as a single string in the form
        ``postgres://user:pass@host:port/database?option=value``.
    :param \*\*connect_kwargs:
        Keyword arguments for the :func:`~asyncpg.connection.connect`
        function.
    :param Connection connection_class:
        The class to use for connections.  Must be a subclass of
        :class:`~asyncpg.connection.Connection`.
    :param int min_size:
        Number of connections the pool is initialized with.
    :param int max_size:
        Maximum number of connections in the pool.
    :param int max_queries:
        Number of queries after which a connection is closed and
        replaced with a new one.
    :param float max_inactive_connection_lifetime:
        Number of seconds after which inactive connections in the pool
        are closed.  Pass ``0`` to disable this mechanism.
    :param coroutine setup:
        A coroutine used to prepare a connection right before it is
        returned from :meth:`Pool.acquire() <pool.Pool.acquire>`.
    :param coroutine init:
        A coroutine used to initialize a connection when it is created.
    :param loop:
        An asyncio event loop instance.  If ``None``, the default event
        loop will be used.

    :return: An instance of :class:`~asyncpg.pool.Pool`.
    """
    pool_kwargs = dict(
        connection_class=connection_class,
        min_size=min_size,
        max_size=max_size,
        max_queries=max_queries,
        max_inactive_connection_lifetime=max_inactive_connection_lifetime,
        setup=setup,
        init=init,
        loop=loop,
    )
    # Passing both mappings keeps the original behavior: a key that
    # appears in *connect_kwargs* as well raises TypeError at the call.
    return Pool(dsn, **pool_kwargs, **connect_kwargs)
"def",
"create_pool",
"(",
"dsn",
"=",
"None",
",",
"*",
",",
"min_size",
"=",
"10",
",",
"max_size",
"=",
"10",
",",
"max_queries",
"=",
"50000",
",",
"max_inactive_connection_lifetime",
"=",
"300.0",
",",
"setup",
"=",
"None",
",",
"init",
"=",
"None",... | r"""Create a connection pool.
Can be used either with an ``async with`` block:
.. code-block:: python
async with asyncpg.create_pool(user='postgres',
command_timeout=60) as pool:
async with pool.acquire() as con:
await con.fetch('SELECT 1')
Or directly with ``await``:
.. code-block:: python
pool = await asyncpg.create_pool(user='postgres', command_timeout=60)
con = await pool.acquire()
try:
await con.fetch('SELECT 1')
finally:
await pool.release(con)
.. warning::
Prepared statements and cursors returned by
:meth:`Connection.prepare() <connection.Connection.prepare>` and
:meth:`Connection.cursor() <connection.Connection.cursor>` become
invalid once the connection is released. Likewise, all notification
and log listeners are removed, and ``asyncpg`` will issue a warning
if there are any listener callbacks registered on a connection that
is being released to the pool.
:param str dsn:
Connection arguments specified using as a single string in
the following format:
``postgres://user:pass@host:port/database?option=value``.
:param \*\*connect_kwargs:
Keyword arguments for the :func:`~asyncpg.connection.connect`
function.
:param Connection connection_class:
The class to use for connections. Must be a subclass of
:class:`~asyncpg.connection.Connection`.
:param int min_size:
Number of connection the pool will be initialized with.
:param int max_size:
Max number of connections in the pool.
:param int max_queries:
Number of queries after a connection is closed and replaced
with a new connection.
:param float max_inactive_connection_lifetime:
Number of seconds after which inactive connections in the
pool will be closed. Pass ``0`` to disable this mechanism.
:param coroutine setup:
A coroutine to prepare a connection right before it is returned
from :meth:`Pool.acquire() <pool.Pool.acquire>`. An example use
case would be to automatically set up notifications listeners for
all connections of a pool.
:param coroutine init:
A coroutine to initialize a connection when it is created.
An example use case would be to setup type codecs with
:meth:`Connection.set_builtin_type_codec() <\
asyncpg.connection.Connection.set_builtin_type_codec>`
or :meth:`Connection.set_type_codec() <\
asyncpg.connection.Connection.set_type_codec>`.
:param loop:
An asyncio event loop instance. If ``None``, the default
event loop will be used.
:return: An instance of :class:`~asyncpg.pool.Pool`.
.. versionchanged:: 0.10.0
An :exc:`~asyncpg.exceptions.InterfaceError` will be raised on any
attempted operation on a released connection.
.. versionchanged:: 0.13.0
An :exc:`~asyncpg.exceptions.InterfaceError` will be raised on any
attempted operation on a prepared statement or a cursor created
on a connection that has been released to the pool.
.. versionchanged:: 0.13.0
An :exc:`~asyncpg.exceptions.InterfaceWarning` will be produced
if there are any active listeners (added via
:meth:`Connection.add_listener() <connection.Connection.add_listener>`
or :meth:`Connection.add_log_listener()
<connection.Connection.add_log_listener>`) present on the connection
at the moment of its release to the pool. | [
"r",
"Create",
"a",
"connection",
"pool",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/pool.py#L778-L889 |
240,628 | MagicStack/asyncpg | asyncpg/pool.py | PoolConnectionHolder._release | def _release(self):
"""Release this connection holder."""
if self._in_use is None:
# The holder is not checked out.
return
if not self._in_use.done():
self._in_use.set_result(None)
self._in_use = None
# Deinitialize the connection proxy. All subsequent
# operations on it will fail.
if self._proxy is not None:
self._proxy._detach()
self._proxy = None
# Put ourselves back to the pool queue.
self._pool._queue.put_nowait(self) | python | def _release(self):
if self._in_use is None:
# The holder is not checked out.
return
if not self._in_use.done():
self._in_use.set_result(None)
self._in_use = None
# Deinitialize the connection proxy. All subsequent
# operations on it will fail.
if self._proxy is not None:
self._proxy._detach()
self._proxy = None
# Put ourselves back to the pool queue.
self._pool._queue.put_nowait(self) | [
"def",
"_release",
"(",
"self",
")",
":",
"if",
"self",
".",
"_in_use",
"is",
"None",
":",
"# The holder is not checked out.",
"return",
"if",
"not",
"self",
".",
"_in_use",
".",
"done",
"(",
")",
":",
"self",
".",
"_in_use",
".",
"set_result",
"(",
"Non... | Release this connection holder. | [
"Release",
"this",
"connection",
"holder",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/pool.py#L276-L293 |
def set_connect_args(self, dsn=None, **connect_kwargs):
    r"""Replace the connection arguments used for future connections.

    Existing connections keep their original arguments until they
    expire; use :meth:`Pool.expire_connections()
    <asyncpg.pool.Pool.expire_connections>` to expedite that.

    :param str dsn:
        Connection arguments specified as a single string in the form
        ``postgres://user:pass@host:port/database?option=value``.
    :param \*\*connect_kwargs:
        Keyword arguments for the :func:`~asyncpg.connection.connect`
        function.

    .. versionadded:: 0.16.0
    """
    self._connect_args = [dsn]
    self._connect_kwargs = connect_kwargs
    # Drop the cached working address/config/params so the next
    # connection attempt re-resolves with the new arguments.
    self._working_addr = self._working_config = self._working_params = None
"def",
"set_connect_args",
"(",
"self",
",",
"dsn",
"=",
"None",
",",
"*",
"*",
"connect_kwargs",
")",
":",
"self",
".",
"_connect_args",
"=",
"[",
"dsn",
"]",
"self",
".",
"_connect_kwargs",
"=",
"connect_kwargs",
"self",
".",
"_working_addr",
"=",
"None"... | r"""Set the new connection arguments for this pool.
The new connection arguments will be used for all subsequent
new connection attempts. Existing connections will remain until
they expire. Use :meth:`Pool.expire_connections()
<asyncpg.pool.Pool.expire_connections>` to expedite the connection
expiry.
:param str dsn:
Connection arguments specified using as a single string in
the following format:
``postgres://user:pass@host:port/database?option=value``.
:param \*\*connect_kwargs:
Keyword arguments for the :func:`~asyncpg.connection.connect`
function.
.. versionadded:: 0.16.0 | [
"r",
"Set",
"the",
"new",
"connection",
"arguments",
"for",
"this",
"pool",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/pool.py#L429-L454 |
async def release(self, connection, *, timeout=None):
    """Release a database connection back to the pool.

    :param Connection connection:
        A :class:`~asyncpg.connection.Connection` object to release.
    :param float timeout:
        A timeout for releasing the connection.  If not specified,
        defaults to the timeout provided in the corresponding call to
        the :meth:`Pool.acquire() <asyncpg.pool.Pool.acquire>` method.
    :raises InterfaceError: if *connection* is not a member of this pool.
    """
    if (type(connection) is not PoolConnectionProxy or
            connection._holder._pool is not self):
        raise exceptions.InterfaceError(
            'Pool.release() received invalid connection: '
            '{connection!r} is not a member of this pool'.format(
                connection=connection))

    if connection._con is None:
        # Already released, do nothing.
        return

    self._check_init()

    # Let the connection do its internal housekeeping when its released.
    connection._con._on_release()

    ch = connection._holder
    if timeout is None:
        timeout = ch._timeout

    # Use asyncio.shield() to guarantee that task cancellation
    # does not prevent the connection from being returned to the
    # pool properly.
    # FIX: shield()'s ``loop`` argument was deprecated in Python 3.8
    # and removed in 3.10; this coroutine always runs on the pool's
    # loop, which shield() now picks up implicitly.
    return await asyncio.shield(ch.release(timeout))
"async",
"def",
"release",
"(",
"self",
",",
"connection",
",",
"*",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"(",
"type",
"(",
"connection",
")",
"is",
"not",
"PoolConnectionProxy",
"or",
"connection",
".",
"_holder",
".",
"_pool",
"is",
"not",
"s... | Release a database connection back to the pool.
:param Connection connection:
A :class:`~asyncpg.connection.Connection` object to release.
:param float timeout:
A timeout for releasing the connection. If not specified, defaults
to the timeout provided in the corresponding call to the
:meth:`Pool.acquire() <asyncpg.pool.Pool.acquire>` method.
.. versionchanged:: 0.14.0
Added the *timeout* parameter. | [
"Release",
"a",
"database",
"connection",
"back",
"to",
"the",
"pool",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/pool.py#L609-L645 |
async def close(self):
    """Attempt to gracefully close all connections in the pool.

    Wait until all pool connections are released, close them and
    shut down the pool.  If any error (including cancellation) occurs
    in ``close()`` the pool will terminate by calling
    :meth:`Pool.terminate() <pool.Pool.terminate>`.

    It is advisable to use :func:`python:asyncio.wait_for` to set
    a timeout.
    """
    if self._closed:
        return
    self._check_init()

    self._closing = True

    # Warn if the close takes unreasonably long, e.g. when a
    # connection is never released back to the pool.
    warning_callback = None
    try:
        warning_callback = self._loop.call_later(
            60, self._warn_on_long_close)

        # First wait for every holder to be released...
        release_coros = [
            ch.wait_until_released() for ch in self._holders]
        # FIX: gather()'s ``loop`` argument was deprecated in Python
        # 3.8 and removed in 3.10; this coroutine runs on self._loop,
        # which gather() now uses implicitly.
        await asyncio.gather(*release_coros)

        # ...then close them all.
        close_coros = [
            ch.close() for ch in self._holders]
        await asyncio.gather(*close_coros)

    except Exception:
        self.terminate()
        raise

    finally:
        if warning_callback is not None:
            warning_callback.cancel()
        self._closed = True
        self._closing = False
"async",
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_closed",
":",
"return",
"self",
".",
"_check_init",
"(",
")",
"self",
".",
"_closing",
"=",
"True",
"warning_callback",
"=",
"None",
"try",
":",
"warning_callback",
"=",
"self",
".",
... | Attempt to gracefully close all connections in the pool.
Wait until all pool connections are released, close them and
shut down the pool. If any error (including cancellation) occurs
in ``close()`` the pool will terminate by calling
:meth:`Pool.terminate() <pool.Pool.terminate>`.
It is advisable to use :func:`python:asyncio.wait_for` to set
a timeout.
.. versionchanged:: 0.16.0
``close()`` now waits until all pool connections are released
before closing them and the pool. Errors raised in ``close()``
will cause immediate pool termination. | [
"Attempt",
"to",
"gracefully",
"close",
"all",
"connections",
"in",
"the",
"pool",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/pool.py#L647-L690 |
240,632 | MagicStack/asyncpg | asyncpg/pool.py | Pool.terminate | def terminate(self):
"""Terminate all connections in the pool."""
if self._closed:
return
self._check_init()
for ch in self._holders:
ch.terminate()
self._closed = True | python | def terminate(self):
if self._closed:
return
self._check_init()
for ch in self._holders:
ch.terminate()
self._closed = True | [
"def",
"terminate",
"(",
"self",
")",
":",
"if",
"self",
".",
"_closed",
":",
"return",
"self",
".",
"_check_init",
"(",
")",
"for",
"ch",
"in",
"self",
".",
"_holders",
":",
"ch",
".",
"terminate",
"(",
")",
"self",
".",
"_closed",
"=",
"True"
] | Terminate all connections in the pool. | [
"Terminate",
"all",
"connections",
"in",
"the",
"pool",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/pool.py#L698-L705 |
240,633 | MagicStack/asyncpg | asyncpg/transaction.py | Transaction.start | async def start(self):
"""Enter the transaction or savepoint block."""
self.__check_state_base('start')
if self._state is TransactionState.STARTED:
raise apg_errors.InterfaceError(
'cannot start; the transaction is already started')
con = self._connection
if con._top_xact is None:
if con._protocol.is_in_transaction():
raise apg_errors.InterfaceError(
'cannot use Connection.transaction() in '
'a manually started transaction')
con._top_xact = self
else:
# Nested transaction block
top_xact = con._top_xact
if self._isolation != top_xact._isolation:
raise apg_errors.InterfaceError(
'nested transaction has a different isolation level: '
'current {!r} != outer {!r}'.format(
self._isolation, top_xact._isolation))
self._nested = True
if self._nested:
self._id = con._get_unique_id('savepoint')
query = 'SAVEPOINT {};'.format(self._id)
else:
if self._isolation == 'read_committed':
query = 'BEGIN;'
elif self._isolation == 'repeatable_read':
query = 'BEGIN ISOLATION LEVEL REPEATABLE READ;'
else:
query = 'BEGIN ISOLATION LEVEL SERIALIZABLE'
if self._readonly:
query += ' READ ONLY'
if self._deferrable:
query += ' DEFERRABLE'
query += ';'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.STARTED | python | async def start(self):
self.__check_state_base('start')
if self._state is TransactionState.STARTED:
raise apg_errors.InterfaceError(
'cannot start; the transaction is already started')
con = self._connection
if con._top_xact is None:
if con._protocol.is_in_transaction():
raise apg_errors.InterfaceError(
'cannot use Connection.transaction() in '
'a manually started transaction')
con._top_xact = self
else:
# Nested transaction block
top_xact = con._top_xact
if self._isolation != top_xact._isolation:
raise apg_errors.InterfaceError(
'nested transaction has a different isolation level: '
'current {!r} != outer {!r}'.format(
self._isolation, top_xact._isolation))
self._nested = True
if self._nested:
self._id = con._get_unique_id('savepoint')
query = 'SAVEPOINT {};'.format(self._id)
else:
if self._isolation == 'read_committed':
query = 'BEGIN;'
elif self._isolation == 'repeatable_read':
query = 'BEGIN ISOLATION LEVEL REPEATABLE READ;'
else:
query = 'BEGIN ISOLATION LEVEL SERIALIZABLE'
if self._readonly:
query += ' READ ONLY'
if self._deferrable:
query += ' DEFERRABLE'
query += ';'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.STARTED | [
"async",
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"__check_state_base",
"(",
"'start'",
")",
"if",
"self",
".",
"_state",
"is",
"TransactionState",
".",
"STARTED",
":",
"raise",
"apg_errors",
".",
"InterfaceError",
"(",
"'cannot start; the transaction... | Enter the transaction or savepoint block. | [
"Enter",
"the",
"transaction",
"or",
"savepoint",
"block",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/transaction.py#L96-L143 |
240,634 | MagicStack/asyncpg | asyncpg/prepared_stmt.py | PreparedStatement.get_statusmsg | def get_statusmsg(self) -> str:
"""Return the status of the executed command.
Example::
stmt = await connection.prepare('CREATE TABLE mytab (a int)')
await stmt.fetch()
assert stmt.get_statusmsg() == "CREATE TABLE"
"""
if self._last_status is None:
return self._last_status
return self._last_status.decode() | python | def get_statusmsg(self) -> str:
if self._last_status is None:
return self._last_status
return self._last_status.decode() | [
"def",
"get_statusmsg",
"(",
"self",
")",
"->",
"str",
":",
"if",
"self",
".",
"_last_status",
"is",
"None",
":",
"return",
"self",
".",
"_last_status",
"return",
"self",
".",
"_last_status",
".",
"decode",
"(",
")"
] | Return the status of the executed command.
Example::
stmt = await connection.prepare('CREATE TABLE mytab (a int)')
await stmt.fetch()
assert stmt.get_statusmsg() == "CREATE TABLE" | [
"Return",
"the",
"status",
"of",
"the",
"executed",
"command",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/prepared_stmt.py#L39-L50 |
240,635 | MagicStack/asyncpg | asyncpg/prepared_stmt.py | PreparedStatement.explain | async def explain(self, *args, analyze=False):
"""Return the execution plan of the statement.
:param args: Query arguments.
:param analyze: If ``True``, the statement will be executed and
the run time statitics added to the return value.
:return: An object representing the execution plan. This value
is actually a deserialized JSON output of the SQL
``EXPLAIN`` command.
"""
query = 'EXPLAIN (FORMAT JSON, VERBOSE'
if analyze:
query += ', ANALYZE) '
else:
query += ') '
query += self._state.query
if analyze:
# From PostgreSQL docs:
# Important: Keep in mind that the statement is actually
# executed when the ANALYZE option is used. Although EXPLAIN
# will discard any output that a SELECT would return, other
# side effects of the statement will happen as usual. If you
# wish to use EXPLAIN ANALYZE on an INSERT, UPDATE, DELETE,
# CREATE TABLE AS, or EXECUTE statement without letting the
# command affect your data, use this approach:
# BEGIN;
# EXPLAIN ANALYZE ...;
# ROLLBACK;
tr = self._connection.transaction()
await tr.start()
try:
data = await self._connection.fetchval(query, *args)
finally:
await tr.rollback()
else:
data = await self._connection.fetchval(query, *args)
return json.loads(data) | python | async def explain(self, *args, analyze=False):
query = 'EXPLAIN (FORMAT JSON, VERBOSE'
if analyze:
query += ', ANALYZE) '
else:
query += ') '
query += self._state.query
if analyze:
# From PostgreSQL docs:
# Important: Keep in mind that the statement is actually
# executed when the ANALYZE option is used. Although EXPLAIN
# will discard any output that a SELECT would return, other
# side effects of the statement will happen as usual. If you
# wish to use EXPLAIN ANALYZE on an INSERT, UPDATE, DELETE,
# CREATE TABLE AS, or EXECUTE statement without letting the
# command affect your data, use this approach:
# BEGIN;
# EXPLAIN ANALYZE ...;
# ROLLBACK;
tr = self._connection.transaction()
await tr.start()
try:
data = await self._connection.fetchval(query, *args)
finally:
await tr.rollback()
else:
data = await self._connection.fetchval(query, *args)
return json.loads(data) | [
"async",
"def",
"explain",
"(",
"self",
",",
"*",
"args",
",",
"analyze",
"=",
"False",
")",
":",
"query",
"=",
"'EXPLAIN (FORMAT JSON, VERBOSE'",
"if",
"analyze",
":",
"query",
"+=",
"', ANALYZE) '",
"else",
":",
"query",
"+=",
"') '",
"query",
"+=",
"sel... | Return the execution plan of the statement.
:param args: Query arguments.
:param analyze: If ``True``, the statement will be executed and
the run time statitics added to the return value.
:return: An object representing the execution plan. This value
is actually a deserialized JSON output of the SQL
``EXPLAIN`` command. | [
"Return",
"the",
"execution",
"plan",
"of",
"the",
"statement",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/prepared_stmt.py#L111-L150 |
240,636 | MagicStack/asyncpg | asyncpg/prepared_stmt.py | PreparedStatement.fetchval | async def fetchval(self, *args, column=0, timeout=None):
"""Execute the statement and return a value in the first row.
:param args: Query arguments.
:param int column: Numeric index within the record of the value to
return (defaults to 0).
:param float timeout: Optional timeout value in seconds.
If not specified, defaults to the value of
``command_timeout`` argument to the ``Connection``
instance constructor.
:return: The value of the specified column of the first record.
"""
data = await self.__bind_execute(args, 1, timeout)
if not data:
return None
return data[0][column] | python | async def fetchval(self, *args, column=0, timeout=None):
data = await self.__bind_execute(args, 1, timeout)
if not data:
return None
return data[0][column] | [
"async",
"def",
"fetchval",
"(",
"self",
",",
"*",
"args",
",",
"column",
"=",
"0",
",",
"timeout",
"=",
"None",
")",
":",
"data",
"=",
"await",
"self",
".",
"__bind_execute",
"(",
"args",
",",
"1",
",",
"timeout",
")",
"if",
"not",
"data",
":",
... | Execute the statement and return a value in the first row.
:param args: Query arguments.
:param int column: Numeric index within the record of the value to
return (defaults to 0).
:param float timeout: Optional timeout value in seconds.
If not specified, defaults to the value of
``command_timeout`` argument to the ``Connection``
instance constructor.
:return: The value of the specified column of the first record. | [
"Execute",
"the",
"statement",
"and",
"return",
"a",
"value",
"in",
"the",
"first",
"row",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/prepared_stmt.py#L166-L182 |
240,637 | MagicStack/asyncpg | asyncpg/prepared_stmt.py | PreparedStatement.fetchrow | async def fetchrow(self, *args, timeout=None):
"""Execute the statement and return the first row.
:param str query: Query text
:param args: Query arguments
:param float timeout: Optional timeout value in seconds.
:return: The first row as a :class:`Record` instance.
"""
data = await self.__bind_execute(args, 1, timeout)
if not data:
return None
return data[0] | python | async def fetchrow(self, *args, timeout=None):
data = await self.__bind_execute(args, 1, timeout)
if not data:
return None
return data[0] | [
"async",
"def",
"fetchrow",
"(",
"self",
",",
"*",
"args",
",",
"timeout",
"=",
"None",
")",
":",
"data",
"=",
"await",
"self",
".",
"__bind_execute",
"(",
"args",
",",
"1",
",",
"timeout",
")",
"if",
"not",
"data",
":",
"return",
"None",
"return",
... | Execute the statement and return the first row.
:param str query: Query text
:param args: Query arguments
:param float timeout: Optional timeout value in seconds.
:return: The first row as a :class:`Record` instance. | [
"Execute",
"the",
"statement",
"and",
"return",
"the",
"first",
"row",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/prepared_stmt.py#L185-L197 |
240,638 | MagicStack/asyncpg | asyncpg/connect_utils.py | _read_password_from_pgpass | def _read_password_from_pgpass(
*, passfile: typing.Optional[pathlib.Path],
hosts: typing.List[str],
ports: typing.List[int],
database: str,
user: str):
"""Parse the pgpass file and return the matching password.
:return:
Password string, if found, ``None`` otherwise.
"""
passtab = _read_password_file(passfile)
if not passtab:
return None
for host, port in zip(hosts, ports):
if host.startswith('/'):
# Unix sockets get normalized into 'localhost'
host = 'localhost'
for phost, pport, pdatabase, puser, ppassword in passtab:
if phost != '*' and phost != host:
continue
if pport != '*' and pport != str(port):
continue
if pdatabase != '*' and pdatabase != database:
continue
if puser != '*' and puser != user:
continue
# Found a match.
return ppassword
return None | python | def _read_password_from_pgpass(
*, passfile: typing.Optional[pathlib.Path],
hosts: typing.List[str],
ports: typing.List[int],
database: str,
user: str):
passtab = _read_password_file(passfile)
if not passtab:
return None
for host, port in zip(hosts, ports):
if host.startswith('/'):
# Unix sockets get normalized into 'localhost'
host = 'localhost'
for phost, pport, pdatabase, puser, ppassword in passtab:
if phost != '*' and phost != host:
continue
if pport != '*' and pport != str(port):
continue
if pdatabase != '*' and pdatabase != database:
continue
if puser != '*' and puser != user:
continue
# Found a match.
return ppassword
return None | [
"def",
"_read_password_from_pgpass",
"(",
"*",
",",
"passfile",
":",
"typing",
".",
"Optional",
"[",
"pathlib",
".",
"Path",
"]",
",",
"hosts",
":",
"typing",
".",
"List",
"[",
"str",
"]",
",",
"ports",
":",
"typing",
".",
"List",
"[",
"int",
"]",
",... | Parse the pgpass file and return the matching password.
:return:
Password string, if found, ``None`` otherwise. | [
"Parse",
"the",
"pgpass",
"file",
"and",
"return",
"the",
"matching",
"password",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connect_utils.py#L105-L139 |
240,639 | MagicStack/asyncpg | asyncpg/utils.py | _mogrify | async def _mogrify(conn, query, args):
"""Safely inline arguments to query text."""
# Introspect the target query for argument types and
# build a list of safely-quoted fully-qualified type names.
ps = await conn.prepare(query)
paramtypes = []
for t in ps.get_parameters():
if t.name.endswith('[]'):
pname = '_' + t.name[:-2]
else:
pname = t.name
paramtypes.append('{}.{}'.format(
_quote_ident(t.schema), _quote_ident(pname)))
del ps
# Use Postgres to convert arguments to text representation
# by casting each value to text.
cols = ['quote_literal(${}::{}::text)'.format(i, t)
for i, t in enumerate(paramtypes, start=1)]
textified = await conn.fetchrow(
'SELECT {cols}'.format(cols=', '.join(cols)), *args)
# Finally, replace $n references with text values.
return re.sub(
r'\$(\d+)\b', lambda m: textified[int(m.group(1)) - 1], query) | python | async def _mogrify(conn, query, args):
# Introspect the target query for argument types and
# build a list of safely-quoted fully-qualified type names.
ps = await conn.prepare(query)
paramtypes = []
for t in ps.get_parameters():
if t.name.endswith('[]'):
pname = '_' + t.name[:-2]
else:
pname = t.name
paramtypes.append('{}.{}'.format(
_quote_ident(t.schema), _quote_ident(pname)))
del ps
# Use Postgres to convert arguments to text representation
# by casting each value to text.
cols = ['quote_literal(${}::{}::text)'.format(i, t)
for i, t in enumerate(paramtypes, start=1)]
textified = await conn.fetchrow(
'SELECT {cols}'.format(cols=', '.join(cols)), *args)
# Finally, replace $n references with text values.
return re.sub(
r'\$(\d+)\b', lambda m: textified[int(m.group(1)) - 1], query) | [
"async",
"def",
"_mogrify",
"(",
"conn",
",",
"query",
",",
"args",
")",
":",
"# Introspect the target query for argument types and",
"# build a list of safely-quoted fully-qualified type names.",
"ps",
"=",
"await",
"conn",
".",
"prepare",
"(",
"query",
")",
"paramtypes"... | Safely inline arguments to query text. | [
"Safely",
"inline",
"arguments",
"to",
"query",
"text",
"."
] | 92c2d81256a1efd8cab12c0118d74ccd1c18131b | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/utils.py#L19-L45 |
240,640 | jrfonseca/gprof2dot | gprof2dot.py | Event.aggregate | def aggregate(self, val1, val2):
"""Aggregate two event values."""
assert val1 is not None
assert val2 is not None
return self._aggregator(val1, val2) | python | def aggregate(self, val1, val2):
assert val1 is not None
assert val2 is not None
return self._aggregator(val1, val2) | [
"def",
"aggregate",
"(",
"self",
",",
"val1",
",",
"val2",
")",
":",
"assert",
"val1",
"is",
"not",
"None",
"assert",
"val2",
"is",
"not",
"None",
"return",
"self",
".",
"_aggregator",
"(",
"val1",
",",
"val2",
")"
] | Aggregate two event values. | [
"Aggregate",
"two",
"event",
"values",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L121-L125 |
240,641 | jrfonseca/gprof2dot | gprof2dot.py | Function.stripped_name | def stripped_name(self):
"""Remove extraneous information from C++ demangled function names."""
name = self.name
# Strip function parameters from name by recursively removing paired parenthesis
while True:
name, n = self._parenthesis_re.subn('', name)
if not n:
break
# Strip const qualifier
name = self._const_re.sub('', name)
# Strip template parameters from name by recursively removing paired angles
while True:
name, n = self._angles_re.subn('', name)
if not n:
break
return name | python | def stripped_name(self):
name = self.name
# Strip function parameters from name by recursively removing paired parenthesis
while True:
name, n = self._parenthesis_re.subn('', name)
if not n:
break
# Strip const qualifier
name = self._const_re.sub('', name)
# Strip template parameters from name by recursively removing paired angles
while True:
name, n = self._angles_re.subn('', name)
if not n:
break
return name | [
"def",
"stripped_name",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"name",
"# Strip function parameters from name by recursively removing paired parenthesis",
"while",
"True",
":",
"name",
",",
"n",
"=",
"self",
".",
"_parenthesis_re",
".",
"subn",
"(",
"''",
... | Remove extraneous information from C++ demangled function names. | [
"Remove",
"extraneous",
"information",
"from",
"C",
"++",
"demangled",
"function",
"names",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L244-L264 |
240,642 | jrfonseca/gprof2dot | gprof2dot.py | Profile.validate | def validate(self):
"""Validate the edges."""
for function in compat_itervalues(self.functions):
for callee_id in compat_keys(function.calls):
assert function.calls[callee_id].callee_id == callee_id
if callee_id not in self.functions:
sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name))
del function.calls[callee_id] | python | def validate(self):
for function in compat_itervalues(self.functions):
for callee_id in compat_keys(function.calls):
assert function.calls[callee_id].callee_id == callee_id
if callee_id not in self.functions:
sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name))
del function.calls[callee_id] | [
"def",
"validate",
"(",
"self",
")",
":",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"for",
"callee_id",
"in",
"compat_keys",
"(",
"function",
".",
"calls",
")",
":",
"assert",
"function",
".",
"calls",
"[",
"c... | Validate the edges. | [
"Validate",
"the",
"edges",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L305-L313 |
240,643 | jrfonseca/gprof2dot | gprof2dot.py | Profile.find_cycles | def find_cycles(self):
"""Find cycles using Tarjan's strongly connected components algorithm."""
# Apply the Tarjan's algorithm successively until all functions are visited
stack = []
data = {}
order = 0
for function in compat_itervalues(self.functions):
order = self._tarjan(function, order, stack, data)
cycles = []
for function in compat_itervalues(self.functions):
if function.cycle is not None and function.cycle not in cycles:
cycles.append(function.cycle)
self.cycles = cycles
if 0:
for cycle in cycles:
sys.stderr.write("Cycle:\n")
for member in cycle.functions:
sys.stderr.write("\tFunction %s\n" % member.name) | python | def find_cycles(self):
# Apply the Tarjan's algorithm successively until all functions are visited
stack = []
data = {}
order = 0
for function in compat_itervalues(self.functions):
order = self._tarjan(function, order, stack, data)
cycles = []
for function in compat_itervalues(self.functions):
if function.cycle is not None and function.cycle not in cycles:
cycles.append(function.cycle)
self.cycles = cycles
if 0:
for cycle in cycles:
sys.stderr.write("Cycle:\n")
for member in cycle.functions:
sys.stderr.write("\tFunction %s\n" % member.name) | [
"def",
"find_cycles",
"(",
"self",
")",
":",
"# Apply the Tarjan's algorithm successively until all functions are visited",
"stack",
"=",
"[",
"]",
"data",
"=",
"{",
"}",
"order",
"=",
"0",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",... | Find cycles using Tarjan's strongly connected components algorithm. | [
"Find",
"cycles",
"using",
"Tarjan",
"s",
"strongly",
"connected",
"components",
"algorithm",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L315-L333 |
240,644 | jrfonseca/gprof2dot | gprof2dot.py | Profile._tarjan | def _tarjan(self, function, order, stack, data):
"""Tarjan's strongly connected components algorithm.
See also:
- http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm
"""
try:
func_data = data[function.id]
return order
except KeyError:
func_data = self._TarjanData(order)
data[function.id] = func_data
order += 1
pos = len(stack)
stack.append(function)
func_data.onstack = True
for call in compat_itervalues(function.calls):
try:
callee_data = data[call.callee_id]
if callee_data.onstack:
func_data.lowlink = min(func_data.lowlink, callee_data.order)
except KeyError:
callee = self.functions[call.callee_id]
order = self._tarjan(callee, order, stack, data)
callee_data = data[call.callee_id]
func_data.lowlink = min(func_data.lowlink, callee_data.lowlink)
if func_data.lowlink == func_data.order:
# Strongly connected component found
members = stack[pos:]
del stack[pos:]
if len(members) > 1:
cycle = Cycle()
for member in members:
cycle.add_function(member)
data[member.id].onstack = False
else:
for member in members:
data[member.id].onstack = False
return order | python | def _tarjan(self, function, order, stack, data):
try:
func_data = data[function.id]
return order
except KeyError:
func_data = self._TarjanData(order)
data[function.id] = func_data
order += 1
pos = len(stack)
stack.append(function)
func_data.onstack = True
for call in compat_itervalues(function.calls):
try:
callee_data = data[call.callee_id]
if callee_data.onstack:
func_data.lowlink = min(func_data.lowlink, callee_data.order)
except KeyError:
callee = self.functions[call.callee_id]
order = self._tarjan(callee, order, stack, data)
callee_data = data[call.callee_id]
func_data.lowlink = min(func_data.lowlink, callee_data.lowlink)
if func_data.lowlink == func_data.order:
# Strongly connected component found
members = stack[pos:]
del stack[pos:]
if len(members) > 1:
cycle = Cycle()
for member in members:
cycle.add_function(member)
data[member.id].onstack = False
else:
for member in members:
data[member.id].onstack = False
return order | [
"def",
"_tarjan",
"(",
"self",
",",
"function",
",",
"order",
",",
"stack",
",",
"data",
")",
":",
"try",
":",
"func_data",
"=",
"data",
"[",
"function",
".",
"id",
"]",
"return",
"order",
"except",
"KeyError",
":",
"func_data",
"=",
"self",
".",
"_T... | Tarjan's strongly connected components algorithm.
See also:
- http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm | [
"Tarjan",
"s",
"strongly",
"connected",
"components",
"algorithm",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L402-L441 |
240,645 | jrfonseca/gprof2dot | gprof2dot.py | Profile.integrate | def integrate(self, outevent, inevent):
"""Propagate function time ratio along the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html
"""
# Sanity checking
assert outevent not in self
for function in compat_itervalues(self.functions):
assert outevent not in function
assert inevent in function
for call in compat_itervalues(function.calls):
assert outevent not in call
if call.callee_id != function.id:
assert call.ratio is not None
# Aggregate the input for each cycle
for cycle in self.cycles:
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self[inevent] = total
# Integrate along the edges
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self._integrate_function(function, outevent, inevent)
self[outevent] = total | python | def integrate(self, outevent, inevent):
# Sanity checking
assert outevent not in self
for function in compat_itervalues(self.functions):
assert outevent not in function
assert inevent in function
for call in compat_itervalues(function.calls):
assert outevent not in call
if call.callee_id != function.id:
assert call.ratio is not None
# Aggregate the input for each cycle
for cycle in self.cycles:
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self[inevent] = total
# Integrate along the edges
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self._integrate_function(function, outevent, inevent)
self[outevent] = total | [
"def",
"integrate",
"(",
"self",
",",
"outevent",
",",
"inevent",
")",
":",
"# Sanity checking",
"assert",
"outevent",
"not",
"in",
"self",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"assert",
"outevent",
"not",
"i... | Propagate function time ratio along the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html | [
"Propagate",
"function",
"time",
"ratio",
"along",
"the",
"function",
"calls",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L484-L515 |
240,646 | jrfonseca/gprof2dot | gprof2dot.py | Profile._rank_cycle_function | def _rank_cycle_function(self, cycle, function, ranks):
"""Dijkstra's shortest paths algorithm.
See also:
- http://en.wikipedia.org/wiki/Dijkstra's_algorithm
"""
import heapq
Q = []
Qd = {}
p = {}
visited = set([function])
ranks[function] = 0
for call in compat_itervalues(function.calls):
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
ranks[callee] = 1
item = [ranks[callee], function, callee]
heapq.heappush(Q, item)
Qd[callee] = item
while Q:
cost, parent, member = heapq.heappop(Q)
if member not in visited:
p[member]= parent
visited.add(member)
for call in compat_itervalues(member.calls):
if call.callee_id != member.id:
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
member_rank = ranks[member]
rank = ranks.get(callee)
if rank is not None:
if rank > 1 + member_rank:
rank = 1 + member_rank
ranks[callee] = rank
Qd_callee = Qd[callee]
Qd_callee[0] = rank
Qd_callee[1] = member
heapq._siftdown(Q, 0, Q.index(Qd_callee))
else:
rank = 1 + member_rank
ranks[callee] = rank
item = [rank, member, callee]
heapq.heappush(Q, item)
Qd[callee] = item | python | def _rank_cycle_function(self, cycle, function, ranks):
import heapq
Q = []
Qd = {}
p = {}
visited = set([function])
ranks[function] = 0
for call in compat_itervalues(function.calls):
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
ranks[callee] = 1
item = [ranks[callee], function, callee]
heapq.heappush(Q, item)
Qd[callee] = item
while Q:
cost, parent, member = heapq.heappop(Q)
if member not in visited:
p[member]= parent
visited.add(member)
for call in compat_itervalues(member.calls):
if call.callee_id != member.id:
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
member_rank = ranks[member]
rank = ranks.get(callee)
if rank is not None:
if rank > 1 + member_rank:
rank = 1 + member_rank
ranks[callee] = rank
Qd_callee = Qd[callee]
Qd_callee[0] = rank
Qd_callee[1] = member
heapq._siftdown(Q, 0, Q.index(Qd_callee))
else:
rank = 1 + member_rank
ranks[callee] = rank
item = [rank, member, callee]
heapq.heappush(Q, item)
Qd[callee] = item | [
"def",
"_rank_cycle_function",
"(",
"self",
",",
"cycle",
",",
"function",
",",
"ranks",
")",
":",
"import",
"heapq",
"Q",
"=",
"[",
"]",
"Qd",
"=",
"{",
"}",
"p",
"=",
"{",
"}",
"visited",
"=",
"set",
"(",
"[",
"function",
"]",
")",
"ranks",
"["... | Dijkstra's shortest paths algorithm.
See also:
- http://en.wikipedia.org/wiki/Dijkstra's_algorithm | [
"Dijkstra",
"s",
"shortest",
"paths",
"algorithm",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L582-L629 |
240,647 | jrfonseca/gprof2dot | gprof2dot.py | Profile.aggregate | def aggregate(self, event):
"""Aggregate an event for the whole profile."""
total = event.null()
for function in compat_itervalues(self.functions):
try:
total = event.aggregate(total, function[event])
except UndefinedEvent:
return
self[event] = total | python | def aggregate(self, event):
total = event.null()
for function in compat_itervalues(self.functions):
try:
total = event.aggregate(total, function[event])
except UndefinedEvent:
return
self[event] = total | [
"def",
"aggregate",
"(",
"self",
",",
"event",
")",
":",
"total",
"=",
"event",
".",
"null",
"(",
")",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"try",
":",
"total",
"=",
"event",
".",
"aggregate",
"(",
"t... | Aggregate an event for the whole profile. | [
"Aggregate",
"an",
"event",
"for",
"the",
"whole",
"profile",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L668-L677 |
240,648 | jrfonseca/gprof2dot | gprof2dot.py | Profile.prune | def prune(self, node_thres, edge_thres, paths, color_nodes_by_selftime):
"""Prune the profile"""
# compute the prune ratios
for function in compat_itervalues(self.functions):
try:
function.weight = function[TOTAL_TIME_RATIO]
except UndefinedEvent:
pass
for call in compat_itervalues(function.calls):
callee = self.functions[call.callee_id]
if TOTAL_TIME_RATIO in call:
# handle exact cases first
call.weight = call[TOTAL_TIME_RATIO]
else:
try:
# make a safe estimate
call.weight = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO])
except UndefinedEvent:
pass
# prune the nodes
for function_id in compat_keys(self.functions):
function = self.functions[function_id]
if function.weight is not None:
if function.weight < node_thres:
del self.functions[function_id]
# prune file paths
for function_id in compat_keys(self.functions):
function = self.functions[function_id]
if paths and function.filename and not any(function.filename.startswith(path) for path in paths):
del self.functions[function_id]
elif paths and function.module and not any((function.module.find(path)>-1) for path in paths):
del self.functions[function_id]
# prune the edges
for function in compat_itervalues(self.functions):
for callee_id in compat_keys(function.calls):
call = function.calls[callee_id]
if callee_id not in self.functions or call.weight is not None and call.weight < edge_thres:
del function.calls[callee_id]
if color_nodes_by_selftime:
weights = []
for function in compat_itervalues(self.functions):
try:
weights.append(function[TIME_RATIO])
except UndefinedEvent:
pass
max_ratio = max(weights or [1])
# apply rescaled weights for coloriung
for function in compat_itervalues(self.functions):
try:
function.weight = function[TIME_RATIO] / max_ratio
except (ZeroDivisionError, UndefinedEvent):
pass | python | def prune(self, node_thres, edge_thres, paths, color_nodes_by_selftime):
# compute the prune ratios
for function in compat_itervalues(self.functions):
try:
function.weight = function[TOTAL_TIME_RATIO]
except UndefinedEvent:
pass
for call in compat_itervalues(function.calls):
callee = self.functions[call.callee_id]
if TOTAL_TIME_RATIO in call:
# handle exact cases first
call.weight = call[TOTAL_TIME_RATIO]
else:
try:
# make a safe estimate
call.weight = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO])
except UndefinedEvent:
pass
# prune the nodes
for function_id in compat_keys(self.functions):
function = self.functions[function_id]
if function.weight is not None:
if function.weight < node_thres:
del self.functions[function_id]
# prune file paths
for function_id in compat_keys(self.functions):
function = self.functions[function_id]
if paths and function.filename and not any(function.filename.startswith(path) for path in paths):
del self.functions[function_id]
elif paths and function.module and not any((function.module.find(path)>-1) for path in paths):
del self.functions[function_id]
# prune the edges
for function in compat_itervalues(self.functions):
for callee_id in compat_keys(function.calls):
call = function.calls[callee_id]
if callee_id not in self.functions or call.weight is not None and call.weight < edge_thres:
del function.calls[callee_id]
if color_nodes_by_selftime:
weights = []
for function in compat_itervalues(self.functions):
try:
weights.append(function[TIME_RATIO])
except UndefinedEvent:
pass
max_ratio = max(weights or [1])
# apply rescaled weights for coloriung
for function in compat_itervalues(self.functions):
try:
function.weight = function[TIME_RATIO] / max_ratio
except (ZeroDivisionError, UndefinedEvent):
pass | [
"def",
"prune",
"(",
"self",
",",
"node_thres",
",",
"edge_thres",
",",
"paths",
",",
"color_nodes_by_selftime",
")",
":",
"# compute the prune ratios",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"try",
":",
"function"... | Prune the profile | [
"Prune",
"the",
"profile"
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L692-L751 |
240,649 | jrfonseca/gprof2dot | gprof2dot.py | GprofParser.translate | def translate(self, mo):
"""Extract a structure from a match object, while translating the types in the process."""
attrs = {}
groupdict = mo.groupdict()
for name, value in compat_iteritems(groupdict):
if value is None:
value = None
elif self._int_re.match(value):
value = int(value)
elif self._float_re.match(value):
value = float(value)
attrs[name] = (value)
return Struct(attrs) | python | def translate(self, mo):
attrs = {}
groupdict = mo.groupdict()
for name, value in compat_iteritems(groupdict):
if value is None:
value = None
elif self._int_re.match(value):
value = int(value)
elif self._float_re.match(value):
value = float(value)
attrs[name] = (value)
return Struct(attrs) | [
"def",
"translate",
"(",
"self",
",",
"mo",
")",
":",
"attrs",
"=",
"{",
"}",
"groupdict",
"=",
"mo",
".",
"groupdict",
"(",
")",
"for",
"name",
",",
"value",
"in",
"compat_iteritems",
"(",
"groupdict",
")",
":",
"if",
"value",
"is",
"None",
":",
"... | Extract a structure from a match object, while translating the types in the process. | [
"Extract",
"a",
"structure",
"from",
"a",
"match",
"object",
"while",
"translating",
"the",
"types",
"in",
"the",
"process",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L1114-L1126 |
240,650 | jrfonseca/gprof2dot | gprof2dot.py | AXEParser.parse_cg | def parse_cg(self):
"""Parse the call graph."""
# skip call graph header
line = self.readline()
while self._cg_header_re.match(line):
line = self.readline()
# process call graph entries
entry_lines = []
# An EOF in readline terminates the program without returning.
while not self._cg_footer_re.match(line):
if line.isspace():
self.parse_cg_entry(entry_lines)
entry_lines = []
else:
entry_lines.append(line)
line = self.readline() | python | def parse_cg(self):
# skip call graph header
line = self.readline()
while self._cg_header_re.match(line):
line = self.readline()
# process call graph entries
entry_lines = []
# An EOF in readline terminates the program without returning.
while not self._cg_footer_re.match(line):
if line.isspace():
self.parse_cg_entry(entry_lines)
entry_lines = []
else:
entry_lines.append(line)
line = self.readline() | [
"def",
"parse_cg",
"(",
"self",
")",
":",
"# skip call graph header",
"line",
"=",
"self",
".",
"readline",
"(",
")",
"while",
"self",
".",
"_cg_header_re",
".",
"match",
"(",
"line",
")",
":",
"line",
"=",
"self",
".",
"readline",
"(",
")",
"# process c... | Parse the call graph. | [
"Parse",
"the",
"call",
"graph",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L1541-L1558 |
240,651 | jrfonseca/gprof2dot | gprof2dot.py | Theme.hsl_to_rgb | def hsl_to_rgb(self, h, s, l):
"""Convert a color from HSL color-model to RGB.
See also:
- http://www.w3.org/TR/css3-color/#hsl-color
"""
h = h % 1.0
s = min(max(s, 0.0), 1.0)
l = min(max(l, 0.0), 1.0)
if l <= 0.5:
m2 = l*(s + 1.0)
else:
m2 = l + s - l*s
m1 = l*2.0 - m2
r = self._hue_to_rgb(m1, m2, h + 1.0/3.0)
g = self._hue_to_rgb(m1, m2, h)
b = self._hue_to_rgb(m1, m2, h - 1.0/3.0)
# Apply gamma correction
r **= self.gamma
g **= self.gamma
b **= self.gamma
return (r, g, b) | python | def hsl_to_rgb(self, h, s, l):
h = h % 1.0
s = min(max(s, 0.0), 1.0)
l = min(max(l, 0.0), 1.0)
if l <= 0.5:
m2 = l*(s + 1.0)
else:
m2 = l + s - l*s
m1 = l*2.0 - m2
r = self._hue_to_rgb(m1, m2, h + 1.0/3.0)
g = self._hue_to_rgb(m1, m2, h)
b = self._hue_to_rgb(m1, m2, h - 1.0/3.0)
# Apply gamma correction
r **= self.gamma
g **= self.gamma
b **= self.gamma
return (r, g, b) | [
"def",
"hsl_to_rgb",
"(",
"self",
",",
"h",
",",
"s",
",",
"l",
")",
":",
"h",
"=",
"h",
"%",
"1.0",
"s",
"=",
"min",
"(",
"max",
"(",
"s",
",",
"0.0",
")",
",",
"1.0",
")",
"l",
"=",
"min",
"(",
"max",
"(",
"l",
",",
"0.0",
")",
",",
... | Convert a color from HSL color-model to RGB.
See also:
- http://www.w3.org/TR/css3-color/#hsl-color | [
"Convert",
"a",
"color",
"from",
"HSL",
"color",
"-",
"model",
"to",
"RGB",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L2863-L2888 |
240,652 | jrfonseca/gprof2dot | gprof2dot.py | DotWriter.wrap_function_name | def wrap_function_name(self, name):
"""Split the function name on multiple lines."""
if len(name) > 32:
ratio = 2.0/3.0
height = max(int(len(name)/(1.0 - ratio) + 0.5), 1)
width = max(len(name)/height, 32)
# TODO: break lines in symbols
name = textwrap.fill(name, width, break_long_words=False)
# Take away spaces
name = name.replace(", ", ",")
name = name.replace("> >", ">>")
name = name.replace("> >", ">>") # catch consecutive
return name | python | def wrap_function_name(self, name):
if len(name) > 32:
ratio = 2.0/3.0
height = max(int(len(name)/(1.0 - ratio) + 0.5), 1)
width = max(len(name)/height, 32)
# TODO: break lines in symbols
name = textwrap.fill(name, width, break_long_words=False)
# Take away spaces
name = name.replace(", ", ",")
name = name.replace("> >", ">>")
name = name.replace("> >", ">>") # catch consecutive
return name | [
"def",
"wrap_function_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"len",
"(",
"name",
")",
">",
"32",
":",
"ratio",
"=",
"2.0",
"/",
"3.0",
"height",
"=",
"max",
"(",
"int",
"(",
"len",
"(",
"name",
")",
"/",
"(",
"1.0",
"-",
"ratio",
")",... | Split the function name on multiple lines. | [
"Split",
"the",
"function",
"name",
"on",
"multiple",
"lines",
"."
] | 0500e89f001e555f5eaa32e70793b4875f2f70db | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L2974-L2989 |
240,653 | danielhrisca/asammdf | asammdf/blocks/utils.py | matlab_compatible | def matlab_compatible(name):
""" make a channel name compatible with Matlab variable naming
Parameters
----------
name : str
channel name
Returns
-------
compatible_name : str
channel name compatible with Matlab
"""
compatible_name = [ch if ch in ALLOWED_MATLAB_CHARS else "_" for ch in name]
compatible_name = "".join(compatible_name)
if compatible_name[0] not in string.ascii_letters:
compatible_name = "M_" + compatible_name
# max variable name is 63 and 3 chars are reserved
# for get_unique_name in case of multiple channel name occurence
return compatible_name[:60] | python | def matlab_compatible(name):
compatible_name = [ch if ch in ALLOWED_MATLAB_CHARS else "_" for ch in name]
compatible_name = "".join(compatible_name)
if compatible_name[0] not in string.ascii_letters:
compatible_name = "M_" + compatible_name
# max variable name is 63 and 3 chars are reserved
# for get_unique_name in case of multiple channel name occurence
return compatible_name[:60] | [
"def",
"matlab_compatible",
"(",
"name",
")",
":",
"compatible_name",
"=",
"[",
"ch",
"if",
"ch",
"in",
"ALLOWED_MATLAB_CHARS",
"else",
"\"_\"",
"for",
"ch",
"in",
"name",
"]",
"compatible_name",
"=",
"\"\"",
".",
"join",
"(",
"compatible_name",
")",
"if",
... | make a channel name compatible with Matlab variable naming
Parameters
----------
name : str
channel name
Returns
-------
compatible_name : str
channel name compatible with Matlab | [
"make",
"a",
"channel",
"name",
"compatible",
"with",
"Matlab",
"variable",
"naming"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L149-L172 |
240,654 | danielhrisca/asammdf | asammdf/blocks/utils.py | get_text_v3 | def get_text_v3(address, stream, mapped=False):
""" faster way to extract strings from mdf versions 2 and 3 TextBlock
Parameters
----------
address : int
TextBlock address
stream : handle
file IO handle
Returns
-------
text : str
unicode string
"""
if address == 0:
return ""
if mapped:
size, = UINT16_uf(stream, address + 2)
text_bytes = stream[address + 4: address + size]
else:
stream.seek(address + 2)
size = UINT16_u(stream.read(2))[0] - 4
text_bytes = stream.read(size)
try:
text = text_bytes.strip(b" \r\t\n\0").decode("latin-1")
except UnicodeDecodeError as err:
try:
from cchardet import detect
encoding = detect(text_bytes)["encoding"]
text = text_bytes.strip(b" \r\t\n\0").decode(encoding)
except ImportError:
logger.warning(
'Unicode exception occured and "cChardet" package is '
'not installed. Mdf version 3 expects "latin-1" '
"strings and this package may detect if a different"
" encoding was used"
)
raise err
return text | python | def get_text_v3(address, stream, mapped=False):
if address == 0:
return ""
if mapped:
size, = UINT16_uf(stream, address + 2)
text_bytes = stream[address + 4: address + size]
else:
stream.seek(address + 2)
size = UINT16_u(stream.read(2))[0] - 4
text_bytes = stream.read(size)
try:
text = text_bytes.strip(b" \r\t\n\0").decode("latin-1")
except UnicodeDecodeError as err:
try:
from cchardet import detect
encoding = detect(text_bytes)["encoding"]
text = text_bytes.strip(b" \r\t\n\0").decode(encoding)
except ImportError:
logger.warning(
'Unicode exception occured and "cChardet" package is '
'not installed. Mdf version 3 expects "latin-1" '
"strings and this package may detect if a different"
" encoding was used"
)
raise err
return text | [
"def",
"get_text_v3",
"(",
"address",
",",
"stream",
",",
"mapped",
"=",
"False",
")",
":",
"if",
"address",
"==",
"0",
":",
"return",
"\"\"",
"if",
"mapped",
":",
"size",
",",
"=",
"UINT16_uf",
"(",
"stream",
",",
"address",
"+",
"2",
")",
"text_byt... | faster way to extract strings from mdf versions 2 and 3 TextBlock
Parameters
----------
address : int
TextBlock address
stream : handle
file IO handle
Returns
-------
text : str
unicode string | [
"faster",
"way",
"to",
"extract",
"strings",
"from",
"mdf",
"versions",
"2",
"and",
"3",
"TextBlock"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L175-L219 |
240,655 | danielhrisca/asammdf | asammdf/blocks/utils.py | get_text_v4 | def get_text_v4(address, stream, mapped=False):
""" faster way to extract strings from mdf version 4 TextBlock
Parameters
----------
address : int
TextBlock address
stream : handle
file IO handle
Returns
-------
text : str
unicode string
"""
if address == 0:
return ""
if mapped:
size, _ = TWO_UINT64_uf(stream, address + 8)
text_bytes = stream[address + 24: address + size]
else:
stream.seek(address + 8)
size, _ = TWO_UINT64_u(stream.read(16))
text_bytes = stream.read(size - 24)
try:
text = text_bytes.strip(b" \r\t\n\0").decode("utf-8")
except UnicodeDecodeError as err:
try:
from cchardet import detect
encoding = detect(text_bytes)["encoding"]
text = text_bytes.decode(encoding).strip(" \r\t\n\0")
except ImportError:
logger.warning(
'Unicode exception occured and "cChardet" package is '
'not installed. Mdf version 4 expects "utf-8" '
"strings and this package may detect if a different"
" encoding was used"
)
raise err
return text | python | def get_text_v4(address, stream, mapped=False):
if address == 0:
return ""
if mapped:
size, _ = TWO_UINT64_uf(stream, address + 8)
text_bytes = stream[address + 24: address + size]
else:
stream.seek(address + 8)
size, _ = TWO_UINT64_u(stream.read(16))
text_bytes = stream.read(size - 24)
try:
text = text_bytes.strip(b" \r\t\n\0").decode("utf-8")
except UnicodeDecodeError as err:
try:
from cchardet import detect
encoding = detect(text_bytes)["encoding"]
text = text_bytes.decode(encoding).strip(" \r\t\n\0")
except ImportError:
logger.warning(
'Unicode exception occured and "cChardet" package is '
'not installed. Mdf version 4 expects "utf-8" '
"strings and this package may detect if a different"
" encoding was used"
)
raise err
return text | [
"def",
"get_text_v4",
"(",
"address",
",",
"stream",
",",
"mapped",
"=",
"False",
")",
":",
"if",
"address",
"==",
"0",
":",
"return",
"\"\"",
"if",
"mapped",
":",
"size",
",",
"_",
"=",
"TWO_UINT64_uf",
"(",
"stream",
",",
"address",
"+",
"8",
")",
... | faster way to extract strings from mdf version 4 TextBlock
Parameters
----------
address : int
TextBlock address
stream : handle
file IO handle
Returns
-------
text : str
unicode string | [
"faster",
"way",
"to",
"extract",
"strings",
"from",
"mdf",
"version",
"4",
"TextBlock"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L222-L266 |
240,656 | danielhrisca/asammdf | asammdf/blocks/utils.py | get_fmt_v3 | def get_fmt_v3(data_type, size):
"""convert mdf versions 2 and 3 channel data type to numpy dtype format
string
Parameters
----------
data_type : int
mdf channel data type
size : int
data bit size
Returns
-------
fmt : str
numpy compatible data type format string
"""
if data_type in {v3c.DATA_TYPE_STRING, v3c.DATA_TYPE_BYTEARRAY}:
size = size // 8
if data_type == v3c.DATA_TYPE_STRING:
fmt = f"S{size}"
elif data_type == v3c.DATA_TYPE_BYTEARRAY:
fmt = f"({size},)u1"
else:
if size <= 8:
size = 1
elif size <= 16:
size = 2
elif size <= 32:
size = 4
elif size <= 64:
size = 8
else:
size = size // 8
if data_type in (v3c.DATA_TYPE_UNSIGNED_INTEL, v3c.DATA_TYPE_UNSIGNED):
fmt = f"<u{size}".format()
elif data_type == v3c.DATA_TYPE_UNSIGNED_MOTOROLA:
fmt = f">u{size}"
elif data_type in (v3c.DATA_TYPE_SIGNED_INTEL, v3c.DATA_TYPE_SIGNED):
fmt = f"<i{size}"
elif data_type == v3c.DATA_TYPE_SIGNED_MOTOROLA:
fmt = f">i{size}"
elif data_type in {
v3c.DATA_TYPE_FLOAT,
v3c.DATA_TYPE_DOUBLE,
v3c.DATA_TYPE_FLOAT_INTEL,
v3c.DATA_TYPE_DOUBLE_INTEL,
}:
fmt = f"<f{size}"
elif data_type in (v3c.DATA_TYPE_FLOAT_MOTOROLA, v3c.DATA_TYPE_DOUBLE_MOTOROLA):
fmt = f">f{size}"
return fmt | python | def get_fmt_v3(data_type, size):
if data_type in {v3c.DATA_TYPE_STRING, v3c.DATA_TYPE_BYTEARRAY}:
size = size // 8
if data_type == v3c.DATA_TYPE_STRING:
fmt = f"S{size}"
elif data_type == v3c.DATA_TYPE_BYTEARRAY:
fmt = f"({size},)u1"
else:
if size <= 8:
size = 1
elif size <= 16:
size = 2
elif size <= 32:
size = 4
elif size <= 64:
size = 8
else:
size = size // 8
if data_type in (v3c.DATA_TYPE_UNSIGNED_INTEL, v3c.DATA_TYPE_UNSIGNED):
fmt = f"<u{size}".format()
elif data_type == v3c.DATA_TYPE_UNSIGNED_MOTOROLA:
fmt = f">u{size}"
elif data_type in (v3c.DATA_TYPE_SIGNED_INTEL, v3c.DATA_TYPE_SIGNED):
fmt = f"<i{size}"
elif data_type == v3c.DATA_TYPE_SIGNED_MOTOROLA:
fmt = f">i{size}"
elif data_type in {
v3c.DATA_TYPE_FLOAT,
v3c.DATA_TYPE_DOUBLE,
v3c.DATA_TYPE_FLOAT_INTEL,
v3c.DATA_TYPE_DOUBLE_INTEL,
}:
fmt = f"<f{size}"
elif data_type in (v3c.DATA_TYPE_FLOAT_MOTOROLA, v3c.DATA_TYPE_DOUBLE_MOTOROLA):
fmt = f">f{size}"
return fmt | [
"def",
"get_fmt_v3",
"(",
"data_type",
",",
"size",
")",
":",
"if",
"data_type",
"in",
"{",
"v3c",
".",
"DATA_TYPE_STRING",
",",
"v3c",
".",
"DATA_TYPE_BYTEARRAY",
"}",
":",
"size",
"=",
"size",
"//",
"8",
"if",
"data_type",
"==",
"v3c",
".",
"DATA_TYPE_... | convert mdf versions 2 and 3 channel data type to numpy dtype format
string
Parameters
----------
data_type : int
mdf channel data type
size : int
data bit size
Returns
-------
fmt : str
numpy compatible data type format string | [
"convert",
"mdf",
"versions",
"2",
"and",
"3",
"channel",
"data",
"type",
"to",
"numpy",
"dtype",
"format",
"string"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L273-L330 |
240,657 | danielhrisca/asammdf | asammdf/blocks/utils.py | get_fmt_v4 | def get_fmt_v4(data_type, size, channel_type=v4c.CHANNEL_TYPE_VALUE):
"""convert mdf version 4 channel data type to numpy dtype format string
Parameters
----------
data_type : int
mdf channel data type
size : int
data bit size
channel_type: int
mdf channel type
Returns
-------
fmt : str
numpy compatible data type format string
"""
if data_type in v4c.NON_SCALAR_TYPES:
size = size // 8
if data_type == v4c.DATA_TYPE_BYTEARRAY:
if channel_type == v4c.CHANNEL_TYPE_VALUE:
fmt = f"({size},)u1"
else:
if size == 4:
fmt = "<u4"
elif size == 8:
fmt = "<u8"
elif data_type in v4c.STRING_TYPES:
if channel_type == v4c.CHANNEL_TYPE_VALUE:
fmt = f"S{size}"
else:
if size == 4:
fmt = "<u4"
elif size == 8:
fmt = "<u8"
elif data_type == v4c.DATA_TYPE_CANOPEN_DATE:
fmt = "V7"
elif data_type == v4c.DATA_TYPE_CANOPEN_TIME:
fmt = "V6"
else:
if size <= 8:
size = 1
elif size <= 16:
size = 2
elif size <= 32:
size = 4
elif size <= 64:
size = 8
else:
size = size // 8
if data_type == v4c.DATA_TYPE_UNSIGNED_INTEL:
fmt = f"<u{size}"
elif data_type == v4c.DATA_TYPE_UNSIGNED_MOTOROLA:
fmt = f">u{size}"
elif data_type == v4c.DATA_TYPE_SIGNED_INTEL:
fmt = f"<i{size}"
elif data_type == v4c.DATA_TYPE_SIGNED_MOTOROLA:
fmt = f">i{size}"
elif data_type == v4c.DATA_TYPE_REAL_INTEL:
fmt = f"<f{size}"
elif data_type == v4c.DATA_TYPE_REAL_MOTOROLA:
fmt = f">f{size}"
return fmt | python | def get_fmt_v4(data_type, size, channel_type=v4c.CHANNEL_TYPE_VALUE):
if data_type in v4c.NON_SCALAR_TYPES:
size = size // 8
if data_type == v4c.DATA_TYPE_BYTEARRAY:
if channel_type == v4c.CHANNEL_TYPE_VALUE:
fmt = f"({size},)u1"
else:
if size == 4:
fmt = "<u4"
elif size == 8:
fmt = "<u8"
elif data_type in v4c.STRING_TYPES:
if channel_type == v4c.CHANNEL_TYPE_VALUE:
fmt = f"S{size}"
else:
if size == 4:
fmt = "<u4"
elif size == 8:
fmt = "<u8"
elif data_type == v4c.DATA_TYPE_CANOPEN_DATE:
fmt = "V7"
elif data_type == v4c.DATA_TYPE_CANOPEN_TIME:
fmt = "V6"
else:
if size <= 8:
size = 1
elif size <= 16:
size = 2
elif size <= 32:
size = 4
elif size <= 64:
size = 8
else:
size = size // 8
if data_type == v4c.DATA_TYPE_UNSIGNED_INTEL:
fmt = f"<u{size}"
elif data_type == v4c.DATA_TYPE_UNSIGNED_MOTOROLA:
fmt = f">u{size}"
elif data_type == v4c.DATA_TYPE_SIGNED_INTEL:
fmt = f"<i{size}"
elif data_type == v4c.DATA_TYPE_SIGNED_MOTOROLA:
fmt = f">i{size}"
elif data_type == v4c.DATA_TYPE_REAL_INTEL:
fmt = f"<f{size}"
elif data_type == v4c.DATA_TYPE_REAL_MOTOROLA:
fmt = f">f{size}"
return fmt | [
"def",
"get_fmt_v4",
"(",
"data_type",
",",
"size",
",",
"channel_type",
"=",
"v4c",
".",
"CHANNEL_TYPE_VALUE",
")",
":",
"if",
"data_type",
"in",
"v4c",
".",
"NON_SCALAR_TYPES",
":",
"size",
"=",
"size",
"//",
"8",
"if",
"data_type",
"==",
"v4c",
".",
"... | convert mdf version 4 channel data type to numpy dtype format string
Parameters
----------
data_type : int
mdf channel data type
size : int
data bit size
channel_type: int
mdf channel type
Returns
-------
fmt : str
numpy compatible data type format string | [
"convert",
"mdf",
"version",
"4",
"channel",
"data",
"type",
"to",
"numpy",
"dtype",
"format",
"string"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L333-L409 |
240,658 | danielhrisca/asammdf | asammdf/blocks/utils.py | fmt_to_datatype_v3 | def fmt_to_datatype_v3(fmt, shape, array=False):
"""convert numpy dtype format string to mdf versions 2 and 3
channel data type and size
Parameters
----------
fmt : numpy.dtype
numpy data type
shape : tuple
numpy array shape
array : bool
disambiguate between bytearray and channel array
Returns
-------
data_type, size : int, int
integer data type as defined by ASAM MDF and bit size
"""
size = fmt.itemsize * 8
if not array and shape[1:] and fmt.itemsize == 1 and fmt.kind == "u":
data_type = v3c.DATA_TYPE_BYTEARRAY
for dim in shape[1:]:
size *= dim
else:
if fmt.kind == "u":
if fmt.byteorder in "=<|":
data_type = v3c.DATA_TYPE_UNSIGNED
else:
data_type = v3c.DATA_TYPE_UNSIGNED_MOTOROLA
elif fmt.kind == "i":
if fmt.byteorder in "=<|":
data_type = v3c.DATA_TYPE_SIGNED
else:
data_type = v3c.DATA_TYPE_SIGNED_MOTOROLA
elif fmt.kind == "f":
if fmt.byteorder in "=<":
if size == 32:
data_type = v3c.DATA_TYPE_FLOAT
else:
data_type = v3c.DATA_TYPE_DOUBLE
else:
if size == 32:
data_type = v3c.DATA_TYPE_FLOAT_MOTOROLA
else:
data_type = v3c.DATA_TYPE_DOUBLE_MOTOROLA
elif fmt.kind in "SV":
data_type = v3c.DATA_TYPE_STRING
elif fmt.kind == "b":
data_type = v3c.DATA_TYPE_UNSIGNED
size = 1
else:
message = f"Unknown type: dtype={fmt}, shape={shape}"
logger.exception(message)
raise MdfException(message)
return data_type, size | python | def fmt_to_datatype_v3(fmt, shape, array=False):
size = fmt.itemsize * 8
if not array and shape[1:] and fmt.itemsize == 1 and fmt.kind == "u":
data_type = v3c.DATA_TYPE_BYTEARRAY
for dim in shape[1:]:
size *= dim
else:
if fmt.kind == "u":
if fmt.byteorder in "=<|":
data_type = v3c.DATA_TYPE_UNSIGNED
else:
data_type = v3c.DATA_TYPE_UNSIGNED_MOTOROLA
elif fmt.kind == "i":
if fmt.byteorder in "=<|":
data_type = v3c.DATA_TYPE_SIGNED
else:
data_type = v3c.DATA_TYPE_SIGNED_MOTOROLA
elif fmt.kind == "f":
if fmt.byteorder in "=<":
if size == 32:
data_type = v3c.DATA_TYPE_FLOAT
else:
data_type = v3c.DATA_TYPE_DOUBLE
else:
if size == 32:
data_type = v3c.DATA_TYPE_FLOAT_MOTOROLA
else:
data_type = v3c.DATA_TYPE_DOUBLE_MOTOROLA
elif fmt.kind in "SV":
data_type = v3c.DATA_TYPE_STRING
elif fmt.kind == "b":
data_type = v3c.DATA_TYPE_UNSIGNED
size = 1
else:
message = f"Unknown type: dtype={fmt}, shape={shape}"
logger.exception(message)
raise MdfException(message)
return data_type, size | [
"def",
"fmt_to_datatype_v3",
"(",
"fmt",
",",
"shape",
",",
"array",
"=",
"False",
")",
":",
"size",
"=",
"fmt",
".",
"itemsize",
"*",
"8",
"if",
"not",
"array",
"and",
"shape",
"[",
"1",
":",
"]",
"and",
"fmt",
".",
"itemsize",
"==",
"1",
"and",
... | convert numpy dtype format string to mdf versions 2 and 3
channel data type and size
Parameters
----------
fmt : numpy.dtype
numpy data type
shape : tuple
numpy array shape
array : bool
disambiguate between bytearray and channel array
Returns
-------
data_type, size : int, int
integer data type as defined by ASAM MDF and bit size | [
"convert",
"numpy",
"dtype",
"format",
"string",
"to",
"mdf",
"versions",
"2",
"and",
"3",
"channel",
"data",
"type",
"and",
"size"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L412-L469 |
240,659 | danielhrisca/asammdf | asammdf/blocks/utils.py | info_to_datatype_v4 | def info_to_datatype_v4(signed, little_endian):
"""map CAN signal to MDF integer types
Parameters
----------
signed : bool
signal is flagged as signed in the CAN database
little_endian : bool
signal is flagged as little endian (Intel) in the CAN database
Returns
-------
datatype : int
integer code for MDF channel data type
"""
if signed:
if little_endian:
datatype = v4c.DATA_TYPE_SIGNED_INTEL
else:
datatype = v4c.DATA_TYPE_SIGNED_MOTOROLA
else:
if little_endian:
datatype = v4c.DATA_TYPE_UNSIGNED_INTEL
else:
datatype = v4c.DATA_TYPE_UNSIGNED_MOTOROLA
return datatype | python | def info_to_datatype_v4(signed, little_endian):
if signed:
if little_endian:
datatype = v4c.DATA_TYPE_SIGNED_INTEL
else:
datatype = v4c.DATA_TYPE_SIGNED_MOTOROLA
else:
if little_endian:
datatype = v4c.DATA_TYPE_UNSIGNED_INTEL
else:
datatype = v4c.DATA_TYPE_UNSIGNED_MOTOROLA
return datatype | [
"def",
"info_to_datatype_v4",
"(",
"signed",
",",
"little_endian",
")",
":",
"if",
"signed",
":",
"if",
"little_endian",
":",
"datatype",
"=",
"v4c",
".",
"DATA_TYPE_SIGNED_INTEL",
"else",
":",
"datatype",
"=",
"v4c",
".",
"DATA_TYPE_SIGNED_MOTOROLA",
"else",
":... | map CAN signal to MDF integer types
Parameters
----------
signed : bool
signal is flagged as signed in the CAN database
little_endian : bool
signal is flagged as little endian (Intel) in the CAN database
Returns
-------
datatype : int
integer code for MDF channel data type | [
"map",
"CAN",
"signal",
"to",
"MDF",
"integer",
"types"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L472-L500 |
240,660 | danielhrisca/asammdf | asammdf/blocks/utils.py | fmt_to_datatype_v4 | def fmt_to_datatype_v4(fmt, shape, array=False):
"""convert numpy dtype format string to mdf version 4 channel data
type and size
Parameters
----------
fmt : numpy.dtype
numpy data type
shape : tuple
numpy array shape
array : bool
disambiguate between bytearray and channel array
Returns
-------
data_type, size : int, int
integer data type as defined by ASAM MDF and bit size
"""
size = fmt.itemsize * 8
if not array and shape[1:] and fmt.itemsize == 1 and fmt.kind == "u":
data_type = v4c.DATA_TYPE_BYTEARRAY
for dim in shape[1:]:
size *= dim
else:
if fmt.kind == "u":
if fmt.byteorder in "=<|":
data_type = v4c.DATA_TYPE_UNSIGNED_INTEL
else:
data_type = v4c.DATA_TYPE_UNSIGNED_MOTOROLA
elif fmt.kind == "i":
if fmt.byteorder in "=<|":
data_type = v4c.DATA_TYPE_SIGNED_INTEL
else:
data_type = v4c.DATA_TYPE_SIGNED_MOTOROLA
elif fmt.kind == "f":
if fmt.byteorder in "=<":
data_type = v4c.DATA_TYPE_REAL_INTEL
else:
data_type = v4c.DATA_TYPE_REAL_MOTOROLA
elif fmt.kind in "SV":
data_type = v4c.DATA_TYPE_STRING_LATIN_1
elif fmt.kind == "b":
data_type = v4c.DATA_TYPE_UNSIGNED_INTEL
size = 1
else:
message = f"Unknown type: dtype={fmt}, shape={shape}"
logger.exception(message)
raise MdfException(message)
return data_type, size | python | def fmt_to_datatype_v4(fmt, shape, array=False):
size = fmt.itemsize * 8
if not array and shape[1:] and fmt.itemsize == 1 and fmt.kind == "u":
data_type = v4c.DATA_TYPE_BYTEARRAY
for dim in shape[1:]:
size *= dim
else:
if fmt.kind == "u":
if fmt.byteorder in "=<|":
data_type = v4c.DATA_TYPE_UNSIGNED_INTEL
else:
data_type = v4c.DATA_TYPE_UNSIGNED_MOTOROLA
elif fmt.kind == "i":
if fmt.byteorder in "=<|":
data_type = v4c.DATA_TYPE_SIGNED_INTEL
else:
data_type = v4c.DATA_TYPE_SIGNED_MOTOROLA
elif fmt.kind == "f":
if fmt.byteorder in "=<":
data_type = v4c.DATA_TYPE_REAL_INTEL
else:
data_type = v4c.DATA_TYPE_REAL_MOTOROLA
elif fmt.kind in "SV":
data_type = v4c.DATA_TYPE_STRING_LATIN_1
elif fmt.kind == "b":
data_type = v4c.DATA_TYPE_UNSIGNED_INTEL
size = 1
else:
message = f"Unknown type: dtype={fmt}, shape={shape}"
logger.exception(message)
raise MdfException(message)
return data_type, size | [
"def",
"fmt_to_datatype_v4",
"(",
"fmt",
",",
"shape",
",",
"array",
"=",
"False",
")",
":",
"size",
"=",
"fmt",
".",
"itemsize",
"*",
"8",
"if",
"not",
"array",
"and",
"shape",
"[",
"1",
":",
"]",
"and",
"fmt",
".",
"itemsize",
"==",
"1",
"and",
... | convert numpy dtype format string to mdf version 4 channel data
type and size
Parameters
----------
fmt : numpy.dtype
numpy data type
shape : tuple
numpy array shape
array : bool
disambiguate between bytearray and channel array
Returns
-------
data_type, size : int, int
integer data type as defined by ASAM MDF and bit size | [
"convert",
"numpy",
"dtype",
"format",
"string",
"to",
"mdf",
"version",
"4",
"channel",
"data",
"type",
"and",
"size"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L503-L555 |
240,661 | danielhrisca/asammdf | asammdf/blocks/utils.py | debug_channel | def debug_channel(mdf, group, channel, dependency, file=None):
""" use this to print debug information in case of errors
Parameters
----------
mdf : MDF
source MDF object
group : dict
group
channel : Channel
channel object
dependency : ChannelDependency
channel dependency object
"""
print("MDF", "=" * 76, file=file)
print("name:", mdf.name, file=file)
print("version:", mdf.version, file=file)
print("read fragment size:", mdf._read_fragment_size, file=file)
print("write fragment size:", mdf._write_fragment_size, file=file)
print()
parents, dtypes = mdf._prepare_record(group)
print("GROUP", "=" * 74, file=file)
print("sorted:", group["sorted"], file=file)
print("data location:", group["data_location"], file=file)
print("data size:", group["data_size"], file=file)
print("data blocks:", group.data_blocks, file=file)
print("dependencies", group["channel_dependencies"], file=file)
print("parents:", parents, file=file)
print("dtypes:", dtypes, file=file)
print(file=file)
cg = group["channel_group"]
print("CHANNEL GROUP", "=" * 66, file=file)
print(cg, file=file)
print(file=file)
print("CHANNEL", "=" * 72, file=file)
print(channel, file=file)
print(file=file)
print("CHANNEL ARRAY", "=" * 66, file=file)
print(dependency, file=file)
print(file=file)
print("MASTER CACHE", "=" * 67, file=file)
print(
[(key, len(val)) for key, val in mdf._master_channel_cache.items()], file=file
) | python | def debug_channel(mdf, group, channel, dependency, file=None):
print("MDF", "=" * 76, file=file)
print("name:", mdf.name, file=file)
print("version:", mdf.version, file=file)
print("read fragment size:", mdf._read_fragment_size, file=file)
print("write fragment size:", mdf._write_fragment_size, file=file)
print()
parents, dtypes = mdf._prepare_record(group)
print("GROUP", "=" * 74, file=file)
print("sorted:", group["sorted"], file=file)
print("data location:", group["data_location"], file=file)
print("data size:", group["data_size"], file=file)
print("data blocks:", group.data_blocks, file=file)
print("dependencies", group["channel_dependencies"], file=file)
print("parents:", parents, file=file)
print("dtypes:", dtypes, file=file)
print(file=file)
cg = group["channel_group"]
print("CHANNEL GROUP", "=" * 66, file=file)
print(cg, file=file)
print(file=file)
print("CHANNEL", "=" * 72, file=file)
print(channel, file=file)
print(file=file)
print("CHANNEL ARRAY", "=" * 66, file=file)
print(dependency, file=file)
print(file=file)
print("MASTER CACHE", "=" * 67, file=file)
print(
[(key, len(val)) for key, val in mdf._master_channel_cache.items()], file=file
) | [
"def",
"debug_channel",
"(",
"mdf",
",",
"group",
",",
"channel",
",",
"dependency",
",",
"file",
"=",
"None",
")",
":",
"print",
"(",
"\"MDF\"",
",",
"\"=\"",
"*",
"76",
",",
"file",
"=",
"file",
")",
"print",
"(",
"\"name:\"",
",",
"mdf",
".",
"n... | use this to print debug information in case of errors
Parameters
----------
mdf : MDF
source MDF object
group : dict
group
channel : Channel
channel object
dependency : ChannelDependency
channel dependency object | [
"use",
"this",
"to",
"print",
"debug",
"information",
"in",
"case",
"of",
"errors"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L594-L643 |
240,662 | danielhrisca/asammdf | asammdf/blocks/utils.py | count_channel_groups | def count_channel_groups(stream, include_channels=False):
""" count all channel groups as fast as possible. This is used to provide
reliable progress information when loading a file using the GUI
Parameters
----------
stream : file handle
opened file handle
include_channels : bool
also count channels
Returns
-------
count : int
channel group count
"""
count = 0
ch_count = 0
stream.seek(64)
blk_id = stream.read(2)
if blk_id == b"HD":
version = 3
else:
blk_id += stream.read(2)
if blk_id == b"##HD":
version = 4
else:
raise MdfException(f'"{stream.name}" is not a valid MDF file')
if version >= 4:
stream.seek(88, 0)
dg_addr = UINT64_u(stream.read(8))[0]
while dg_addr:
stream.seek(dg_addr + 32)
cg_addr = UINT64_u(stream.read(8))[0]
while cg_addr:
count += 1
if include_channels:
stream.seek(cg_addr + 32)
ch_addr = UINT64_u(stream.read(8))[0]
while ch_addr:
ch_count += 1
stream.seek(ch_addr + 24)
ch_addr = UINT64_u(stream.read(8))[0]
stream.seek(cg_addr + 24)
cg_addr = UINT64_u(stream.read(8))[0]
stream.seek(dg_addr + 24)
dg_addr = UINT64_u(stream.read(8))[0]
else:
stream.seek(68, 0)
dg_addr = UINT32_u(stream.read(4))[0]
while dg_addr:
stream.seek(dg_addr + 8)
cg_addr = UINT32_u(stream.read(4))[0]
while cg_addr:
count += 1
if include_channels:
stream.seek(cg_addr + 8)
ch_addr = UINT32_u(stream.read(4))[0]
while ch_addr:
ch_count += 1
stream.seek(ch_addr + 4)
ch_addr = UINT32_u(stream.read(4))[0]
stream.seek(cg_addr + 4)
cg_addr = UINT32_u(stream.read(4))[0]
stream.seek(dg_addr + 4)
dg_addr = UINT32_u(stream.read(4))[0]
return count, ch_count | python | def count_channel_groups(stream, include_channels=False):
count = 0
ch_count = 0
stream.seek(64)
blk_id = stream.read(2)
if blk_id == b"HD":
version = 3
else:
blk_id += stream.read(2)
if blk_id == b"##HD":
version = 4
else:
raise MdfException(f'"{stream.name}" is not a valid MDF file')
if version >= 4:
stream.seek(88, 0)
dg_addr = UINT64_u(stream.read(8))[0]
while dg_addr:
stream.seek(dg_addr + 32)
cg_addr = UINT64_u(stream.read(8))[0]
while cg_addr:
count += 1
if include_channels:
stream.seek(cg_addr + 32)
ch_addr = UINT64_u(stream.read(8))[0]
while ch_addr:
ch_count += 1
stream.seek(ch_addr + 24)
ch_addr = UINT64_u(stream.read(8))[0]
stream.seek(cg_addr + 24)
cg_addr = UINT64_u(stream.read(8))[0]
stream.seek(dg_addr + 24)
dg_addr = UINT64_u(stream.read(8))[0]
else:
stream.seek(68, 0)
dg_addr = UINT32_u(stream.read(4))[0]
while dg_addr:
stream.seek(dg_addr + 8)
cg_addr = UINT32_u(stream.read(4))[0]
while cg_addr:
count += 1
if include_channels:
stream.seek(cg_addr + 8)
ch_addr = UINT32_u(stream.read(4))[0]
while ch_addr:
ch_count += 1
stream.seek(ch_addr + 4)
ch_addr = UINT32_u(stream.read(4))[0]
stream.seek(cg_addr + 4)
cg_addr = UINT32_u(stream.read(4))[0]
stream.seek(dg_addr + 4)
dg_addr = UINT32_u(stream.read(4))[0]
return count, ch_count | [
"def",
"count_channel_groups",
"(",
"stream",
",",
"include_channels",
"=",
"False",
")",
":",
"count",
"=",
"0",
"ch_count",
"=",
"0",
"stream",
".",
"seek",
"(",
"64",
")",
"blk_id",
"=",
"stream",
".",
"read",
"(",
"2",
")",
"if",
"blk_id",
"==",
... | count all channel groups as fast as possible. This is used to provide
reliable progress information when loading a file using the GUI
Parameters
----------
stream : file handle
opened file handle
include_channels : bool
also count channels
Returns
-------
count : int
channel group count | [
"count",
"all",
"channel",
"groups",
"as",
"fast",
"as",
"possible",
".",
"This",
"is",
"used",
"to",
"provide",
"reliable",
"progress",
"information",
"when",
"loading",
"a",
"file",
"using",
"the",
"GUI"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L646-L720 |
240,663 | danielhrisca/asammdf | asammdf/blocks/utils.py | validate_version_argument | def validate_version_argument(version, hint=4):
""" validate the version argument against the supported MDF versions. The
default version used depends on the hint MDF major revision
Parameters
----------
version : str
requested MDF version
hint : int
MDF revision hint
Returns
-------
valid_version : str
valid version
"""
if version not in SUPPORTED_VERSIONS:
if hint == 2:
valid_version = "2.14"
elif hint == 3:
valid_version = "3.30"
else:
valid_version = "4.10"
message = (
'Unknown mdf version "{}".'
" The available versions are {};"
' automatically using version "{}"'
)
message = message.format(version, SUPPORTED_VERSIONS, valid_version)
logger.warning(message)
else:
valid_version = version
return valid_version | python | def validate_version_argument(version, hint=4):
if version not in SUPPORTED_VERSIONS:
if hint == 2:
valid_version = "2.14"
elif hint == 3:
valid_version = "3.30"
else:
valid_version = "4.10"
message = (
'Unknown mdf version "{}".'
" The available versions are {};"
' automatically using version "{}"'
)
message = message.format(version, SUPPORTED_VERSIONS, valid_version)
logger.warning(message)
else:
valid_version = version
return valid_version | [
"def",
"validate_version_argument",
"(",
"version",
",",
"hint",
"=",
"4",
")",
":",
"if",
"version",
"not",
"in",
"SUPPORTED_VERSIONS",
":",
"if",
"hint",
"==",
"2",
":",
"valid_version",
"=",
"\"2.14\"",
"elif",
"hint",
"==",
"3",
":",
"valid_version",
"... | validate the version argument against the supported MDF versions. The
default version used depends on the hint MDF major revision
Parameters
----------
version : str
requested MDF version
hint : int
MDF revision hint
Returns
-------
valid_version : str
valid version | [
"validate",
"the",
"version",
"argument",
"against",
"the",
"supported",
"MDF",
"versions",
".",
"The",
"default",
"version",
"used",
"depends",
"on",
"the",
"hint",
"MDF",
"major",
"revision"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L723-L756 |
240,664 | danielhrisca/asammdf | asammdf/blocks/utils.py | cut_video_stream | def cut_video_stream(stream, start, end, fmt):
""" cut video stream from `start` to `end` time
Parameters
----------
stream : bytes
video file content
start : float
start time
end : float
end time
Returns
-------
result : bytes
content of cut video
"""
with TemporaryDirectory() as tmp:
in_file = Path(tmp) / f"in{fmt}"
out_file = Path(tmp) / f"out{fmt}"
in_file.write_bytes(stream)
try:
ret = subprocess.run(
[
"ffmpeg",
"-ss",
f"{start}",
"-i",
f"{in_file}",
"-to",
f"{end}",
"-c",
"copy",
f"{out_file}",
],
capture_output=True,
)
except FileNotFoundError:
result = stream
else:
if ret.returncode:
result = stream
else:
result = out_file.read_bytes()
return result | python | def cut_video_stream(stream, start, end, fmt):
with TemporaryDirectory() as tmp:
in_file = Path(tmp) / f"in{fmt}"
out_file = Path(tmp) / f"out{fmt}"
in_file.write_bytes(stream)
try:
ret = subprocess.run(
[
"ffmpeg",
"-ss",
f"{start}",
"-i",
f"{in_file}",
"-to",
f"{end}",
"-c",
"copy",
f"{out_file}",
],
capture_output=True,
)
except FileNotFoundError:
result = stream
else:
if ret.returncode:
result = stream
else:
result = out_file.read_bytes()
return result | [
"def",
"cut_video_stream",
"(",
"stream",
",",
"start",
",",
"end",
",",
"fmt",
")",
":",
"with",
"TemporaryDirectory",
"(",
")",
"as",
"tmp",
":",
"in_file",
"=",
"Path",
"(",
"tmp",
")",
"/",
"f\"in{fmt}\"",
"out_file",
"=",
"Path",
"(",
"tmp",
")",
... | cut video stream from `start` to `end` time
Parameters
----------
stream : bytes
video file content
start : float
start time
end : float
end time
Returns
-------
result : bytes
content of cut video | [
"cut",
"video",
"stream",
"from",
"start",
"to",
"end",
"time"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L878-L926 |
240,665 | danielhrisca/asammdf | asammdf/blocks/utils.py | components | def components(channel, channel_name, unique_names, prefix="", master=None):
""" yield pandas Series and unique name based on the ndarray object
Parameters
----------
channel : numpy.ndarray
channel to be used foir Series
channel_name : str
channel name
unique_names : UniqueDB
unique names object
prefix : str
prefix used in case of nested recarrays
Returns
-------
name, series : (str, pandas.Series)
tuple of unqiue name and Series object
"""
names = channel.dtype.names
# channel arrays
if names[0] == channel_name:
name = names[0]
if prefix:
name_ = unique_names.get_unique_name(f"{prefix}.{name}")
else:
name_ = unique_names.get_unique_name(name)
values = channel[name]
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield name_, Series(values, index=master, dtype="O")
for name in names[1:]:
values = channel[name]
axis_name = unique_names.get_unique_name(f"{name_}.{name}")
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield axis_name, Series(values, index=master, dtype="O")
# structure composition
else:
for name in channel.dtype.names:
values = channel[name]
if values.dtype.names:
yield from components(
values, name, unique_names,
prefix=f"{prefix}.{channel_name}" if prefix else f"{channel_name}",
master=master
)
else:
name_ = unique_names.get_unique_name(
f"{prefix}.{channel_name}.{name}" if prefix else f"{channel_name}.{name}"
)
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield name_, Series(values, index=master) | python | def components(channel, channel_name, unique_names, prefix="", master=None):
names = channel.dtype.names
# channel arrays
if names[0] == channel_name:
name = names[0]
if prefix:
name_ = unique_names.get_unique_name(f"{prefix}.{name}")
else:
name_ = unique_names.get_unique_name(name)
values = channel[name]
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield name_, Series(values, index=master, dtype="O")
for name in names[1:]:
values = channel[name]
axis_name = unique_names.get_unique_name(f"{name_}.{name}")
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield axis_name, Series(values, index=master, dtype="O")
# structure composition
else:
for name in channel.dtype.names:
values = channel[name]
if values.dtype.names:
yield from components(
values, name, unique_names,
prefix=f"{prefix}.{channel_name}" if prefix else f"{channel_name}",
master=master
)
else:
name_ = unique_names.get_unique_name(
f"{prefix}.{channel_name}.{name}" if prefix else f"{channel_name}.{name}"
)
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield name_, Series(values, index=master) | [
"def",
"components",
"(",
"channel",
",",
"channel_name",
",",
"unique_names",
",",
"prefix",
"=",
"\"\"",
",",
"master",
"=",
"None",
")",
":",
"names",
"=",
"channel",
".",
"dtype",
".",
"names",
"# channel arrays",
"if",
"names",
"[",
"0",
"]",
"==",
... | yield pandas Series and unique name based on the ndarray object
Parameters
----------
channel : numpy.ndarray
channel to be used foir Series
channel_name : str
channel name
unique_names : UniqueDB
unique names object
prefix : str
prefix used in case of nested recarrays
Returns
-------
name, series : (str, pandas.Series)
tuple of unqiue name and Series object | [
"yield",
"pandas",
"Series",
"and",
"unique",
"name",
"based",
"on",
"the",
"ndarray",
"object"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L1035-L1104 |
240,666 | danielhrisca/asammdf | asammdf/blocks/utils.py | master_using_raster | def master_using_raster(mdf, raster, endpoint=False):
""" get single master based on the raster
Parameters
----------
mdf : asammdf.MDF
measurement object
raster : float
new raster
endpoint=False : bool
include maximum time stamp in the new master
Returns
-------
master : np.array
new master
"""
if not raster:
master = np.array([], dtype='<f8')
else:
t_min = []
t_max = []
for i, group in enumerate(mdf.groups):
cycles_nr = group.channel_group.cycles_nr
if cycles_nr:
master_min = mdf.get_master(
i,
record_offset=0,
record_count=1,
)
if len(master_min):
t_min.append(master_min[0])
mdf._master_channel_cache.clear()
master_max = mdf.get_master(
i,
record_offset=cycles_nr-1,
record_count=1,
)
if len(master_max):
t_max.append(master_max[0])
mdf._master_channel_cache.clear()
if t_min:
t_min = np.amin(t_min)
t_max = np.amax(t_max)
num = float(np.float32((t_max - t_min) / raster))
if int(num) == num:
master = np.linspace(t_min, t_max, int(num) + 1)
else:
master = np.arange(t_min, t_max, raster)
if endpoint:
master = np.concatenate([master, [t_max]])
else:
master = np.array([], dtype='<f8')
return master | python | def master_using_raster(mdf, raster, endpoint=False):
if not raster:
master = np.array([], dtype='<f8')
else:
t_min = []
t_max = []
for i, group in enumerate(mdf.groups):
cycles_nr = group.channel_group.cycles_nr
if cycles_nr:
master_min = mdf.get_master(
i,
record_offset=0,
record_count=1,
)
if len(master_min):
t_min.append(master_min[0])
mdf._master_channel_cache.clear()
master_max = mdf.get_master(
i,
record_offset=cycles_nr-1,
record_count=1,
)
if len(master_max):
t_max.append(master_max[0])
mdf._master_channel_cache.clear()
if t_min:
t_min = np.amin(t_min)
t_max = np.amax(t_max)
num = float(np.float32((t_max - t_min) / raster))
if int(num) == num:
master = np.linspace(t_min, t_max, int(num) + 1)
else:
master = np.arange(t_min, t_max, raster)
if endpoint:
master = np.concatenate([master, [t_max]])
else:
master = np.array([], dtype='<f8')
return master | [
"def",
"master_using_raster",
"(",
"mdf",
",",
"raster",
",",
"endpoint",
"=",
"False",
")",
":",
"if",
"not",
"raster",
":",
"master",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"'<f8'",
")",
"else",
":",
"t_min",
"=",
"[",
"]",
... | get single master based on the raster
Parameters
----------
mdf : asammdf.MDF
measurement object
raster : float
new raster
endpoint=False : bool
include maximum time stamp in the new master
Returns
-------
master : np.array
new master | [
"get",
"single",
"master",
"based",
"on",
"the",
"raster"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L1178-L1237 |
240,667 | danielhrisca/asammdf | asammdf/blocks/utils.py | ChannelsDB.add | def add(self, channel_name, entry):
""" add name to channels database and check if it contains a source
path
Parameters
----------
channel_name : str
name that needs to be added to the database
entry : tuple
(group index, channel index) pair
"""
if channel_name:
if channel_name not in self:
self[channel_name] = [entry]
else:
self[channel_name].append(entry)
if "\\" in channel_name:
channel_name = channel_name.split("\\")[0]
if channel_name not in self:
self[channel_name] = [entry]
else:
self[channel_name].append(entry) | python | def add(self, channel_name, entry):
if channel_name:
if channel_name not in self:
self[channel_name] = [entry]
else:
self[channel_name].append(entry)
if "\\" in channel_name:
channel_name = channel_name.split("\\")[0]
if channel_name not in self:
self[channel_name] = [entry]
else:
self[channel_name].append(entry) | [
"def",
"add",
"(",
"self",
",",
"channel_name",
",",
"entry",
")",
":",
"if",
"channel_name",
":",
"if",
"channel_name",
"not",
"in",
"self",
":",
"self",
"[",
"channel_name",
"]",
"=",
"[",
"entry",
"]",
"else",
":",
"self",
"[",
"channel_name",
"]",
... | add name to channels database and check if it contains a source
path
Parameters
----------
channel_name : str
name that needs to be added to the database
entry : tuple
(group index, channel index) pair | [
"add",
"name",
"to",
"channels",
"database",
"and",
"check",
"if",
"it",
"contains",
"a",
"source",
"path"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L768-L792 |
240,668 | danielhrisca/asammdf | asammdf/blocks/utils.py | UniqueDB.get_unique_name | def get_unique_name(self, name):
""" returns an available unique name
Parameters
----------
name : str
name to be made unique
Returns
-------
unique_name : str
new unique name
"""
if name not in self._db:
self._db[name] = 0
return name
else:
index = self._db[name]
self._db[name] = index + 1
return f"{name}_{index}" | python | def get_unique_name(self, name):
if name not in self._db:
self._db[name] = 0
return name
else:
index = self._db[name]
self._db[name] = index + 1
return f"{name}_{index}" | [
"def",
"get_unique_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_db",
":",
"self",
".",
"_db",
"[",
"name",
"]",
"=",
"0",
"return",
"name",
"else",
":",
"index",
"=",
"self",
".",
"_db",
"[",
"name",
"]",
... | returns an available unique name
Parameters
----------
name : str
name to be made unique
Returns
-------
unique_name : str
new unique name | [
"returns",
"an",
"available",
"unique",
"name"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L854-L875 |
240,669 | danielhrisca/asammdf | asammdf/mdf.py | MDF.iter_get | def iter_get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
raw=False,
):
""" iterator over a channel
This is usefull in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without appling the conversion rule; default
`False`
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
data = self._load_data(grp)
for fragment in data:
yield self.get(
group=gp_nr,
index=ch_nr,
raster=raster,
samples_only=samples_only,
data=fragment,
raw=raw,
) | python | def iter_get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
raw=False,
):
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
data = self._load_data(grp)
for fragment in data:
yield self.get(
group=gp_nr,
index=ch_nr,
raster=raster,
samples_only=samples_only,
data=fragment,
raw=raw,
) | [
"def",
"iter_get",
"(",
"self",
",",
"name",
"=",
"None",
",",
"group",
"=",
"None",
",",
"index",
"=",
"None",
",",
"raster",
"=",
"None",
",",
"samples_only",
"=",
"False",
",",
"raw",
"=",
"False",
",",
")",
":",
"gp_nr",
",",
"ch_nr",
"=",
"s... | iterator over a channel
This is usefull in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without appling the conversion rule; default
`False` | [
"iterator",
"over",
"a",
"channel"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/mdf.py#L1770-L1818 |
240,670 | danielhrisca/asammdf | asammdf/mdf.py | MDF.whereis | def whereis(self, channel):
""" get ocurrences of channel name in the file
Parameters
----------
channel : str
channel name string
Returns
-------
ocurrences : tuple
Examples
--------
>>> mdf = MDF(file_name)
>>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file
((1, 2), (2, 4))
>>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file
()
"""
if channel in self:
return tuple(self.channels_db[channel])
else:
return tuple() | python | def whereis(self, channel):
if channel in self:
return tuple(self.channels_db[channel])
else:
return tuple() | [
"def",
"whereis",
"(",
"self",
",",
"channel",
")",
":",
"if",
"channel",
"in",
"self",
":",
"return",
"tuple",
"(",
"self",
".",
"channels_db",
"[",
"channel",
"]",
")",
"else",
":",
"return",
"tuple",
"(",
")"
] | get ocurrences of channel name in the file
Parameters
----------
channel : str
channel name string
Returns
-------
ocurrences : tuple
Examples
--------
>>> mdf = MDF(file_name)
>>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file
((1, 2), (2, 4))
>>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file
() | [
"get",
"ocurrences",
"of",
"channel",
"name",
"in",
"the",
"file"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/mdf.py#L2966-L2991 |
240,671 | danielhrisca/asammdf | asammdf/blocks/mdf_v4.py | MDF4.get_invalidation_bits | def get_invalidation_bits(self, group_index, channel, fragment):
""" get invalidation indexes for the channel
Parameters
----------
group_index : int
group index
channel : Channel
channel object
fragment : (bytes, int)
(fragment bytes, fragment offset)
Returns
-------
invalidation_bits : iterable
iterable of valid channel indexes; if all are valid `None` is
returned
"""
group = self.groups[group_index]
dtypes = group.types
data_bytes, offset, _count = fragment
try:
invalidation = self._invalidation_cache[(group_index, offset, _count)]
except KeyError:
record = group.record
if record is None:
dtypes = group.types
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
invalidation = record["invalidation_bytes"].copy()
self._invalidation_cache[(group_index, offset, _count)] = invalidation
ch_invalidation_pos = channel.pos_invalidation_bit
pos_byte, pos_offset = divmod(ch_invalidation_pos, 8)
mask = 1 << pos_offset
invalidation_bits = invalidation[:, pos_byte] & mask
invalidation_bits = invalidation_bits.astype(bool)
return invalidation_bits | python | def get_invalidation_bits(self, group_index, channel, fragment):
group = self.groups[group_index]
dtypes = group.types
data_bytes, offset, _count = fragment
try:
invalidation = self._invalidation_cache[(group_index, offset, _count)]
except KeyError:
record = group.record
if record is None:
dtypes = group.types
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
invalidation = record["invalidation_bytes"].copy()
self._invalidation_cache[(group_index, offset, _count)] = invalidation
ch_invalidation_pos = channel.pos_invalidation_bit
pos_byte, pos_offset = divmod(ch_invalidation_pos, 8)
mask = 1 << pos_offset
invalidation_bits = invalidation[:, pos_byte] & mask
invalidation_bits = invalidation_bits.astype(bool)
return invalidation_bits | [
"def",
"get_invalidation_bits",
"(",
"self",
",",
"group_index",
",",
"channel",
",",
"fragment",
")",
":",
"group",
"=",
"self",
".",
"groups",
"[",
"group_index",
"]",
"dtypes",
"=",
"group",
".",
"types",
"data_bytes",
",",
"offset",
",",
"_count",
"=",... | get invalidation indexes for the channel
Parameters
----------
group_index : int
group index
channel : Channel
channel object
fragment : (bytes, int)
(fragment bytes, fragment offset)
Returns
-------
invalidation_bits : iterable
iterable of valid channel indexes; if all are valid `None` is
returned | [
"get",
"invalidation",
"indexes",
"for",
"the",
"channel"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L2182-L2227 |
240,672 | danielhrisca/asammdf | asammdf/blocks/mdf_v4.py | MDF4.configure | def configure(
self,
*,
read_fragment_size=None,
write_fragment_size=None,
use_display_names=None,
single_bit_uint_as_bool=None,
integer_interpolation=None,
):
""" configure MDF parameters
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels are np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation
"""
if read_fragment_size is not None:
self._read_fragment_size = int(read_fragment_size)
if write_fragment_size:
self._write_fragment_size = min(int(write_fragment_size), 4 * 2 ** 20)
if use_display_names is not None:
self._use_display_names = bool(use_display_names)
if single_bit_uint_as_bool is not None:
self._single_bit_uint_as_bool = bool(single_bit_uint_as_bool)
if integer_interpolation in (0, 1):
self._integer_interpolation = int(integer_interpolation) | python | def configure(
self,
*,
read_fragment_size=None,
write_fragment_size=None,
use_display_names=None,
single_bit_uint_as_bool=None,
integer_interpolation=None,
):
if read_fragment_size is not None:
self._read_fragment_size = int(read_fragment_size)
if write_fragment_size:
self._write_fragment_size = min(int(write_fragment_size), 4 * 2 ** 20)
if use_display_names is not None:
self._use_display_names = bool(use_display_names)
if single_bit_uint_as_bool is not None:
self._single_bit_uint_as_bool = bool(single_bit_uint_as_bool)
if integer_interpolation in (0, 1):
self._integer_interpolation = int(integer_interpolation) | [
"def",
"configure",
"(",
"self",
",",
"*",
",",
"read_fragment_size",
"=",
"None",
",",
"write_fragment_size",
"=",
"None",
",",
"use_display_names",
"=",
"None",
",",
"single_bit_uint_as_bool",
"=",
"None",
",",
"integer_interpolation",
"=",
"None",
",",
")",
... | configure MDF parameters
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels are np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation | [
"configure",
"MDF",
"parameters"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L2229-L2276 |
240,673 | danielhrisca/asammdf | asammdf/blocks/mdf_v4.py | MDF4.extract_attachment | def extract_attachment(self, address=None, index=None):
""" extract attachment data by original address or by index. If it is an embedded attachment,
then this method creates the new file according to the attachment file
name information
Parameters
----------
address : int
attachment index; default *None*
index : int
attachment index; default *None*
Returns
-------
data : (bytes, pathlib.Path)
tuple of attachment data and path
"""
if address is None and index is None:
return b"", Path("")
if address is not None:
index = self._attachments_map[address]
attachment = self.attachments[index]
current_path = Path.cwd()
file_path = Path(attachment.file_name or "embedded")
try:
os.chdir(self.name.resolve().parent)
flags = attachment.flags
# for embedded attachments extrat data and create new files
if flags & v4c.FLAG_AT_EMBEDDED:
data = attachment.extract()
return data, file_path
else:
# for external attachments read the file and return the content
if flags & v4c.FLAG_AT_MD5_VALID:
data = open(file_path, "rb").read()
file_path = Path(f"FROM_{file_path}")
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
if attachment["md5_sum"] == md5_sum:
if attachment.mime.startswith("text"):
with open(file_path, "r") as f:
data = f.read()
return data, file_path
else:
message = (
f'ATBLOCK md5sum="{attachment["md5_sum"]}" '
f"and external attachment data ({file_path}) "
f'md5sum="{md5_sum}"'
)
logger.warning(message)
else:
if attachment.mime.startswith("text"):
mode = "r"
else:
mode = "rb"
with open(file_path, mode) as f:
file_path = Path(f"FROM_{file_path}")
data = f.read()
return data, file_path
except Exception as err:
os.chdir(current_path)
message = "Exception during attachment extraction: " + repr(err)
logger.warning(message)
return b"", file_path | python | def extract_attachment(self, address=None, index=None):
if address is None and index is None:
return b"", Path("")
if address is not None:
index = self._attachments_map[address]
attachment = self.attachments[index]
current_path = Path.cwd()
file_path = Path(attachment.file_name or "embedded")
try:
os.chdir(self.name.resolve().parent)
flags = attachment.flags
# for embedded attachments extrat data and create new files
if flags & v4c.FLAG_AT_EMBEDDED:
data = attachment.extract()
return data, file_path
else:
# for external attachments read the file and return the content
if flags & v4c.FLAG_AT_MD5_VALID:
data = open(file_path, "rb").read()
file_path = Path(f"FROM_{file_path}")
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
if attachment["md5_sum"] == md5_sum:
if attachment.mime.startswith("text"):
with open(file_path, "r") as f:
data = f.read()
return data, file_path
else:
message = (
f'ATBLOCK md5sum="{attachment["md5_sum"]}" '
f"and external attachment data ({file_path}) "
f'md5sum="{md5_sum}"'
)
logger.warning(message)
else:
if attachment.mime.startswith("text"):
mode = "r"
else:
mode = "rb"
with open(file_path, mode) as f:
file_path = Path(f"FROM_{file_path}")
data = f.read()
return data, file_path
except Exception as err:
os.chdir(current_path)
message = "Exception during attachment extraction: " + repr(err)
logger.warning(message)
return b"", file_path | [
"def",
"extract_attachment",
"(",
"self",
",",
"address",
"=",
"None",
",",
"index",
"=",
"None",
")",
":",
"if",
"address",
"is",
"None",
"and",
"index",
"is",
"None",
":",
"return",
"b\"\"",
",",
"Path",
"(",
"\"\"",
")",
"if",
"address",
"is",
"no... | extract attachment data by original address or by index. If it is an embedded attachment,
then this method creates the new file according to the attachment file
name information
Parameters
----------
address : int
attachment index; default *None*
index : int
attachment index; default *None*
Returns
-------
data : (bytes, pathlib.Path)
tuple of attachment data and path | [
"extract",
"attachment",
"data",
"by",
"original",
"address",
"or",
"by",
"index",
".",
"If",
"it",
"is",
"an",
"embedded",
"attachment",
"then",
"this",
"method",
"creates",
"the",
"new",
"file",
"according",
"to",
"the",
"attachment",
"file",
"name",
"info... | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L3567-L3637 |
240,674 | danielhrisca/asammdf | asammdf/blocks/mdf_v4.py | MDF4.get_channel_name | def get_channel_name(self, group, index):
"""Gets channel name.
Parameters
----------
group : int
0-based group index
index : int
0-based channel index
Returns
-------
name : str
found channel name
"""
gp_nr, ch_nr = self._validate_channel_selection(None, group, index)
return self.groups[gp_nr].channels[ch_nr].name | python | def get_channel_name(self, group, index):
gp_nr, ch_nr = self._validate_channel_selection(None, group, index)
return self.groups[gp_nr].channels[ch_nr].name | [
"def",
"get_channel_name",
"(",
"self",
",",
"group",
",",
"index",
")",
":",
"gp_nr",
",",
"ch_nr",
"=",
"self",
".",
"_validate_channel_selection",
"(",
"None",
",",
"group",
",",
"index",
")",
"return",
"self",
".",
"groups",
"[",
"gp_nr",
"]",
".",
... | Gets channel name.
Parameters
----------
group : int
0-based group index
index : int
0-based channel index
Returns
-------
name : str
found channel name | [
"Gets",
"channel",
"name",
"."
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L5803-L5821 |
240,675 | danielhrisca/asammdf | asammdf/blocks/mdf_v4.py | MDF4.get_channel_unit | def get_channel_unit(self, name=None, group=None, index=None):
"""Gets channel unit.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
unit : str
found channel unit
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
channel = grp.channels[ch_nr]
conversion = channel.conversion
unit = conversion and conversion.unit or channel.unit or ""
return unit | python | def get_channel_unit(self, name=None, group=None, index=None):
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
channel = grp.channels[ch_nr]
conversion = channel.conversion
unit = conversion and conversion.unit or channel.unit or ""
return unit | [
"def",
"get_channel_unit",
"(",
"self",
",",
"name",
"=",
"None",
",",
"group",
"=",
"None",
",",
"index",
"=",
"None",
")",
":",
"gp_nr",
",",
"ch_nr",
"=",
"self",
".",
"_validate_channel_selection",
"(",
"name",
",",
"group",
",",
"index",
")",
"grp... | Gets channel unit.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
unit : str
found channel unit | [
"Gets",
"channel",
"unit",
"."
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L5835-L5881 |
240,676 | danielhrisca/asammdf | asammdf/blocks/mdf_v4.py | MDF4.get_channel_comment | def get_channel_comment(self, name=None, group=None, index=None):
"""Gets channel comment.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
comment : str
found channel comment
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
channel = grp.channels[ch_nr]
return extract_cncomment_xml(channel.comment) | python | def get_channel_comment(self, name=None, group=None, index=None):
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
channel = grp.channels[ch_nr]
return extract_cncomment_xml(channel.comment) | [
"def",
"get_channel_comment",
"(",
"self",
",",
"name",
"=",
"None",
",",
"group",
"=",
"None",
",",
"index",
"=",
"None",
")",
":",
"gp_nr",
",",
"ch_nr",
"=",
"self",
".",
"_validate_channel_selection",
"(",
"name",
",",
"group",
",",
"index",
")",
"... | Gets channel comment.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
comment : str
found channel comment | [
"Gets",
"channel",
"comment",
"."
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L5883-L5925 |
240,677 | danielhrisca/asammdf | benchmarks/bench.py | _cmd_line_parser | def _cmd_line_parser():
'''
return a command line parser. It is used when generating the documentation
'''
parser = argparse.ArgumentParser()
parser.add_argument('--path',
help=('path to test files, '
'if not provided the script folder is used'))
parser.add_argument('--text_output',
action='store_true',
help='option to save the results to text file')
parser.add_argument('--format',
default='rst',
nargs='?',
choices=['rst', 'md'],
help='text formatting')
return parser | python | def _cmd_line_parser():
'''
return a command line parser. It is used when generating the documentation
'''
parser = argparse.ArgumentParser()
parser.add_argument('--path',
help=('path to test files, '
'if not provided the script folder is used'))
parser.add_argument('--text_output',
action='store_true',
help='option to save the results to text file')
parser.add_argument('--format',
default='rst',
nargs='?',
choices=['rst', 'md'],
help='text formatting')
return parser | [
"def",
"_cmd_line_parser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--path'",
",",
"help",
"=",
"(",
"'path to test files, '",
"'if not provided the script folder is used'",
")",
")",
"parser",
... | return a command line parser. It is used when generating the documentation | [
"return",
"a",
"command",
"line",
"parser",
".",
"It",
"is",
"used",
"when",
"generating",
"the",
"documentation"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/benchmarks/bench.py#L1100-L1118 |
240,678 | danielhrisca/asammdf | benchmarks/bench.py | MyList.append | def append(self, item):
""" append item and print it to stdout """
print(item)
super(MyList, self).append(item) | python | def append(self, item):
print(item)
super(MyList, self).append(item) | [
"def",
"append",
"(",
"self",
",",
"item",
")",
":",
"print",
"(",
"item",
")",
"super",
"(",
"MyList",
",",
"self",
")",
".",
"append",
"(",
"item",
")"
] | append item and print it to stdout | [
"append",
"item",
"and",
"print",
"it",
"to",
"stdout"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/benchmarks/bench.py#L42-L45 |
240,679 | danielhrisca/asammdf | benchmarks/bench.py | MyList.extend | def extend(self, items):
""" extend items and print them to stdout
using the new line separator
"""
print('\n'.join(items))
super(MyList, self).extend(items) | python | def extend(self, items):
print('\n'.join(items))
super(MyList, self).extend(items) | [
"def",
"extend",
"(",
"self",
",",
"items",
")",
":",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"items",
")",
")",
"super",
"(",
"MyList",
",",
"self",
")",
".",
"extend",
"(",
"items",
")"
] | extend items and print them to stdout
using the new line separator | [
"extend",
"items",
"and",
"print",
"them",
"to",
"stdout",
"using",
"the",
"new",
"line",
"separator"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/benchmarks/bench.py#L47-L52 |
240,680 | danielhrisca/asammdf | asammdf/signal.py | Signal.extend | def extend(self, other):
""" extend signal with samples from another signal
Parameters
----------
other : Signal
Returns
-------
signal : Signal
new extended *Signal*
"""
if len(self.timestamps):
last_stamp = self.timestamps[-1]
else:
last_stamp = 0
if len(other):
other_first_sample = other.timestamps[0]
if last_stamp >= other_first_sample:
timestamps = other.timestamps + last_stamp
else:
timestamps = other.timestamps
if self.invalidation_bits is None and other.invalidation_bits is None:
invalidation_bits = None
elif self.invalidation_bits is None and other.invalidation_bits is not None:
invalidation_bits = np.concatenate(
(np.zeros(len(self), dtype=bool), other.invalidation_bits)
)
elif self.invalidation_bits is not None and other.invalidation_bits is None:
invalidation_bits = np.concatenate(
(self.invalidation_bits, np.zeros(len(other), dtype=bool))
)
else:
invalidation_bits = np.append(
self.invalidation_bits, other.invalidation_bits
)
result = Signal(
np.append(self.samples, other.samples, axis=0),
np.append(self.timestamps, timestamps),
self.unit,
self.name,
self.conversion,
self.comment,
self.raw,
self.master_metadata,
self.display_name,
self.attachment,
self.source,
self.bit_count,
self.stream_sync,
invalidation_bits=invalidation_bits,
encoding=self.encoding,
)
else:
result = self
return result | python | def extend(self, other):
if len(self.timestamps):
last_stamp = self.timestamps[-1]
else:
last_stamp = 0
if len(other):
other_first_sample = other.timestamps[0]
if last_stamp >= other_first_sample:
timestamps = other.timestamps + last_stamp
else:
timestamps = other.timestamps
if self.invalidation_bits is None and other.invalidation_bits is None:
invalidation_bits = None
elif self.invalidation_bits is None and other.invalidation_bits is not None:
invalidation_bits = np.concatenate(
(np.zeros(len(self), dtype=bool), other.invalidation_bits)
)
elif self.invalidation_bits is not None and other.invalidation_bits is None:
invalidation_bits = np.concatenate(
(self.invalidation_bits, np.zeros(len(other), dtype=bool))
)
else:
invalidation_bits = np.append(
self.invalidation_bits, other.invalidation_bits
)
result = Signal(
np.append(self.samples, other.samples, axis=0),
np.append(self.timestamps, timestamps),
self.unit,
self.name,
self.conversion,
self.comment,
self.raw,
self.master_metadata,
self.display_name,
self.attachment,
self.source,
self.bit_count,
self.stream_sync,
invalidation_bits=invalidation_bits,
encoding=self.encoding,
)
else:
result = self
return result | [
"def",
"extend",
"(",
"self",
",",
"other",
")",
":",
"if",
"len",
"(",
"self",
".",
"timestamps",
")",
":",
"last_stamp",
"=",
"self",
".",
"timestamps",
"[",
"-",
"1",
"]",
"else",
":",
"last_stamp",
"=",
"0",
"if",
"len",
"(",
"other",
")",
":... | extend signal with samples from another signal
Parameters
----------
other : Signal
Returns
-------
signal : Signal
new extended *Signal* | [
"extend",
"signal",
"with",
"samples",
"from",
"another",
"signal"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/signal.py#L670-L729 |
240,681 | danielhrisca/asammdf | asammdf/signal.py | Signal.physical | def physical(self):
"""
get the physical samples values
Returns
-------
phys : Signal
new *Signal* with physical values
"""
if not self.raw or self.conversion is None:
samples = self.samples.copy()
else:
samples = self.conversion.convert(self.samples)
return Signal(
samples,
self.timestamps.copy(),
unit=self.unit,
name=self.name,
conversion=self.conversion,
raw=False,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=self.invalidation_bits,
source=self.source,
encoding=self.encoding,
) | python | def physical(self):
if not self.raw or self.conversion is None:
samples = self.samples.copy()
else:
samples = self.conversion.convert(self.samples)
return Signal(
samples,
self.timestamps.copy(),
unit=self.unit,
name=self.name,
conversion=self.conversion,
raw=False,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=self.invalidation_bits,
source=self.source,
encoding=self.encoding,
) | [
"def",
"physical",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"raw",
"or",
"self",
".",
"conversion",
"is",
"None",
":",
"samples",
"=",
"self",
".",
"samples",
".",
"copy",
"(",
")",
"else",
":",
"samples",
"=",
"self",
".",
"conversion",
".... | get the physical samples values
Returns
-------
phys : Signal
new *Signal* with physical values | [
"get",
"the",
"physical",
"samples",
"values"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/signal.py#L1094-L1124 |
240,682 | danielhrisca/asammdf | asammdf/blocks/v4_blocks.py | AttachmentBlock.extract | def extract(self):
"""extract attachment data
Returns
-------
data : bytes
"""
if self.flags & v4c.FLAG_AT_EMBEDDED:
if self.flags & v4c.FLAG_AT_COMPRESSED_EMBEDDED:
data = decompress(self.embedded_data)
else:
data = self.embedded_data
if self.flags & v4c.FLAG_AT_MD5_VALID:
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
if self.md5_sum == md5_sum:
return data
else:
message = f"ATBLOCK md5sum={self.md5_sum} and embedded data md5sum={md5_sum}"
logger.warning(message)
else:
return data
else:
logger.warning("external attachments not supported") | python | def extract(self):
if self.flags & v4c.FLAG_AT_EMBEDDED:
if self.flags & v4c.FLAG_AT_COMPRESSED_EMBEDDED:
data = decompress(self.embedded_data)
else:
data = self.embedded_data
if self.flags & v4c.FLAG_AT_MD5_VALID:
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
if self.md5_sum == md5_sum:
return data
else:
message = f"ATBLOCK md5sum={self.md5_sum} and embedded data md5sum={md5_sum}"
logger.warning(message)
else:
return data
else:
logger.warning("external attachments not supported") | [
"def",
"extract",
"(",
"self",
")",
":",
"if",
"self",
".",
"flags",
"&",
"v4c",
".",
"FLAG_AT_EMBEDDED",
":",
"if",
"self",
".",
"flags",
"&",
"v4c",
".",
"FLAG_AT_COMPRESSED_EMBEDDED",
":",
"data",
"=",
"decompress",
"(",
"self",
".",
"embedded_data",
... | extract attachment data
Returns
-------
data : bytes | [
"extract",
"attachment",
"data"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/v4_blocks.py#L236-L260 |
240,683 | danielhrisca/asammdf | asammdf/blocks/v4_blocks.py | HeaderBlock.start_time | def start_time(self):
""" getter and setter the measurement start timestamp
Returns
-------
timestamp : datetime.datetime
start timestamp
"""
timestamp = self.abs_time / 10 ** 9
if self.time_flags & v4c.FLAG_HD_LOCAL_TIME:
timestamp = datetime.fromtimestamp(timestamp)
else:
timestamp = datetime.fromtimestamp(timestamp, timezone.utc)
return timestamp | python | def start_time(self):
timestamp = self.abs_time / 10 ** 9
if self.time_flags & v4c.FLAG_HD_LOCAL_TIME:
timestamp = datetime.fromtimestamp(timestamp)
else:
timestamp = datetime.fromtimestamp(timestamp, timezone.utc)
return timestamp | [
"def",
"start_time",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"abs_time",
"/",
"10",
"**",
"9",
"if",
"self",
".",
"time_flags",
"&",
"v4c",
".",
"FLAG_HD_LOCAL_TIME",
":",
"timestamp",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
... | getter and setter the measurement start timestamp
Returns
-------
timestamp : datetime.datetime
start timestamp | [
"getter",
"and",
"setter",
"the",
"measurement",
"start",
"timestamp"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/v4_blocks.py#L4366-L4382 |
240,684 | danielhrisca/asammdf | asammdf/blocks/mdf_v3.py | MDF3._prepare_record | def _prepare_record(self, group):
""" compute record dtype and parents dict for this group
Parameters
----------
group : dict
MDF group dict
Returns
-------
parents, dtypes : dict, numpy.dtype
mapping of channels to records fields, records fiels dtype
"""
parents, dtypes = group.parents, group.types
if parents is None:
if group.data_location == v23c.LOCATION_ORIGINAL_FILE:
stream = self._file
else:
stream = self._tempfile
grp = group
record_size = grp.channel_group.samples_byte_nr << 3
next_byte_aligned_position = 0
types = []
current_parent = ""
parent_start_offset = 0
parents = {}
group_channels = UniqueDB()
# the channels are first sorted ascending (see __lt__ method of Channel
# class): a channel with lower start offset is smaller, when two
# channels havethe same start offset the one with higer bit size is
# considered smaller. The reason is that when the numpy record is built
# and there are overlapping channels, the parent fields mustbe bigger
# (bit size) than the embedded channels. For each channel the parent
# dict will have a (parent name, bit offset) pair: the channel value is
# computed using the values from the parent field, and the bit offset,
# which is the channel's bit offset within the parent bytes.
# This means all parents will have themselves as parent, and bit offset
# of 0. Gaps in the records are also considered. Non standard integers
# size is adjusted to the first higher standard integer size (eq. uint
# of 28bits will be adjusted to 32bits)
sortedchannels = sorted(enumerate(grp.channels), key=lambda i: i[1])
for original_index, new_ch in sortedchannels:
# skip channels with channel dependencies from the numpy record
if new_ch.component_addr:
continue
start_offset = new_ch.start_offset
try:
additional_byte_offset = new_ch.additional_byte_offset
start_offset += 8 * additional_byte_offset
except AttributeError:
pass
bit_offset = start_offset % 8
data_type = new_ch.data_type
bit_count = new_ch.bit_count
name = new_ch.name
# handle multiple occurance of same channel name
name = group_channels.get_unique_name(name)
if start_offset >= next_byte_aligned_position:
parent_start_offset = (start_offset // 8) * 8
# check if there are byte gaps in the record
gap = (parent_start_offset - next_byte_aligned_position) // 8
if gap:
types.append(("", f"V{gap}"))
# adjust size to 1, 2, 4 or 8 bytes for nonstandard integers
size = bit_offset + bit_count
if data_type == v23c.DATA_TYPE_STRING:
next_byte_aligned_position = parent_start_offset + size
if next_byte_aligned_position <= record_size:
dtype_pair = (name, get_fmt_v3(data_type, size))
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
elif data_type == v23c.DATA_TYPE_BYTEARRAY:
next_byte_aligned_position = parent_start_offset + size
if next_byte_aligned_position <= record_size:
dtype_pair = (name, get_fmt_v3(data_type, size))
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
else:
if size > 32:
next_byte_aligned_position = parent_start_offset + 64
elif size > 16:
next_byte_aligned_position = parent_start_offset + 32
elif size > 8:
next_byte_aligned_position = parent_start_offset + 16
else:
next_byte_aligned_position = parent_start_offset + 8
if next_byte_aligned_position <= record_size:
dtype_pair = (name, get_fmt_v3(data_type, size))
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
current_parent = name
else:
max_overlapping = next_byte_aligned_position - start_offset
if max_overlapping >= bit_count:
parents[original_index] = (
current_parent,
start_offset - parent_start_offset,
)
if next_byte_aligned_position > record_size:
break
gap = (record_size - next_byte_aligned_position) // 8
if gap:
dtype_pair = ("", f"V{gap}")
types.append(dtype_pair)
dtypes = dtype(types)
group.parents, group.types = parents, dtypes
return parents, dtypes | python | def _prepare_record(self, group):
parents, dtypes = group.parents, group.types
if parents is None:
if group.data_location == v23c.LOCATION_ORIGINAL_FILE:
stream = self._file
else:
stream = self._tempfile
grp = group
record_size = grp.channel_group.samples_byte_nr << 3
next_byte_aligned_position = 0
types = []
current_parent = ""
parent_start_offset = 0
parents = {}
group_channels = UniqueDB()
# the channels are first sorted ascending (see __lt__ method of Channel
# class): a channel with lower start offset is smaller, when two
# channels havethe same start offset the one with higer bit size is
# considered smaller. The reason is that when the numpy record is built
# and there are overlapping channels, the parent fields mustbe bigger
# (bit size) than the embedded channels. For each channel the parent
# dict will have a (parent name, bit offset) pair: the channel value is
# computed using the values from the parent field, and the bit offset,
# which is the channel's bit offset within the parent bytes.
# This means all parents will have themselves as parent, and bit offset
# of 0. Gaps in the records are also considered. Non standard integers
# size is adjusted to the first higher standard integer size (eq. uint
# of 28bits will be adjusted to 32bits)
sortedchannels = sorted(enumerate(grp.channels), key=lambda i: i[1])
for original_index, new_ch in sortedchannels:
# skip channels with channel dependencies from the numpy record
if new_ch.component_addr:
continue
start_offset = new_ch.start_offset
try:
additional_byte_offset = new_ch.additional_byte_offset
start_offset += 8 * additional_byte_offset
except AttributeError:
pass
bit_offset = start_offset % 8
data_type = new_ch.data_type
bit_count = new_ch.bit_count
name = new_ch.name
# handle multiple occurance of same channel name
name = group_channels.get_unique_name(name)
if start_offset >= next_byte_aligned_position:
parent_start_offset = (start_offset // 8) * 8
# check if there are byte gaps in the record
gap = (parent_start_offset - next_byte_aligned_position) // 8
if gap:
types.append(("", f"V{gap}"))
# adjust size to 1, 2, 4 or 8 bytes for nonstandard integers
size = bit_offset + bit_count
if data_type == v23c.DATA_TYPE_STRING:
next_byte_aligned_position = parent_start_offset + size
if next_byte_aligned_position <= record_size:
dtype_pair = (name, get_fmt_v3(data_type, size))
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
elif data_type == v23c.DATA_TYPE_BYTEARRAY:
next_byte_aligned_position = parent_start_offset + size
if next_byte_aligned_position <= record_size:
dtype_pair = (name, get_fmt_v3(data_type, size))
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
else:
if size > 32:
next_byte_aligned_position = parent_start_offset + 64
elif size > 16:
next_byte_aligned_position = parent_start_offset + 32
elif size > 8:
next_byte_aligned_position = parent_start_offset + 16
else:
next_byte_aligned_position = parent_start_offset + 8
if next_byte_aligned_position <= record_size:
dtype_pair = (name, get_fmt_v3(data_type, size))
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
current_parent = name
else:
max_overlapping = next_byte_aligned_position - start_offset
if max_overlapping >= bit_count:
parents[original_index] = (
current_parent,
start_offset - parent_start_offset,
)
if next_byte_aligned_position > record_size:
break
gap = (record_size - next_byte_aligned_position) // 8
if gap:
dtype_pair = ("", f"V{gap}")
types.append(dtype_pair)
dtypes = dtype(types)
group.parents, group.types = parents, dtypes
return parents, dtypes | [
"def",
"_prepare_record",
"(",
"self",
",",
"group",
")",
":",
"parents",
",",
"dtypes",
"=",
"group",
".",
"parents",
",",
"group",
".",
"types",
"if",
"parents",
"is",
"None",
":",
"if",
"group",
".",
"data_location",
"==",
"v23c",
".",
"LOCATION_ORIGI... | compute record dtype and parents dict for this group
Parameters
----------
group : dict
MDF group dict
Returns
-------
parents, dtypes : dict, numpy.dtype
mapping of channels to records fields, records fiels dtype | [
"compute",
"record",
"dtype",
"and",
"parents",
"dict",
"for",
"this",
"group"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v3.py#L346-L475 |
240,685 | danielhrisca/asammdf | asammdf/blocks/mdf_v3.py | MDF3.close | def close(self):
""" if the MDF was created with memory='minimum' and new
channels have been appended, then this must be called just before the
object is not used anymore to clean-up the temporary file
"""
if self._tempfile is not None:
self._tempfile.close()
if self._file is not None and not self._from_filelike:
self._file.close() | python | def close(self):
if self._tempfile is not None:
self._tempfile.close()
if self._file is not None and not self._from_filelike:
self._file.close() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tempfile",
"is",
"not",
"None",
":",
"self",
".",
"_tempfile",
".",
"close",
"(",
")",
"if",
"self",
".",
"_file",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"_from_filelike",
":",
"s... | if the MDF was created with memory='minimum' and new
channels have been appended, then this must be called just before the
object is not used anymore to clean-up the temporary file | [
"if",
"the",
"MDF",
"was",
"created",
"with",
"memory",
"=",
"minimum",
"and",
"new",
"channels",
"have",
"been",
"appended",
"then",
"this",
"must",
"be",
"called",
"just",
"before",
"the",
"object",
"is",
"not",
"used",
"anymore",
"to",
"clean",
"-",
"... | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v3.py#L2114-L2123 |
240,686 | danielhrisca/asammdf | asammdf/blocks/mdf_v3.py | MDF3.iter_get_triggers | def iter_get_triggers(self):
""" generator that yields triggers
Returns
-------
trigger_info : dict
trigger information with the following keys:
* comment : trigger comment
* time : trigger time
* pre_time : trigger pre time
* post_time : trigger post time
* index : trigger index
* group : data group index of trigger
"""
for i, gp in enumerate(self.groups):
trigger = gp.trigger
if trigger:
for j in range(trigger["trigger_events_nr"]):
trigger_info = {
"comment": trigger.comment,
"index": j,
"group": i,
"time": trigger[f"trigger_{j}_time"],
"pre_time": trigger[f"trigger_{j}_pretime"],
"post_time": trigger[f"trigger_{j}_posttime"],
}
yield trigger_info | python | def iter_get_triggers(self):
for i, gp in enumerate(self.groups):
trigger = gp.trigger
if trigger:
for j in range(trigger["trigger_events_nr"]):
trigger_info = {
"comment": trigger.comment,
"index": j,
"group": i,
"time": trigger[f"trigger_{j}_time"],
"pre_time": trigger[f"trigger_{j}_pretime"],
"post_time": trigger[f"trigger_{j}_posttime"],
}
yield trigger_info | [
"def",
"iter_get_triggers",
"(",
"self",
")",
":",
"for",
"i",
",",
"gp",
"in",
"enumerate",
"(",
"self",
".",
"groups",
")",
":",
"trigger",
"=",
"gp",
".",
"trigger",
"if",
"trigger",
":",
"for",
"j",
"in",
"range",
"(",
"trigger",
"[",
"\"trigger_... | generator that yields triggers
Returns
-------
trigger_info : dict
trigger information with the following keys:
* comment : trigger comment
* time : trigger time
* pre_time : trigger pre time
* post_time : trigger post time
* index : trigger index
* group : data group index of trigger | [
"generator",
"that",
"yields",
"triggers"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v3.py#L3069-L3097 |
240,687 | danielhrisca/asammdf | asammdf/gui/widgets/formated_axis.py | FormatedAxis.setLabel | def setLabel(self, text=None, units=None, unitPrefix=None, **args):
""" overwrites pyqtgraph setLabel
"""
show_label = False
if text is not None:
self.labelText = text
show_label = True
if units is not None:
self.labelUnits = units
show_label = True
if show_label:
self.showLabel()
if unitPrefix is not None:
self.labelUnitPrefix = unitPrefix
if len(args) > 0:
self.labelStyle = args
self.label.setHtml(self.labelString())
self._adjustSize()
self.picture = None
self.update() | python | def setLabel(self, text=None, units=None, unitPrefix=None, **args):
show_label = False
if text is not None:
self.labelText = text
show_label = True
if units is not None:
self.labelUnits = units
show_label = True
if show_label:
self.showLabel()
if unitPrefix is not None:
self.labelUnitPrefix = unitPrefix
if len(args) > 0:
self.labelStyle = args
self.label.setHtml(self.labelString())
self._adjustSize()
self.picture = None
self.update() | [
"def",
"setLabel",
"(",
"self",
",",
"text",
"=",
"None",
",",
"units",
"=",
"None",
",",
"unitPrefix",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"show_label",
"=",
"False",
"if",
"text",
"is",
"not",
"None",
":",
"self",
".",
"labelText",
"=",... | overwrites pyqtgraph setLabel | [
"overwrites",
"pyqtgraph",
"setLabel"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/gui/widgets/formated_axis.py#L52-L72 |
240,688 | bernardopires/django-tenant-schemas | tenant_schemas/utils.py | clean_tenant_url | def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and
url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string | python | def clean_tenant_url(url_string):
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and
url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string | [
"def",
"clean_tenant_url",
"(",
"url_string",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"'PUBLIC_SCHEMA_URLCONF'",
")",
":",
"if",
"(",
"settings",
".",
"PUBLIC_SCHEMA_URLCONF",
"and",
"url_string",
".",
"startswith",
"(",
"settings",
".",
"PUBLIC_SCHEMA_UR... | Removes the TENANT_TOKEN from a particular string | [
"Removes",
"the",
"TENANT_TOKEN",
"from",
"a",
"particular",
"string"
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/utils.py#L53-L61 |
240,689 | bernardopires/django-tenant-schemas | tenant_schemas/utils.py | app_labels | def app_labels(apps_list):
"""
Returns a list of app labels of the given apps_list, now properly handles
new Django 1.7+ application registry.
https://docs.djangoproject.com/en/1.8/ref/applications/#django.apps.AppConfig.label
"""
if AppConfig is None:
return [app.split('.')[-1] for app in apps_list]
return [AppConfig.create(app).label for app in apps_list] | python | def app_labels(apps_list):
if AppConfig is None:
return [app.split('.')[-1] for app in apps_list]
return [AppConfig.create(app).label for app in apps_list] | [
"def",
"app_labels",
"(",
"apps_list",
")",
":",
"if",
"AppConfig",
"is",
"None",
":",
"return",
"[",
"app",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"for",
"app",
"in",
"apps_list",
"]",
"return",
"[",
"AppConfig",
".",
"create",
"(",
"a... | Returns a list of app labels of the given apps_list, now properly handles
new Django 1.7+ application registry.
https://docs.djangoproject.com/en/1.8/ref/applications/#django.apps.AppConfig.label | [
"Returns",
"a",
"list",
"of",
"app",
"labels",
"of",
"the",
"given",
"apps_list",
"now",
"properly",
"handles",
"new",
"Django",
"1",
".",
"7",
"+",
"application",
"registry",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/utils.py#L109-L118 |
240,690 | bernardopires/django-tenant-schemas | tenant_schemas/storage.py | TenantStorageMixin.path | def path(self, name):
"""
Look for files in subdirectory of MEDIA_ROOT using the tenant's
domain_url value as the specifier.
"""
if name is None:
name = ''
try:
location = safe_join(self.location, connection.tenant.domain_url)
except AttributeError:
location = self.location
try:
path = safe_join(location, name)
except ValueError:
raise SuspiciousOperation(
"Attempted access to '%s' denied." % name)
return os.path.normpath(path) | python | def path(self, name):
if name is None:
name = ''
try:
location = safe_join(self.location, connection.tenant.domain_url)
except AttributeError:
location = self.location
try:
path = safe_join(location, name)
except ValueError:
raise SuspiciousOperation(
"Attempted access to '%s' denied." % name)
return os.path.normpath(path) | [
"def",
"path",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"''",
"try",
":",
"location",
"=",
"safe_join",
"(",
"self",
".",
"location",
",",
"connection",
".",
"tenant",
".",
"domain_url",
")",
"except",
"Attrib... | Look for files in subdirectory of MEDIA_ROOT using the tenant's
domain_url value as the specifier. | [
"Look",
"for",
"files",
"in",
"subdirectory",
"of",
"MEDIA_ROOT",
"using",
"the",
"tenant",
"s",
"domain_url",
"value",
"as",
"the",
"specifier",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/storage.py#L27-L43 |
240,691 | bernardopires/django-tenant-schemas | tenant_schemas/management/commands/tenant_command.py | Command.run_from_argv | def run_from_argv(self, argv):
"""
Changes the option_list to use the options from the wrapped command.
Adds schema parameter to specify which schema will be used when
executing the wrapped command.
"""
# load the command object.
try:
app_name = get_commands()[argv[2]]
except KeyError:
raise CommandError("Unknown command: %r" % argv[2])
if isinstance(app_name, BaseCommand):
# if the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, argv[2])
# Ugly, but works. Delete tenant_command from the argv, parse the schema manually
# and forward the rest of the arguments to the actual command being wrapped.
del argv[1]
schema_parser = argparse.ArgumentParser()
schema_parser.add_argument("-s", "--schema", dest="schema_name", help="specify tenant schema")
schema_namespace, args = schema_parser.parse_known_args(argv)
tenant = self.get_tenant_from_options_or_interactive(schema_name=schema_namespace.schema_name)
connection.set_tenant(tenant)
klass.run_from_argv(args) | python | def run_from_argv(self, argv):
# load the command object.
try:
app_name = get_commands()[argv[2]]
except KeyError:
raise CommandError("Unknown command: %r" % argv[2])
if isinstance(app_name, BaseCommand):
# if the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, argv[2])
# Ugly, but works. Delete tenant_command from the argv, parse the schema manually
# and forward the rest of the arguments to the actual command being wrapped.
del argv[1]
schema_parser = argparse.ArgumentParser()
schema_parser.add_argument("-s", "--schema", dest="schema_name", help="specify tenant schema")
schema_namespace, args = schema_parser.parse_known_args(argv)
tenant = self.get_tenant_from_options_or_interactive(schema_name=schema_namespace.schema_name)
connection.set_tenant(tenant)
klass.run_from_argv(args) | [
"def",
"run_from_argv",
"(",
"self",
",",
"argv",
")",
":",
"# load the command object.",
"try",
":",
"app_name",
"=",
"get_commands",
"(",
")",
"[",
"argv",
"[",
"2",
"]",
"]",
"except",
"KeyError",
":",
"raise",
"CommandError",
"(",
"\"Unknown command: %r\""... | Changes the option_list to use the options from the wrapped command.
Adds schema parameter to specify which schema will be used when
executing the wrapped command. | [
"Changes",
"the",
"option_list",
"to",
"use",
"the",
"options",
"from",
"the",
"wrapped",
"command",
".",
"Adds",
"schema",
"parameter",
"to",
"specify",
"which",
"schema",
"will",
"be",
"used",
"when",
"executing",
"the",
"wrapped",
"command",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/management/commands/tenant_command.py#L11-L38 |
240,692 | bernardopires/django-tenant-schemas | tenant_schemas/management/commands/__init__.py | BaseTenantCommand.handle | def handle(self, *args, **options):
"""
Iterates a command over all registered schemata.
"""
if options['schema_name']:
# only run on a particular schema
connection.set_schema_to_public()
self.execute_command(get_tenant_model().objects.get(schema_name=options['schema_name']), self.COMMAND_NAME,
*args, **options)
else:
for tenant in get_tenant_model().objects.all():
if not (options['skip_public'] and tenant.schema_name == get_public_schema_name()):
self.execute_command(tenant, self.COMMAND_NAME, *args, **options) | python | def handle(self, *args, **options):
if options['schema_name']:
# only run on a particular schema
connection.set_schema_to_public()
self.execute_command(get_tenant_model().objects.get(schema_name=options['schema_name']), self.COMMAND_NAME,
*args, **options)
else:
for tenant in get_tenant_model().objects.all():
if not (options['skip_public'] and tenant.schema_name == get_public_schema_name()):
self.execute_command(tenant, self.COMMAND_NAME, *args, **options) | [
"def",
"handle",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"if",
"options",
"[",
"'schema_name'",
"]",
":",
"# only run on a particular schema",
"connection",
".",
"set_schema_to_public",
"(",
")",
"self",
".",
"execute_command",
"(",
... | Iterates a command over all registered schemata. | [
"Iterates",
"a",
"command",
"over",
"all",
"registered",
"schemata",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/management/commands/__init__.py#L69-L81 |
240,693 | bernardopires/django-tenant-schemas | tenant_schemas/postgresql_backend/base.py | DatabaseWrapper._cursor | def _cursor(self, name=None):
"""
Here it happens. We hope every Django db operation using PostgreSQL
must go through this to get the cursor handle. We change the path.
"""
if name:
# Only supported and required by Django 1.11 (server-side cursor)
cursor = super(DatabaseWrapper, self)._cursor(name=name)
else:
cursor = super(DatabaseWrapper, self)._cursor()
# optionally limit the number of executions - under load, the execution
# of `set search_path` can be quite time consuming
if (not get_limit_set_calls()) or not self.search_path_set:
# Actual search_path modification for the cursor. Database will
# search schemata from left to right when looking for the object
# (table, index, sequence, etc.).
if not self.schema_name:
raise ImproperlyConfigured("Database schema not set. Did you forget "
"to call set_schema() or set_tenant()?")
_check_schema_name(self.schema_name)
public_schema_name = get_public_schema_name()
search_paths = []
if self.schema_name == public_schema_name:
search_paths = [public_schema_name]
elif self.include_public_schema:
search_paths = [self.schema_name, public_schema_name]
else:
search_paths = [self.schema_name]
search_paths.extend(EXTRA_SEARCH_PATHS)
if name:
# Named cursor can only be used once
cursor_for_search_path = self.connection.cursor()
else:
# Reuse
cursor_for_search_path = cursor
# In the event that an error already happened in this transaction and we are going
# to rollback we should just ignore database error when setting the search_path
# if the next instruction is not a rollback it will just fail also, so
# we do not have to worry that it's not the good one
try:
cursor_for_search_path.execute('SET search_path = {0}'.format(','.join(search_paths)))
except (django.db.utils.DatabaseError, psycopg2.InternalError):
self.search_path_set = False
else:
self.search_path_set = True
if name:
cursor_for_search_path.close()
return cursor | python | def _cursor(self, name=None):
if name:
# Only supported and required by Django 1.11 (server-side cursor)
cursor = super(DatabaseWrapper, self)._cursor(name=name)
else:
cursor = super(DatabaseWrapper, self)._cursor()
# optionally limit the number of executions - under load, the execution
# of `set search_path` can be quite time consuming
if (not get_limit_set_calls()) or not self.search_path_set:
# Actual search_path modification for the cursor. Database will
# search schemata from left to right when looking for the object
# (table, index, sequence, etc.).
if not self.schema_name:
raise ImproperlyConfigured("Database schema not set. Did you forget "
"to call set_schema() or set_tenant()?")
_check_schema_name(self.schema_name)
public_schema_name = get_public_schema_name()
search_paths = []
if self.schema_name == public_schema_name:
search_paths = [public_schema_name]
elif self.include_public_schema:
search_paths = [self.schema_name, public_schema_name]
else:
search_paths = [self.schema_name]
search_paths.extend(EXTRA_SEARCH_PATHS)
if name:
# Named cursor can only be used once
cursor_for_search_path = self.connection.cursor()
else:
# Reuse
cursor_for_search_path = cursor
# In the event that an error already happened in this transaction and we are going
# to rollback we should just ignore database error when setting the search_path
# if the next instruction is not a rollback it will just fail also, so
# we do not have to worry that it's not the good one
try:
cursor_for_search_path.execute('SET search_path = {0}'.format(','.join(search_paths)))
except (django.db.utils.DatabaseError, psycopg2.InternalError):
self.search_path_set = False
else:
self.search_path_set = True
if name:
cursor_for_search_path.close()
return cursor | [
"def",
"_cursor",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
":",
"# Only supported and required by Django 1.11 (server-side cursor)",
"cursor",
"=",
"super",
"(",
"DatabaseWrapper",
",",
"self",
")",
".",
"_cursor",
"(",
"name",
"=",
"name",... | Here it happens. We hope every Django db operation using PostgreSQL
must go through this to get the cursor handle. We change the path. | [
"Here",
"it",
"happens",
".",
"We",
"hope",
"every",
"Django",
"db",
"operation",
"using",
"PostgreSQL",
"must",
"go",
"through",
"this",
"to",
"get",
"the",
"cursor",
"handle",
".",
"We",
"change",
"the",
"path",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/postgresql_backend/base.py#L112-L166 |
240,694 | bernardopires/django-tenant-schemas | tenant_schemas/apps.py | best_practice | def best_practice(app_configs, **kwargs):
"""
Test for configuration recommendations. These are best practices, they
avoid hard to find bugs and unexpected behaviour.
"""
if app_configs is None:
app_configs = apps.get_app_configs()
# Take the app_configs and turn them into *old style* application names.
# This is what we expect in the SHARED_APPS and TENANT_APPS settings.
INSTALLED_APPS = [
config.name
for config in app_configs
]
if not hasattr(settings, 'TENANT_APPS'):
return [Critical('TENANT_APPS setting not set')]
if not hasattr(settings, 'TENANT_MODEL'):
return [Critical('TENANT_MODEL setting not set')]
if not hasattr(settings, 'SHARED_APPS'):
return [Critical('SHARED_APPS setting not set')]
if 'tenant_schemas.routers.TenantSyncRouter' not in settings.DATABASE_ROUTERS:
return [
Critical("DATABASE_ROUTERS setting must contain "
"'tenant_schemas.routers.TenantSyncRouter'.")
]
errors = []
django_index = next(i for i, s in enumerate(INSTALLED_APPS) if s.startswith('django.'))
if INSTALLED_APPS.index('tenant_schemas') > django_index:
errors.append(
Warning("You should put 'tenant_schemas' before any django "
"core applications in INSTALLED_APPS.",
obj="django.conf.settings",
hint="This is necessary to overwrite built-in django "
"management commands with their schema-aware "
"implementations.",
id="tenant_schemas.W001"))
if not settings.TENANT_APPS:
errors.append(
Error("TENANT_APPS is empty.",
hint="Maybe you don't need this app?",
id="tenant_schemas.E001"))
if hasattr(settings, 'PG_EXTRA_SEARCH_PATHS'):
if get_public_schema_name() in settings.PG_EXTRA_SEARCH_PATHS:
errors.append(Critical(
"%s can not be included on PG_EXTRA_SEARCH_PATHS."
% get_public_schema_name()))
# make sure no tenant schema is in settings.PG_EXTRA_SEARCH_PATHS
invalid_schemas = set(settings.PG_EXTRA_SEARCH_PATHS).intersection(
get_tenant_model().objects.all().values_list('schema_name', flat=True))
if invalid_schemas:
errors.append(Critical(
"Do not include tenant schemas (%s) on PG_EXTRA_SEARCH_PATHS."
% ", ".join(sorted(invalid_schemas))))
if not settings.SHARED_APPS:
errors.append(
Warning("SHARED_APPS is empty.",
id="tenant_schemas.W002"))
if not set(settings.TENANT_APPS).issubset(INSTALLED_APPS):
delta = set(settings.TENANT_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have TENANT_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.TENANT_APPS if a in delta],
id="tenant_schemas.E002"))
if not set(settings.SHARED_APPS).issubset(INSTALLED_APPS):
delta = set(settings.SHARED_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have SHARED_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.SHARED_APPS if a in delta],
id="tenant_schemas.E003"))
if not isinstance(default_storage, TenantStorageMixin):
errors.append(Warning(
"Your default storage engine is not tenant aware.",
hint="Set settings.DEFAULT_FILE_STORAGE to "
"'tenant_schemas.storage.TenantFileSystemStorage'",
id="tenant_schemas.W003"
))
return errors | python | def best_practice(app_configs, **kwargs):
if app_configs is None:
app_configs = apps.get_app_configs()
# Take the app_configs and turn them into *old style* application names.
# This is what we expect in the SHARED_APPS and TENANT_APPS settings.
INSTALLED_APPS = [
config.name
for config in app_configs
]
if not hasattr(settings, 'TENANT_APPS'):
return [Critical('TENANT_APPS setting not set')]
if not hasattr(settings, 'TENANT_MODEL'):
return [Critical('TENANT_MODEL setting not set')]
if not hasattr(settings, 'SHARED_APPS'):
return [Critical('SHARED_APPS setting not set')]
if 'tenant_schemas.routers.TenantSyncRouter' not in settings.DATABASE_ROUTERS:
return [
Critical("DATABASE_ROUTERS setting must contain "
"'tenant_schemas.routers.TenantSyncRouter'.")
]
errors = []
django_index = next(i for i, s in enumerate(INSTALLED_APPS) if s.startswith('django.'))
if INSTALLED_APPS.index('tenant_schemas') > django_index:
errors.append(
Warning("You should put 'tenant_schemas' before any django "
"core applications in INSTALLED_APPS.",
obj="django.conf.settings",
hint="This is necessary to overwrite built-in django "
"management commands with their schema-aware "
"implementations.",
id="tenant_schemas.W001"))
if not settings.TENANT_APPS:
errors.append(
Error("TENANT_APPS is empty.",
hint="Maybe you don't need this app?",
id="tenant_schemas.E001"))
if hasattr(settings, 'PG_EXTRA_SEARCH_PATHS'):
if get_public_schema_name() in settings.PG_EXTRA_SEARCH_PATHS:
errors.append(Critical(
"%s can not be included on PG_EXTRA_SEARCH_PATHS."
% get_public_schema_name()))
# make sure no tenant schema is in settings.PG_EXTRA_SEARCH_PATHS
invalid_schemas = set(settings.PG_EXTRA_SEARCH_PATHS).intersection(
get_tenant_model().objects.all().values_list('schema_name', flat=True))
if invalid_schemas:
errors.append(Critical(
"Do not include tenant schemas (%s) on PG_EXTRA_SEARCH_PATHS."
% ", ".join(sorted(invalid_schemas))))
if not settings.SHARED_APPS:
errors.append(
Warning("SHARED_APPS is empty.",
id="tenant_schemas.W002"))
if not set(settings.TENANT_APPS).issubset(INSTALLED_APPS):
delta = set(settings.TENANT_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have TENANT_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.TENANT_APPS if a in delta],
id="tenant_schemas.E002"))
if not set(settings.SHARED_APPS).issubset(INSTALLED_APPS):
delta = set(settings.SHARED_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have SHARED_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.SHARED_APPS if a in delta],
id="tenant_schemas.E003"))
if not isinstance(default_storage, TenantStorageMixin):
errors.append(Warning(
"Your default storage engine is not tenant aware.",
hint="Set settings.DEFAULT_FILE_STORAGE to "
"'tenant_schemas.storage.TenantFileSystemStorage'",
id="tenant_schemas.W003"
))
return errors | [
"def",
"best_practice",
"(",
"app_configs",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"app_configs",
"is",
"None",
":",
"app_configs",
"=",
"apps",
".",
"get_app_configs",
"(",
")",
"# Take the app_configs and turn them into *old style* application names.",
"# This is wh... | Test for configuration recommendations. These are best practices, they
avoid hard to find bugs and unexpected behaviour. | [
"Test",
"for",
"configuration",
"recommendations",
".",
"These",
"are",
"best",
"practices",
"they",
"avoid",
"hard",
"to",
"find",
"bugs",
"and",
"unexpected",
"behaviour",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/apps.py#L14-L104 |
240,695 | google/openhtf | openhtf/plugs/device_wrapping.py | short_repr | def short_repr(obj, max_len=40):
"""Returns a short, term-friendly string representation of the object.
Args:
obj: An object for which to return a string representation.
max_len: Maximum length of the returned string. Longer reprs will be turned
into a brief descriptive string giving the type and length of obj.
"""
obj_repr = repr(obj)
if len(obj_repr) <= max_len:
return obj_repr
return '<{} of length {}>'.format(type(obj).__name__, len(obj_repr)) | python | def short_repr(obj, max_len=40):
obj_repr = repr(obj)
if len(obj_repr) <= max_len:
return obj_repr
return '<{} of length {}>'.format(type(obj).__name__, len(obj_repr)) | [
"def",
"short_repr",
"(",
"obj",
",",
"max_len",
"=",
"40",
")",
":",
"obj_repr",
"=",
"repr",
"(",
"obj",
")",
"if",
"len",
"(",
"obj_repr",
")",
"<=",
"max_len",
":",
"return",
"obj_repr",
"return",
"'<{} of length {}>'",
".",
"format",
"(",
"type",
... | Returns a short, term-friendly string representation of the object.
Args:
obj: An object for which to return a string representation.
max_len: Maximum length of the returned string. Longer reprs will be turned
into a brief descriptive string giving the type and length of obj. | [
"Returns",
"a",
"short",
"term",
"-",
"friendly",
"string",
"representation",
"of",
"the",
"object",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/device_wrapping.py#L29-L40 |
240,696 | google/openhtf | openhtf/core/measurements.py | measures | def measures(*measurements, **kwargs):
"""Decorator-maker used to declare measurements for phases.
See the measurements module docstring for examples of usage.
Args:
measurements: Measurement objects to declare, or a string name from which
to create a Measurement.
kwargs: Keyword arguments to pass to Measurement constructor if we're
constructing one. Note that if kwargs are provided, the length
of measurements must be 1, and that value must be a string containing
the measurement name. For valid kwargs, see the definition of the
Measurement class.
Returns:
A decorator that declares the measurement(s) for the decorated phase.
"""
def _maybe_make(meas):
"""Turn strings into Measurement objects if necessary."""
if isinstance(meas, Measurement):
return meas
elif isinstance(meas, six.string_types):
return Measurement(meas, **kwargs)
raise InvalidMeasurementType('Expected Measurement or string', meas)
# In case we're declaring a measurement inline, we can only declare one.
if kwargs and len(measurements) != 1:
raise InvalidMeasurementType(
'If @measures kwargs are provided, a single measurement name must be '
'provided as a positional arg first.')
# Unlikely, but let's make sure we don't allow overriding initial outcome.
if 'outcome' in kwargs:
raise ValueError('Cannot specify outcome in measurement declaration!')
measurements = [_maybe_make(meas) for meas in measurements]
# 'measurements' is guaranteed to be a list of Measurement objects here.
def decorate(wrapped_phase):
"""Phase decorator to be returned."""
phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(wrapped_phase)
duplicate_names = (set(m.name for m in measurements) &
set(m.name for m in phase.measurements))
if duplicate_names:
raise DuplicateNameError('Measurement names duplicated', duplicate_names)
phase.measurements.extend(measurements)
return phase
return decorate | python | def measures(*measurements, **kwargs):
def _maybe_make(meas):
"""Turn strings into Measurement objects if necessary."""
if isinstance(meas, Measurement):
return meas
elif isinstance(meas, six.string_types):
return Measurement(meas, **kwargs)
raise InvalidMeasurementType('Expected Measurement or string', meas)
# In case we're declaring a measurement inline, we can only declare one.
if kwargs and len(measurements) != 1:
raise InvalidMeasurementType(
'If @measures kwargs are provided, a single measurement name must be '
'provided as a positional arg first.')
# Unlikely, but let's make sure we don't allow overriding initial outcome.
if 'outcome' in kwargs:
raise ValueError('Cannot specify outcome in measurement declaration!')
measurements = [_maybe_make(meas) for meas in measurements]
# 'measurements' is guaranteed to be a list of Measurement objects here.
def decorate(wrapped_phase):
"""Phase decorator to be returned."""
phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(wrapped_phase)
duplicate_names = (set(m.name for m in measurements) &
set(m.name for m in phase.measurements))
if duplicate_names:
raise DuplicateNameError('Measurement names duplicated', duplicate_names)
phase.measurements.extend(measurements)
return phase
return decorate | [
"def",
"measures",
"(",
"*",
"measurements",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"_maybe_make",
"(",
"meas",
")",
":",
"\"\"\"Turn strings into Measurement objects if necessary.\"\"\"",
"if",
"isinstance",
"(",
"meas",
",",
"Measurement",
")",
":",
"return",... | Decorator-maker used to declare measurements for phases.
See the measurements module docstring for examples of usage.
Args:
measurements: Measurement objects to declare, or a string name from which
to create a Measurement.
kwargs: Keyword arguments to pass to Measurement constructor if we're
constructing one. Note that if kwargs are provided, the length
of measurements must be 1, and that value must be a string containing
the measurement name. For valid kwargs, see the definition of the
Measurement class.
Returns:
A decorator that declares the measurement(s) for the decorated phase. | [
"Decorator",
"-",
"maker",
"used",
"to",
"declare",
"measurements",
"for",
"phases",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L614-L662 |
240,697 | google/openhtf | openhtf/core/measurements.py | Measurement.set_notification_callback | def set_notification_callback(self, notification_cb):
"""Set the notifier we'll call when measurements are set."""
self._notification_cb = notification_cb
if not notification_cb and self.dimensions:
self.measured_value.notify_value_set = None
return self | python | def set_notification_callback(self, notification_cb):
self._notification_cb = notification_cb
if not notification_cb and self.dimensions:
self.measured_value.notify_value_set = None
return self | [
"def",
"set_notification_callback",
"(",
"self",
",",
"notification_cb",
")",
":",
"self",
".",
"_notification_cb",
"=",
"notification_cb",
"if",
"not",
"notification_cb",
"and",
"self",
".",
"dimensions",
":",
"self",
".",
"measured_value",
".",
"notify_value_set",... | Set the notifier we'll call when measurements are set. | [
"Set",
"the",
"notifier",
"we",
"ll",
"call",
"when",
"measurements",
"are",
"set",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L171-L176 |
240,698 | google/openhtf | openhtf/core/measurements.py | Measurement._maybe_make_unit_desc | def _maybe_make_unit_desc(self, unit_desc):
"""Return the UnitDescriptor or convert a string to one."""
if isinstance(unit_desc, str) or unit_desc is None:
unit_desc = units.Unit(unit_desc)
if not isinstance(unit_desc, units.UnitDescriptor):
raise TypeError('Invalid units for measurement %s: %s' % (self.name,
unit_desc))
return unit_desc | python | def _maybe_make_unit_desc(self, unit_desc):
if isinstance(unit_desc, str) or unit_desc is None:
unit_desc = units.Unit(unit_desc)
if not isinstance(unit_desc, units.UnitDescriptor):
raise TypeError('Invalid units for measurement %s: %s' % (self.name,
unit_desc))
return unit_desc | [
"def",
"_maybe_make_unit_desc",
"(",
"self",
",",
"unit_desc",
")",
":",
"if",
"isinstance",
"(",
"unit_desc",
",",
"str",
")",
"or",
"unit_desc",
"is",
"None",
":",
"unit_desc",
"=",
"units",
".",
"Unit",
"(",
"unit_desc",
")",
"if",
"not",
"isinstance",
... | Return the UnitDescriptor or convert a string to one. | [
"Return",
"the",
"UnitDescriptor",
"or",
"convert",
"a",
"string",
"to",
"one",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L191-L198 |
240,699 | google/openhtf | openhtf/core/measurements.py | Measurement._maybe_make_dimension | def _maybe_make_dimension(self, dimension):
"""Return a `measurements.Dimension` instance."""
# For backwards compatibility the argument can be either a Dimension, a
# string or a `units.UnitDescriptor`.
if isinstance(dimension, Dimension):
return dimension
if isinstance(dimension, units.UnitDescriptor):
return Dimension.from_unit_descriptor(dimension)
if isinstance(dimension, str):
return Dimension.from_string(dimension)
raise TypeError('Cannot convert %s to a dimension', dimension) | python | def _maybe_make_dimension(self, dimension):
# For backwards compatibility the argument can be either a Dimension, a
# string or a `units.UnitDescriptor`.
if isinstance(dimension, Dimension):
return dimension
if isinstance(dimension, units.UnitDescriptor):
return Dimension.from_unit_descriptor(dimension)
if isinstance(dimension, str):
return Dimension.from_string(dimension)
raise TypeError('Cannot convert %s to a dimension', dimension) | [
"def",
"_maybe_make_dimension",
"(",
"self",
",",
"dimension",
")",
":",
"# For backwards compatibility the argument can be either a Dimension, a",
"# string or a `units.UnitDescriptor`.",
"if",
"isinstance",
"(",
"dimension",
",",
"Dimension",
")",
":",
"return",
"dimension",
... | Return a `measurements.Dimension` instance. | [
"Return",
"a",
"measurements",
".",
"Dimension",
"instance",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L200-L211 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.