repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
ZELLMECHANIK-DRESDEN/dclab | dclab/kde_contours.py | _find_quantile_level | def _find_quantile_level(density, x, y, xp, yp, quantile, acc=.01,
ret_err=False):
"""Find density level for a given data quantile by iteration
Parameters
----------
density: 2d ndarray of shape (M, N)
Kernel density estimate for which to compute the contours
x: 2d ndarray of shape (M, N) or 1d ndarray of size M
X-values corresponding to `kde`
y: 2d ndarray of shape (M, N) or 1d ndarray of size M
Y-values corresponding to `kde`
xp: 1d ndarray of size D
Event x-data from which to compute the quantile
yp: 1d ndarray of size D
Event y-data from which to compute the quantile
quantile: float between 0 and 1
Quantile along which to find contours in `kde` relative
to its maximum
acc: float
Desired absolute accuracy (stopping criterion) of the
contours
ret_err: bool
If True, also return the absolute error
Returns
-------
level: float
Contours level corresponding to the given quantile
Notes
-----
A much more faster method (using interpolation) is implemented in
:func:`get_quantile_levels`.
NaN-values events in `xp` and `yp` are ignored.
See Also
--------
skimage.measure.find_contours: Contour finding algorithm
"""
if quantile >= 1 or quantile <= 0:
raise ValueError("Invalid value for `quantile`: {}".format(quantile))
# remove bad events
bad = get_bad_vals(xp, yp)
xp = xp[~bad]
yp = yp[~bad]
# initial guess
level = quantile
# error of current iteration
err = 1
# iteration factor (guarantees convergence)
itfac = 1
# total number of events
nev = xp.size
while np.abs(err) > acc:
# compute contours
conts = find_contours_level(density, x, y, level, closed=True)
# compute number of points in contour
isin = 0
for ii in range(nev):
for cc in conts:
isin += PolygonFilter.point_in_poly((xp[ii], yp[ii]),
poly=cc)
break # no need to check other contours
err = quantile - (nev - isin) / nev
level += err * itfac
itfac *= .9
if ret_err:
return level, err
else:
return level | python | def _find_quantile_level(density, x, y, xp, yp, quantile, acc=.01,
ret_err=False):
"""Find density level for a given data quantile by iteration
Parameters
----------
density: 2d ndarray of shape (M, N)
Kernel density estimate for which to compute the contours
x: 2d ndarray of shape (M, N) or 1d ndarray of size M
X-values corresponding to `kde`
y: 2d ndarray of shape (M, N) or 1d ndarray of size M
Y-values corresponding to `kde`
xp: 1d ndarray of size D
Event x-data from which to compute the quantile
yp: 1d ndarray of size D
Event y-data from which to compute the quantile
quantile: float between 0 and 1
Quantile along which to find contours in `kde` relative
to its maximum
acc: float
Desired absolute accuracy (stopping criterion) of the
contours
ret_err: bool
If True, also return the absolute error
Returns
-------
level: float
Contours level corresponding to the given quantile
Notes
-----
A much more faster method (using interpolation) is implemented in
:func:`get_quantile_levels`.
NaN-values events in `xp` and `yp` are ignored.
See Also
--------
skimage.measure.find_contours: Contour finding algorithm
"""
if quantile >= 1 or quantile <= 0:
raise ValueError("Invalid value for `quantile`: {}".format(quantile))
# remove bad events
bad = get_bad_vals(xp, yp)
xp = xp[~bad]
yp = yp[~bad]
# initial guess
level = quantile
# error of current iteration
err = 1
# iteration factor (guarantees convergence)
itfac = 1
# total number of events
nev = xp.size
while np.abs(err) > acc:
# compute contours
conts = find_contours_level(density, x, y, level, closed=True)
# compute number of points in contour
isin = 0
for ii in range(nev):
for cc in conts:
isin += PolygonFilter.point_in_poly((xp[ii], yp[ii]),
poly=cc)
break # no need to check other contours
err = quantile - (nev - isin) / nev
level += err * itfac
itfac *= .9
if ret_err:
return level, err
else:
return level | [
"def",
"_find_quantile_level",
"(",
"density",
",",
"x",
",",
"y",
",",
"xp",
",",
"yp",
",",
"quantile",
",",
"acc",
"=",
".01",
",",
"ret_err",
"=",
"False",
")",
":",
"if",
"quantile",
">=",
"1",
"or",
"quantile",
"<=",
"0",
":",
"raise",
"Value... | Find density level for a given data quantile by iteration
Parameters
----------
density: 2d ndarray of shape (M, N)
Kernel density estimate for which to compute the contours
x: 2d ndarray of shape (M, N) or 1d ndarray of size M
X-values corresponding to `kde`
y: 2d ndarray of shape (M, N) or 1d ndarray of size M
Y-values corresponding to `kde`
xp: 1d ndarray of size D
Event x-data from which to compute the quantile
yp: 1d ndarray of size D
Event y-data from which to compute the quantile
quantile: float between 0 and 1
Quantile along which to find contours in `kde` relative
to its maximum
acc: float
Desired absolute accuracy (stopping criterion) of the
contours
ret_err: bool
If True, also return the absolute error
Returns
-------
level: float
Contours level corresponding to the given quantile
Notes
-----
A much more faster method (using interpolation) is implemented in
:func:`get_quantile_levels`.
NaN-values events in `xp` and `yp` are ignored.
See Also
--------
skimage.measure.find_contours: Contour finding algorithm | [
"Find",
"density",
"level",
"for",
"a",
"given",
"data",
"quantile",
"by",
"iteration"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_contours.py#L146-L220 | train | 48,700 |
openstax/cnx-archive | cnxarchive/search.py | search | def search(query, query_type=DEFAULT_QUERY_TYPE):
"""Search database using parsed query.
Executes a database search query from the given ``query``
(a ``Query`` object) and optionally accepts a list of search weights.
By default, the search results are ordered by weight.
:param query: containing terms, filters, and sorts.
:type query: Query
:returns: a sequence of records that match the query conditions
:rtype: QueryResults (which is a sequence of QueryRecord objects)
"""
# Build the SQL statement.
statement, arguments = _build_search(query)
# Execute the SQL.
if statement is None and arguments is None:
return QueryResults([], [], 'AND')
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(statement, arguments)
search_results = cursor.fetchall()
# Wrap the SQL results.
return QueryResults(search_results, query, query_type) | python | def search(query, query_type=DEFAULT_QUERY_TYPE):
"""Search database using parsed query.
Executes a database search query from the given ``query``
(a ``Query`` object) and optionally accepts a list of search weights.
By default, the search results are ordered by weight.
:param query: containing terms, filters, and sorts.
:type query: Query
:returns: a sequence of records that match the query conditions
:rtype: QueryResults (which is a sequence of QueryRecord objects)
"""
# Build the SQL statement.
statement, arguments = _build_search(query)
# Execute the SQL.
if statement is None and arguments is None:
return QueryResults([], [], 'AND')
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(statement, arguments)
search_results = cursor.fetchall()
# Wrap the SQL results.
return QueryResults(search_results, query, query_type) | [
"def",
"search",
"(",
"query",
",",
"query_type",
"=",
"DEFAULT_QUERY_TYPE",
")",
":",
"# Build the SQL statement.",
"statement",
",",
"arguments",
"=",
"_build_search",
"(",
"query",
")",
"# Execute the SQL.",
"if",
"statement",
"is",
"None",
"and",
"arguments",
... | Search database using parsed query.
Executes a database search query from the given ``query``
(a ``Query`` object) and optionally accepts a list of search weights.
By default, the search results are ordered by weight.
:param query: containing terms, filters, and sorts.
:type query: Query
:returns: a sequence of records that match the query conditions
:rtype: QueryResults (which is a sequence of QueryRecord objects) | [
"Search",
"database",
"using",
"parsed",
"query",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/search.py#L570-L594 | train | 48,701 |
openstax/cnx-archive | cnxarchive/search.py | Query.fix_quotes | def fix_quotes(cls, query_string):
"""Heuristic attempt to fix unbalanced quotes in query_string."""
if query_string.count('"') % 2 == 0:
# no unbalanced quotes to fix
return query_string
fields = [] # contains what's matched by the regexp
# e.g. fields = ['sort:pubDate', 'author:"first last"']
def f(match):
fields.append(match.string[match.start():match.end()])
return ''
# terms will be all the search terms that don't have a field
terms = re.sub(r'[^\s:]*:("[^"]*"|[^\s]*)', f, query_string)
query_string = '{}" {}'.format(terms.strip(), ' '.join(fields))
return query_string | python | def fix_quotes(cls, query_string):
"""Heuristic attempt to fix unbalanced quotes in query_string."""
if query_string.count('"') % 2 == 0:
# no unbalanced quotes to fix
return query_string
fields = [] # contains what's matched by the regexp
# e.g. fields = ['sort:pubDate', 'author:"first last"']
def f(match):
fields.append(match.string[match.start():match.end()])
return ''
# terms will be all the search terms that don't have a field
terms = re.sub(r'[^\s:]*:("[^"]*"|[^\s]*)', f, query_string)
query_string = '{}" {}'.format(terms.strip(), ' '.join(fields))
return query_string | [
"def",
"fix_quotes",
"(",
"cls",
",",
"query_string",
")",
":",
"if",
"query_string",
".",
"count",
"(",
"'\"'",
")",
"%",
"2",
"==",
"0",
":",
"# no unbalanced quotes to fix",
"return",
"query_string",
"fields",
"=",
"[",
"]",
"# contains what's matched by the ... | Heuristic attempt to fix unbalanced quotes in query_string. | [
"Heuristic",
"attempt",
"to",
"fix",
"unbalanced",
"quotes",
"in",
"query_string",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/search.py#L106-L122 | train | 48,702 |
openstax/cnx-archive | cnxarchive/search.py | Query.from_raw_query | def from_raw_query(cls, query_string):
"""Parse raw string to query.
Given a raw string (typically typed by the user),
parse to a structured format and initialize the class.
"""
try:
node_tree = grammar.parse(query_string)
except IncompleteParseError:
query_string = cls.fix_quotes(query_string)
node_tree = grammar.parse(query_string)
structured_query = DictFormater().visit(node_tree)
return cls([t for t in structured_query
if t[1].lower() not in STOPWORDS]) | python | def from_raw_query(cls, query_string):
"""Parse raw string to query.
Given a raw string (typically typed by the user),
parse to a structured format and initialize the class.
"""
try:
node_tree = grammar.parse(query_string)
except IncompleteParseError:
query_string = cls.fix_quotes(query_string)
node_tree = grammar.parse(query_string)
structured_query = DictFormater().visit(node_tree)
return cls([t for t in structured_query
if t[1].lower() not in STOPWORDS]) | [
"def",
"from_raw_query",
"(",
"cls",
",",
"query_string",
")",
":",
"try",
":",
"node_tree",
"=",
"grammar",
".",
"parse",
"(",
"query_string",
")",
"except",
"IncompleteParseError",
":",
"query_string",
"=",
"cls",
".",
"fix_quotes",
"(",
"query_string",
")",... | Parse raw string to query.
Given a raw string (typically typed by the user),
parse to a structured format and initialize the class. | [
"Parse",
"raw",
"string",
"to",
"query",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/search.py#L125-L140 | train | 48,703 |
openstax/cnx-archive | cnxarchive/search.py | QueryRecord.highlighted_abstract | def highlighted_abstract(self):
"""Highlight the found terms in the abstract text."""
abstract_terms = self.fields.get('abstract', [])
if abstract_terms:
sql = _read_sql_file('highlighted-abstract')
else:
sql = _read_sql_file('get-abstract')
arguments = {'id': self['id'],
'query': ' & '.join(abstract_terms),
}
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(sql, arguments)
hl_abstract = cursor.fetchone()
if hl_abstract:
return hl_abstract[0] | python | def highlighted_abstract(self):
"""Highlight the found terms in the abstract text."""
abstract_terms = self.fields.get('abstract', [])
if abstract_terms:
sql = _read_sql_file('highlighted-abstract')
else:
sql = _read_sql_file('get-abstract')
arguments = {'id': self['id'],
'query': ' & '.join(abstract_terms),
}
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(sql, arguments)
hl_abstract = cursor.fetchone()
if hl_abstract:
return hl_abstract[0] | [
"def",
"highlighted_abstract",
"(",
"self",
")",
":",
"abstract_terms",
"=",
"self",
".",
"fields",
".",
"get",
"(",
"'abstract'",
",",
"[",
"]",
")",
"if",
"abstract_terms",
":",
"sql",
"=",
"_read_sql_file",
"(",
"'highlighted-abstract'",
")",
"else",
":",... | Highlight the found terms in the abstract text. | [
"Highlight",
"the",
"found",
"terms",
"in",
"the",
"abstract",
"text",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/search.py#L177-L192 | train | 48,704 |
openstax/cnx-archive | cnxarchive/search.py | QueryRecord.highlighted_fulltext | def highlighted_fulltext(self):
"""Highlight the found terms in the fulltext."""
terms = self.fields.get('fulltext', [])
if not terms:
return None
arguments = {'id': self['id'],
'query': ' & '.join(terms),
}
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(_read_sql_file('highlighted-fulltext'),
arguments)
hl_fulltext = cursor.fetchone()[0]
return hl_fulltext | python | def highlighted_fulltext(self):
"""Highlight the found terms in the fulltext."""
terms = self.fields.get('fulltext', [])
if not terms:
return None
arguments = {'id': self['id'],
'query': ' & '.join(terms),
}
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(_read_sql_file('highlighted-fulltext'),
arguments)
hl_fulltext = cursor.fetchone()[0]
return hl_fulltext | [
"def",
"highlighted_fulltext",
"(",
"self",
")",
":",
"terms",
"=",
"self",
".",
"fields",
".",
"get",
"(",
"'fulltext'",
",",
"[",
"]",
")",
"if",
"not",
"terms",
":",
"return",
"None",
"arguments",
"=",
"{",
"'id'",
":",
"self",
"[",
"'id'",
"]",
... | Highlight the found terms in the fulltext. | [
"Highlight",
"the",
"found",
"terms",
"in",
"the",
"fulltext",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/search.py#L195-L208 | train | 48,705 |
ZELLMECHANIK-DRESDEN/dclab | dclab/external/statsmodels/nonparametric/kernel_density.py | KDEMultivariate.pdf | def pdf(self, data_predict=None):
r"""
Evaluate the probability density function.
Parameters
----------
data_predict: array_like, optional
Points to evaluate at. If unspecified, the training data is used.
Returns
-------
pdf_est: array_like
Probability density function evaluated at `data_predict`.
Notes
-----
The probability density is given by the generalized product kernel
estimator:
.. math:: K_{h}(X_{i},X_{j}) =
\prod_{s=1}^{q}h_{s}^{-1}k\left(\frac{X_{is}-X_{js}}{h_{s}}\right)
"""
if data_predict is None:
data_predict = self.data
else:
data_predict = _adjust_shape(data_predict, self.k_vars)
pdf_est = []
for i in range(np.shape(data_predict)[0]):
pdf_est.append(gpke(self.bw, data=self.data,
data_predict=data_predict[i, :],
var_type=self.var_type) / self.nobs)
pdf_est = np.squeeze(pdf_est)
return pdf_est | python | def pdf(self, data_predict=None):
r"""
Evaluate the probability density function.
Parameters
----------
data_predict: array_like, optional
Points to evaluate at. If unspecified, the training data is used.
Returns
-------
pdf_est: array_like
Probability density function evaluated at `data_predict`.
Notes
-----
The probability density is given by the generalized product kernel
estimator:
.. math:: K_{h}(X_{i},X_{j}) =
\prod_{s=1}^{q}h_{s}^{-1}k\left(\frac{X_{is}-X_{js}}{h_{s}}\right)
"""
if data_predict is None:
data_predict = self.data
else:
data_predict = _adjust_shape(data_predict, self.k_vars)
pdf_est = []
for i in range(np.shape(data_predict)[0]):
pdf_est.append(gpke(self.bw, data=self.data,
data_predict=data_predict[i, :],
var_type=self.var_type) / self.nobs)
pdf_est = np.squeeze(pdf_est)
return pdf_est | [
"def",
"pdf",
"(",
"self",
",",
"data_predict",
"=",
"None",
")",
":",
"if",
"data_predict",
"is",
"None",
":",
"data_predict",
"=",
"self",
".",
"data",
"else",
":",
"data_predict",
"=",
"_adjust_shape",
"(",
"data_predict",
",",
"self",
".",
"k_vars",
... | r"""
Evaluate the probability density function.
Parameters
----------
data_predict: array_like, optional
Points to evaluate at. If unspecified, the training data is used.
Returns
-------
pdf_est: array_like
Probability density function evaluated at `data_predict`.
Notes
-----
The probability density is given by the generalized product kernel
estimator:
.. math:: K_{h}(X_{i},X_{j}) =
\prod_{s=1}^{q}h_{s}^{-1}k\left(\frac{X_{is}-X_{js}}{h_{s}}\right) | [
"r",
"Evaluate",
"the",
"probability",
"density",
"function",
"."
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/external/statsmodels/nonparametric/kernel_density.py#L126-L160 | train | 48,706 |
xenon-middleware/pyxenon | xenon/compat.py | find_xenon_grpc_jar | def find_xenon_grpc_jar():
"""Find the Xenon-GRPC jar-file, windows version."""
prefix = Path(sys.prefix)
locations = [
prefix / 'lib',
prefix / 'local' / 'lib'
]
for location in locations:
jar_file = location / 'xenon-grpc-{}-all.jar'.format(
xenon_grpc_version)
if not jar_file.exists():
continue
else:
return str(jar_file)
return None | python | def find_xenon_grpc_jar():
"""Find the Xenon-GRPC jar-file, windows version."""
prefix = Path(sys.prefix)
locations = [
prefix / 'lib',
prefix / 'local' / 'lib'
]
for location in locations:
jar_file = location / 'xenon-grpc-{}-all.jar'.format(
xenon_grpc_version)
if not jar_file.exists():
continue
else:
return str(jar_file)
return None | [
"def",
"find_xenon_grpc_jar",
"(",
")",
":",
"prefix",
"=",
"Path",
"(",
"sys",
".",
"prefix",
")",
"locations",
"=",
"[",
"prefix",
"/",
"'lib'",
",",
"prefix",
"/",
"'local'",
"/",
"'lib'",
"]",
"for",
"location",
"in",
"locations",
":",
"jar_file",
... | Find the Xenon-GRPC jar-file, windows version. | [
"Find",
"the",
"Xenon",
"-",
"GRPC",
"jar",
"-",
"file",
"windows",
"version",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/compat.py#L16-L34 | train | 48,707 |
simse/pymitv | pymitv/control.py | Control.send_keystrokes | def send_keystrokes(ip, keystrokes, wait=False):
"""Connects to TV and sends keystroke via HTTP."""
tv_url = 'http://{}:6095/controller?action=keyevent&keycode='.format(ip)
for keystroke in keystrokes:
if keystroke == 'wait' or wait is True:
time.sleep(0.7)
else:
request = requests.get(tv_url + keystroke)
if request.status_code != 200:
return False
return True | python | def send_keystrokes(ip, keystrokes, wait=False):
"""Connects to TV and sends keystroke via HTTP."""
tv_url = 'http://{}:6095/controller?action=keyevent&keycode='.format(ip)
for keystroke in keystrokes:
if keystroke == 'wait' or wait is True:
time.sleep(0.7)
else:
request = requests.get(tv_url + keystroke)
if request.status_code != 200:
return False
return True | [
"def",
"send_keystrokes",
"(",
"ip",
",",
"keystrokes",
",",
"wait",
"=",
"False",
")",
":",
"tv_url",
"=",
"'http://{}:6095/controller?action=keyevent&keycode='",
".",
"format",
"(",
"ip",
")",
"for",
"keystroke",
"in",
"keystrokes",
":",
"if",
"keystroke",
"==... | Connects to TV and sends keystroke via HTTP. | [
"Connects",
"to",
"TV",
"and",
"sends",
"keystroke",
"via",
"HTTP",
"."
] | 03213f591d70fbf90ba2b6af372e474c9bfb99f6 | https://github.com/simse/pymitv/blob/03213f591d70fbf90ba2b6af372e474c9bfb99f6/pymitv/control.py#L29-L43 | train | 48,708 |
simse/pymitv | pymitv/control.py | Control.mute | def mute(ip):
"""Polyfill for muting the TV."""
tv_url = 'http://{}:6095/controller?action=keyevent&keycode='.format(ip)
count = 0
while count > 30:
count = count + 1
request = requests.get(tv_url + 'volumedown')
if request.status_code != 200:
return False
return True | python | def mute(ip):
"""Polyfill for muting the TV."""
tv_url = 'http://{}:6095/controller?action=keyevent&keycode='.format(ip)
count = 0
while count > 30:
count = count + 1
request = requests.get(tv_url + 'volumedown')
if request.status_code != 200:
return False
return True | [
"def",
"mute",
"(",
"ip",
")",
":",
"tv_url",
"=",
"'http://{}:6095/controller?action=keyevent&keycode='",
".",
"format",
"(",
"ip",
")",
"count",
"=",
"0",
"while",
"count",
">",
"30",
":",
"count",
"=",
"count",
"+",
"1",
"request",
"=",
"requests",
".",... | Polyfill for muting the TV. | [
"Polyfill",
"for",
"muting",
"the",
"TV",
"."
] | 03213f591d70fbf90ba2b6af372e474c9bfb99f6 | https://github.com/simse/pymitv/blob/03213f591d70fbf90ba2b6af372e474c9bfb99f6/pymitv/control.py#L46-L58 | train | 48,709 |
openstax/cnx-archive | cnxarchive/views/in_book_search.py | in_book_search | def in_book_search(request):
"""Full text, in-book search."""
results = {}
args = request.matchdict
ident_hash = args['ident_hash']
args['search_term'] = request.params.get('q', '')
query_type = request.params.get('query_type', '')
combiner = ''
if query_type:
if query_type.lower() == 'or':
combiner = '_or'
id, version = split_ident_hash(ident_hash)
args['uuid'] = id
args['version'] = version
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(SQL['get-collated-state'], args)
res = cursor.fetchall()
if res and res[0][0]:
statement = SQL['get-in-collated-book-search']
else:
statement = SQL['get-in-book-search']
cursor.execute(statement.format(combiner=combiner), args)
res = cursor.fetchall()
results['results'] = {'query': [],
'total': len(res),
'items': []}
results['results']['query'] = {
'id': ident_hash,
'search_term': args['search_term'],
}
for uuid, version, title, snippet, matches, rank in res:
results['results']['items'].append({
'rank': '{}'.format(rank),
'id': '{}@{}'.format(uuid, version),
'title': '{}'.format(title),
'snippet': '{}'.format(snippet),
'matches': '{}'.format(matches),
})
resp = request.response
resp.status = '200 OK'
resp.content_type = 'application/json'
resp.body = json.dumps(results)
return resp | python | def in_book_search(request):
"""Full text, in-book search."""
results = {}
args = request.matchdict
ident_hash = args['ident_hash']
args['search_term'] = request.params.get('q', '')
query_type = request.params.get('query_type', '')
combiner = ''
if query_type:
if query_type.lower() == 'or':
combiner = '_or'
id, version = split_ident_hash(ident_hash)
args['uuid'] = id
args['version'] = version
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(SQL['get-collated-state'], args)
res = cursor.fetchall()
if res and res[0][0]:
statement = SQL['get-in-collated-book-search']
else:
statement = SQL['get-in-book-search']
cursor.execute(statement.format(combiner=combiner), args)
res = cursor.fetchall()
results['results'] = {'query': [],
'total': len(res),
'items': []}
results['results']['query'] = {
'id': ident_hash,
'search_term': args['search_term'],
}
for uuid, version, title, snippet, matches, rank in res:
results['results']['items'].append({
'rank': '{}'.format(rank),
'id': '{}@{}'.format(uuid, version),
'title': '{}'.format(title),
'snippet': '{}'.format(snippet),
'matches': '{}'.format(matches),
})
resp = request.response
resp.status = '200 OK'
resp.content_type = 'application/json'
resp.body = json.dumps(results)
return resp | [
"def",
"in_book_search",
"(",
"request",
")",
":",
"results",
"=",
"{",
"}",
"args",
"=",
"request",
".",
"matchdict",
"ident_hash",
"=",
"args",
"[",
"'ident_hash'",
"]",
"args",
"[",
"'search_term'",
"]",
"=",
"request",
".",
"params",
".",
"get",
"(",... | Full text, in-book search. | [
"Full",
"text",
"in",
"-",
"book",
"search",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/in_book_search.py#L34-L84 | train | 48,710 |
openstax/cnx-archive | cnxarchive/views/in_book_search.py | in_book_search_highlighted_results | def in_book_search_highlighted_results(request):
"""In-book search - returns a highlighted version of the HTML."""
results = {}
args = request.matchdict
ident_hash = args['ident_hash']
page_ident_hash = args['page_ident_hash']
try:
page_uuid, _ = split_ident_hash(page_ident_hash)
except IdentHashShortId as e:
page_uuid = get_uuid(e.id)
except IdentHashMissingVersion as e:
page_uuid = e.id
args['page_uuid'] = page_uuid
args['search_term'] = request.params.get('q', '')
query_type = request.params.get('query_type', '')
combiner = ''
if query_type:
if query_type.lower() == 'or':
combiner = '_or'
# Get version from URL params
id, version = split_ident_hash(ident_hash)
args['uuid'] = id
args['version'] = version
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(SQL['get-collated-state'], args)
res = cursor.fetchall()
if res and res[0][0]:
statement = SQL['get-in-collated-book-search-full-page']
else:
statement = SQL['get-in-book-search-full-page']
cursor.execute(statement.format(combiner=combiner), args)
res = cursor.fetchall()
results['results'] = {'query': [],
'total': len(res),
'items': []}
results['results']['query'] = {
'search_term': args['search_term'],
'collection_id': ident_hash,
}
for uuid, version, title, headline, rank in res:
results['results']['items'].append({
'rank': '{}'.format(rank),
'id': '{}'.format(page_ident_hash),
'title': '{}'.format(title),
'html': '{}'.format(headline),
})
resp = request.response
resp.status = '200 OK'
resp.content_type = 'application/json'
resp.body = json.dumps(results)
return resp | python | def in_book_search_highlighted_results(request):
"""In-book search - returns a highlighted version of the HTML."""
results = {}
args = request.matchdict
ident_hash = args['ident_hash']
page_ident_hash = args['page_ident_hash']
try:
page_uuid, _ = split_ident_hash(page_ident_hash)
except IdentHashShortId as e:
page_uuid = get_uuid(e.id)
except IdentHashMissingVersion as e:
page_uuid = e.id
args['page_uuid'] = page_uuid
args['search_term'] = request.params.get('q', '')
query_type = request.params.get('query_type', '')
combiner = ''
if query_type:
if query_type.lower() == 'or':
combiner = '_or'
# Get version from URL params
id, version = split_ident_hash(ident_hash)
args['uuid'] = id
args['version'] = version
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(SQL['get-collated-state'], args)
res = cursor.fetchall()
if res and res[0][0]:
statement = SQL['get-in-collated-book-search-full-page']
else:
statement = SQL['get-in-book-search-full-page']
cursor.execute(statement.format(combiner=combiner), args)
res = cursor.fetchall()
results['results'] = {'query': [],
'total': len(res),
'items': []}
results['results']['query'] = {
'search_term': args['search_term'],
'collection_id': ident_hash,
}
for uuid, version, title, headline, rank in res:
results['results']['items'].append({
'rank': '{}'.format(rank),
'id': '{}'.format(page_ident_hash),
'title': '{}'.format(title),
'html': '{}'.format(headline),
})
resp = request.response
resp.status = '200 OK'
resp.content_type = 'application/json'
resp.body = json.dumps(results)
return resp | [
"def",
"in_book_search_highlighted_results",
"(",
"request",
")",
":",
"results",
"=",
"{",
"}",
"args",
"=",
"request",
".",
"matchdict",
"ident_hash",
"=",
"args",
"[",
"'ident_hash'",
"]",
"page_ident_hash",
"=",
"args",
"[",
"'page_ident_hash'",
"]",
"try",
... | In-book search - returns a highlighted version of the HTML. | [
"In",
"-",
"book",
"search",
"-",
"returns",
"a",
"highlighted",
"version",
"of",
"the",
"HTML",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/in_book_search.py#L89-L149 | train | 48,711 |
ZELLMECHANIK-DRESDEN/dclab | dclab/external/statsmodels/nonparametric/_kernel_base.py | gpke | def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
r"""
Returns the non-normalized Generalized Product Kernel Estimator
Parameters
----------
bw: 1-D ndarray
The user-specified bandwidth parameters.
data: 1D or 2-D ndarray
The training data.
data_predict: 1-D ndarray
The evaluation points at which the kernel estimation is performed.
var_type: str, optional
The variable type (continuous, ordered, unordered).
ckertype: str, optional
The kernel used for the continuous variables.
okertype: str, optional
The kernel used for the ordered discrete variables.
ukertype: str, optional
The kernel used for the unordered discrete variables.
tosum : bool, optional
Whether or not to sum the calculated array of densities. Default is
True.
Returns
-------
dens: array-like
The generalized product kernel density estimator.
Notes
-----
The formula for the multivariate kernel estimator for the pdf is:
.. math:: f(x)=\frac{1}{nh_{1}...h_{q}}\sum_{i=1}^
{n}K\left(\frac{X_{i}-x}{h}\right)
where
.. math:: K\left(\frac{X_{i}-x}{h}\right) =
k\left( \frac{X_{i1}-x_{1}}{h_{1}}\right)\times
k\left( \frac{X_{i2}-x_{2}}{h_{2}}\right)\times...\times
k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right)
"""
kertypes = dict(c=ckertype, o=okertype, u=ukertype)
Kval = np.empty(data.shape)
for ii, vtype in enumerate(var_type):
func = kernel_func[kertypes[vtype]]
Kval[:, ii] = func(bw[ii], data[:, ii], data_predict[ii])
iscontinuous = np.array([c == 'c' for c in var_type])
dens = Kval.prod(axis=1) / np.prod(bw[iscontinuous])
if tosum:
return dens.sum(axis=0)
else:
return dens | python | def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
r"""
Returns the non-normalized Generalized Product Kernel Estimator
Parameters
----------
bw: 1-D ndarray
The user-specified bandwidth parameters.
data: 1D or 2-D ndarray
The training data.
data_predict: 1-D ndarray
The evaluation points at which the kernel estimation is performed.
var_type: str, optional
The variable type (continuous, ordered, unordered).
ckertype: str, optional
The kernel used for the continuous variables.
okertype: str, optional
The kernel used for the ordered discrete variables.
ukertype: str, optional
The kernel used for the unordered discrete variables.
tosum : bool, optional
Whether or not to sum the calculated array of densities. Default is
True.
Returns
-------
dens: array-like
The generalized product kernel density estimator.
Notes
-----
The formula for the multivariate kernel estimator for the pdf is:
.. math:: f(x)=\frac{1}{nh_{1}...h_{q}}\sum_{i=1}^
{n}K\left(\frac{X_{i}-x}{h}\right)
where
.. math:: K\left(\frac{X_{i}-x}{h}\right) =
k\left( \frac{X_{i1}-x_{1}}{h_{1}}\right)\times
k\left( \frac{X_{i2}-x_{2}}{h_{2}}\right)\times...\times
k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right)
"""
kertypes = dict(c=ckertype, o=okertype, u=ukertype)
Kval = np.empty(data.shape)
for ii, vtype in enumerate(var_type):
func = kernel_func[kertypes[vtype]]
Kval[:, ii] = func(bw[ii], data[:, ii], data_predict[ii])
iscontinuous = np.array([c == 'c' for c in var_type])
dens = Kval.prod(axis=1) / np.prod(bw[iscontinuous])
if tosum:
return dens.sum(axis=0)
else:
return dens | [
"def",
"gpke",
"(",
"bw",
",",
"data",
",",
"data_predict",
",",
"var_type",
",",
"ckertype",
"=",
"'gaussian'",
",",
"okertype",
"=",
"'wangryzin'",
",",
"ukertype",
"=",
"'aitchisonaitken'",
",",
"tosum",
"=",
"True",
")",
":",
"kertypes",
"=",
"dict",
... | r"""
Returns the non-normalized Generalized Product Kernel Estimator
Parameters
----------
bw: 1-D ndarray
The user-specified bandwidth parameters.
data: 1D or 2-D ndarray
The training data.
data_predict: 1-D ndarray
The evaluation points at which the kernel estimation is performed.
var_type: str, optional
The variable type (continuous, ordered, unordered).
ckertype: str, optional
The kernel used for the continuous variables.
okertype: str, optional
The kernel used for the ordered discrete variables.
ukertype: str, optional
The kernel used for the unordered discrete variables.
tosum : bool, optional
Whether or not to sum the calculated array of densities. Default is
True.
Returns
-------
dens: array-like
The generalized product kernel density estimator.
Notes
-----
The formula for the multivariate kernel estimator for the pdf is:
.. math:: f(x)=\frac{1}{nh_{1}...h_{q}}\sum_{i=1}^
{n}K\left(\frac{X_{i}-x}{h}\right)
where
.. math:: K\left(\frac{X_{i}-x}{h}\right) =
k\left( \frac{X_{i1}-x_{1}}{h_{1}}\right)\times
k\left( \frac{X_{i2}-x_{2}}{h_{2}}\right)\times...\times
k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right) | [
"r",
"Returns",
"the",
"non",
"-",
"normalized",
"Generalized",
"Product",
"Kernel",
"Estimator"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/external/statsmodels/nonparametric/_kernel_base.py#L148-L204 | train | 48,712 |
ZELLMECHANIK-DRESDEN/dclab | dclab/external/statsmodels/nonparametric/_kernel_base.py | GenericKDE._compute_bw | def _compute_bw(self, bw):
"""
Computes the bandwidth of the data.
Parameters
----------
bw: array_like or str
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default values for bw is 'normal_reference'.
"""
if bw is None:
bw = 'normal_reference'
if not isinstance(bw, string_types):
self._bw_method = "user-specified"
res = np.asarray(bw)
else:
# The user specified a bandwidth selection method
self._bw_method = bw
# Workaround to avoid instance methods in __dict__
if bw == 'normal_reference':
bwfunc = self._normal_reference
elif bw == 'cv_ml':
bwfunc = self._cv_ml
else: # bw == 'cv_ls'
bwfunc = self._cv_ls
res = bwfunc()
return res | python | def _compute_bw(self, bw):
"""
Computes the bandwidth of the data.
Parameters
----------
bw: array_like or str
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default values for bw is 'normal_reference'.
"""
if bw is None:
bw = 'normal_reference'
if not isinstance(bw, string_types):
self._bw_method = "user-specified"
res = np.asarray(bw)
else:
# The user specified a bandwidth selection method
self._bw_method = bw
# Workaround to avoid instance methods in __dict__
if bw == 'normal_reference':
bwfunc = self._normal_reference
elif bw == 'cv_ml':
bwfunc = self._cv_ml
else: # bw == 'cv_ls'
bwfunc = self._cv_ls
res = bwfunc()
return res | [
"def",
"_compute_bw",
"(",
"self",
",",
"bw",
")",
":",
"if",
"bw",
"is",
"None",
":",
"bw",
"=",
"'normal_reference'",
"if",
"not",
"isinstance",
"(",
"bw",
",",
"string_types",
")",
":",
"self",
".",
"_bw_method",
"=",
"\"user-specified\"",
"res",
"=",... | Computes the bandwidth of the data.
Parameters
----------
bw: array_like or str
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default values for bw is 'normal_reference'. | [
"Computes",
"the",
"bandwidth",
"of",
"the",
"data",
"."
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/external/statsmodels/nonparametric/_kernel_base.py#L20-L56 | train | 48,713 |
ZELLMECHANIK-DRESDEN/dclab | dclab/external/statsmodels/nonparametric/_kernel_base.py | GenericKDE._set_defaults | def _set_defaults(self, defaults):
"""Sets the default values for the efficient estimation"""
self.n_res = defaults.n_res
self.n_sub = defaults.n_sub
self.randomize = defaults.randomize
self.return_median = defaults.return_median
self.efficient = defaults.efficient
self.return_only_bw = defaults.return_only_bw
self.n_jobs = defaults.n_jobs | python | def _set_defaults(self, defaults):
"""Sets the default values for the efficient estimation"""
self.n_res = defaults.n_res
self.n_sub = defaults.n_sub
self.randomize = defaults.randomize
self.return_median = defaults.return_median
self.efficient = defaults.efficient
self.return_only_bw = defaults.return_only_bw
self.n_jobs = defaults.n_jobs | [
"def",
"_set_defaults",
"(",
"self",
",",
"defaults",
")",
":",
"self",
".",
"n_res",
"=",
"defaults",
".",
"n_res",
"self",
".",
"n_sub",
"=",
"defaults",
".",
"n_sub",
"self",
".",
"randomize",
"=",
"defaults",
".",
"randomize",
"self",
".",
"return_me... | Sets the default values for the efficient estimation | [
"Sets",
"the",
"default",
"values",
"for",
"the",
"efficient",
"estimation"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/external/statsmodels/nonparametric/_kernel_base.py#L58-L66 | train | 48,714 |
mhostetter/nhl | docs/conf.py | get_version | def get_version():
"""Return package version from setup.cfg"""
config = RawConfigParser()
config.read(os.path.join('..', 'setup.cfg'))
return config.get('metadata', 'version') | python | def get_version():
"""Return package version from setup.cfg"""
config = RawConfigParser()
config.read(os.path.join('..', 'setup.cfg'))
return config.get('metadata', 'version') | [
"def",
"get_version",
"(",
")",
":",
"config",
"=",
"RawConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'..'",
",",
"'setup.cfg'",
")",
")",
"return",
"config",
".",
"get",
"(",
"'metadata'",
",",
"'version'",... | Return package version from setup.cfg | [
"Return",
"package",
"version",
"from",
"setup",
".",
"cfg"
] | 32c91cc392826e9de728563d57ab527421734ee1 | https://github.com/mhostetter/nhl/blob/32c91cc392826e9de728563d57ab527421734ee1/docs/conf.py#L27-L31 | train | 48,715 |
openstax/cnx-archive | cnxarchive/sitemap.py | SitemapIndex.to_string | def to_string(self):
"""Convert SitemapIndex into a string."""
root = etree.Element('sitemapindex', nsmap={None: SITEMAP_NS})
for sitemap in self.sitemaps:
sm = etree.SubElement(root, 'sitemap')
etree.SubElement(sm, 'loc').text = sitemap.url
if hasattr(sitemap.lastmod, 'strftime'):
etree.SubElement(sm, 'lastmod').text = \
sitemap.lastmod.strftime('%Y-%m-%d')
elif isinstance(sitemap.lastmod, str):
etree.SubElement(sm, 'lastmod').text = sitemap.lastmod
return etree.tostring(root, pretty_print=True, xml_declaration=True,
encoding='utf-8') | python | def to_string(self):
"""Convert SitemapIndex into a string."""
root = etree.Element('sitemapindex', nsmap={None: SITEMAP_NS})
for sitemap in self.sitemaps:
sm = etree.SubElement(root, 'sitemap')
etree.SubElement(sm, 'loc').text = sitemap.url
if hasattr(sitemap.lastmod, 'strftime'):
etree.SubElement(sm, 'lastmod').text = \
sitemap.lastmod.strftime('%Y-%m-%d')
elif isinstance(sitemap.lastmod, str):
etree.SubElement(sm, 'lastmod').text = sitemap.lastmod
return etree.tostring(root, pretty_print=True, xml_declaration=True,
encoding='utf-8') | [
"def",
"to_string",
"(",
"self",
")",
":",
"root",
"=",
"etree",
".",
"Element",
"(",
"'sitemapindex'",
",",
"nsmap",
"=",
"{",
"None",
":",
"SITEMAP_NS",
"}",
")",
"for",
"sitemap",
"in",
"self",
".",
"sitemaps",
":",
"sm",
"=",
"etree",
".",
"SubEl... | Convert SitemapIndex into a string. | [
"Convert",
"SitemapIndex",
"into",
"a",
"string",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/sitemap.py#L37-L49 | train | 48,716 |
openstax/cnx-archive | cnxarchive/sitemap.py | Sitemap.add_url | def add_url(self, *args, **kwargs):
"""Add a new url to the sitemap.
This function can either be called with a :class:`UrlEntry`
or some keyword and positional arguments that are forwarded to
the :class:`UrlEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], UrlEntry):
self.urls.append(args[0])
else:
self.urls.append(UrlEntry(*args, **kwargs)) | python | def add_url(self, *args, **kwargs):
"""Add a new url to the sitemap.
This function can either be called with a :class:`UrlEntry`
or some keyword and positional arguments that are forwarded to
the :class:`UrlEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], UrlEntry):
self.urls.append(args[0])
else:
self.urls.append(UrlEntry(*args, **kwargs)) | [
"def",
"add_url",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"not",
"kwargs",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"UrlEntry",
")",
":",
"self",
".",
"urls... | Add a new url to the sitemap.
This function can either be called with a :class:`UrlEntry`
or some keyword and positional arguments that are forwarded to
the :class:`UrlEntry` constructor. | [
"Add",
"a",
"new",
"url",
"to",
"the",
"sitemap",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/sitemap.py#L74-L84 | train | 48,717 |
openstax/cnx-archive | cnxarchive/sitemap.py | Sitemap.to_string | def to_string(self):
"""Convert the sitemap into a string."""
root = etree.Element('urlset', nsmap={None: SITEMAP_NS})
for url in self.urls:
url.generate(root)
return etree.tostring(root, pretty_print=True, xml_declaration=True,
encoding='utf-8') | python | def to_string(self):
"""Convert the sitemap into a string."""
root = etree.Element('urlset', nsmap={None: SITEMAP_NS})
for url in self.urls:
url.generate(root)
return etree.tostring(root, pretty_print=True, xml_declaration=True,
encoding='utf-8') | [
"def",
"to_string",
"(",
"self",
")",
":",
"root",
"=",
"etree",
".",
"Element",
"(",
"'urlset'",
",",
"nsmap",
"=",
"{",
"None",
":",
"SITEMAP_NS",
"}",
")",
"for",
"url",
"in",
"self",
".",
"urls",
":",
"url",
".",
"generate",
"(",
"root",
")",
... | Convert the sitemap into a string. | [
"Convert",
"the",
"sitemap",
"into",
"a",
"string",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/sitemap.py#L93-L99 | train | 48,718 |
openstax/cnx-archive | cnxarchive/views/sitemap.py | notblocked | def notblocked(page):
"""Determine if given url is a page that should be in sitemap."""
for blocked in PAGES_TO_BLOCK:
if blocked[0] != '*':
blocked = '*' + blocked
rx = re.compile(blocked.replace('*', '[^$]*'))
if rx.match(page):
return False
return True | python | def notblocked(page):
"""Determine if given url is a page that should be in sitemap."""
for blocked in PAGES_TO_BLOCK:
if blocked[0] != '*':
blocked = '*' + blocked
rx = re.compile(blocked.replace('*', '[^$]*'))
if rx.match(page):
return False
return True | [
"def",
"notblocked",
"(",
"page",
")",
":",
"for",
"blocked",
"in",
"PAGES_TO_BLOCK",
":",
"if",
"blocked",
"[",
"0",
"]",
"!=",
"'*'",
":",
"blocked",
"=",
"'*'",
"+",
"blocked",
"rx",
"=",
"re",
".",
"compile",
"(",
"blocked",
".",
"replace",
"(",
... | Determine if given url is a page that should be in sitemap. | [
"Determine",
"if",
"given",
"url",
"is",
"a",
"page",
"that",
"should",
"be",
"in",
"sitemap",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/sitemap.py#L39-L47 | train | 48,719 |
openstax/cnx-archive | cnxarchive/views/sitemap.py | sitemap_index | def sitemap_index(request):
"""Return a sitemap index xml file for search engines."""
sitemaps = []
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute("""\
SELECT authors[1], max(revised)
FROM latest_modules
WHERE portal_type NOT IN ('CompositeModule', 'SubCollection')
GROUP BY authors[1]
""")
for author, revised in cursor.fetchall():
sitemaps.append(Sitemap(url=request.route_url(
'sitemap', from_id=author),
lastmod=revised))
si = SitemapIndex(sitemaps=sitemaps)
resp = request.response
resp.status = '200 OK'
resp.content_type = 'text/xml'
resp.body = si()
return resp | python | def sitemap_index(request):
"""Return a sitemap index xml file for search engines."""
sitemaps = []
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute("""\
SELECT authors[1], max(revised)
FROM latest_modules
WHERE portal_type NOT IN ('CompositeModule', 'SubCollection')
GROUP BY authors[1]
""")
for author, revised in cursor.fetchall():
sitemaps.append(Sitemap(url=request.route_url(
'sitemap', from_id=author),
lastmod=revised))
si = SitemapIndex(sitemaps=sitemaps)
resp = request.response
resp.status = '200 OK'
resp.content_type = 'text/xml'
resp.body = si()
return resp | [
"def",
"sitemap_index",
"(",
"request",
")",
":",
"sitemaps",
"=",
"[",
"]",
"with",
"db_connect",
"(",
")",
"as",
"db_connection",
":",
"with",
"db_connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\n ... | Return a sitemap index xml file for search engines. | [
"Return",
"a",
"sitemap",
"index",
"xml",
"file",
"for",
"search",
"engines",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/sitemap.py#L101-L123 | train | 48,720 |
ZELLMECHANIK-DRESDEN/dclab | dclab/features/volume.py | get_volume | def get_volume(cont, pos_x, pos_y, pix):
"""Calculate the volume of a polygon revolved around an axis
The volume estimation assumes rotational symmetry.
Green`s theorem and the Gaussian divergence theorem allow to
formulate the volume as a line integral.
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event [px]
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
pos_x: float or ndarray of length N
The x coordinate(s) of the centroid of the event(s) [µm]
e.g. obtained using `mm.pos_x`
pos_y: float or ndarray of length N
The y coordinate(s) of the centroid of the event(s) [µm]
e.g. obtained using `mm.pos_y`
px_um: float
The detector pixel size in µm.
e.g. obtained using: `mm.config["image"]["pix size"]`
Returns
-------
volume: float or ndarray
volume in um^3
Notes
-----
The computation of the volume is based on a full rotation of the
upper and the lower halves of the contour from which the
average is then used.
The volume is computed radially from the the center position
given by (`pos_x`, `pos_y`). For sufficiently smooth contours,
such as densely sampled ellipses, the center position does not
play an important role. For contours that are given on a coarse
grid, as is the case for RT-DC, the center position must be
given.
References
----------
- Halpern et al. :cite:`Halpern2002`, chapter 5, Section 5.4
- This is a translation from a `Matlab script
<http://de.mathworks.com/matlabcentral/fileexchange/36525-volrevolve>`_
by Geoff Olynyk.
"""
if np.isscalar(pos_x):
cont = [cont]
ret_list = False
else:
ret_list = True
# Convert input to 1D arrays
pos_x = np.atleast_1d(pos_x)
pos_y = np.atleast_1d(pos_y)
if pos_x.size != pos_y.size:
raise ValueError("Size of `pos_x` and `pos_y` must match!")
if pos_x.size > 1 and len(cont) <= 1:
raise ValueError("Number of given contours too small!")
# results are stored in a separate array initialized with nans
v_avg = np.zeros_like(pos_x, dtype=float)*np.nan
# v_avg has the shape of `pos_x`. We are iterating over the smallest
# length for `cont` and `pos_x`.
for ii in range(min(len(cont), pos_x.shape[0])):
# If the contour has less than 4 pixels, the computation will fail.
# In that case, the value np.nan is already assigned.
cc = cont[ii]
if cc.shape[0] >= 4:
# Center contour coordinates with given centroid
contour_x = cc[:, 0] - pos_x[ii] / pix
contour_y = cc[:, 1] - pos_y[ii] / pix
# Make sure contour is counter-clockwise
contour_x, contour_y = counter_clockwise(contour_x, contour_y)
# Which points are below the x-axis? (y<0)?
ind_low = np.where(contour_y < 0)
# These points will be shifted up to y=0 to build an x-axis
# (wont contribute to lower volume).
contour_y_low = np.copy(contour_y)
contour_y_low[ind_low] = 0
# Which points are above the x-axis? (y>0)?
ind_upp = np.where(contour_y > 0)
# These points will be shifted down to y=0 to build an x-axis
# (wont contribute to upper volume).
contour_y_upp = np.copy(contour_y)
contour_y_upp[ind_upp] = 0
# Move the contour to the left
Z = contour_x
# Last point of the contour has to overlap with the first point
Z = np.hstack([Z, Z[0]])
Zp = Z[0:-1]
dZ = Z[1:]-Zp
# Last point of the contour has to overlap with the first point
contour_y_low = np.hstack([contour_y_low, contour_y_low[0]])
contour_y_upp = np.hstack([contour_y_upp, contour_y_upp[0]])
vol_low = _vol_helper(contour_y_low, Z, Zp, dZ, pix)
vol_upp = _vol_helper(contour_y_upp, Z, Zp, dZ, pix)
v_avg[ii] = (vol_low + vol_upp) / 2
if not ret_list:
# Do not return a list if the input contour was not in a list
v_avg = v_avg[0]
return v_avg | python | def get_volume(cont, pos_x, pos_y, pix):
"""Calculate the volume of a polygon revolved around an axis
The volume estimation assumes rotational symmetry.
Green`s theorem and the Gaussian divergence theorem allow to
formulate the volume as a line integral.
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event [px]
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
pos_x: float or ndarray of length N
The x coordinate(s) of the centroid of the event(s) [µm]
e.g. obtained using `mm.pos_x`
pos_y: float or ndarray of length N
The y coordinate(s) of the centroid of the event(s) [µm]
e.g. obtained using `mm.pos_y`
px_um: float
The detector pixel size in µm.
e.g. obtained using: `mm.config["image"]["pix size"]`
Returns
-------
volume: float or ndarray
volume in um^3
Notes
-----
The computation of the volume is based on a full rotation of the
upper and the lower halves of the contour from which the
average is then used.
The volume is computed radially from the the center position
given by (`pos_x`, `pos_y`). For sufficiently smooth contours,
such as densely sampled ellipses, the center position does not
play an important role. For contours that are given on a coarse
grid, as is the case for RT-DC, the center position must be
given.
References
----------
- Halpern et al. :cite:`Halpern2002`, chapter 5, Section 5.4
- This is a translation from a `Matlab script
<http://de.mathworks.com/matlabcentral/fileexchange/36525-volrevolve>`_
by Geoff Olynyk.
"""
if np.isscalar(pos_x):
cont = [cont]
ret_list = False
else:
ret_list = True
# Convert input to 1D arrays
pos_x = np.atleast_1d(pos_x)
pos_y = np.atleast_1d(pos_y)
if pos_x.size != pos_y.size:
raise ValueError("Size of `pos_x` and `pos_y` must match!")
if pos_x.size > 1 and len(cont) <= 1:
raise ValueError("Number of given contours too small!")
# results are stored in a separate array initialized with nans
v_avg = np.zeros_like(pos_x, dtype=float)*np.nan
# v_avg has the shape of `pos_x`. We are iterating over the smallest
# length for `cont` and `pos_x`.
for ii in range(min(len(cont), pos_x.shape[0])):
# If the contour has less than 4 pixels, the computation will fail.
# In that case, the value np.nan is already assigned.
cc = cont[ii]
if cc.shape[0] >= 4:
# Center contour coordinates with given centroid
contour_x = cc[:, 0] - pos_x[ii] / pix
contour_y = cc[:, 1] - pos_y[ii] / pix
# Make sure contour is counter-clockwise
contour_x, contour_y = counter_clockwise(contour_x, contour_y)
# Which points are below the x-axis? (y<0)?
ind_low = np.where(contour_y < 0)
# These points will be shifted up to y=0 to build an x-axis
# (wont contribute to lower volume).
contour_y_low = np.copy(contour_y)
contour_y_low[ind_low] = 0
# Which points are above the x-axis? (y>0)?
ind_upp = np.where(contour_y > 0)
# These points will be shifted down to y=0 to build an x-axis
# (wont contribute to upper volume).
contour_y_upp = np.copy(contour_y)
contour_y_upp[ind_upp] = 0
# Move the contour to the left
Z = contour_x
# Last point of the contour has to overlap with the first point
Z = np.hstack([Z, Z[0]])
Zp = Z[0:-1]
dZ = Z[1:]-Zp
# Last point of the contour has to overlap with the first point
contour_y_low = np.hstack([contour_y_low, contour_y_low[0]])
contour_y_upp = np.hstack([contour_y_upp, contour_y_upp[0]])
vol_low = _vol_helper(contour_y_low, Z, Zp, dZ, pix)
vol_upp = _vol_helper(contour_y_upp, Z, Zp, dZ, pix)
v_avg[ii] = (vol_low + vol_upp) / 2
if not ret_list:
# Do not return a list if the input contour was not in a list
v_avg = v_avg[0]
return v_avg | [
"def",
"get_volume",
"(",
"cont",
",",
"pos_x",
",",
"pos_y",
",",
"pix",
")",
":",
"if",
"np",
".",
"isscalar",
"(",
"pos_x",
")",
":",
"cont",
"=",
"[",
"cont",
"]",
"ret_list",
"=",
"False",
"else",
":",
"ret_list",
"=",
"True",
"# Convert input t... | Calculate the volume of a polygon revolved around an axis
The volume estimation assumes rotational symmetry.
Green`s theorem and the Gaussian divergence theorem allow to
formulate the volume as a line integral.
Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
A 2D array that holds the contour of an event [px]
e.g. obtained using `mm.contour` where `mm` is an instance
of `RTDCBase`. The first and second columns of `cont`
correspond to the x- and y-coordinates of the contour.
pos_x: float or ndarray of length N
The x coordinate(s) of the centroid of the event(s) [µm]
e.g. obtained using `mm.pos_x`
pos_y: float or ndarray of length N
The y coordinate(s) of the centroid of the event(s) [µm]
e.g. obtained using `mm.pos_y`
px_um: float
The detector pixel size in µm.
e.g. obtained using: `mm.config["image"]["pix size"]`
Returns
-------
volume: float or ndarray
volume in um^3
Notes
-----
The computation of the volume is based on a full rotation of the
upper and the lower halves of the contour from which the
average is then used.
The volume is computed radially from the the center position
given by (`pos_x`, `pos_y`). For sufficiently smooth contours,
such as densely sampled ellipses, the center position does not
play an important role. For contours that are given on a coarse
grid, as is the case for RT-DC, the center position must be
given.
References
----------
- Halpern et al. :cite:`Halpern2002`, chapter 5, Section 5.4
- This is a translation from a `Matlab script
<http://de.mathworks.com/matlabcentral/fileexchange/36525-volrevolve>`_
by Geoff Olynyk. | [
"Calculate",
"the",
"volume",
"of",
"a",
"polygon",
"revolved",
"around",
"an",
"axis"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/volume.py#L9-L121 | train | 48,721 |
ZELLMECHANIK-DRESDEN/dclab | dclab/features/volume.py | counter_clockwise | def counter_clockwise(cx, cy):
"""Put contour coordinates into counter-clockwise order
Parameters
----------
cx, cy: 1d ndarrays
The x- and y-coordinates of the contour
Returns
-------
cx_cc, cy_cc:
The x- and y-coordinates of the contour in
counter-clockwise orientation.
"""
# test orientation
angles = np.unwrap(np.arctan2(cy, cx))
grad = np.gradient(angles)
if np.average(grad) > 0:
return cx[::-1], cy[::-1]
else:
return cx, cy | python | def counter_clockwise(cx, cy):
"""Put contour coordinates into counter-clockwise order
Parameters
----------
cx, cy: 1d ndarrays
The x- and y-coordinates of the contour
Returns
-------
cx_cc, cy_cc:
The x- and y-coordinates of the contour in
counter-clockwise orientation.
"""
# test orientation
angles = np.unwrap(np.arctan2(cy, cx))
grad = np.gradient(angles)
if np.average(grad) > 0:
return cx[::-1], cy[::-1]
else:
return cx, cy | [
"def",
"counter_clockwise",
"(",
"cx",
",",
"cy",
")",
":",
"# test orientation",
"angles",
"=",
"np",
".",
"unwrap",
"(",
"np",
".",
"arctan2",
"(",
"cy",
",",
"cx",
")",
")",
"grad",
"=",
"np",
".",
"gradient",
"(",
"angles",
")",
"if",
"np",
"."... | Put contour coordinates into counter-clockwise order
Parameters
----------
cx, cy: 1d ndarrays
The x- and y-coordinates of the contour
Returns
-------
cx_cc, cy_cc:
The x- and y-coordinates of the contour in
counter-clockwise orientation. | [
"Put",
"contour",
"coordinates",
"into",
"counter",
"-",
"clockwise",
"order"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/volume.py#L124-L144 | train | 48,722 |
openstax/cnx-archive | cnxarchive/views/extras.py | extras | def extras(request):
"""Return a dict with archive metadata for webview."""
key = request.matchdict.get('key', '').lstrip('/')
key_map = {
'languages': _get_available_languages_and_count,
'subjects': _get_subject_list,
'featured': _get_featured_links,
'messages': _get_service_state_messages,
'licenses': _get_licenses
}
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
if key:
proc = key_map[key]
metadata = {key: proc(cursor)}
else:
metadata = {key: proc(cursor)
for (key, proc) in key_map.items()}
resp = request.response
resp.status = '200 OK'
resp.content_type = 'application/json'
resp.body = json.dumps(metadata)
return resp | python | def extras(request):
"""Return a dict with archive metadata for webview."""
key = request.matchdict.get('key', '').lstrip('/')
key_map = {
'languages': _get_available_languages_and_count,
'subjects': _get_subject_list,
'featured': _get_featured_links,
'messages': _get_service_state_messages,
'licenses': _get_licenses
}
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
if key:
proc = key_map[key]
metadata = {key: proc(cursor)}
else:
metadata = {key: proc(cursor)
for (key, proc) in key_map.items()}
resp = request.response
resp.status = '200 OK'
resp.content_type = 'application/json'
resp.body = json.dumps(metadata)
return resp | [
"def",
"extras",
"(",
"request",
")",
":",
"key",
"=",
"request",
".",
"matchdict",
".",
"get",
"(",
"'key'",
",",
"''",
")",
".",
"lstrip",
"(",
"'/'",
")",
"key_map",
"=",
"{",
"'languages'",
":",
"_get_available_languages_and_count",
",",
"'subjects'",
... | Return a dict with archive metadata for webview. | [
"Return",
"a",
"dict",
"with",
"archive",
"metadata",
"for",
"webview",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/extras.py#L86-L110 | train | 48,723 |
xenon-middleware/pyxenon | examples/timeout.py | timeout | def timeout(delay, call, *args, **kwargs):
"""Run a function call for `delay` seconds, and raise a RuntimeError
if the operation didn't complete."""
return_value = None
def target():
nonlocal return_value
return_value = call(*args, **kwargs)
t = Thread(target=target)
t.start()
t.join(delay)
if t.is_alive():
raise RuntimeError("Operation did not complete within time.")
return return_value | python | def timeout(delay, call, *args, **kwargs):
"""Run a function call for `delay` seconds, and raise a RuntimeError
if the operation didn't complete."""
return_value = None
def target():
nonlocal return_value
return_value = call(*args, **kwargs)
t = Thread(target=target)
t.start()
t.join(delay)
if t.is_alive():
raise RuntimeError("Operation did not complete within time.")
return return_value | [
"def",
"timeout",
"(",
"delay",
",",
"call",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return_value",
"=",
"None",
"def",
"target",
"(",
")",
":",
"nonlocal",
"return_value",
"return_value",
"=",
"call",
"(",
"*",
"args",
",",
"*",
"*",
... | Run a function call for `delay` seconds, and raise a RuntimeError
if the operation didn't complete. | [
"Run",
"a",
"function",
"call",
"for",
"delay",
"seconds",
"and",
"raise",
"a",
"RuntimeError",
"if",
"the",
"operation",
"didn",
"t",
"complete",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/examples/timeout.py#L4-L19 | train | 48,724 |
openstax/cnx-archive | cnxarchive/scripts/_utils.py | create_parser | def create_parser(name, description=None):
"""Create an argument parser with the given ``name`` and ``description``.
The name is used to make ``cnx-archive-<name>`` program name.
This creates and returns a parser with
the ``config_uri`` argument declared.
"""
prog = _gen_prog_name(name)
parser = argparse.ArgumentParser(prog=prog, description=description)
parser.add_argument('config_uri', help="Configuration INI file.")
parser.add_argument('--config-name',
action='store',
default='main',
help="Supply a section name in the configuration")
return parser | python | def create_parser(name, description=None):
"""Create an argument parser with the given ``name`` and ``description``.
The name is used to make ``cnx-archive-<name>`` program name.
This creates and returns a parser with
the ``config_uri`` argument declared.
"""
prog = _gen_prog_name(name)
parser = argparse.ArgumentParser(prog=prog, description=description)
parser.add_argument('config_uri', help="Configuration INI file.")
parser.add_argument('--config-name',
action='store',
default='main',
help="Supply a section name in the configuration")
return parser | [
"def",
"create_parser",
"(",
"name",
",",
"description",
"=",
"None",
")",
":",
"prog",
"=",
"_gen_prog_name",
"(",
"name",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"prog",
",",
"description",
"=",
"description",
")",
"parse... | Create an argument parser with the given ``name`` and ``description``.
The name is used to make ``cnx-archive-<name>`` program name.
This creates and returns a parser with
the ``config_uri`` argument declared. | [
"Create",
"an",
"argument",
"parser",
"with",
"the",
"given",
"name",
"and",
"description",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/_utils.py#L28-L42 | train | 48,725 |
openstax/cnx-archive | cnxarchive/scripts/_utils.py | get_app_settings_from_arguments | def get_app_settings_from_arguments(args):
"""Parse ``argparse`` style arguments into app settings.
Given an ``argparse`` set of arguments as ``args``
parse the arguments to return the application settings.
This assumes the parser was created using ``create_parser``.
"""
config_filepath = os.path.abspath(args.config_uri)
return get_appsettings(config_filepath, name=args.config_name) | python | def get_app_settings_from_arguments(args):
"""Parse ``argparse`` style arguments into app settings.
Given an ``argparse`` set of arguments as ``args``
parse the arguments to return the application settings.
This assumes the parser was created using ``create_parser``.
"""
config_filepath = os.path.abspath(args.config_uri)
return get_appsettings(config_filepath, name=args.config_name) | [
"def",
"get_app_settings_from_arguments",
"(",
"args",
")",
":",
"config_filepath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"config_uri",
")",
"return",
"get_appsettings",
"(",
"config_filepath",
",",
"name",
"=",
"args",
".",
"config_name",
... | Parse ``argparse`` style arguments into app settings.
Given an ``argparse`` set of arguments as ``args``
parse the arguments to return the application settings.
This assumes the parser was created using ``create_parser``. | [
"Parse",
"argparse",
"style",
"arguments",
"into",
"app",
"settings",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/_utils.py#L45-L53 | train | 48,726 |
xenon-middleware/pyxenon | xenon/server.py | check_socket | def check_socket(host, port):
"""Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return sock.connect_ex((host, port)) == 0 | python | def check_socket(host, port):
"""Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return sock.connect_ex((host, port)) == 0 | [
"def",
"check_socket",
"(",
"host",
",",
"port",
")",
":",
"with",
"closing",
"(",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
")",
"as",
"sock",
":",
"return",
"sock",
".",
"connect_ex",
"(",
"(",
... | Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running. | [
"Checks",
"if",
"port",
"is",
"open",
"on",
"host",
".",
"This",
"is",
"used",
"to",
"check",
"if",
"the",
"Xenon",
"-",
"GRPC",
"server",
"is",
"running",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L19-L23 | train | 48,727 |
xenon-middleware/pyxenon | xenon/server.py | get_secure_channel | def get_secure_channel(crt_file, key_file, port=50051):
"""Try to connect over a secure channel."""
creds = grpc.ssl_channel_credentials(
root_certificates=open(str(crt_file), 'rb').read(),
private_key=open(str(key_file), 'rb').read(),
certificate_chain=open(str(crt_file), 'rb').read())
address = "{}:{}".format(socket.gethostname(), port)
channel = grpc.secure_channel(address, creds)
return channel | python | def get_secure_channel(crt_file, key_file, port=50051):
"""Try to connect over a secure channel."""
creds = grpc.ssl_channel_credentials(
root_certificates=open(str(crt_file), 'rb').read(),
private_key=open(str(key_file), 'rb').read(),
certificate_chain=open(str(crt_file), 'rb').read())
address = "{}:{}".format(socket.gethostname(), port)
channel = grpc.secure_channel(address, creds)
return channel | [
"def",
"get_secure_channel",
"(",
"crt_file",
",",
"key_file",
",",
"port",
"=",
"50051",
")",
":",
"creds",
"=",
"grpc",
".",
"ssl_channel_credentials",
"(",
"root_certificates",
"=",
"open",
"(",
"str",
"(",
"crt_file",
")",
",",
"'rb'",
")",
".",
"read"... | Try to connect over a secure channel. | [
"Try",
"to",
"connect",
"over",
"a",
"secure",
"channel",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L26-L36 | train | 48,728 |
xenon-middleware/pyxenon | xenon/server.py | find_free_port | def find_free_port():
"""Finds a free port."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
return sock.getsockname()[1] | python | def find_free_port():
"""Finds a free port."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
return sock.getsockname()[1] | [
"def",
"find_free_port",
"(",
")",
":",
"with",
"closing",
"(",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
")",
"as",
"sock",
":",
"sock",
".",
"bind",
"(",
"(",
"''",
",",
"0",
")",
")",
"return... | Finds a free port. | [
"Finds",
"a",
"free",
"port",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L39-L43 | train | 48,729 |
xenon-middleware/pyxenon | xenon/server.py | print_stream | def print_stream(file, name):
"""Print stream from file to logger."""
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip())) | python | def print_stream(file, name):
"""Print stream from file to logger."""
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip())) | [
"def",
"print_stream",
"(",
"file",
",",
"name",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'xenon.{}'",
".",
"format",
"(",
"name",
")",
")",
"for",
"line",
"in",
"file",
":",
"logger",
".",
"info",
"(",
"'[{}] {}'",
".",
"format",
... | Print stream from file to logger. | [
"Print",
"stream",
"from",
"file",
"to",
"logger",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L46-L50 | train | 48,730 |
xenon-middleware/pyxenon | xenon/server.py | init | def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'):
"""Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening."""
logger = logging.getLogger('xenon')
logger.setLevel(logging.INFO)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(logging.Formatter(style='{'))
logger_handler.setLevel(getattr(logging, log_level))
logger.addHandler(logger_handler)
if port is None:
port = find_free_port()
if __server__.process is not None:
logger.warning(
"You tried to run init(), but the server is already running.")
return __server__
__server__.port = port
__server__.disable_tls = disable_tls
__server__.__enter__()
if not do_not_exit:
atexit.register(__server__.__exit__, None, None, None)
return __server__ | python | def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'):
"""Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening."""
logger = logging.getLogger('xenon')
logger.setLevel(logging.INFO)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(logging.Formatter(style='{'))
logger_handler.setLevel(getattr(logging, log_level))
logger.addHandler(logger_handler)
if port is None:
port = find_free_port()
if __server__.process is not None:
logger.warning(
"You tried to run init(), but the server is already running.")
return __server__
__server__.port = port
__server__.disable_tls = disable_tls
__server__.__enter__()
if not do_not_exit:
atexit.register(__server__.__exit__, None, None, None)
return __server__ | [
"def",
"init",
"(",
"port",
"=",
"None",
",",
"do_not_exit",
"=",
"False",
",",
"disable_tls",
"=",
"False",
",",
"log_level",
"=",
"'WARNING'",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'xenon'",
")",
"logger",
".",
"setLevel",
"(",
... | Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening. | [
"Start",
"the",
"Xenon",
"GRPC",
"server",
"on",
"the",
"specified",
"port",
"or",
"if",
"a",
"service",
"is",
"already",
"running",
"on",
"that",
"port",
"connect",
"to",
"that",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L116-L151 | train | 48,731 |
openstax/cnx-archive | cnxarchive/events.py | add_cors_headers | def add_cors_headers(request, response):
"""Add cors headers needed for web app implementation."""
response.headerlist.append(('Access-Control-Allow-Origin', '*'))
response.headerlist.append(
('Access-Control-Allow-Methods', 'GET, OPTIONS'))
response.headerlist.append(
('Access-Control-Allow-Headers',
','.join(DEFAULT_ACCESS_CONTROL_ALLOW_HEADERS))) | python | def add_cors_headers(request, response):
"""Add cors headers needed for web app implementation."""
response.headerlist.append(('Access-Control-Allow-Origin', '*'))
response.headerlist.append(
('Access-Control-Allow-Methods', 'GET, OPTIONS'))
response.headerlist.append(
('Access-Control-Allow-Headers',
','.join(DEFAULT_ACCESS_CONTROL_ALLOW_HEADERS))) | [
"def",
"add_cors_headers",
"(",
"request",
",",
"response",
")",
":",
"response",
".",
"headerlist",
".",
"append",
"(",
"(",
"'Access-Control-Allow-Origin'",
",",
"'*'",
")",
")",
"response",
".",
"headerlist",
".",
"append",
"(",
"(",
"'Access-Control-Allow-Me... | Add cors headers needed for web app implementation. | [
"Add",
"cors",
"headers",
"needed",
"for",
"web",
"app",
"implementation",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/events.py#L15-L22 | train | 48,732 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/event_trace.py | TraceColumn.trace | def trace(self):
"""Initializes the trace data"""
if self._trace is None:
self._trace = self.load_trace(self.mname)
return self._trace | python | def trace(self):
"""Initializes the trace data"""
if self._trace is None:
self._trace = self.load_trace(self.mname)
return self._trace | [
"def",
"trace",
"(",
"self",
")",
":",
"if",
"self",
".",
"_trace",
"is",
"None",
":",
"self",
".",
"_trace",
"=",
"self",
".",
"load_trace",
"(",
"self",
".",
"mname",
")",
"return",
"self",
".",
"_trace"
] | Initializes the trace data | [
"Initializes",
"the",
"trace",
"data"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_trace.py#L58-L62 | train | 48,733 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/event_trace.py | TraceColumn.load_trace | def load_trace(mname):
"""Loads the traces and returns them as a dictionary
Currently, only loading traces from tdms files is supported.
This forces us to load the full tdms file into memory which
takes some time.
"""
tname = TraceColumn.find_trace_file(mname)
# Initialize empty trace dictionary
trace = {}
if tname is None:
pass
elif tname.suffix == ".tdms":
# Again load the measurement tdms file.
# This might increase memory usage, but it is cleaner
# when looking at code structure.
mdata = TdmsFile(str(mname))
sampleids = mdata.object("Cell Track", "FL1index").data
# Load the trace data. The traces file is usually larger than the
# measurement file.
tdata = TdmsFile(str(tname))
for trace_key in dfn.FLUOR_TRACES:
group, ch = naming.tr_data_map[trace_key]
try:
trdat = tdata.object(group, ch).data
except KeyError:
pass
else:
if trdat is not None and trdat.size != 0:
# Only add trace if there is actual data.
# Split only needs the position of the sections,
# so we remove the first (0) index.
trace[trace_key] = np.split(trdat, sampleids[1:])
return trace | python | def load_trace(mname):
"""Loads the traces and returns them as a dictionary
Currently, only loading traces from tdms files is supported.
This forces us to load the full tdms file into memory which
takes some time.
"""
tname = TraceColumn.find_trace_file(mname)
# Initialize empty trace dictionary
trace = {}
if tname is None:
pass
elif tname.suffix == ".tdms":
# Again load the measurement tdms file.
# This might increase memory usage, but it is cleaner
# when looking at code structure.
mdata = TdmsFile(str(mname))
sampleids = mdata.object("Cell Track", "FL1index").data
# Load the trace data. The traces file is usually larger than the
# measurement file.
tdata = TdmsFile(str(tname))
for trace_key in dfn.FLUOR_TRACES:
group, ch = naming.tr_data_map[trace_key]
try:
trdat = tdata.object(group, ch).data
except KeyError:
pass
else:
if trdat is not None and trdat.size != 0:
# Only add trace if there is actual data.
# Split only needs the position of the sections,
# so we remove the first (0) index.
trace[trace_key] = np.split(trdat, sampleids[1:])
return trace | [
"def",
"load_trace",
"(",
"mname",
")",
":",
"tname",
"=",
"TraceColumn",
".",
"find_trace_file",
"(",
"mname",
")",
"# Initialize empty trace dictionary",
"trace",
"=",
"{",
"}",
"if",
"tname",
"is",
"None",
":",
"pass",
"elif",
"tname",
".",
"suffix",
"=="... | Loads the traces and returns them as a dictionary
Currently, only loading traces from tdms files is supported.
This forces us to load the full tdms file into memory which
takes some time. | [
"Loads",
"the",
"traces",
"and",
"returns",
"them",
"as",
"a",
"dictionary"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_trace.py#L65-L101 | train | 48,734 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/event_trace.py | TraceColumn.find_trace_file | def find_trace_file(mname):
"""Tries to find the traces tdms file name
Returns None if no trace file is found.
"""
mname = pathlib.Path(mname)
tname = None
if mname.exists():
cand = mname.with_name(mname.name[:-5] + "_traces.tdms")
if cand.exists():
tname = cand
return tname | python | def find_trace_file(mname):
"""Tries to find the traces tdms file name
Returns None if no trace file is found.
"""
mname = pathlib.Path(mname)
tname = None
if mname.exists():
cand = mname.with_name(mname.name[:-5] + "_traces.tdms")
if cand.exists():
tname = cand
return tname | [
"def",
"find_trace_file",
"(",
"mname",
")",
":",
"mname",
"=",
"pathlib",
".",
"Path",
"(",
"mname",
")",
"tname",
"=",
"None",
"if",
"mname",
".",
"exists",
"(",
")",
":",
"cand",
"=",
"mname",
".",
"with_name",
"(",
"mname",
".",
"name",
"[",
":... | Tries to find the traces tdms file name
Returns None if no trace file is found. | [
"Tries",
"to",
"find",
"the",
"traces",
"tdms",
"file",
"name"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_trace.py#L104-L117 | train | 48,735 |
Fischerfredl/get-docker-secret | get_docker_secret.py | get_docker_secret | def get_docker_secret(name, default=None, cast_to=str, autocast_name=True, getenv=True, safe=True,
secrets_dir=os.path.join(root, 'var', 'run', 'secrets')):
"""This function fetches a docker secret
:param name: the name of the docker secret
:param default: the default value if no secret found
:param cast_to: casts the value to the given type
:param autocast_name: whether the name should be lowercase for secrets and upper case for environment
:param getenv: if environment variable should be fetched as fallback
:param safe: Whether the function should raise exceptions
:param secrets_dir: the directory where the secrets are stored
:returns: docker secret or environment variable depending on params
:raises TypeError: if cast fails due to wrong type (None)
:raises ValueError: if casts fails due to Value
"""
# cast name if autocast enabled
name_secret = name.lower() if autocast_name else name
name_env = name.upper() if autocast_name else name
# initiallize value
value = None
# try to read from secret file
try:
with open(os.path.join(secrets_dir, name_secret), 'r') as secret_file:
value = secret_file.read()
except IOError as e:
# try to read from env if enabled
if getenv:
value = os.environ.get(name_env)
# set default value if no value found
if value is None:
value = default
# try to cast
try:
# so None wont be cast to 'None'
if value is None:
raise TypeError('value is None')
# special case bool
if cast_to == bool:
if value not in ('True', 'true', 'False', 'false'):
raise ValueError('value %s not of type bool' % value)
value = 1 if value in ('True', 'true') else 0
# try to cast
return cast_to(value)
except (TypeError, ValueError) as e:
# whether exception should be thrown
if safe:
return default
raise e | python | def get_docker_secret(name, default=None, cast_to=str, autocast_name=True, getenv=True, safe=True,
secrets_dir=os.path.join(root, 'var', 'run', 'secrets')):
"""This function fetches a docker secret
:param name: the name of the docker secret
:param default: the default value if no secret found
:param cast_to: casts the value to the given type
:param autocast_name: whether the name should be lowercase for secrets and upper case for environment
:param getenv: if environment variable should be fetched as fallback
:param safe: Whether the function should raise exceptions
:param secrets_dir: the directory where the secrets are stored
:returns: docker secret or environment variable depending on params
:raises TypeError: if cast fails due to wrong type (None)
:raises ValueError: if casts fails due to Value
"""
# cast name if autocast enabled
name_secret = name.lower() if autocast_name else name
name_env = name.upper() if autocast_name else name
# initiallize value
value = None
# try to read from secret file
try:
with open(os.path.join(secrets_dir, name_secret), 'r') as secret_file:
value = secret_file.read()
except IOError as e:
# try to read from env if enabled
if getenv:
value = os.environ.get(name_env)
# set default value if no value found
if value is None:
value = default
# try to cast
try:
# so None wont be cast to 'None'
if value is None:
raise TypeError('value is None')
# special case bool
if cast_to == bool:
if value not in ('True', 'true', 'False', 'false'):
raise ValueError('value %s not of type bool' % value)
value = 1 if value in ('True', 'true') else 0
# try to cast
return cast_to(value)
except (TypeError, ValueError) as e:
# whether exception should be thrown
if safe:
return default
raise e | [
"def",
"get_docker_secret",
"(",
"name",
",",
"default",
"=",
"None",
",",
"cast_to",
"=",
"str",
",",
"autocast_name",
"=",
"True",
",",
"getenv",
"=",
"True",
",",
"safe",
"=",
"True",
",",
"secrets_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"... | This function fetches a docker secret
:param name: the name of the docker secret
:param default: the default value if no secret found
:param cast_to: casts the value to the given type
:param autocast_name: whether the name should be lowercase for secrets and upper case for environment
:param getenv: if environment variable should be fetched as fallback
:param safe: Whether the function should raise exceptions
:param secrets_dir: the directory where the secrets are stored
:returns: docker secret or environment variable depending on params
:raises TypeError: if cast fails due to wrong type (None)
:raises ValueError: if casts fails due to Value | [
"This",
"function",
"fetches",
"a",
"docker",
"secret"
] | 1fa7f7e2d8b727fd95b6257041e0498fde2d3880 | https://github.com/Fischerfredl/get-docker-secret/blob/1fa7f7e2d8b727fd95b6257041e0498fde2d3880/get_docker_secret.py#L6-L61 | train | 48,736 |
ZELLMECHANIK-DRESDEN/dclab | dclab/cached.py | Cache._update_hash | def _update_hash(self, arg):
""" Takes an argument and updates the hash.
The argument can be an np.array, string, or list
of things that are convertable to strings.
"""
if isinstance(arg, np.ndarray):
self.ahash.update(arg.view(np.uint8))
elif isinstance(arg, list):
[self._update_hash(a) for a in arg]
else:
self.ahash.update(str(arg).encode('utf-8')) | python | def _update_hash(self, arg):
""" Takes an argument and updates the hash.
The argument can be an np.array, string, or list
of things that are convertable to strings.
"""
if isinstance(arg, np.ndarray):
self.ahash.update(arg.view(np.uint8))
elif isinstance(arg, list):
[self._update_hash(a) for a in arg]
else:
self.ahash.update(str(arg).encode('utf-8')) | [
"def",
"_update_hash",
"(",
"self",
",",
"arg",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"np",
".",
"ndarray",
")",
":",
"self",
".",
"ahash",
".",
"update",
"(",
"arg",
".",
"view",
"(",
"np",
".",
"uint8",
")",
")",
"elif",
"isinstance",
... | Takes an argument and updates the hash.
The argument can be an np.array, string, or list
of things that are convertable to strings. | [
"Takes",
"an",
"argument",
"and",
"updates",
"the",
"hash",
".",
"The",
"argument",
"can",
"be",
"an",
"np",
".",
"array",
"string",
"or",
"list",
"of",
"things",
"that",
"are",
"convertable",
"to",
"strings",
"."
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/cached.py#L81-L91 | train | 48,737 |
ZELLMECHANIK-DRESDEN/dclab | dclab/cached.py | Cache.clear_cache | def clear_cache():
"""Remove all cached objects"""
del Cache._keys
for k in list(Cache._cache.keys()):
it = Cache._cache.pop(k)
del it
del Cache._cache
Cache._keys = []
Cache._cache = {}
gc.collect() | python | def clear_cache():
"""Remove all cached objects"""
del Cache._keys
for k in list(Cache._cache.keys()):
it = Cache._cache.pop(k)
del it
del Cache._cache
Cache._keys = []
Cache._cache = {}
gc.collect() | [
"def",
"clear_cache",
"(",
")",
":",
"del",
"Cache",
".",
"_keys",
"for",
"k",
"in",
"list",
"(",
"Cache",
".",
"_cache",
".",
"keys",
"(",
")",
")",
":",
"it",
"=",
"Cache",
".",
"_cache",
".",
"pop",
"(",
"k",
")",
"del",
"it",
"del",
"Cache"... | Remove all cached objects | [
"Remove",
"all",
"cached",
"objects"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/cached.py#L94-L103 | train | 48,738 |
robmcmullen/atrcopy | atrcopy/diskimages.py | DiskImageBase.write_file | def write_file(self, filename, filetype, data):
"""Write data to a file on disk
This throws various exceptions on failures, for instance if there is
not enough space on disk or a free entry is not available in the
catalog.
"""
state = self.begin_transaction()
try:
directory = self.directory_class(self.header)
self.get_directory(directory)
dirent = directory.add_dirent(filename, filetype)
data = to_numpy(data)
sector_list = self.build_sectors(data)
vtoc = self.get_vtoc_object()
directory.save_dirent(self, dirent, vtoc, sector_list)
self.write_sector_list(sector_list)
self.write_sector_list(vtoc)
self.write_sector_list(directory)
except errors.AtrError:
self.rollback_transaction(state)
raise
finally:
self.get_metadata() | python | def write_file(self, filename, filetype, data):
"""Write data to a file on disk
This throws various exceptions on failures, for instance if there is
not enough space on disk or a free entry is not available in the
catalog.
"""
state = self.begin_transaction()
try:
directory = self.directory_class(self.header)
self.get_directory(directory)
dirent = directory.add_dirent(filename, filetype)
data = to_numpy(data)
sector_list = self.build_sectors(data)
vtoc = self.get_vtoc_object()
directory.save_dirent(self, dirent, vtoc, sector_list)
self.write_sector_list(sector_list)
self.write_sector_list(vtoc)
self.write_sector_list(directory)
except errors.AtrError:
self.rollback_transaction(state)
raise
finally:
self.get_metadata() | [
"def",
"write_file",
"(",
"self",
",",
"filename",
",",
"filetype",
",",
"data",
")",
":",
"state",
"=",
"self",
".",
"begin_transaction",
"(",
")",
"try",
":",
"directory",
"=",
"self",
".",
"directory_class",
"(",
"self",
".",
"header",
")",
"self",
... | Write data to a file on disk
This throws various exceptions on failures, for instance if there is
not enough space on disk or a free entry is not available in the
catalog. | [
"Write",
"data",
"to",
"a",
"file",
"on",
"disk"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/diskimages.py#L337-L360 | train | 48,739 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/core.py | RTDCBase._apply_scale | def _apply_scale(self, a, scale, feat):
"""Helper function for transforming an aray to log-scale
Parameters
----------
a: np.ndarray
Input array
scale:
If set to "log", take the logarithm of `a`; if set to
"linear" return `a` unchanged.
Returns
-------
b: np.ndarray
The scaled array
Notes
-----
If the scale is not "linear", then a new array is returned.
All warnings are suppressed when computing `np.log(a)`, as
`a` may have negative or nan values.
"""
if scale == "linear":
b = a
elif scale == "log":
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
b = np.log(a)
if len(w):
# Tell the user that the log-transformation issued
# a warning.
warnings.warn("Invalid values encounterd in np.log "
"while scaling feature '{}'!".format(feat))
else:
raise ValueError("`scale` must be either 'linear' or 'log', "
+ "got '{}'!".format(scale))
return b | python | def _apply_scale(self, a, scale, feat):
"""Helper function for transforming an aray to log-scale
Parameters
----------
a: np.ndarray
Input array
scale:
If set to "log", take the logarithm of `a`; if set to
"linear" return `a` unchanged.
Returns
-------
b: np.ndarray
The scaled array
Notes
-----
If the scale is not "linear", then a new array is returned.
All warnings are suppressed when computing `np.log(a)`, as
`a` may have negative or nan values.
"""
if scale == "linear":
b = a
elif scale == "log":
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
b = np.log(a)
if len(w):
# Tell the user that the log-transformation issued
# a warning.
warnings.warn("Invalid values encounterd in np.log "
"while scaling feature '{}'!".format(feat))
else:
raise ValueError("`scale` must be either 'linear' or 'log', "
+ "got '{}'!".format(scale))
return b | [
"def",
"_apply_scale",
"(",
"self",
",",
"a",
",",
"scale",
",",
"feat",
")",
":",
"if",
"scale",
"==",
"\"linear\"",
":",
"b",
"=",
"a",
"elif",
"scale",
"==",
"\"log\"",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
"record",
"=",
"True",
")... | Helper function for transforming an aray to log-scale
Parameters
----------
a: np.ndarray
Input array
scale:
If set to "log", take the logarithm of `a`; if set to
"linear" return `a` unchanged.
Returns
-------
b: np.ndarray
The scaled array
Notes
-----
If the scale is not "linear", then a new array is returned.
All warnings are suppressed when computing `np.log(a)`, as
`a` may have negative or nan values. | [
"Helper",
"function",
"for",
"transforming",
"an",
"aray",
"to",
"log",
"-",
"scale"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/core.py#L148-L184 | train | 48,740 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/core.py | RTDCBase.features | def features(self):
"""All available features"""
mycols = []
for col in dfn.feature_names:
if col in self:
mycols.append(col)
mycols.sort()
return mycols | python | def features(self):
"""All available features"""
mycols = []
for col in dfn.feature_names:
if col in self:
mycols.append(col)
mycols.sort()
return mycols | [
"def",
"features",
"(",
"self",
")",
":",
"mycols",
"=",
"[",
"]",
"for",
"col",
"in",
"dfn",
".",
"feature_names",
":",
"if",
"col",
"in",
"self",
":",
"mycols",
".",
"append",
"(",
"col",
")",
"mycols",
".",
"sort",
"(",
")",
"return",
"mycols"
] | All available features | [
"All",
"available",
"features"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/core.py#L206-L213 | train | 48,741 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/core.py | RTDCBase.get_downsampled_scatter | def get_downsampled_scatter(self, xax="area_um", yax="deform",
downsample=0, xscale="linear",
yscale="linear"):
"""Downsampling by removing points at dense locations
Parameters
----------
xax: str
Identifier for x axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for y axis
downsample: int
Number of points to draw in the down-sampled plot.
This number is either
- >=1: exactly downsample to this number by randomly adding
or removing points
- 0 : do not perform downsampling
xscale: str
If set to "log", take the logarithm of the x-values before
performing downsampling. This is useful when data are are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
xnew, xnew: filtered x and y
"""
if downsample < 0:
raise ValueError("`downsample` must be zero or positive!")
downsample = int(downsample)
xax = xax.lower()
yax = yax.lower()
# Get data
x = self[xax][self.filter.all]
y = self[yax][self.filter.all]
# Apply scale (no change for linear scale)
xs = self._apply_scale(x, xscale, xax)
ys = self._apply_scale(y, yscale, yax)
_, _, idx = downsampling.downsample_grid(xs, ys,
samples=downsample,
ret_idx=True)
self._plot_filter = idx
return x[idx], y[idx] | python | def get_downsampled_scatter(self, xax="area_um", yax="deform",
downsample=0, xscale="linear",
yscale="linear"):
"""Downsampling by removing points at dense locations
Parameters
----------
xax: str
Identifier for x axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for y axis
downsample: int
Number of points to draw in the down-sampled plot.
This number is either
- >=1: exactly downsample to this number by randomly adding
or removing points
- 0 : do not perform downsampling
xscale: str
If set to "log", take the logarithm of the x-values before
performing downsampling. This is useful when data are are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
xnew, xnew: filtered x and y
"""
if downsample < 0:
raise ValueError("`downsample` must be zero or positive!")
downsample = int(downsample)
xax = xax.lower()
yax = yax.lower()
# Get data
x = self[xax][self.filter.all]
y = self[yax][self.filter.all]
# Apply scale (no change for linear scale)
xs = self._apply_scale(x, xscale, xax)
ys = self._apply_scale(y, yscale, yax)
_, _, idx = downsampling.downsample_grid(xs, ys,
samples=downsample,
ret_idx=True)
self._plot_filter = idx
return x[idx], y[idx] | [
"def",
"get_downsampled_scatter",
"(",
"self",
",",
"xax",
"=",
"\"area_um\"",
",",
"yax",
"=",
"\"deform\"",
",",
"downsample",
"=",
"0",
",",
"xscale",
"=",
"\"linear\"",
",",
"yscale",
"=",
"\"linear\"",
")",
":",
"if",
"downsample",
"<",
"0",
":",
"r... | Downsampling by removing points at dense locations
Parameters
----------
xax: str
Identifier for x axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for y axis
downsample: int
Number of points to draw in the down-sampled plot.
This number is either
- >=1: exactly downsample to this number by randomly adding
or removing points
- 0 : do not perform downsampling
xscale: str
If set to "log", take the logarithm of the x-values before
performing downsampling. This is useful when data are are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
xnew, xnew: filtered x and y | [
"Downsampling",
"by",
"removing",
"points",
"at",
"dense",
"locations"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/core.py#L223-L271 | train | 48,742 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/core.py | RTDCBase.get_kde_contour | def get_kde_contour(self, xax="area_um", yax="deform", xacc=None,
yacc=None, kde_type="histogram", kde_kwargs={},
xscale="linear", yscale="linear"):
"""Evaluate the kernel density estimate for contour plots
Parameters
----------
xax: str
Identifier for X axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for Y axis
xacc: float
Contour accuracy in x direction
yacc: float
Contour accuracy in y direction
kde_type: str
The KDE method to use
kde_kwargs: dict
Additional keyword arguments to the KDE method
xscale: str
If set to "log", take the logarithm of the x-values before
computing the KDE. This is useful when data are are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
X, Y, Z : coordinates
The kernel density Z evaluated on a rectangular grid (X,Y).
"""
xax = xax.lower()
yax = yax.lower()
kde_type = kde_type.lower()
if kde_type not in kde_methods.methods:
raise ValueError("Not a valid kde type: {}!".format(kde_type))
# Get data
x = self[xax][self.filter.all]
y = self[yax][self.filter.all]
# Apply scale (no change for linear scale)
xs = self._apply_scale(x, xscale, xax)
ys = self._apply_scale(y, yscale, yax)
# accuracy (bin width) of KDE estimator
if xacc is None:
xacc = kde_methods.bin_width_doane(xs) / 5
if yacc is None:
yacc = kde_methods.bin_width_doane(ys) / 5
# Ignore infs and nans
bad = kde_methods.get_bad_vals(xs, ys)
xc = xs[~bad]
yc = ys[~bad]
xnum = int(np.ceil((xc.max() - xc.min()) / xacc))
ynum = int(np.ceil((yc.max() - yc.min()) / yacc))
xlin = np.linspace(xc.min(), xc.max(), xnum, endpoint=True)
ylin = np.linspace(yc.min(), yc.max(), ynum, endpoint=True)
xmesh, ymesh = np.meshgrid(xlin, ylin, indexing="ij")
kde_fct = kde_methods.methods[kde_type]
if len(x):
density = kde_fct(events_x=xs, events_y=ys,
xout=xmesh, yout=ymesh,
**kde_kwargs)
else:
density = []
# Convert mesh back to linear scale if applicable
if xscale == "log":
xmesh = np.exp(xmesh)
if yscale == "log":
ymesh = np.exp(ymesh)
return xmesh, ymesh, density | python | def get_kde_contour(self, xax="area_um", yax="deform", xacc=None,
yacc=None, kde_type="histogram", kde_kwargs={},
xscale="linear", yscale="linear"):
"""Evaluate the kernel density estimate for contour plots
Parameters
----------
xax: str
Identifier for X axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for Y axis
xacc: float
Contour accuracy in x direction
yacc: float
Contour accuracy in y direction
kde_type: str
The KDE method to use
kde_kwargs: dict
Additional keyword arguments to the KDE method
xscale: str
If set to "log", take the logarithm of the x-values before
computing the KDE. This is useful when data are are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
X, Y, Z : coordinates
The kernel density Z evaluated on a rectangular grid (X,Y).
"""
xax = xax.lower()
yax = yax.lower()
kde_type = kde_type.lower()
if kde_type not in kde_methods.methods:
raise ValueError("Not a valid kde type: {}!".format(kde_type))
# Get data
x = self[xax][self.filter.all]
y = self[yax][self.filter.all]
# Apply scale (no change for linear scale)
xs = self._apply_scale(x, xscale, xax)
ys = self._apply_scale(y, yscale, yax)
# accuracy (bin width) of KDE estimator
if xacc is None:
xacc = kde_methods.bin_width_doane(xs) / 5
if yacc is None:
yacc = kde_methods.bin_width_doane(ys) / 5
# Ignore infs and nans
bad = kde_methods.get_bad_vals(xs, ys)
xc = xs[~bad]
yc = ys[~bad]
xnum = int(np.ceil((xc.max() - xc.min()) / xacc))
ynum = int(np.ceil((yc.max() - yc.min()) / yacc))
xlin = np.linspace(xc.min(), xc.max(), xnum, endpoint=True)
ylin = np.linspace(yc.min(), yc.max(), ynum, endpoint=True)
xmesh, ymesh = np.meshgrid(xlin, ylin, indexing="ij")
kde_fct = kde_methods.methods[kde_type]
if len(x):
density = kde_fct(events_x=xs, events_y=ys,
xout=xmesh, yout=ymesh,
**kde_kwargs)
else:
density = []
# Convert mesh back to linear scale if applicable
if xscale == "log":
xmesh = np.exp(xmesh)
if yscale == "log":
ymesh = np.exp(ymesh)
return xmesh, ymesh, density | [
"def",
"get_kde_contour",
"(",
"self",
",",
"xax",
"=",
"\"area_um\"",
",",
"yax",
"=",
"\"deform\"",
",",
"xacc",
"=",
"None",
",",
"yacc",
"=",
"None",
",",
"kde_type",
"=",
"\"histogram\"",
",",
"kde_kwargs",
"=",
"{",
"}",
",",
"xscale",
"=",
"\"li... | Evaluate the kernel density estimate for contour plots
Parameters
----------
xax: str
Identifier for X axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for Y axis
xacc: float
Contour accuracy in x direction
yacc: float
Contour accuracy in y direction
kde_type: str
The KDE method to use
kde_kwargs: dict
Additional keyword arguments to the KDE method
xscale: str
If set to "log", take the logarithm of the x-values before
computing the KDE. This is useful when data are are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
X, Y, Z : coordinates
The kernel density Z evaluated on a rectangular grid (X,Y). | [
"Evaluate",
"the",
"kernel",
"density",
"estimate",
"for",
"contour",
"plots"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/core.py#L273-L351 | train | 48,743 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/core.py | RTDCBase.get_kde_scatter | def get_kde_scatter(self, xax="area_um", yax="deform", positions=None,
kde_type="histogram", kde_kwargs={}, xscale="linear",
yscale="linear"):
"""Evaluate the kernel density estimate for scatter plots
Parameters
----------
xax: str
Identifier for X axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for Y axis
positions: list of two 1d ndarrays or ndarray of shape (2, N)
The positions where the KDE will be computed. Note that
the KDE estimate is computed from the the points that
are set in `self.filter.all`.
kde_type: str
The KDE method to use
kde_kwargs: dict
Additional keyword arguments to the KDE method
xscale: str
If set to "log", take the logarithm of the x-values before
computing the KDE. This is useful when data are are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
density : 1d ndarray
The kernel density evaluated for the filtered data points.
"""
xax = xax.lower()
yax = yax.lower()
kde_type = kde_type.lower()
if kde_type not in kde_methods.methods:
raise ValueError("Not a valid kde type: {}!".format(kde_type))
# Get data
x = self[xax][self.filter.all]
y = self[yax][self.filter.all]
# Apply scale (no change for linear scale)
xs = self._apply_scale(x, xscale, xax)
ys = self._apply_scale(y, yscale, yax)
if positions is None:
posx = None
posy = None
else:
posx = self._apply_scale(positions[0], xscale, xax)
posy = self._apply_scale(positions[1], yscale, yax)
kde_fct = kde_methods.methods[kde_type]
if len(x):
density = kde_fct(events_x=xs, events_y=ys,
xout=posx, yout=posy,
**kde_kwargs)
else:
density = []
return density | python | def get_kde_scatter(self, xax="area_um", yax="deform", positions=None,
kde_type="histogram", kde_kwargs={}, xscale="linear",
yscale="linear"):
"""Evaluate the kernel density estimate for scatter plots
Parameters
----------
xax: str
Identifier for X axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for Y axis
positions: list of two 1d ndarrays or ndarray of shape (2, N)
The positions where the KDE will be computed. Note that
the KDE estimate is computed from the the points that
are set in `self.filter.all`.
kde_type: str
The KDE method to use
kde_kwargs: dict
Additional keyword arguments to the KDE method
xscale: str
If set to "log", take the logarithm of the x-values before
computing the KDE. This is useful when data are are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
density : 1d ndarray
The kernel density evaluated for the filtered data points.
"""
xax = xax.lower()
yax = yax.lower()
kde_type = kde_type.lower()
if kde_type not in kde_methods.methods:
raise ValueError("Not a valid kde type: {}!".format(kde_type))
# Get data
x = self[xax][self.filter.all]
y = self[yax][self.filter.all]
# Apply scale (no change for linear scale)
xs = self._apply_scale(x, xscale, xax)
ys = self._apply_scale(y, yscale, yax)
if positions is None:
posx = None
posy = None
else:
posx = self._apply_scale(positions[0], xscale, xax)
posy = self._apply_scale(positions[1], yscale, yax)
kde_fct = kde_methods.methods[kde_type]
if len(x):
density = kde_fct(events_x=xs, events_y=ys,
xout=posx, yout=posy,
**kde_kwargs)
else:
density = []
return density | [
"def",
"get_kde_scatter",
"(",
"self",
",",
"xax",
"=",
"\"area_um\"",
",",
"yax",
"=",
"\"deform\"",
",",
"positions",
"=",
"None",
",",
"kde_type",
"=",
"\"histogram\"",
",",
"kde_kwargs",
"=",
"{",
"}",
",",
"xscale",
"=",
"\"linear\"",
",",
"yscale",
... | Evaluate the kernel density estimate for scatter plots
Parameters
----------
xax: str
Identifier for X axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for Y axis
positions: list of two 1d ndarrays or ndarray of shape (2, N)
The positions where the KDE will be computed. Note that
the KDE estimate is computed from the the points that
are set in `self.filter.all`.
kde_type: str
The KDE method to use
kde_kwargs: dict
Additional keyword arguments to the KDE method
xscale: str
If set to "log", take the logarithm of the x-values before
computing the KDE. This is useful when data are are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
density : 1d ndarray
The kernel density evaluated for the filtered data points. | [
"Evaluate",
"the",
"kernel",
"density",
"estimate",
"for",
"scatter",
"plots"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/core.py#L353-L413 | train | 48,744 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/core.py | RTDCBase.polygon_filter_add | def polygon_filter_add(self, filt):
"""Associate a Polygon Filter with this instance
Parameters
----------
filt: int or instance of `PolygonFilter`
The polygon filter to add
"""
if not isinstance(filt, (PolygonFilter, int, float)):
msg = "`filt` must be a number or instance of PolygonFilter!"
raise ValueError(msg)
if isinstance(filt, PolygonFilter):
uid = filt.unique_id
else:
uid = int(filt)
# append item
self.config["filtering"]["polygon filters"].append(uid) | python | def polygon_filter_add(self, filt):
"""Associate a Polygon Filter with this instance
Parameters
----------
filt: int or instance of `PolygonFilter`
The polygon filter to add
"""
if not isinstance(filt, (PolygonFilter, int, float)):
msg = "`filt` must be a number or instance of PolygonFilter!"
raise ValueError(msg)
if isinstance(filt, PolygonFilter):
uid = filt.unique_id
else:
uid = int(filt)
# append item
self.config["filtering"]["polygon filters"].append(uid) | [
"def",
"polygon_filter_add",
"(",
"self",
",",
"filt",
")",
":",
"if",
"not",
"isinstance",
"(",
"filt",
",",
"(",
"PolygonFilter",
",",
"int",
",",
"float",
")",
")",
":",
"msg",
"=",
"\"`filt` must be a number or instance of PolygonFilter!\"",
"raise",
"ValueE... | Associate a Polygon Filter with this instance
Parameters
----------
filt: int or instance of `PolygonFilter`
The polygon filter to add | [
"Associate",
"a",
"Polygon",
"Filter",
"with",
"this",
"instance"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/core.py#L415-L432 | train | 48,745 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/core.py | RTDCBase.polygon_filter_rm | def polygon_filter_rm(self, filt):
"""Remove a polygon filter from this instance
Parameters
----------
filt: int or instance of `PolygonFilter`
The polygon filter to remove
"""
if not isinstance(filt, (PolygonFilter, int, float)):
msg = "`filt` must be a number or instance of PolygonFilter!"
raise ValueError(msg)
if isinstance(filt, PolygonFilter):
uid = filt.unique_id
else:
uid = int(filt)
# remove item
self.config["filtering"]["polygon filters"].remove(uid) | python | def polygon_filter_rm(self, filt):
"""Remove a polygon filter from this instance
Parameters
----------
filt: int or instance of `PolygonFilter`
The polygon filter to remove
"""
if not isinstance(filt, (PolygonFilter, int, float)):
msg = "`filt` must be a number or instance of PolygonFilter!"
raise ValueError(msg)
if isinstance(filt, PolygonFilter):
uid = filt.unique_id
else:
uid = int(filt)
# remove item
self.config["filtering"]["polygon filters"].remove(uid) | [
"def",
"polygon_filter_rm",
"(",
"self",
",",
"filt",
")",
":",
"if",
"not",
"isinstance",
"(",
"filt",
",",
"(",
"PolygonFilter",
",",
"int",
",",
"float",
")",
")",
":",
"msg",
"=",
"\"`filt` must be a number or instance of PolygonFilter!\"",
"raise",
"ValueEr... | Remove a polygon filter from this instance
Parameters
----------
filt: int or instance of `PolygonFilter`
The polygon filter to remove | [
"Remove",
"a",
"polygon",
"filter",
"from",
"this",
"instance"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/core.py#L434-L451 | train | 48,746 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/load.py | new_dataset | def new_dataset(data, identifier=None):
"""Initialize a new RT-DC dataset
Parameters
----------
data:
can be one of the following:
- dict
- .tdms file
- .rtdc file
- subclass of `RTDCBase`
(will create a hierarchy child)
identifier: str
A unique identifier for this dataset. If set to `None`
an identifier is generated.
Returns
-------
dataset: subclass of :class:`dclab.rtdc_dataset.RTDCBase`
A new dataset instance
"""
if isinstance(data, dict):
return fmt_dict.RTDC_Dict(data, identifier=identifier)
elif isinstance(data, (str_types)) or isinstance(data, pathlib.Path):
return load_file(data, identifier=identifier)
elif isinstance(data, RTDCBase):
return fmt_hierarchy.RTDC_Hierarchy(data, identifier=identifier)
else:
msg = "data type not supported: {}".format(data.__class__)
raise NotImplementedError(msg) | python | def new_dataset(data, identifier=None):
"""Initialize a new RT-DC dataset
Parameters
----------
data:
can be one of the following:
- dict
- .tdms file
- .rtdc file
- subclass of `RTDCBase`
(will create a hierarchy child)
identifier: str
A unique identifier for this dataset. If set to `None`
an identifier is generated.
Returns
-------
dataset: subclass of :class:`dclab.rtdc_dataset.RTDCBase`
A new dataset instance
"""
if isinstance(data, dict):
return fmt_dict.RTDC_Dict(data, identifier=identifier)
elif isinstance(data, (str_types)) or isinstance(data, pathlib.Path):
return load_file(data, identifier=identifier)
elif isinstance(data, RTDCBase):
return fmt_hierarchy.RTDC_Hierarchy(data, identifier=identifier)
else:
msg = "data type not supported: {}".format(data.__class__)
raise NotImplementedError(msg) | [
"def",
"new_dataset",
"(",
"data",
",",
"identifier",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"return",
"fmt_dict",
".",
"RTDC_Dict",
"(",
"data",
",",
"identifier",
"=",
"identifier",
")",
"elif",
"isinstance",
"("... | Initialize a new RT-DC dataset
Parameters
----------
data:
can be one of the following:
- dict
- .tdms file
- .rtdc file
- subclass of `RTDCBase`
(will create a hierarchy child)
identifier: str
A unique identifier for this dataset. If set to `None`
an identifier is generated.
Returns
-------
dataset: subclass of :class:`dclab.rtdc_dataset.RTDCBase`
A new dataset instance | [
"Initialize",
"a",
"new",
"RT",
"-",
"DC",
"dataset"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/load.py#L229-L259 | train | 48,747 |
openstax/cnx-archive | cnxarchive/scripts/hits_counter.py | parse_log | def parse_log(log, url_pattern):
"""Parse ``log`` buffer based on ``url_pattern``.
Given a buffer as ``log``, parse the log buffer into
a mapping of ident-hashes to a hit count,
the timestamp of the initial log,
and the last timestamp in the log.
"""
hits = {}
initial_timestamp = None
def clean_timestamp(v):
return ' '.join(v).strip('[]')
for line in log:
data = line.split()
if not initial_timestamp:
initial_timestamp = clean_timestamp(data[3:5])
match = url_pattern.match(data[6])
if match:
ident_hash = '@'.join(match.groups())
if ident_hash:
hits[ident_hash] = hits.get(ident_hash, 0) + 1
else:
end_timestamp = clean_timestamp(data[3:5])
return hits, initial_timestamp, end_timestamp | python | def parse_log(log, url_pattern):
"""Parse ``log`` buffer based on ``url_pattern``.
Given a buffer as ``log``, parse the log buffer into
a mapping of ident-hashes to a hit count,
the timestamp of the initial log,
and the last timestamp in the log.
"""
hits = {}
initial_timestamp = None
def clean_timestamp(v):
return ' '.join(v).strip('[]')
for line in log:
data = line.split()
if not initial_timestamp:
initial_timestamp = clean_timestamp(data[3:5])
match = url_pattern.match(data[6])
if match:
ident_hash = '@'.join(match.groups())
if ident_hash:
hits[ident_hash] = hits.get(ident_hash, 0) + 1
else:
end_timestamp = clean_timestamp(data[3:5])
return hits, initial_timestamp, end_timestamp | [
"def",
"parse_log",
"(",
"log",
",",
"url_pattern",
")",
":",
"hits",
"=",
"{",
"}",
"initial_timestamp",
"=",
"None",
"def",
"clean_timestamp",
"(",
"v",
")",
":",
"return",
"' '",
".",
"join",
"(",
"v",
")",
".",
"strip",
"(",
"'[]'",
")",
"for",
... | Parse ``log`` buffer based on ``url_pattern``.
Given a buffer as ``log``, parse the log buffer into
a mapping of ident-hashes to a hit count,
the timestamp of the initial log,
and the last timestamp in the log. | [
"Parse",
"log",
"buffer",
"based",
"on",
"url_pattern",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/scripts/hits_counter.py#L34-L58 | train | 48,748 |
ZELLMECHANIK-DRESDEN/dclab | dclab/parse_funcs.py | fintlist | def fintlist(alist):
"""A list of integers"""
outlist = []
if not isinstance(alist, (list, tuple)):
# we have a string (comma-separated integers)
alist = alist.strip().strip("[] ").split(",")
for it in alist:
if it:
outlist.append(fint(it))
return outlist | python | def fintlist(alist):
"""A list of integers"""
outlist = []
if not isinstance(alist, (list, tuple)):
# we have a string (comma-separated integers)
alist = alist.strip().strip("[] ").split(",")
for it in alist:
if it:
outlist.append(fint(it))
return outlist | [
"def",
"fintlist",
"(",
"alist",
")",
":",
"outlist",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"alist",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# we have a string (comma-separated integers)",
"alist",
"=",
"alist",
".",
"strip",
"(",
")",
"... | A list of integers | [
"A",
"list",
"of",
"integers"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/parse_funcs.py#L43-L52 | train | 48,749 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/export.py | Export.avi | def avi(self, path, filtered=True, override=False):
"""Exports filtered event images to an avi file
Parameters
----------
path: str
Path to a .tsv file. The ending .tsv is added automatically.
filtered: bool
If set to `True`, only the filtered data (index in ds._filter)
are used.
override: bool
If set to `True`, an existing file ``path`` will be overridden.
If set to `False`, raises `OSError` if ``path`` exists.
Notes
-----
Raises OSError if current dataset does not contain image data
"""
path = pathlib.Path(path)
ds = self.rtdc_ds
# Make sure that path ends with .avi
if path.suffix != ".avi":
path = path.with_name(path.name + ".avi")
# Check if file already exist
if not override and path.exists():
raise OSError("File already exists: {}\n".format(
str(path).encode("ascii", "ignore")) +
"Please use the `override=True` option.")
# Start exporting
if "image" in ds:
# Open video for writing
vout = imageio.get_writer(uri=path,
format="FFMPEG",
fps=25,
codec="rawvideo",
pixelformat="yuv420p",
macro_block_size=None,
ffmpeg_log_level="error")
# write the filtered frames to avi file
for evid in np.arange(len(ds)):
# skip frames that were filtered out
if filtered and not ds._filter[evid]:
continue
try:
image = ds["image"][evid]
except BaseException:
warnings.warn("Could not read image {}!".format(evid),
NoImageWarning)
continue
else:
if np.isnan(image[0, 0]):
# This is a nan-valued image
image = np.zeros_like(image, dtype=np.uint8)
# Convert image to RGB
image = image.reshape(image.shape[0], image.shape[1], 1)
image = np.repeat(image, 3, axis=2)
vout.append_data(image)
else:
msg = "No image data to export: dataset {} !".format(ds.title)
raise OSError(msg) | python | def avi(self, path, filtered=True, override=False):
"""Exports filtered event images to an avi file
Parameters
----------
path: str
Path to a .tsv file. The ending .tsv is added automatically.
filtered: bool
If set to `True`, only the filtered data (index in ds._filter)
are used.
override: bool
If set to `True`, an existing file ``path`` will be overridden.
If set to `False`, raises `OSError` if ``path`` exists.
Notes
-----
Raises OSError if current dataset does not contain image data
"""
path = pathlib.Path(path)
ds = self.rtdc_ds
# Make sure that path ends with .avi
if path.suffix != ".avi":
path = path.with_name(path.name + ".avi")
# Check if file already exist
if not override and path.exists():
raise OSError("File already exists: {}\n".format(
str(path).encode("ascii", "ignore")) +
"Please use the `override=True` option.")
# Start exporting
if "image" in ds:
# Open video for writing
vout = imageio.get_writer(uri=path,
format="FFMPEG",
fps=25,
codec="rawvideo",
pixelformat="yuv420p",
macro_block_size=None,
ffmpeg_log_level="error")
# write the filtered frames to avi file
for evid in np.arange(len(ds)):
# skip frames that were filtered out
if filtered and not ds._filter[evid]:
continue
try:
image = ds["image"][evid]
except BaseException:
warnings.warn("Could not read image {}!".format(evid),
NoImageWarning)
continue
else:
if np.isnan(image[0, 0]):
# This is a nan-valued image
image = np.zeros_like(image, dtype=np.uint8)
# Convert image to RGB
image = image.reshape(image.shape[0], image.shape[1], 1)
image = np.repeat(image, 3, axis=2)
vout.append_data(image)
else:
msg = "No image data to export: dataset {} !".format(ds.title)
raise OSError(msg) | [
"def",
"avi",
"(",
"self",
",",
"path",
",",
"filtered",
"=",
"True",
",",
"override",
"=",
"False",
")",
":",
"path",
"=",
"pathlib",
".",
"Path",
"(",
"path",
")",
"ds",
"=",
"self",
".",
"rtdc_ds",
"# Make sure that path ends with .avi",
"if",
"path",... | Exports filtered event images to an avi file
Parameters
----------
path: str
Path to a .tsv file. The ending .tsv is added automatically.
filtered: bool
If set to `True`, only the filtered data (index in ds._filter)
are used.
override: bool
If set to `True`, an existing file ``path`` will be overridden.
If set to `False`, raises `OSError` if ``path`` exists.
Notes
-----
Raises OSError if current dataset does not contain image data | [
"Exports",
"filtered",
"event",
"images",
"to",
"an",
"avi",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/export.py#L26-L85 | train | 48,750 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/export.py | Export.fcs | def fcs(self, path, features, filtered=True, override=False):
"""Export the data of an RT-DC dataset to an .fcs file
Parameters
----------
mm: instance of dclab.RTDCBase
The dataset that will be exported.
path: str
Path to a .tsv file. The ending .tsv is added automatically.
features: list of str
The features in the resulting .tsv file. These are strings
that are defined in `dclab.definitions.scalar_feature_names`,
e.g. "area_cvx", "deform", "frame", "fl1_max", "aspect".
filtered: bool
If set to `True`, only the filtered data (index in ds._filter)
are used.
override: bool
If set to `True`, an existing file ``path`` will be overridden.
If set to `False`, raises `OSError` if ``path`` exists.
Notes
-----
Due to incompatibility with the .fcs file format, all events with
NaN-valued features are not exported.
"""
features = [c.lower() for c in features]
ds = self.rtdc_ds
path = pathlib.Path(path)
# Make sure that path ends with .fcs
if path.suffix != ".fcs":
path = path.with_name(path.name + ".fcs")
# Check if file already exist
if not override and path.exists():
raise OSError("File already exists: {}\n".format(
str(path).encode("ascii", "ignore")) +
"Please use the `override=True` option.")
# Check that features are in dfn.scalar_feature_names
for c in features:
if c not in dfn.scalar_feature_names:
msg = "Unknown or unsupported feature name: {}".format(c)
raise ValueError(msg)
# Collect the header
chn_names = [dfn.feature_name2label[c] for c in features]
# Collect the data
if filtered:
data = [ds[c][ds._filter] for c in features]
else:
data = [ds[c] for c in features]
data = np.array(data).transpose()
fcswrite.write_fcs(filename=str(path),
chn_names=chn_names,
data=data) | python | def fcs(self, path, features, filtered=True, override=False):
"""Export the data of an RT-DC dataset to an .fcs file
Parameters
----------
mm: instance of dclab.RTDCBase
The dataset that will be exported.
path: str
Path to a .tsv file. The ending .tsv is added automatically.
features: list of str
The features in the resulting .tsv file. These are strings
that are defined in `dclab.definitions.scalar_feature_names`,
e.g. "area_cvx", "deform", "frame", "fl1_max", "aspect".
filtered: bool
If set to `True`, only the filtered data (index in ds._filter)
are used.
override: bool
If set to `True`, an existing file ``path`` will be overridden.
If set to `False`, raises `OSError` if ``path`` exists.
Notes
-----
Due to incompatibility with the .fcs file format, all events with
NaN-valued features are not exported.
"""
features = [c.lower() for c in features]
ds = self.rtdc_ds
path = pathlib.Path(path)
# Make sure that path ends with .fcs
if path.suffix != ".fcs":
path = path.with_name(path.name + ".fcs")
# Check if file already exist
if not override and path.exists():
raise OSError("File already exists: {}\n".format(
str(path).encode("ascii", "ignore")) +
"Please use the `override=True` option.")
# Check that features are in dfn.scalar_feature_names
for c in features:
if c not in dfn.scalar_feature_names:
msg = "Unknown or unsupported feature name: {}".format(c)
raise ValueError(msg)
# Collect the header
chn_names = [dfn.feature_name2label[c] for c in features]
# Collect the data
if filtered:
data = [ds[c][ds._filter] for c in features]
else:
data = [ds[c] for c in features]
data = np.array(data).transpose()
fcswrite.write_fcs(filename=str(path),
chn_names=chn_names,
data=data) | [
"def",
"fcs",
"(",
"self",
",",
"path",
",",
"features",
",",
"filtered",
"=",
"True",
",",
"override",
"=",
"False",
")",
":",
"features",
"=",
"[",
"c",
".",
"lower",
"(",
")",
"for",
"c",
"in",
"features",
"]",
"ds",
"=",
"self",
".",
"rtdc_ds... | Export the data of an RT-DC dataset to an .fcs file
Parameters
----------
mm: instance of dclab.RTDCBase
The dataset that will be exported.
path: str
Path to a .tsv file. The ending .tsv is added automatically.
features: list of str
The features in the resulting .tsv file. These are strings
that are defined in `dclab.definitions.scalar_feature_names`,
e.g. "area_cvx", "deform", "frame", "fl1_max", "aspect".
filtered: bool
If set to `True`, only the filtered data (index in ds._filter)
are used.
override: bool
If set to `True`, an existing file ``path`` will be overridden.
If set to `False`, raises `OSError` if ``path`` exists.
Notes
-----
Due to incompatibility with the .fcs file format, all events with
NaN-valued features are not exported. | [
"Export",
"the",
"data",
"of",
"an",
"RT",
"-",
"DC",
"dataset",
"to",
"an",
".",
"fcs",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/export.py#L87-L142 | train | 48,751 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/export.py | Export.tsv | def tsv(self, path, features, filtered=True, override=False):
"""Export the data of the current instance to a .tsv file
Parameters
----------
path: str
Path to a .tsv file. The ending .tsv is added automatically.
features: list of str
The features in the resulting .tsv file. These are strings
that are defined in `dclab.definitions.scalar_feature_names`,
e.g. "area_cvx", "deform", "frame", "fl1_max", "aspect".
filtered: bool
If set to `True`, only the filtered data (index in ds._filter)
are used.
override: bool
If set to `True`, an existing file ``path`` will be overridden.
If set to `False`, raises `OSError` if ``path`` exists.
"""
features = [c.lower() for c in features]
path = pathlib.Path(path)
ds = self.rtdc_ds
# Make sure that path ends with .tsv
if path.suffix != ".tsv":
path = path.with_name(path.name + ".tsv")
# Check if file already exist
if not override and path.exists():
raise OSError("File already exists: {}\n".format(
str(path).encode("ascii", "ignore")) +
"Please use the `override=True` option.")
# Check that features are in dfn.scalar_feature_names
for c in features:
if c not in dfn.scalar_feature_names:
raise ValueError("Unknown feature name {}".format(c))
# Open file
with path.open("w") as fd:
# write header
header1 = "\t".join([c for c in features])
fd.write("# "+header1+"\n")
header2 = "\t".join([dfn.feature_name2label[c] for c in features])
fd.write("# "+header2+"\n")
with path.open("ab") as fd:
# write data
if filtered:
data = [ds[c][ds._filter] for c in features]
else:
data = [ds[c] for c in features]
np.savetxt(fd,
np.array(data).transpose(),
fmt=str("%.10e"),
delimiter="\t") | python | def tsv(self, path, features, filtered=True, override=False):
"""Export the data of the current instance to a .tsv file
Parameters
----------
path: str
Path to a .tsv file. The ending .tsv is added automatically.
features: list of str
The features in the resulting .tsv file. These are strings
that are defined in `dclab.definitions.scalar_feature_names`,
e.g. "area_cvx", "deform", "frame", "fl1_max", "aspect".
filtered: bool
If set to `True`, only the filtered data (index in ds._filter)
are used.
override: bool
If set to `True`, an existing file ``path`` will be overridden.
If set to `False`, raises `OSError` if ``path`` exists.
"""
features = [c.lower() for c in features]
path = pathlib.Path(path)
ds = self.rtdc_ds
# Make sure that path ends with .tsv
if path.suffix != ".tsv":
path = path.with_name(path.name + ".tsv")
# Check if file already exist
if not override and path.exists():
raise OSError("File already exists: {}\n".format(
str(path).encode("ascii", "ignore")) +
"Please use the `override=True` option.")
# Check that features are in dfn.scalar_feature_names
for c in features:
if c not in dfn.scalar_feature_names:
raise ValueError("Unknown feature name {}".format(c))
# Open file
with path.open("w") as fd:
# write header
header1 = "\t".join([c for c in features])
fd.write("# "+header1+"\n")
header2 = "\t".join([dfn.feature_name2label[c] for c in features])
fd.write("# "+header2+"\n")
with path.open("ab") as fd:
# write data
if filtered:
data = [ds[c][ds._filter] for c in features]
else:
data = [ds[c] for c in features]
np.savetxt(fd,
np.array(data).transpose(),
fmt=str("%.10e"),
delimiter="\t") | [
"def",
"tsv",
"(",
"self",
",",
"path",
",",
"features",
",",
"filtered",
"=",
"True",
",",
"override",
"=",
"False",
")",
":",
"features",
"=",
"[",
"c",
".",
"lower",
"(",
")",
"for",
"c",
"in",
"features",
"]",
"path",
"=",
"pathlib",
".",
"Pa... | Export the data of the current instance to a .tsv file
Parameters
----------
path: str
Path to a .tsv file. The ending .tsv is added automatically.
features: list of str
The features in the resulting .tsv file. These are strings
that are defined in `dclab.definitions.scalar_feature_names`,
e.g. "area_cvx", "deform", "frame", "fl1_max", "aspect".
filtered: bool
If set to `True`, only the filtered data (index in ds._filter)
are used.
override: bool
If set to `True`, an existing file ``path`` will be overridden.
If set to `False`, raises `OSError` if ``path`` exists. | [
"Export",
"the",
"data",
"of",
"the",
"current",
"instance",
"to",
"a",
".",
"tsv",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/export.py#L265-L317 | train | 48,752 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/util.py | hashfile | def hashfile(fname, blocksize=65536, count=0):
"""Compute md5 hex-hash of a file
Parameters
----------
fname: str
path to the file
blocksize: int
block size in bytes read from the file
(set to `0` to hash the entire file)
count: int
number of blocks read from the file
"""
hasher = hashlib.md5()
fname = pathlib.Path(fname)
with fname.open('rb') as fd:
buf = fd.read(blocksize)
ii = 0
while len(buf) > 0:
hasher.update(buf)
buf = fd.read(blocksize)
ii += 1
if count and ii == count:
break
return hasher.hexdigest() | python | def hashfile(fname, blocksize=65536, count=0):
"""Compute md5 hex-hash of a file
Parameters
----------
fname: str
path to the file
blocksize: int
block size in bytes read from the file
(set to `0` to hash the entire file)
count: int
number of blocks read from the file
"""
hasher = hashlib.md5()
fname = pathlib.Path(fname)
with fname.open('rb') as fd:
buf = fd.read(blocksize)
ii = 0
while len(buf) > 0:
hasher.update(buf)
buf = fd.read(blocksize)
ii += 1
if count and ii == count:
break
return hasher.hexdigest() | [
"def",
"hashfile",
"(",
"fname",
",",
"blocksize",
"=",
"65536",
",",
"count",
"=",
"0",
")",
":",
"hasher",
"=",
"hashlib",
".",
"md5",
"(",
")",
"fname",
"=",
"pathlib",
".",
"Path",
"(",
"fname",
")",
"with",
"fname",
".",
"open",
"(",
"'rb'",
... | Compute md5 hex-hash of a file
Parameters
----------
fname: str
path to the file
blocksize: int
block size in bytes read from the file
(set to `0` to hash the entire file)
count: int
number of blocks read from the file | [
"Compute",
"md5",
"hex",
"-",
"hash",
"of",
"a",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/util.py#L15-L39 | train | 48,753 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/util.py | obj2str | def obj2str(obj):
"""String representation of an object for hashing"""
if isinstance(obj, str_types):
return obj.encode("utf-8")
elif isinstance(obj, pathlib.Path):
return obj2str(str(obj))
elif isinstance(obj, (bool, int, float)):
return str(obj).encode("utf-8")
elif obj is None:
return b"none"
elif isinstance(obj, np.ndarray):
return obj.tostring()
elif isinstance(obj, tuple):
return obj2str(list(obj))
elif isinstance(obj, list):
return b"".join(obj2str(o) for o in obj)
elif isinstance(obj, dict):
return obj2str(list(obj.items()))
elif hasattr(obj, "identifier"):
return obj2str(obj.identifier)
elif isinstance(obj, h5py.Dataset):
return obj2str(obj[0])
else:
raise ValueError("No rule to convert object '{}' to string.".
format(obj.__class__)) | python | def obj2str(obj):
"""String representation of an object for hashing"""
if isinstance(obj, str_types):
return obj.encode("utf-8")
elif isinstance(obj, pathlib.Path):
return obj2str(str(obj))
elif isinstance(obj, (bool, int, float)):
return str(obj).encode("utf-8")
elif obj is None:
return b"none"
elif isinstance(obj, np.ndarray):
return obj.tostring()
elif isinstance(obj, tuple):
return obj2str(list(obj))
elif isinstance(obj, list):
return b"".join(obj2str(o) for o in obj)
elif isinstance(obj, dict):
return obj2str(list(obj.items()))
elif hasattr(obj, "identifier"):
return obj2str(obj.identifier)
elif isinstance(obj, h5py.Dataset):
return obj2str(obj[0])
else:
raise ValueError("No rule to convert object '{}' to string.".
format(obj.__class__)) | [
"def",
"obj2str",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"str_types",
")",
":",
"return",
"obj",
".",
"encode",
"(",
"\"utf-8\"",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"pathlib",
".",
"Path",
")",
":",
"return",
"obj2str",
"... | String representation of an object for hashing | [
"String",
"representation",
"of",
"an",
"object",
"for",
"hashing"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/util.py#L47-L71 | train | 48,754 |
xenon-middleware/pyxenon | xenon/create_keys.py | create_self_signed_cert | def create_self_signed_cert():
"""Creates a self-signed certificate key pair."""
config_dir = Path(BaseDirectory.xdg_config_home) / 'xenon-grpc'
config_dir.mkdir(parents=True, exist_ok=True)
key_prefix = gethostname()
crt_file = config_dir / ('%s.crt' % key_prefix)
key_file = config_dir / ('%s.key' % key_prefix)
if crt_file.exists() and key_file.exists():
return crt_file, key_file
logger = logging.getLogger('xenon')
logger.info("Creating authentication keys for xenon-grpc.")
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().CN = gethostname()
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
# valid for almost ten years!
cert.gmtime_adj_notAfter(10 * 365 * 24 * 3600)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha256')
open(str(crt_file), "wb").write(
crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
open(str(key_file), "wb").write(
crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
return crt_file, key_file | python | def create_self_signed_cert():
"""Creates a self-signed certificate key pair."""
config_dir = Path(BaseDirectory.xdg_config_home) / 'xenon-grpc'
config_dir.mkdir(parents=True, exist_ok=True)
key_prefix = gethostname()
crt_file = config_dir / ('%s.crt' % key_prefix)
key_file = config_dir / ('%s.key' % key_prefix)
if crt_file.exists() and key_file.exists():
return crt_file, key_file
logger = logging.getLogger('xenon')
logger.info("Creating authentication keys for xenon-grpc.")
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().CN = gethostname()
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
# valid for almost ten years!
cert.gmtime_adj_notAfter(10 * 365 * 24 * 3600)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha256')
open(str(crt_file), "wb").write(
crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
open(str(key_file), "wb").write(
crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
return crt_file, key_file | [
"def",
"create_self_signed_cert",
"(",
")",
":",
"config_dir",
"=",
"Path",
"(",
"BaseDirectory",
".",
"xdg_config_home",
")",
"/",
"'xenon-grpc'",
"config_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"key_prefix",
"=",
... | Creates a self-signed certificate key pair. | [
"Creates",
"a",
"self",
"-",
"signed",
"certificate",
"key",
"pair",
"."
] | d61109ad339ee9bb9f0723471d532727b0f235ad | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/create_keys.py#L14-L49 | train | 48,755 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/__init__.py | get_project_name_from_path | def get_project_name_from_path(path, append_mx=False):
"""Get the project name from a path.
For a path "/home/peter/hans/HLC12398/online/M1_13.tdms" or
For a path "/home/peter/hans/HLC12398/online/data/M1_13.tdms" or
without the ".tdms" file, this will return always "HLC12398".
Parameters
----------
path: str
path to tdms file
append_mx: bool
append measurement number, e.g. "M1"
"""
path = pathlib.Path(path)
if path.suffix == ".tdms":
dirn = path.parent
mx = path.name.split("_")[0]
elif path.is_dir():
dirn = path
mx = ""
else:
dirn = path.parent
mx = ""
project = ""
if mx:
# check para.ini
para = dirn / (mx + "_para.ini")
if para.exists():
with para.open() as fd:
lines = fd.readlines()
for line in lines:
if line.startswith("Sample Name ="):
project = line.split("=")[1].strip()
break
if not project:
# check if the directory contains data or is online
root1, trail1 = dirn.parent, dirn.name
root2, trail2 = root1.parent, root1.name
trail3 = root2.name
if trail1.lower() in ["online", "offline"]:
# /home/peter/hans/HLC12398/online/
project = trail2
elif (trail1.lower() == "data" and
trail2.lower() in ["online", "offline"]):
# this is olis new folder sctructure
# /home/peter/hans/HLC12398/online/data/
project = trail3
else:
project = trail1
if append_mx:
project += " - " + mx
return project | python | def get_project_name_from_path(path, append_mx=False):
"""Get the project name from a path.
For a path "/home/peter/hans/HLC12398/online/M1_13.tdms" or
For a path "/home/peter/hans/HLC12398/online/data/M1_13.tdms" or
without the ".tdms" file, this will return always "HLC12398".
Parameters
----------
path: str
path to tdms file
append_mx: bool
append measurement number, e.g. "M1"
"""
path = pathlib.Path(path)
if path.suffix == ".tdms":
dirn = path.parent
mx = path.name.split("_")[0]
elif path.is_dir():
dirn = path
mx = ""
else:
dirn = path.parent
mx = ""
project = ""
if mx:
# check para.ini
para = dirn / (mx + "_para.ini")
if para.exists():
with para.open() as fd:
lines = fd.readlines()
for line in lines:
if line.startswith("Sample Name ="):
project = line.split("=")[1].strip()
break
if not project:
# check if the directory contains data or is online
root1, trail1 = dirn.parent, dirn.name
root2, trail2 = root1.parent, root1.name
trail3 = root2.name
if trail1.lower() in ["online", "offline"]:
# /home/peter/hans/HLC12398/online/
project = trail2
elif (trail1.lower() == "data" and
trail2.lower() in ["online", "offline"]):
# this is olis new folder sctructure
# /home/peter/hans/HLC12398/online/data/
project = trail3
else:
project = trail1
if append_mx:
project += " - " + mx
return project | [
"def",
"get_project_name_from_path",
"(",
"path",
",",
"append_mx",
"=",
"False",
")",
":",
"path",
"=",
"pathlib",
".",
"Path",
"(",
"path",
")",
"if",
"path",
".",
"suffix",
"==",
"\".tdms\"",
":",
"dirn",
"=",
"path",
".",
"parent",
"mx",
"=",
"path... | Get the project name from a path.
For a path "/home/peter/hans/HLC12398/online/M1_13.tdms" or
For a path "/home/peter/hans/HLC12398/online/data/M1_13.tdms" or
without the ".tdms" file, this will return always "HLC12398".
Parameters
----------
path: str
path to tdms file
append_mx: bool
append measurement number, e.g. "M1" | [
"Get",
"the",
"project",
"name",
"from",
"a",
"path",
"."
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/__init__.py#L183-L240 | train | 48,756 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/__init__.py | get_tdms_files | def get_tdms_files(directory):
"""Recursively find projects based on '.tdms' file endings
Searches the `directory` recursively and return a sorted list
of all found '.tdms' project files, except fluorescence
data trace files which end with `_traces.tdms`.
"""
path = pathlib.Path(directory).resolve()
# get all tdms files
tdmslist = [r for r in path.rglob("*.tdms") if r.is_file()]
# exclude traces files
tdmslist = [r for r in tdmslist if not r.name.endswith("_traces.tdms")]
return sorted(tdmslist) | python | def get_tdms_files(directory):
"""Recursively find projects based on '.tdms' file endings
Searches the `directory` recursively and return a sorted list
of all found '.tdms' project files, except fluorescence
data trace files which end with `_traces.tdms`.
"""
path = pathlib.Path(directory).resolve()
# get all tdms files
tdmslist = [r for r in path.rglob("*.tdms") if r.is_file()]
# exclude traces files
tdmslist = [r for r in tdmslist if not r.name.endswith("_traces.tdms")]
return sorted(tdmslist) | [
"def",
"get_tdms_files",
"(",
"directory",
")",
":",
"path",
"=",
"pathlib",
".",
"Path",
"(",
"directory",
")",
".",
"resolve",
"(",
")",
"# get all tdms files",
"tdmslist",
"=",
"[",
"r",
"for",
"r",
"in",
"path",
".",
"rglob",
"(",
"\"*.tdms\"",
")",
... | Recursively find projects based on '.tdms' file endings
Searches the `directory` recursively and return a sorted list
of all found '.tdms' project files, except fluorescence
data trace files which end with `_traces.tdms`. | [
"Recursively",
"find",
"projects",
"based",
"on",
".",
"tdms",
"file",
"endings"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/__init__.py#L243-L255 | train | 48,757 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/__init__.py | RTDC_TDMS._init_data_with_tdms | def _init_data_with_tdms(self, tdms_filename):
"""Initializes the current RT-DC dataset with a tdms file.
"""
tdms_file = TdmsFile(str(tdms_filename))
# time is always there
table = "Cell Track"
# Edit naming.dclab2tdms to add features
for arg in naming.tdms2dclab:
try:
data = tdms_file.object(table, arg).data
except KeyError:
pass
else:
if data is None or len(data) == 0:
# Ignore empty features. npTDMS treats empty
# features in the following way:
# - in nptdms 0.8.2, `data` is `None`
# - in nptdms 0.9.0, `data` is an array of length 0
continue
self._events[naming.tdms2dclab[arg]] = data
# Set up configuration
tdms_config = Configuration(
files=[self.path.with_name(self._mid + "_para.ini"),
self.path.with_name(self._mid + "_camera.ini")],
)
dclab_config = Configuration()
for section in naming.configmap:
for pname in naming.configmap[section]:
meta = naming.configmap[section][pname]
typ = dfn.config_funcs[section][pname]
if isinstance(meta, tuple):
osec, opar = meta
if osec in tdms_config and opar in tdms_config[osec]:
val = tdms_config[osec].pop(opar)
dclab_config[section][pname] = typ(val)
else:
dclab_config[section][pname] = typ(meta)
self.config = dclab_config
self._complete_config_tdms(tdms_config)
self._init_filters() | python | def _init_data_with_tdms(self, tdms_filename):
"""Initializes the current RT-DC dataset with a tdms file.
"""
tdms_file = TdmsFile(str(tdms_filename))
# time is always there
table = "Cell Track"
# Edit naming.dclab2tdms to add features
for arg in naming.tdms2dclab:
try:
data = tdms_file.object(table, arg).data
except KeyError:
pass
else:
if data is None or len(data) == 0:
# Ignore empty features. npTDMS treats empty
# features in the following way:
# - in nptdms 0.8.2, `data` is `None`
# - in nptdms 0.9.0, `data` is an array of length 0
continue
self._events[naming.tdms2dclab[arg]] = data
# Set up configuration
tdms_config = Configuration(
files=[self.path.with_name(self._mid + "_para.ini"),
self.path.with_name(self._mid + "_camera.ini")],
)
dclab_config = Configuration()
for section in naming.configmap:
for pname in naming.configmap[section]:
meta = naming.configmap[section][pname]
typ = dfn.config_funcs[section][pname]
if isinstance(meta, tuple):
osec, opar = meta
if osec in tdms_config and opar in tdms_config[osec]:
val = tdms_config[osec].pop(opar)
dclab_config[section][pname] = typ(val)
else:
dclab_config[section][pname] = typ(meta)
self.config = dclab_config
self._complete_config_tdms(tdms_config)
self._init_filters() | [
"def",
"_init_data_with_tdms",
"(",
"self",
",",
"tdms_filename",
")",
":",
"tdms_file",
"=",
"TdmsFile",
"(",
"str",
"(",
"tdms_filename",
")",
")",
"# time is always there",
"table",
"=",
"\"Cell Track\"",
"# Edit naming.dclab2tdms to add features",
"for",
"arg",
"i... | Initializes the current RT-DC dataset with a tdms file. | [
"Initializes",
"the",
"current",
"RT",
"-",
"DC",
"dataset",
"with",
"a",
"tdms",
"file",
"."
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/__init__.py#L69-L111 | train | 48,758 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/__init__.py | RTDC_TDMS.hash | def hash(self):
"""Hash value based on file name and .ini file content"""
if self._hash is None:
# Only hash _camera.ini and _para.ini
fsh = [self.path.with_name(self._mid + "_camera.ini"),
self.path.with_name(self._mid + "_para.ini")]
tohash = [hashfile(f) for f in fsh]
tohash.append(self.path.name)
# Hash a maximum of ~1MB of the tdms file
tohash.append(hashfile(self.path, blocksize=65536, count=20))
self._hash = hashobj(tohash)
return self._hash | python | def hash(self):
"""Hash value based on file name and .ini file content"""
if self._hash is None:
# Only hash _camera.ini and _para.ini
fsh = [self.path.with_name(self._mid + "_camera.ini"),
self.path.with_name(self._mid + "_para.ini")]
tohash = [hashfile(f) for f in fsh]
tohash.append(self.path.name)
# Hash a maximum of ~1MB of the tdms file
tohash.append(hashfile(self.path, blocksize=65536, count=20))
self._hash = hashobj(tohash)
return self._hash | [
"def",
"hash",
"(",
"self",
")",
":",
"if",
"self",
".",
"_hash",
"is",
"None",
":",
"# Only hash _camera.ini and _para.ini",
"fsh",
"=",
"[",
"self",
".",
"path",
".",
"with_name",
"(",
"self",
".",
"_mid",
"+",
"\"_camera.ini\"",
")",
",",
"self",
".",... | Hash value based on file name and .ini file content | [
"Hash",
"value",
"based",
"on",
"file",
"name",
"and",
".",
"ini",
"file",
"content"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/__init__.py#L169-L180 | train | 48,759 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/event_contour.py | ContourColumn.determine_offset | def determine_offset(self):
"""Determines the offset of the contours w.r.t. other data columns
Notes
-----
- the "frame" column of `rtdc_dataset` is compared to
the first contour in the contour text file to determine an
offset by one event
- modifies the property `event_offset` and sets `_initialized`
to `True`
"""
# In case of regular RTDC, the first contour is
# missing. In case of fRTDC, it is there, so we
# might have an offset. We find out if the first
# contour frame is missing by comparing it to
# the "frame" column of the rtdc dataset.
fref = self._contour_data.get_frame(0)
f0 = self.frame[0]
f1 = self.frame[1]
# Use allclose to avoid float/integer comparison problems
if np.allclose(fref, f0):
self.event_offset = 0
elif np.allclose(fref, f1):
self.event_offset = 1
else:
msg = "Contour data has unknown offset (frame {})!".format(fref)
raise IndexError(msg)
self._initialized = True | python | def determine_offset(self):
"""Determines the offset of the contours w.r.t. other data columns
Notes
-----
- the "frame" column of `rtdc_dataset` is compared to
the first contour in the contour text file to determine an
offset by one event
- modifies the property `event_offset` and sets `_initialized`
to `True`
"""
# In case of regular RTDC, the first contour is
# missing. In case of fRTDC, it is there, so we
# might have an offset. We find out if the first
# contour frame is missing by comparing it to
# the "frame" column of the rtdc dataset.
fref = self._contour_data.get_frame(0)
f0 = self.frame[0]
f1 = self.frame[1]
# Use allclose to avoid float/integer comparison problems
if np.allclose(fref, f0):
self.event_offset = 0
elif np.allclose(fref, f1):
self.event_offset = 1
else:
msg = "Contour data has unknown offset (frame {})!".format(fref)
raise IndexError(msg)
self._initialized = True | [
"def",
"determine_offset",
"(",
"self",
")",
":",
"# In case of regular RTDC, the first contour is",
"# missing. In case of fRTDC, it is there, so we",
"# might have an offset. We find out if the first",
"# contour frame is missing by comparing it to",
"# the \"frame\" column of the rtdc dataset... | Determines the offset of the contours w.r.t. other data columns
Notes
-----
- the "frame" column of `rtdc_dataset` is compared to
the first contour in the contour text file to determine an
offset by one event
- modifies the property `event_offset` and sets `_initialized`
to `True` | [
"Determines",
"the",
"offset",
"of",
"the",
"contours",
"w",
".",
"r",
".",
"t",
".",
"other",
"data",
"columns"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_contour.py#L61-L89 | train | 48,760 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/event_contour.py | ContourColumn.find_contour_file | def find_contour_file(rtdc_dataset):
"""Tries to find a contour file that belongs to an RTDC dataset
Returns None if no contour file is found.
"""
cont_id = rtdc_dataset.path.stem
cands = [c.name for c in rtdc_dataset._fdir.rglob("*_contours.txt")]
cands = sorted(cands)
# Search for perfect matches, e.g.
# - M1_0.240000ul_s.tdms
# - M1_0.240000ul_s_contours.txt
for c1 in cands:
if c1.startswith(cont_id):
cfile = rtdc_dataset._fdir / c1
break
else:
# Search for M* matches with most overlap, e.g.
# - M1_0.240000ul_s.tdms
# - M1_contours.txt
for c2 in cands:
if (c2.split("_")[0] == rtdc_dataset._mid):
# Do not confuse with M10_contours.txt
cfile = rtdc_dataset._fdir / c2
break
else:
msg = "No contour data found for {}".format(rtdc_dataset)
warnings.warn(msg, NoContourDataWarning)
cfile = None
return cfile | python | def find_contour_file(rtdc_dataset):
"""Tries to find a contour file that belongs to an RTDC dataset
Returns None if no contour file is found.
"""
cont_id = rtdc_dataset.path.stem
cands = [c.name for c in rtdc_dataset._fdir.rglob("*_contours.txt")]
cands = sorted(cands)
# Search for perfect matches, e.g.
# - M1_0.240000ul_s.tdms
# - M1_0.240000ul_s_contours.txt
for c1 in cands:
if c1.startswith(cont_id):
cfile = rtdc_dataset._fdir / c1
break
else:
# Search for M* matches with most overlap, e.g.
# - M1_0.240000ul_s.tdms
# - M1_contours.txt
for c2 in cands:
if (c2.split("_")[0] == rtdc_dataset._mid):
# Do not confuse with M10_contours.txt
cfile = rtdc_dataset._fdir / c2
break
else:
msg = "No contour data found for {}".format(rtdc_dataset)
warnings.warn(msg, NoContourDataWarning)
cfile = None
return cfile | [
"def",
"find_contour_file",
"(",
"rtdc_dataset",
")",
":",
"cont_id",
"=",
"rtdc_dataset",
".",
"path",
".",
"stem",
"cands",
"=",
"[",
"c",
".",
"name",
"for",
"c",
"in",
"rtdc_dataset",
".",
"_fdir",
".",
"rglob",
"(",
"\"*_contours.txt\"",
")",
"]",
"... | Tries to find a contour file that belongs to an RTDC dataset
Returns None if no contour file is found. | [
"Tries",
"to",
"find",
"a",
"contour",
"file",
"that",
"belongs",
"to",
"an",
"RTDC",
"dataset"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_contour.py#L92-L120 | train | 48,761 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/event_contour.py | ContourData._index_file | def _index_file(self):
"""Open and index the contour file
This function populates the internal list of contours
as strings which will be available as `self.data`.
"""
with self.filename.open() as fd:
data = fd.read()
ident = "Contour in frame"
self._data = data.split(ident)[1:]
self._initialized = True | python | def _index_file(self):
"""Open and index the contour file
This function populates the internal list of contours
as strings which will be available as `self.data`.
"""
with self.filename.open() as fd:
data = fd.read()
ident = "Contour in frame"
self._data = data.split(ident)[1:]
self._initialized = True | [
"def",
"_index_file",
"(",
"self",
")",
":",
"with",
"self",
".",
"filename",
".",
"open",
"(",
")",
"as",
"fd",
":",
"data",
"=",
"fd",
".",
"read",
"(",
")",
"ident",
"=",
"\"Contour in frame\"",
"self",
".",
"_data",
"=",
"data",
".",
"split",
"... | Open and index the contour file
This function populates the internal list of contours
as strings which will be available as `self.data`. | [
"Open",
"and",
"index",
"the",
"contour",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_contour.py#L152-L163 | train | 48,762 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/event_contour.py | ContourData.get_frame | def get_frame(self, idx):
"""Return the frame number of a contour"""
cont = self.data[idx]
frame = int(cont.strip().split(" ", 1)[0])
return frame | python | def get_frame(self, idx):
"""Return the frame number of a contour"""
cont = self.data[idx]
frame = int(cont.strip().split(" ", 1)[0])
return frame | [
"def",
"get_frame",
"(",
"self",
",",
"idx",
")",
":",
"cont",
"=",
"self",
".",
"data",
"[",
"idx",
"]",
"frame",
"=",
"int",
"(",
"cont",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\" \"",
",",
"1",
")",
"[",
"0",
"]",
")",
"return",
"fram... | Return the frame number of a contour | [
"Return",
"the",
"frame",
"number",
"of",
"a",
"contour"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_contour.py#L175-L179 | train | 48,763 |
ZELLMECHANIK-DRESDEN/dclab | dclab/features/emodulus_viscosity.py | get_viscosity | def get_viscosity(medium="CellCarrier", channel_width=20.0, flow_rate=0.16,
temperature=23.0):
"""Returns the viscosity for RT-DC-specific media
Parameters
----------
medium: str
The medium to compute the viscosity for.
One of ["CellCarrier", "CellCarrier B", "water"].
channel_width: float
The channel width in µm
flow_rate: float
Flow rate in µl/s
temperature: float or ndarray
Temperature in °C
Returns
-------
viscosity: float or ndarray
Viscosity in mPa*s
Notes
-----
- CellCarrier and CellCarrier B media are optimized for
RT-DC measurements.
- Values for the viscosity of water are computed using
equation (15) from :cite:`Kestin_1978`.
"""
if medium.lower() not in ["cellcarrier", "cellcarrier b", "water"]:
raise ValueError("Invalid medium: {}".format(medium))
# convert flow_rate from µl/s to m³/s
# convert channel_width from µm to m
term1 = 1.1856 * 6 * flow_rate * 1e-9 / (channel_width * 1e-6)**3 * 2 / 3
if medium == "CellCarrier":
temp_corr = (temperature / 23.2)**-0.866
term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.677)
eta = 0.179 * (term1 * term2)**(0.677 - 1) * temp_corr * 1e3
elif medium == "CellCarrier B":
temp_corr = (temperature / 23.6)**-0.866
term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.634)
eta = 0.360 * (term1 * term2)**(0.634 - 1) * temp_corr * 1e3
elif medium == "water":
# see equation (15) in Kestin et al, J. Phys. Chem. 7(3) 1978
if np.min(temperature) < 0 or np.max(temperature) > 40:
msg = "For water, the temperature must be in [0, 40] degC! " \
"Got min/max values of '{}'.".format(np.min(temperature),
np.max(temperature))
raise ValueError(msg)
eta0 = 1.002 # [mPa]
right = (20-temperature) / (temperature + 96) \
* (+ 1.2364
- 1.37e-3 * (20 - temperature)
+ 5.7e-6 * (20 - temperature)**2
)
eta = eta0 * 10**right
return eta | python | def get_viscosity(medium="CellCarrier", channel_width=20.0, flow_rate=0.16,
temperature=23.0):
"""Returns the viscosity for RT-DC-specific media
Parameters
----------
medium: str
The medium to compute the viscosity for.
One of ["CellCarrier", "CellCarrier B", "water"].
channel_width: float
The channel width in µm
flow_rate: float
Flow rate in µl/s
temperature: float or ndarray
Temperature in °C
Returns
-------
viscosity: float or ndarray
Viscosity in mPa*s
Notes
-----
- CellCarrier and CellCarrier B media are optimized for
RT-DC measurements.
- Values for the viscosity of water are computed using
equation (15) from :cite:`Kestin_1978`.
"""
if medium.lower() not in ["cellcarrier", "cellcarrier b", "water"]:
raise ValueError("Invalid medium: {}".format(medium))
# convert flow_rate from µl/s to m³/s
# convert channel_width from µm to m
term1 = 1.1856 * 6 * flow_rate * 1e-9 / (channel_width * 1e-6)**3 * 2 / 3
if medium == "CellCarrier":
temp_corr = (temperature / 23.2)**-0.866
term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.677)
eta = 0.179 * (term1 * term2)**(0.677 - 1) * temp_corr * 1e3
elif medium == "CellCarrier B":
temp_corr = (temperature / 23.6)**-0.866
term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.634)
eta = 0.360 * (term1 * term2)**(0.634 - 1) * temp_corr * 1e3
elif medium == "water":
# see equation (15) in Kestin et al, J. Phys. Chem. 7(3) 1978
if np.min(temperature) < 0 or np.max(temperature) > 40:
msg = "For water, the temperature must be in [0, 40] degC! " \
"Got min/max values of '{}'.".format(np.min(temperature),
np.max(temperature))
raise ValueError(msg)
eta0 = 1.002 # [mPa]
right = (20-temperature) / (temperature + 96) \
* (+ 1.2364
- 1.37e-3 * (20 - temperature)
+ 5.7e-6 * (20 - temperature)**2
)
eta = eta0 * 10**right
return eta | [
"def",
"get_viscosity",
"(",
"medium",
"=",
"\"CellCarrier\"",
",",
"channel_width",
"=",
"20.0",
",",
"flow_rate",
"=",
"0.16",
",",
"temperature",
"=",
"23.0",
")",
":",
"if",
"medium",
".",
"lower",
"(",
")",
"not",
"in",
"[",
"\"cellcarrier\"",
",",
... | Returns the viscosity for RT-DC-specific media
Parameters
----------
medium: str
The medium to compute the viscosity for.
One of ["CellCarrier", "CellCarrier B", "water"].
channel_width: float
The channel width in µm
flow_rate: float
Flow rate in µl/s
temperature: float or ndarray
Temperature in °C
Returns
-------
viscosity: float or ndarray
Viscosity in mPa*s
Notes
-----
- CellCarrier and CellCarrier B media are optimized for
RT-DC measurements.
- Values for the viscosity of water are computed using
equation (15) from :cite:`Kestin_1978`. | [
"Returns",
"the",
"viscosity",
"for",
"RT",
"-",
"DC",
"-",
"specific",
"media"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/emodulus_viscosity.py#L9-L66 | train | 48,764 |
ZELLMECHANIK-DRESDEN/dclab | dclab/statistics.py | get_statistics | def get_statistics(ds, methods=None, features=None):
"""Compute statistics for an RT-DC dataset
Parameters
----------
ds: dclab.rtdc_dataset.RTDCBase
The dataset for which to compute the statistics.
methods: list of str or None
        The methods with which to compute the statistics.
The list of available methods is given with
`dclab.statistics.Statistics.available_methods.keys()`
If set to `None`, statistics for all methods are computed.
features: list of str
Feature name identifiers are defined in
`dclab.definitions.scalar_feature_names`.
        If set to `None`, statistics for all features are computed.
Returns
-------
header: list of str
The header (feature + method names) of the computed statistics.
values: list of float
The computed statistics.
"""
if methods is None:
cls = list(Statistics.available_methods.keys())
# sort the features in a usable way
avm = Statistics.available_methods
me1 = [m for m in cls if not avm[m].req_feature]
me2 = [m for m in cls if avm[m].req_feature]
methods = me1 + me2
if features is None:
features = dfn.scalar_feature_names
else:
features = [a.lower() for a in features]
header = []
values = []
# To make sure that all methods are computed for each feature in a block,
# we loop over all features. It would be easier to loop over the methods,
# but the resulting statistics would not be human-friendly.
for ft in features:
for mt in methods:
meth = Statistics.available_methods[mt]
if meth.req_feature:
if ft in ds:
values.append(meth(ds=ds, feature=ft))
else:
values.append(np.nan)
header.append(" ".join([mt, dfn.feature_name2label[ft]]))
else:
# Prevent multiple entries of this method.
if not header.count(mt):
values.append(meth(ds=ds))
header.append(mt)
return header, values | python | def get_statistics(ds, methods=None, features=None):
"""Compute statistics for an RT-DC dataset
Parameters
----------
ds: dclab.rtdc_dataset.RTDCBase
The dataset for which to compute the statistics.
methods: list of str or None
        The methods with which to compute the statistics.
The list of available methods is given with
`dclab.statistics.Statistics.available_methods.keys()`
If set to `None`, statistics for all methods are computed.
features: list of str
Feature name identifiers are defined in
`dclab.definitions.scalar_feature_names`.
        If set to `None`, statistics for all features are computed.
Returns
-------
header: list of str
The header (feature + method names) of the computed statistics.
values: list of float
The computed statistics.
"""
if methods is None:
cls = list(Statistics.available_methods.keys())
# sort the features in a usable way
avm = Statistics.available_methods
me1 = [m for m in cls if not avm[m].req_feature]
me2 = [m for m in cls if avm[m].req_feature]
methods = me1 + me2
if features is None:
features = dfn.scalar_feature_names
else:
features = [a.lower() for a in features]
header = []
values = []
# To make sure that all methods are computed for each feature in a block,
# we loop over all features. It would be easier to loop over the methods,
# but the resulting statistics would not be human-friendly.
for ft in features:
for mt in methods:
meth = Statistics.available_methods[mt]
if meth.req_feature:
if ft in ds:
values.append(meth(ds=ds, feature=ft))
else:
values.append(np.nan)
header.append(" ".join([mt, dfn.feature_name2label[ft]]))
else:
# Prevent multiple entries of this method.
if not header.count(mt):
values.append(meth(ds=ds))
header.append(mt)
return header, values | [
"def",
"get_statistics",
"(",
"ds",
",",
"methods",
"=",
"None",
",",
"features",
"=",
"None",
")",
":",
"if",
"methods",
"is",
"None",
":",
"cls",
"=",
"list",
"(",
"Statistics",
".",
"available_methods",
".",
"keys",
"(",
")",
")",
"# sort the features... | Compute statistics for an RT-DC dataset
Parameters
----------
ds: dclab.rtdc_dataset.RTDCBase
The dataset for which to compute the statistics.
methods: list of str or None
        The methods with which to compute the statistics.
The list of available methods is given with
`dclab.statistics.Statistics.available_methods.keys()`
If set to `None`, statistics for all methods are computed.
features: list of str
Feature name identifiers are defined in
`dclab.definitions.scalar_feature_names`.
        If set to `None`, statistics for all features are computed.
Returns
-------
header: list of str
The header (feature + method names) of the computed statistics.
values: list of float
The computed statistics. | [
"Compute",
"statistics",
"for",
"an",
"RT",
"-",
"DC",
"dataset"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/statistics.py#L92-L150 | train | 48,765 |
ZELLMECHANIK-DRESDEN/dclab | dclab/statistics.py | mode | def mode(data):
"""Compute an intelligent value for the mode
    The most common value in experimental data is not very useful if there
    are a lot of digits after the comma. This method approaches this
    issue by rounding to a bin size that is determined by the
    Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule.
"""
# size
n = data.shape[0]
# interquartile range
iqr = np.percentile(data, 75)-np.percentile(data, 25)
# Freedman–Diaconis
bin_size = 2 * iqr / n**(1/3)
if bin_size == 0:
return np.nan
# Add bin_size/2, because we want the center of the bin and
# not the left corner of the bin.
databin = np.round(data/bin_size)*bin_size + bin_size/2
u, indices = np.unique(databin, return_inverse=True)
mode = u[np.argmax(np.bincount(indices))]
return mode | python | def mode(data):
"""Compute an intelligent value for the mode
    The most common value in experimental data is not very useful if there
    are a lot of digits after the comma. This method approaches this
    issue by rounding to a bin size that is determined by the
    Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule.
"""
# size
n = data.shape[0]
# interquartile range
iqr = np.percentile(data, 75)-np.percentile(data, 25)
# Freedman–Diaconis
bin_size = 2 * iqr / n**(1/3)
if bin_size == 0:
return np.nan
# Add bin_size/2, because we want the center of the bin and
# not the left corner of the bin.
databin = np.round(data/bin_size)*bin_size + bin_size/2
u, indices = np.unique(databin, return_inverse=True)
mode = u[np.argmax(np.bincount(indices))]
return mode | [
"def",
"mode",
"(",
"data",
")",
":",
"# size",
"n",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"# interquartile range",
"iqr",
"=",
"np",
".",
"percentile",
"(",
"data",
",",
"75",
")",
"-",
"np",
".",
"percentile",
"(",
"data",
",",
"25",
")",
"... | Compute an intelligent value for the mode
    The most common value in experimental data is not very useful if there
    are a lot of digits after the comma. This method approaches this
    issue by rounding to a bin size that is determined by the
    Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule. | [
"Compute",
"an",
"intelligent",
"value",
"for",
"the",
"mode"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/statistics.py#L153-L187 | train | 48,766 |
ZELLMECHANIK-DRESDEN/dclab | dclab/statistics.py | Statistics._get_data | def _get_data(self, kwargs):
"""Convenience wrapper to get statistics data"""
if "ds" not in kwargs:
raise ValueError("Keyword argument 'ds' missing.")
ds = kwargs["ds"]
if self.req_feature:
if "feature" not in kwargs:
raise ValueError("Keyword argument 'feature' missing.")
return self.get_feature(ds, kwargs["feature"])
else:
return ds | python | def _get_data(self, kwargs):
"""Convenience wrapper to get statistics data"""
if "ds" not in kwargs:
raise ValueError("Keyword argument 'ds' missing.")
ds = kwargs["ds"]
if self.req_feature:
if "feature" not in kwargs:
raise ValueError("Keyword argument 'feature' missing.")
return self.get_feature(ds, kwargs["feature"])
else:
return ds | [
"def",
"_get_data",
"(",
"self",
",",
"kwargs",
")",
":",
"if",
"\"ds\"",
"not",
"in",
"kwargs",
":",
"raise",
"ValueError",
"(",
"\"Keyword argument 'ds' missing.\"",
")",
"ds",
"=",
"kwargs",
"[",
"\"ds\"",
"]",
"if",
"self",
".",
"req_feature",
":",
"if... | Convenience wrapper to get statistics data | [
"Convenience",
"wrapper",
"to",
"get",
"statistics",
"data"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/statistics.py#L46-L58 | train | 48,767 |
ZELLMECHANIK-DRESDEN/dclab | dclab/statistics.py | Statistics.get_feature | def get_feature(self, ds, feat):
"""Return filtered feature data
The features are filtered according to the user-defined filters,
using the information in `ds._filter`. In addition, all
`nan` and `inf` values are purged.
Parameters
----------
ds: dclab.rtdc_dataset.RTDCBase
The dataset containing the feature
feat: str
The name of the feature; must be a scalar feature
"""
if ds.config["filtering"]["enable filters"]:
x = ds[feat][ds._filter]
else:
x = ds[feat]
bad = np.isnan(x) | np.isinf(x)
xout = x[~bad]
return xout | python | def get_feature(self, ds, feat):
"""Return filtered feature data
The features are filtered according to the user-defined filters,
using the information in `ds._filter`. In addition, all
`nan` and `inf` values are purged.
Parameters
----------
ds: dclab.rtdc_dataset.RTDCBase
The dataset containing the feature
feat: str
The name of the feature; must be a scalar feature
"""
if ds.config["filtering"]["enable filters"]:
x = ds[feat][ds._filter]
else:
x = ds[feat]
bad = np.isnan(x) | np.isinf(x)
xout = x[~bad]
return xout | [
"def",
"get_feature",
"(",
"self",
",",
"ds",
",",
"feat",
")",
":",
"if",
"ds",
".",
"config",
"[",
"\"filtering\"",
"]",
"[",
"\"enable filters\"",
"]",
":",
"x",
"=",
"ds",
"[",
"feat",
"]",
"[",
"ds",
".",
"_filter",
"]",
"else",
":",
"x",
"=... | Return filtered feature data
The features are filtered according to the user-defined filters,
using the information in `ds._filter`. In addition, all
`nan` and `inf` values are purged.
Parameters
----------
ds: dclab.rtdc_dataset.RTDCBase
The dataset containing the feature
feat: str
The name of the feature; must be a scalar feature | [
"Return",
"filtered",
"feature",
"data"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/statistics.py#L60-L80 | train | 48,768 |
kpn-digital/py-timeexecution | time_execution/decorator.py | time_execution.get_exception | def get_exception(self):
"""Retrieve the exception"""
if self.exc_info:
try:
six.reraise(*self.exc_info)
except Exception as e:
return e | python | def get_exception(self):
"""Retrieve the exception"""
if self.exc_info:
try:
six.reraise(*self.exc_info)
except Exception as e:
return e | [
"def",
"get_exception",
"(",
"self",
")",
":",
"if",
"self",
".",
"exc_info",
":",
"try",
":",
"six",
".",
"reraise",
"(",
"*",
"self",
".",
"exc_info",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"e"
] | Retrieve the exception | [
"Retrieve",
"the",
"exception"
] | 79b991e83f783196c41b830d0acef21ac5462596 | https://github.com/kpn-digital/py-timeexecution/blob/79b991e83f783196c41b830d0acef21ac5462596/time_execution/decorator.py#L72-L78 | train | 48,769 |
mhostetter/nhl | nhl/flyweight.py | Flyweight.has_key | def has_key(cls, *args):
"""
Check whether flyweight object with specified key has already been created.
Returns:
bool: True if already created, False if not
"""
key = args if len(args) > 1 else args[0]
return key in cls._instances | python | def has_key(cls, *args):
"""
Check whether flyweight object with specified key has already been created.
Returns:
bool: True if already created, False if not
"""
key = args if len(args) > 1 else args[0]
return key in cls._instances | [
"def",
"has_key",
"(",
"cls",
",",
"*",
"args",
")",
":",
"key",
"=",
"args",
"if",
"len",
"(",
"args",
")",
">",
"1",
"else",
"args",
"[",
"0",
"]",
"return",
"key",
"in",
"cls",
".",
"_instances"
] | Check whether flyweight object with specified key has already been created.
Returns:
bool: True if already created, False if not | [
"Check",
"whether",
"flyweight",
"object",
"with",
"specified",
"key",
"has",
"already",
"been",
"created",
"."
] | 32c91cc392826e9de728563d57ab527421734ee1 | https://github.com/mhostetter/nhl/blob/32c91cc392826e9de728563d57ab527421734ee1/nhl/flyweight.py#L42-L50 | train | 48,770 |
mhostetter/nhl | nhl/flyweight.py | Flyweight.from_key | def from_key(cls, *args):
"""
Return flyweight object with specified key, if it has already been created.
Returns:
cls or None: Previously constructed flyweight object with given
key or None if key not found
"""
key = args if len(args) > 1 else args[0]
return cls._instances.get(key, None) | python | def from_key(cls, *args):
"""
Return flyweight object with specified key, if it has already been created.
Returns:
cls or None: Previously constructed flyweight object with given
key or None if key not found
"""
key = args if len(args) > 1 else args[0]
return cls._instances.get(key, None) | [
"def",
"from_key",
"(",
"cls",
",",
"*",
"args",
")",
":",
"key",
"=",
"args",
"if",
"len",
"(",
"args",
")",
">",
"1",
"else",
"args",
"[",
"0",
"]",
"return",
"cls",
".",
"_instances",
".",
"get",
"(",
"key",
",",
"None",
")"
] | Return flyweight object with specified key, if it has already been created.
Returns:
cls or None: Previously constructed flyweight object with given
key or None if key not found | [
"Return",
"flyweight",
"object",
"with",
"specified",
"key",
"if",
"it",
"has",
"already",
"been",
"created",
"."
] | 32c91cc392826e9de728563d57ab527421734ee1 | https://github.com/mhostetter/nhl/blob/32c91cc392826e9de728563d57ab527421734ee1/nhl/flyweight.py#L53-L62 | train | 48,771 |
kpn-digital/py-timeexecution | time_execution/backends/elasticsearch.py | ElasticsearchBackend.write | def write(self, name, **data):
"""
Write the metric to elasticsearch
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric
"""
data["name"] = name
if not ("timestamp" in data):
data["timestamp"] = datetime.utcnow()
try:
self.client.index(
index=self.get_index(),
doc_type=self.doc_type,
id=None,
body=data
)
except TransportError as exc:
logger.warning('writing metric %r failure %r', data, exc) | python | def write(self, name, **data):
"""
Write the metric to elasticsearch
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric
"""
data["name"] = name
if not ("timestamp" in data):
data["timestamp"] = datetime.utcnow()
try:
self.client.index(
index=self.get_index(),
doc_type=self.doc_type,
id=None,
body=data
)
except TransportError as exc:
logger.warning('writing metric %r failure %r', data, exc) | [
"def",
"write",
"(",
"self",
",",
"name",
",",
"*",
"*",
"data",
")",
":",
"data",
"[",
"\"name\"",
"]",
"=",
"name",
"if",
"not",
"(",
"\"timestamp\"",
"in",
"data",
")",
":",
"data",
"[",
"\"timestamp\"",
"]",
"=",
"datetime",
".",
"utcnow",
"(",... | Write the metric to elasticsearch
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric | [
"Write",
"the",
"metric",
"to",
"elasticsearch"
] | 79b991e83f783196c41b830d0acef21ac5462596 | https://github.com/kpn-digital/py-timeexecution/blob/79b991e83f783196c41b830d0acef21ac5462596/time_execution/backends/elasticsearch.py#L87-L108 | train | 48,772 |
kpn-digital/py-timeexecution | time_execution/backends/elasticsearch.py | ElasticsearchBackend.bulk_write | def bulk_write(self, metrics):
"""
Write multiple metrics to elasticsearch in one request
Args:
metrics (list): data with mappings to send to elasticsearch
"""
actions = []
index = self.get_index()
for metric in metrics:
actions.append({'index': {'_index': index, '_type': self.doc_type}})
actions.append(metric)
try:
self.client.bulk(actions)
except TransportError as exc:
logger.warning('bulk_write metrics %r failure %r', metrics, exc) | python | def bulk_write(self, metrics):
"""
Write multiple metrics to elasticsearch in one request
Args:
metrics (list): data with mappings to send to elasticsearch
"""
actions = []
index = self.get_index()
for metric in metrics:
actions.append({'index': {'_index': index, '_type': self.doc_type}})
actions.append(metric)
try:
self.client.bulk(actions)
except TransportError as exc:
logger.warning('bulk_write metrics %r failure %r', metrics, exc) | [
"def",
"bulk_write",
"(",
"self",
",",
"metrics",
")",
":",
"actions",
"=",
"[",
"]",
"index",
"=",
"self",
".",
"get_index",
"(",
")",
"for",
"metric",
"in",
"metrics",
":",
"actions",
".",
"append",
"(",
"{",
"'index'",
":",
"{",
"'_index'",
":",
... | Write multiple metrics to elasticsearch in one request
Args:
metrics (list): data with mappings to send to elasticsearch | [
"Write",
"multiple",
"metrics",
"to",
"elasticsearch",
"in",
"one",
"request"
] | 79b991e83f783196c41b830d0acef21ac5462596 | https://github.com/kpn-digital/py-timeexecution/blob/79b991e83f783196c41b830d0acef21ac5462596/time_execution/backends/elasticsearch.py#L110-L125 | train | 48,773 |
openstax/cnx-archive | cnxarchive/cache.py | search | def search(query, query_type, nocache=False):
"""Search archive contents.
    Look up search results in the cache; if they are not in the cache,
    do a database search and cache the result.
"""
settings = get_current_registry().settings
memcache_servers = settings['memcache-servers'].split()
if not memcache_servers:
# memcache is not enabled, do a database search directly
return database_search(query, query_type)
# sort query params and create a key for the search
search_params = []
search_params += copy.deepcopy(query.terms)
search_params += copy.deepcopy(query.filters)
search_params += [('sort', i) for i in query.sorts]
search_params.sort(key=lambda record: (record[0], record[1]))
search_params.append(('query_type', query_type))
# search_key should look something like:
# '"sort:pubDate" "text:college physics" "query_type:weakAND"'
search_key = u' '.join([u'"{}"'.format(u':'.join(param))
for param in search_params])
# hash the search_key so it never exceeds the key length limit (250) in
# memcache
mc_search_key = binascii.hexlify(
hashlib.pbkdf2_hmac('sha1', search_key.encode('utf-8'), b'', 1))
# look for search results in memcache first, unless nocache
mc = memcache.Client(memcache_servers,
server_max_value_length=128*1024*1024, debug=0)
if not nocache:
search_results = mc.get(mc_search_key)
else:
search_results = None
if not search_results:
# search results is not in memcache, do a database search
search_results = database_search(query, query_type)
cache_length = int(settings['search-cache-expiration'])
# for particular searches, store in memcache for longer
if (len(search_params) == 2 and
# search by subject
search_params[0][0] == 'subject' or
# search single terms
search_params[0][0] == 'text' and
' ' not in search_params[0][1]):
# search with one term or one filter, plus query_type
cache_length = int(settings['search-long-cache-expiration'])
# store in memcache
mc.set(mc_search_key, search_results, time=cache_length,
min_compress_len=1024*1024) # compress when > 1MB
# return search results
return search_results | python | def search(query, query_type, nocache=False):
"""Search archive contents.
    Look up search results in the cache; if they are not in the cache,
    do a database search and cache the result.
"""
settings = get_current_registry().settings
memcache_servers = settings['memcache-servers'].split()
if not memcache_servers:
# memcache is not enabled, do a database search directly
return database_search(query, query_type)
# sort query params and create a key for the search
search_params = []
search_params += copy.deepcopy(query.terms)
search_params += copy.deepcopy(query.filters)
search_params += [('sort', i) for i in query.sorts]
search_params.sort(key=lambda record: (record[0], record[1]))
search_params.append(('query_type', query_type))
# search_key should look something like:
# '"sort:pubDate" "text:college physics" "query_type:weakAND"'
search_key = u' '.join([u'"{}"'.format(u':'.join(param))
for param in search_params])
# hash the search_key so it never exceeds the key length limit (250) in
# memcache
mc_search_key = binascii.hexlify(
hashlib.pbkdf2_hmac('sha1', search_key.encode('utf-8'), b'', 1))
# look for search results in memcache first, unless nocache
mc = memcache.Client(memcache_servers,
server_max_value_length=128*1024*1024, debug=0)
if not nocache:
search_results = mc.get(mc_search_key)
else:
search_results = None
if not search_results:
# search results is not in memcache, do a database search
search_results = database_search(query, query_type)
cache_length = int(settings['search-cache-expiration'])
# for particular searches, store in memcache for longer
if (len(search_params) == 2 and
# search by subject
search_params[0][0] == 'subject' or
# search single terms
search_params[0][0] == 'text' and
' ' not in search_params[0][1]):
# search with one term or one filter, plus query_type
cache_length = int(settings['search-long-cache-expiration'])
# store in memcache
mc.set(mc_search_key, search_results, time=cache_length,
min_compress_len=1024*1024) # compress when > 1MB
# return search results
return search_results | [
"def",
"search",
"(",
"query",
",",
"query_type",
",",
"nocache",
"=",
"False",
")",
":",
"settings",
"=",
"get_current_registry",
"(",
")",
".",
"settings",
"memcache_servers",
"=",
"settings",
"[",
"'memcache-servers'",
"]",
".",
"split",
"(",
")",
"if",
... | Search archive contents.
    Look up search results in the cache; if they are not in the cache,
    do a database search and cache the result.
"Search",
"archive",
"contents",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/cache.py#L20-L79 | train | 48,774 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/ancillaries/ancillary_feature.py | AncillaryFeature.available_features | def available_features(rtdc_ds):
"""Determine available features for an RT-DC dataset
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to check availability for
Returns
-------
features: dict
Dictionary with feature names as keys and instances
of `AncillaryFeature` as values.
"""
cols = {}
for inst in AncillaryFeature.features:
if inst.is_available(rtdc_ds):
cols[inst.feature_name] = inst
return cols | python | def available_features(rtdc_ds):
"""Determine available features for an RT-DC dataset
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to check availability for
Returns
-------
features: dict
Dictionary with feature names as keys and instances
of `AncillaryFeature` as values.
"""
cols = {}
for inst in AncillaryFeature.features:
if inst.is_available(rtdc_ds):
cols[inst.feature_name] = inst
return cols | [
"def",
"available_features",
"(",
"rtdc_ds",
")",
":",
"cols",
"=",
"{",
"}",
"for",
"inst",
"in",
"AncillaryFeature",
".",
"features",
":",
"if",
"inst",
".",
"is_available",
"(",
"rtdc_ds",
")",
":",
"cols",
"[",
"inst",
".",
"feature_name",
"]",
"=",
... | Determine available features for an RT-DC dataset
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to check availability for
Returns
-------
features: dict
Dictionary with feature names as keys and instances
of `AncillaryFeature` as values. | [
"Determine",
"available",
"features",
"for",
"an",
"RT",
"-",
"DC",
"dataset"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/ancillaries/ancillary_feature.py#L85-L103 | train | 48,775 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/ancillaries/ancillary_feature.py | AncillaryFeature.compute | def compute(self, rtdc_ds):
"""Compute the feature with self.method
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to compute the feature for
Returns
-------
feature: array- or list-like
The computed data feature (read-only).
"""
data = self.method(rtdc_ds)
dsize = len(rtdc_ds) - len(data)
if dsize > 0:
msg = "Growing feature {} in {} by {} to match event number!"
warnings.warn(msg.format(self.feature_name, rtdc_ds, abs(dsize)),
BadFeatureSizeWarning)
data.resize(len(rtdc_ds), refcheck=False)
data[-dsize:] = np.nan
elif dsize < 0:
msg = "Shrinking feature {} in {} by {} to match event number!"
warnings.warn(msg.format(self.feature_name, rtdc_ds, abs(dsize)),
BadFeatureSizeWarning)
data.resize(len(rtdc_ds), refcheck=False)
if isinstance(data, np.ndarray):
data.setflags(write=False)
elif isinstance(data, list):
for item in data:
if isinstance(item, np.ndarray):
item.setflags(write=False)
return data | python | def compute(self, rtdc_ds):
"""Compute the feature with self.method
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to compute the feature for
Returns
-------
feature: array- or list-like
The computed data feature (read-only).
"""
data = self.method(rtdc_ds)
dsize = len(rtdc_ds) - len(data)
if dsize > 0:
msg = "Growing feature {} in {} by {} to match event number!"
warnings.warn(msg.format(self.feature_name, rtdc_ds, abs(dsize)),
BadFeatureSizeWarning)
data.resize(len(rtdc_ds), refcheck=False)
data[-dsize:] = np.nan
elif dsize < 0:
msg = "Shrinking feature {} in {} by {} to match event number!"
warnings.warn(msg.format(self.feature_name, rtdc_ds, abs(dsize)),
BadFeatureSizeWarning)
data.resize(len(rtdc_ds), refcheck=False)
if isinstance(data, np.ndarray):
data.setflags(write=False)
elif isinstance(data, list):
for item in data:
if isinstance(item, np.ndarray):
item.setflags(write=False)
return data | [
"def",
"compute",
"(",
"self",
",",
"rtdc_ds",
")",
":",
"data",
"=",
"self",
".",
"method",
"(",
"rtdc_ds",
")",
"dsize",
"=",
"len",
"(",
"rtdc_ds",
")",
"-",
"len",
"(",
"data",
")",
"if",
"dsize",
">",
"0",
":",
"msg",
"=",
"\"Growing feature {... | Compute the feature with self.method
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to compute the feature for
Returns
-------
feature: array- or list-like
The computed data feature (read-only). | [
"Compute",
"the",
"feature",
"with",
"self",
".",
"method"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/ancillaries/ancillary_feature.py#L105-L140 | train | 48,776 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/ancillaries/ancillary_feature.py | AncillaryFeature.get_instances | def get_instances(feature_name):
"""Return all all instances that compute `feature_name`"""
feats = []
for ft in AncillaryFeature.features:
if ft.feature_name == feature_name:
feats.append(ft)
return feats | python | def get_instances(feature_name):
"""Return all all instances that compute `feature_name`"""
feats = []
for ft in AncillaryFeature.features:
if ft.feature_name == feature_name:
feats.append(ft)
return feats | [
"def",
"get_instances",
"(",
"feature_name",
")",
":",
"feats",
"=",
"[",
"]",
"for",
"ft",
"in",
"AncillaryFeature",
".",
"features",
":",
"if",
"ft",
".",
"feature_name",
"==",
"feature_name",
":",
"feats",
".",
"append",
"(",
"ft",
")",
"return",
"fea... | Return all all instances that compute `feature_name` | [
"Return",
"all",
"all",
"instances",
"that",
"compute",
"feature_name"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/ancillaries/ancillary_feature.py#L143-L149 | train | 48,777 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/ancillaries/ancillary_feature.py | AncillaryFeature.hash | def hash(self, rtdc_ds):
"""Used for identifying an ancillary computation
The data columns and the used configuration keys/values
are hashed.
"""
hasher = hashlib.md5()
# data columns
for col in self.req_features:
hasher.update(obj2str(rtdc_ds[col]))
# config keys
for sec, keys in self.req_config:
for key in keys:
val = rtdc_ds.config[sec][key]
data = "{}:{}={}".format(sec, key, val)
hasher.update(obj2str(data))
return hasher.hexdigest() | python | def hash(self, rtdc_ds):
"""Used for identifying an ancillary computation
The data columns and the used configuration keys/values
are hashed.
"""
hasher = hashlib.md5()
# data columns
for col in self.req_features:
hasher.update(obj2str(rtdc_ds[col]))
# config keys
for sec, keys in self.req_config:
for key in keys:
val = rtdc_ds.config[sec][key]
data = "{}:{}={}".format(sec, key, val)
hasher.update(obj2str(data))
return hasher.hexdigest() | [
"def",
"hash",
"(",
"self",
",",
"rtdc_ds",
")",
":",
"hasher",
"=",
"hashlib",
".",
"md5",
"(",
")",
"# data columns",
"for",
"col",
"in",
"self",
".",
"req_features",
":",
"hasher",
".",
"update",
"(",
"obj2str",
"(",
"rtdc_ds",
"[",
"col",
"]",
")... | Used for identifying an ancillary computation
The data columns and the used configuration keys/values
are hashed. | [
"Used",
"for",
"identifying",
"an",
"ancillary",
"computation"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/ancillaries/ancillary_feature.py#L151-L167 | train | 48,778 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/ancillaries/ancillary_feature.py | AncillaryFeature.is_available | def is_available(self, rtdc_ds, verbose=False):
"""Check whether the feature is available
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to check availability for
Returns
-------
available: bool
`True`, if feature can be computed with `compute`
Notes
-----
This method returns `False` for a feature if there
is a feature defined with the same name but with
higher priority (even if the feature would be
available otherwise).
"""
# Check config keys
for item in self.req_config:
section, keys = item
if section not in rtdc_ds.config:
if verbose:
print("{} not in config".format(section))
return False
else:
for key in keys:
if key not in rtdc_ds.config[section]:
if verbose:
print("{} not in config['{}']".format(key,
section))
return False
# Check features
for col in self.req_features:
if col not in rtdc_ds:
return False
# Check priorities of other features
for of in AncillaryFeature.features:
if of == self:
# nothing to compare
continue
elif of.feature_name == self.feature_name:
# same feature name
if of.priority <= self.priority:
# lower priority, ignore
continue
else:
# higher priority
if of.is_available(rtdc_ds):
# higher priority is available, thus
# this feature is not available
return False
else:
# higher priority not available
continue
else:
# other feature
continue
return True | python | def is_available(self, rtdc_ds, verbose=False):
"""Check whether the feature is available
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to check availability for
Returns
-------
available: bool
`True`, if feature can be computed with `compute`
Notes
-----
This method returns `False` for a feature if there
is a feature defined with the same name but with
higher priority (even if the feature would be
available otherwise).
"""
# Check config keys
for item in self.req_config:
section, keys = item
if section not in rtdc_ds.config:
if verbose:
print("{} not in config".format(section))
return False
else:
for key in keys:
if key not in rtdc_ds.config[section]:
if verbose:
print("{} not in config['{}']".format(key,
section))
return False
# Check features
for col in self.req_features:
if col not in rtdc_ds:
return False
# Check priorities of other features
for of in AncillaryFeature.features:
if of == self:
# nothing to compare
continue
elif of.feature_name == self.feature_name:
# same feature name
if of.priority <= self.priority:
# lower priority, ignore
continue
else:
# higher priority
if of.is_available(rtdc_ds):
# higher priority is available, thus
# this feature is not available
return False
else:
# higher priority not available
continue
else:
# other feature
continue
return True | [
"def",
"is_available",
"(",
"self",
",",
"rtdc_ds",
",",
"verbose",
"=",
"False",
")",
":",
"# Check config keys",
"for",
"item",
"in",
"self",
".",
"req_config",
":",
"section",
",",
"keys",
"=",
"item",
"if",
"section",
"not",
"in",
"rtdc_ds",
".",
"co... | Check whether the feature is available
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to check availability for
Returns
-------
available: bool
`True`, if feature can be computed with `compute`
Notes
-----
This method returns `False` for a feature if there
is a feature defined with the same name but with
higher priority (even if the feature would be
available otherwise). | [
"Check",
"whether",
"the",
"feature",
"is",
"available"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/ancillaries/ancillary_feature.py#L169-L229 | train | 48,779 |
kpn-digital/py-timeexecution | time_execution/backends/kafka.py | KafkaBackend.write | def write(self, name, **data):
"""
Write the metric to kafka
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric
"""
data["name"] = name
if not ("timestamp" in data):
data["timestamp"] = datetime.utcnow()
try:
self.producer.send(topic=self.topic, value=data)
self.producer.flush()
except (KafkaTimeoutError, NoBrokersAvailable) as exc:
logger.warning('writing metric %r failure %r', data, exc) | python | def write(self, name, **data):
"""
Write the metric to kafka
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric
"""
data["name"] = name
if not ("timestamp" in data):
data["timestamp"] = datetime.utcnow()
try:
self.producer.send(topic=self.topic, value=data)
self.producer.flush()
except (KafkaTimeoutError, NoBrokersAvailable) as exc:
logger.warning('writing metric %r failure %r', data, exc) | [
"def",
"write",
"(",
"self",
",",
"name",
",",
"*",
"*",
"data",
")",
":",
"data",
"[",
"\"name\"",
"]",
"=",
"name",
"if",
"not",
"(",
"\"timestamp\"",
"in",
"data",
")",
":",
"data",
"[",
"\"timestamp\"",
"]",
"=",
"datetime",
".",
"utcnow",
"(",... | Write the metric to kafka
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric | [
"Write",
"the",
"metric",
"to",
"kafka"
] | 79b991e83f783196c41b830d0acef21ac5462596 | https://github.com/kpn-digital/py-timeexecution/blob/79b991e83f783196c41b830d0acef21ac5462596/time_execution/backends/kafka.py#L50-L67 | train | 48,780 |
kpn-digital/py-timeexecution | time_execution/backends/kafka.py | KafkaBackend.bulk_write | def bulk_write(self, metrics):
"""
Write multiple metrics to kafka in one request
Args:
metrics (list):
"""
try:
for metric in metrics:
self.producer.send(self.topic, metric)
self.producer.flush()
except (KafkaTimeoutError, NoBrokersAvailable) as exc:
logger.warning('bulk_write metrics %r failure %r', metrics, exc) | python | def bulk_write(self, metrics):
"""
Write multiple metrics to kafka in one request
Args:
metrics (list):
"""
try:
for metric in metrics:
self.producer.send(self.topic, metric)
self.producer.flush()
except (KafkaTimeoutError, NoBrokersAvailable) as exc:
logger.warning('bulk_write metrics %r failure %r', metrics, exc) | [
"def",
"bulk_write",
"(",
"self",
",",
"metrics",
")",
":",
"try",
":",
"for",
"metric",
"in",
"metrics",
":",
"self",
".",
"producer",
".",
"send",
"(",
"self",
".",
"topic",
",",
"metric",
")",
"self",
".",
"producer",
".",
"flush",
"(",
")",
"ex... | Write multiple metrics to kafka in one request
Args:
metrics (list): | [
"Write",
"multiple",
"metrics",
"to",
"kafka",
"in",
"one",
"request"
] | 79b991e83f783196c41b830d0acef21ac5462596 | https://github.com/kpn-digital/py-timeexecution/blob/79b991e83f783196c41b830d0acef21ac5462596/time_execution/backends/kafka.py#L69-L81 | train | 48,781 |
openstax/cnx-archive | cnxarchive/utils/safe.py | safe_stat | def safe_stat(path, timeout=1, cmd=None):
"Use threads and a subproc to bodge a timeout on top of filesystem access"
global safe_stat_process
if cmd is None:
cmd = ['/usr/bin/stat']
cmd.append(path)
def target():
global safe_stat_process
logger.debug('Stat thread started')
safe_stat_process = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
_results = safe_stat_process.communicate() # noqa
logger.debug('Stat thread finished')
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive(): # stat took longer than timeout
safe_stat_process.terminate()
thread.join()
return safe_stat_process.returncode == 0 | python | def safe_stat(path, timeout=1, cmd=None):
"Use threads and a subproc to bodge a timeout on top of filesystem access"
global safe_stat_process
if cmd is None:
cmd = ['/usr/bin/stat']
cmd.append(path)
def target():
global safe_stat_process
logger.debug('Stat thread started')
safe_stat_process = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
_results = safe_stat_process.communicate() # noqa
logger.debug('Stat thread finished')
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive(): # stat took longer than timeout
safe_stat_process.terminate()
thread.join()
return safe_stat_process.returncode == 0 | [
"def",
"safe_stat",
"(",
"path",
",",
"timeout",
"=",
"1",
",",
"cmd",
"=",
"None",
")",
":",
"global",
"safe_stat_process",
"if",
"cmd",
"is",
"None",
":",
"cmd",
"=",
"[",
"'/usr/bin/stat'",
"]",
"cmd",
".",
"append",
"(",
"path",
")",
"def",
"targ... | Use threads and a subproc to bodge a timeout on top of filesystem access | [
"Use",
"threads",
"and",
"a",
"subproc",
"to",
"bodge",
"a",
"timeout",
"on",
"top",
"of",
"filesystem",
"access"
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/utils/safe.py#L12-L36 | train | 48,782 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | get_polygon_filter_names | def get_polygon_filter_names():
"""Get the names of all polygon filters in the order of creation"""
names = []
for p in PolygonFilter.instances:
names.append(p.name)
return names | python | def get_polygon_filter_names():
"""Get the names of all polygon filters in the order of creation"""
names = []
for p in PolygonFilter.instances:
names.append(p.name)
return names | [
"def",
"get_polygon_filter_names",
"(",
")",
":",
"names",
"=",
"[",
"]",
"for",
"p",
"in",
"PolygonFilter",
".",
"instances",
":",
"names",
".",
"append",
"(",
"p",
".",
"name",
")",
"return",
"names"
] | Get the names of all polygon filters in the order of creation | [
"Get",
"the",
"names",
"of",
"all",
"polygon",
"filters",
"in",
"the",
"order",
"of",
"creation"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L366-L371 | train | 48,783 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter._check_data | def _check_data(self):
"""Check if the data given is valid"""
if self.axes is None:
raise PolygonFilterError("`axes` parm not set.")
if self.points is None:
raise PolygonFilterError("`points` parm not set.")
self.points = np.array(self.points)
if self.points.shape[1] != 2:
raise PolygonFilterError("data points' shape[1] must be 2.")
if self.name is None:
self.name = "polygon filter {}".format(self.unique_id)
if not isinstance(self.inverted, bool):
raise PolygonFilterError("`inverted` must be boolean.") | python | def _check_data(self):
"""Check if the data given is valid"""
if self.axes is None:
raise PolygonFilterError("`axes` parm not set.")
if self.points is None:
raise PolygonFilterError("`points` parm not set.")
self.points = np.array(self.points)
if self.points.shape[1] != 2:
raise PolygonFilterError("data points' shape[1] must be 2.")
if self.name is None:
self.name = "polygon filter {}".format(self.unique_id)
if not isinstance(self.inverted, bool):
raise PolygonFilterError("`inverted` must be boolean.") | [
"def",
"_check_data",
"(",
"self",
")",
":",
"if",
"self",
".",
"axes",
"is",
"None",
":",
"raise",
"PolygonFilterError",
"(",
"\"`axes` parm not set.\"",
")",
"if",
"self",
".",
"points",
"is",
"None",
":",
"raise",
"PolygonFilterError",
"(",
"\"`points` parm... | Check if the data given is valid | [
"Check",
"if",
"the",
"data",
"given",
"is",
"valid"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L92-L104 | train | 48,784 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter._load | def _load(self, filename):
"""Import all filters from a text file"""
filename = pathlib.Path(filename)
with filename.open() as fd:
data = fd.readlines()
# Get the strings that correspond to self.fileid
bool_head = [l.strip().startswith("[") for l in data]
int_head = np.squeeze(np.where(bool_head))
int_head = np.atleast_1d(int_head)
start = int_head[self.fileid]+1
if len(int_head) > self.fileid+1:
end = int_head[self.fileid+1]
else:
end = len(data)
subdata = data[start:end]
# separate all elements and strip them
subdata = [[it.strip() for it in l.split("=")] for l in subdata]
points = []
for var, val in subdata:
if var.lower() == "x axis":
xaxis = val.lower()
elif var.lower() == "y axis":
yaxis = val.lower()
elif var.lower() == "name":
self.name = val
elif var.lower() == "inverted":
if val == "True":
self.inverted = True
elif var.lower().startswith("point"):
val = np.array(val.strip("[]").split(), dtype=float)
points.append([int(var[5:]), val])
else:
raise KeyError("Unknown variable: {} = {}".
format(var, val))
self.axes = (xaxis, yaxis)
# sort points
points.sort()
# get only coordinates from points
self.points = np.array([p[1] for p in points])
# overwrite unique id
unique_id = int(data[start-1].strip().strip("Polygon []"))
self._set_unique_id(unique_id) | python | def _load(self, filename):
"""Import all filters from a text file"""
filename = pathlib.Path(filename)
with filename.open() as fd:
data = fd.readlines()
# Get the strings that correspond to self.fileid
bool_head = [l.strip().startswith("[") for l in data]
int_head = np.squeeze(np.where(bool_head))
int_head = np.atleast_1d(int_head)
start = int_head[self.fileid]+1
if len(int_head) > self.fileid+1:
end = int_head[self.fileid+1]
else:
end = len(data)
subdata = data[start:end]
# separate all elements and strip them
subdata = [[it.strip() for it in l.split("=")] for l in subdata]
points = []
for var, val in subdata:
if var.lower() == "x axis":
xaxis = val.lower()
elif var.lower() == "y axis":
yaxis = val.lower()
elif var.lower() == "name":
self.name = val
elif var.lower() == "inverted":
if val == "True":
self.inverted = True
elif var.lower().startswith("point"):
val = np.array(val.strip("[]").split(), dtype=float)
points.append([int(var[5:]), val])
else:
raise KeyError("Unknown variable: {} = {}".
format(var, val))
self.axes = (xaxis, yaxis)
# sort points
points.sort()
# get only coordinates from points
self.points = np.array([p[1] for p in points])
# overwrite unique id
unique_id = int(data[start-1].strip().strip("Polygon []"))
self._set_unique_id(unique_id) | [
"def",
"_load",
"(",
"self",
",",
"filename",
")",
":",
"filename",
"=",
"pathlib",
".",
"Path",
"(",
"filename",
")",
"with",
"filename",
".",
"open",
"(",
")",
"as",
"fd",
":",
"data",
"=",
"fd",
".",
"readlines",
"(",
")",
"# Get the strings that co... | Import all filters from a text file | [
"Import",
"all",
"filters",
"from",
"a",
"text",
"file"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L106-L156 | train | 48,785 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter._set_unique_id | def _set_unique_id(self, unique_id):
"""Define a unique id"""
assert isinstance(unique_id, int), "unique_id must be an integer"
if PolygonFilter.instace_exists(unique_id):
newid = max(PolygonFilter._instance_counter, unique_id+1)
msg = "PolygonFilter with unique_id '{}' exists.".format(unique_id)
msg += " Using new unique id '{}'.".format(newid)
warnings.warn(msg, FilterIdExistsWarning)
unique_id = newid
ic = max(PolygonFilter._instance_counter, unique_id+1)
PolygonFilter._instance_counter = ic
self.unique_id = unique_id | python | def _set_unique_id(self, unique_id):
"""Define a unique id"""
assert isinstance(unique_id, int), "unique_id must be an integer"
if PolygonFilter.instace_exists(unique_id):
newid = max(PolygonFilter._instance_counter, unique_id+1)
msg = "PolygonFilter with unique_id '{}' exists.".format(unique_id)
msg += " Using new unique id '{}'.".format(newid)
warnings.warn(msg, FilterIdExistsWarning)
unique_id = newid
ic = max(PolygonFilter._instance_counter, unique_id+1)
PolygonFilter._instance_counter = ic
self.unique_id = unique_id | [
"def",
"_set_unique_id",
"(",
"self",
",",
"unique_id",
")",
":",
"assert",
"isinstance",
"(",
"unique_id",
",",
"int",
")",
",",
"\"unique_id must be an integer\"",
"if",
"PolygonFilter",
".",
"instace_exists",
"(",
"unique_id",
")",
":",
"newid",
"=",
"max",
... | Define a unique id | [
"Define",
"a",
"unique",
"id"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L158-L171 | train | 48,786 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter.copy | def copy(self, invert=False):
"""Return a copy of the current instance
Parameters
----------
invert: bool
The copy will be inverted w.r.t. the original
"""
if invert:
inverted = not self.inverted
else:
inverted = self.inverted
return PolygonFilter(axes=self.axes,
points=self.points,
name=self.name,
inverted=inverted) | python | def copy(self, invert=False):
"""Return a copy of the current instance
Parameters
----------
invert: bool
The copy will be inverted w.r.t. the original
"""
if invert:
inverted = not self.inverted
else:
inverted = self.inverted
return PolygonFilter(axes=self.axes,
points=self.points,
name=self.name,
inverted=inverted) | [
"def",
"copy",
"(",
"self",
",",
"invert",
"=",
"False",
")",
":",
"if",
"invert",
":",
"inverted",
"=",
"not",
"self",
".",
"inverted",
"else",
":",
"inverted",
"=",
"self",
".",
"inverted",
"return",
"PolygonFilter",
"(",
"axes",
"=",
"self",
".",
... | Return a copy of the current instance
Parameters
----------
invert: bool
The copy will be inverted w.r.t. the original | [
"Return",
"a",
"copy",
"of",
"the",
"current",
"instance"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L179-L195 | train | 48,787 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter.filter | def filter(self, datax, datay):
"""Filter a set of datax and datay according to `self.points`"""
f = np.ones(datax.shape, dtype=bool)
for i, p in enumerate(zip(datax, datay)):
f[i] = PolygonFilter.point_in_poly(p, self.points)
if self.inverted:
np.invert(f, f)
return f | python | def filter(self, datax, datay):
"""Filter a set of datax and datay according to `self.points`"""
f = np.ones(datax.shape, dtype=bool)
for i, p in enumerate(zip(datax, datay)):
f[i] = PolygonFilter.point_in_poly(p, self.points)
if self.inverted:
np.invert(f, f)
return f | [
"def",
"filter",
"(",
"self",
",",
"datax",
",",
"datay",
")",
":",
"f",
"=",
"np",
".",
"ones",
"(",
"datax",
".",
"shape",
",",
"dtype",
"=",
"bool",
")",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"zip",
"(",
"datax",
",",
"datay",
")",
... | Filter a set of datax and datay according to `self.points` | [
"Filter",
"a",
"set",
"of",
"datax",
"and",
"datay",
"according",
"to",
"self",
".",
"points"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L197-L206 | train | 48,788 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter.get_instance_from_id | def get_instance_from_id(unique_id):
"""Get an instance of the `PolygonFilter` using a unique id"""
for instance in PolygonFilter.instances:
if instance.unique_id == unique_id:
return instance
# if this does not work:
raise KeyError("PolygonFilter with unique_id {} not found.".
format(unique_id)) | python | def get_instance_from_id(unique_id):
"""Get an instance of the `PolygonFilter` using a unique id"""
for instance in PolygonFilter.instances:
if instance.unique_id == unique_id:
return instance
# if this does not work:
raise KeyError("PolygonFilter with unique_id {} not found.".
format(unique_id)) | [
"def",
"get_instance_from_id",
"(",
"unique_id",
")",
":",
"for",
"instance",
"in",
"PolygonFilter",
".",
"instances",
":",
"if",
"instance",
".",
"unique_id",
"==",
"unique_id",
":",
"return",
"instance",
"# if this does not work:",
"raise",
"KeyError",
"(",
"\"P... | Get an instance of the `PolygonFilter` using a unique id | [
"Get",
"an",
"instance",
"of",
"the",
"PolygonFilter",
"using",
"a",
"unique",
"id"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L209-L216 | train | 48,789 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter.import_all | def import_all(path):
"""Import all polygons from a .poly file.
Returns a list of the imported polygon filters
"""
plist = []
fid = 0
while True:
try:
p = PolygonFilter(filename=path, fileid=fid)
plist.append(p)
fid += 1
except IndexError:
break
return plist | python | def import_all(path):
"""Import all polygons from a .poly file.
Returns a list of the imported polygon filters
"""
plist = []
fid = 0
while True:
try:
p = PolygonFilter(filename=path, fileid=fid)
plist.append(p)
fid += 1
except IndexError:
break
return plist | [
"def",
"import_all",
"(",
"path",
")",
":",
"plist",
"=",
"[",
"]",
"fid",
"=",
"0",
"while",
"True",
":",
"try",
":",
"p",
"=",
"PolygonFilter",
"(",
"filename",
"=",
"path",
",",
"fileid",
"=",
"fid",
")",
"plist",
".",
"append",
"(",
"p",
")",... | Import all polygons from a .poly file.
Returns a list of the imported polygon filters | [
"Import",
"all",
"polygons",
"from",
"a",
".",
"poly",
"file",
"."
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L219-L233 | train | 48,790 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter.point_in_poly | def point_in_poly(p, poly):
"""Determine whether a point is within a polygon area
Uses the ray casting algorithm.
Parameters
----------
p: float
Coordinates of the point
poly: array_like of shape (N, 2)
Polygon (`PolygonFilter.points`)
Returns
-------
inside: bool
`True`, if point is inside.
Notes
-----
If `p` lies on a side of the polygon, it is defined as
- "inside" if it is on the top or right
- "outside" if it is on the lower or left
"""
poly = np.array(poly)
n = poly.shape[0]
inside = False
x, y = p
# Coarse bounding box exclusion:
if (x <= poly[:, 0].max() and x > poly[:, 0].min()
and y <= poly[:, 1].max() and y > poly[:, 1].min()):
# The point is within the coarse bounding box.
p1x, p1y = poly[0] # point i in contour
for ii in range(n): # also covers (n-1, 0) (circular)
p2x, p2y = poly[(ii+1) % n] # point ii+1 in contour (circular)
# Edge-wise fine bounding-ray exclusion.
# Determine whether point is in the current ray,
# defined by the y-range of p1 and p2 and whether
# it is left of p1 and p2.
if (y > min(p1y, p2y) and y <= max(p1y, p2y) # in y-range
and x <= max(p1x, p2x)): # left of p1 and p2
# Note that always p1y!=p2y due to the above test.
# Only Compute the x-coordinate of the intersection
# between line p1-p2 and the horizontal ray,
# ((y-p1y)*(p2x-p1x)/(p2y-p1y) + p1x),
# if x is not already known to be left of it
# (p1x==p2x in combination with x<=max(p1x, p2x) above).
if p1x == p2x or x <= (y-p1y)*(p2x-p1x)/(p2y-p1y) + p1x:
# Toggle `inside` if the ray intersects
# with the current edge.
inside = not inside
# Move on to the next edge of the polygon.
p1x, p1y = p2x, p2y
return inside | python | def point_in_poly(p, poly):
"""Determine whether a point is within a polygon area
Uses the ray casting algorithm.
Parameters
----------
p: float
Coordinates of the point
poly: array_like of shape (N, 2)
Polygon (`PolygonFilter.points`)
Returns
-------
inside: bool
`True`, if point is inside.
Notes
-----
If `p` lies on a side of the polygon, it is defined as
- "inside" if it is on the top or right
- "outside" if it is on the lower or left
"""
poly = np.array(poly)
n = poly.shape[0]
inside = False
x, y = p
# Coarse bounding box exclusion:
if (x <= poly[:, 0].max() and x > poly[:, 0].min()
and y <= poly[:, 1].max() and y > poly[:, 1].min()):
# The point is within the coarse bounding box.
p1x, p1y = poly[0] # point i in contour
for ii in range(n): # also covers (n-1, 0) (circular)
p2x, p2y = poly[(ii+1) % n] # point ii+1 in contour (circular)
# Edge-wise fine bounding-ray exclusion.
# Determine whether point is in the current ray,
# defined by the y-range of p1 and p2 and whether
# it is left of p1 and p2.
if (y > min(p1y, p2y) and y <= max(p1y, p2y) # in y-range
and x <= max(p1x, p2x)): # left of p1 and p2
# Note that always p1y!=p2y due to the above test.
# Only Compute the x-coordinate of the intersection
# between line p1-p2 and the horizontal ray,
# ((y-p1y)*(p2x-p1x)/(p2y-p1y) + p1x),
# if x is not already known to be left of it
# (p1x==p2x in combination with x<=max(p1x, p2x) above).
if p1x == p2x or x <= (y-p1y)*(p2x-p1x)/(p2y-p1y) + p1x:
# Toggle `inside` if the ray intersects
# with the current edge.
inside = not inside
# Move on to the next edge of the polygon.
p1x, p1y = p2x, p2y
return inside | [
"def",
"point_in_poly",
"(",
"p",
",",
"poly",
")",
":",
"poly",
"=",
"np",
".",
"array",
"(",
"poly",
")",
"n",
"=",
"poly",
".",
"shape",
"[",
"0",
"]",
"inside",
"=",
"False",
"x",
",",
"y",
"=",
"p",
"# Coarse bounding box exclusion:",
"if",
"(... | Determine whether a point is within a polygon area
Uses the ray casting algorithm.
Parameters
----------
p: float
Coordinates of the point
poly: array_like of shape (N, 2)
Polygon (`PolygonFilter.points`)
Returns
-------
inside: bool
`True`, if point is inside.
Notes
-----
If `p` lies on a side of the polygon, it is defined as
- "inside" if it is on the top or right
- "outside" if it is on the lower or left | [
"Determine",
"whether",
"a",
"point",
"is",
"within",
"a",
"polygon",
"area"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L246-L301 | train | 48,791 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter.remove | def remove(unique_id):
"""Remove a polygon filter from `PolygonFilter.instances`"""
for p in PolygonFilter.instances:
if p.unique_id == unique_id:
PolygonFilter.instances.remove(p) | python | def remove(unique_id):
"""Remove a polygon filter from `PolygonFilter.instances`"""
for p in PolygonFilter.instances:
if p.unique_id == unique_id:
PolygonFilter.instances.remove(p) | [
"def",
"remove",
"(",
"unique_id",
")",
":",
"for",
"p",
"in",
"PolygonFilter",
".",
"instances",
":",
"if",
"p",
".",
"unique_id",
"==",
"unique_id",
":",
"PolygonFilter",
".",
"instances",
".",
"remove",
"(",
"p",
")"
] | Remove a polygon filter from `PolygonFilter.instances` | [
"Remove",
"a",
"polygon",
"filter",
"from",
"PolygonFilter",
".",
"instances"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L304-L308 | train | 48,792 |
ZELLMECHANIK-DRESDEN/dclab | dclab/polygon_filter.py | PolygonFilter.save_all | def save_all(polyfile):
"""Save all polygon filters"""
nump = len(PolygonFilter.instances)
if nump == 0:
raise PolygonFilterError("There are not polygon filters to save.")
for p in PolygonFilter.instances:
# we return the ret_obj, so we don't need to open and
# close the file multiple times.
polyobj = p.save(polyfile, ret_fobj=True)
polyobj.close() | python | def save_all(polyfile):
"""Save all polygon filters"""
nump = len(PolygonFilter.instances)
if nump == 0:
raise PolygonFilterError("There are not polygon filters to save.")
for p in PolygonFilter.instances:
# we return the ret_obj, so we don't need to open and
# close the file multiple times.
polyobj = p.save(polyfile, ret_fobj=True)
polyobj.close() | [
"def",
"save_all",
"(",
"polyfile",
")",
":",
"nump",
"=",
"len",
"(",
"PolygonFilter",
".",
"instances",
")",
"if",
"nump",
"==",
"0",
":",
"raise",
"PolygonFilterError",
"(",
"\"There are not polygon filters to save.\"",
")",
"for",
"p",
"in",
"PolygonFilter",... | Save all polygon filters | [
"Save",
"all",
"polygon",
"filters"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L350-L359 | train | 48,793 |
openstax/cnx-archive | cnxarchive/views/resource.py | get_resource | def get_resource(request):
"""Retrieve a file's data."""
hash = request.matchdict['hash']
# Do the file lookup
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
args = dict(hash=hash)
cursor.execute(SQL['get-resource'], args)
try:
mimetype, file = cursor.fetchone()
except TypeError: # None returned
raise httpexceptions.HTTPNotFound()
resp = request.response
resp.status = "200 OK"
resp.content_type = mimetype
resp.body = file[:]
return resp | python | def get_resource(request):
"""Retrieve a file's data."""
hash = request.matchdict['hash']
# Do the file lookup
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
args = dict(hash=hash)
cursor.execute(SQL['get-resource'], args)
try:
mimetype, file = cursor.fetchone()
except TypeError: # None returned
raise httpexceptions.HTTPNotFound()
resp = request.response
resp.status = "200 OK"
resp.content_type = mimetype
resp.body = file[:]
return resp | [
"def",
"get_resource",
"(",
"request",
")",
":",
"hash",
"=",
"request",
".",
"matchdict",
"[",
"'hash'",
"]",
"# Do the file lookup",
"with",
"db_connect",
"(",
")",
"as",
"db_connection",
":",
"with",
"db_connection",
".",
"cursor",
"(",
")",
"as",
"cursor... | Retrieve a file's data. | [
"Retrieve",
"a",
"file",
"s",
"data",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/resource.py#L32-L50 | train | 48,794 |
robmcmullen/atrcopy | atrcopy/segments.py | get_style_bits | def get_style_bits(match=False, comment=False, selected=False, data=False, diff=False, user=0):
""" Return an int value that contains the specified style bits set.
Available styles for each byte are:
match: part of the currently matched search
comment: user commented area
selected: selected region
data: labeled in the disassembler as a data region (i.e. not disassembled)
"""
style_bits = 0
if user:
style_bits |= (user & user_bit_mask)
if diff:
style_bits |= diff_bit_mask
if match:
style_bits |= match_bit_mask
if comment:
style_bits |= comment_bit_mask
if data:
style_bits |= (data_style & user_bit_mask)
if selected:
style_bits |= selected_bit_mask
return style_bits | python | def get_style_bits(match=False, comment=False, selected=False, data=False, diff=False, user=0):
""" Return an int value that contains the specified style bits set.
Available styles for each byte are:
match: part of the currently matched search
comment: user commented area
selected: selected region
data: labeled in the disassembler as a data region (i.e. not disassembled)
"""
style_bits = 0
if user:
style_bits |= (user & user_bit_mask)
if diff:
style_bits |= diff_bit_mask
if match:
style_bits |= match_bit_mask
if comment:
style_bits |= comment_bit_mask
if data:
style_bits |= (data_style & user_bit_mask)
if selected:
style_bits |= selected_bit_mask
return style_bits | [
"def",
"get_style_bits",
"(",
"match",
"=",
"False",
",",
"comment",
"=",
"False",
",",
"selected",
"=",
"False",
",",
"data",
"=",
"False",
",",
"diff",
"=",
"False",
",",
"user",
"=",
"0",
")",
":",
"style_bits",
"=",
"0",
"if",
"user",
":",
"sty... | Return an int value that contains the specified style bits set.
Available styles for each byte are:
match: part of the currently matched search
comment: user commented area
selected: selected region
data: labeled in the disassembler as a data region (i.e. not disassembled) | [
"Return",
"an",
"int",
"value",
"that",
"contains",
"the",
"specified",
"style",
"bits",
"set",
"."
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L22-L45 | train | 48,795 |
robmcmullen/atrcopy | atrcopy/segments.py | get_style_mask | def get_style_mask(**kwargs):
"""Get the bit mask that, when anded with data, will turn off the
selected bits
"""
bits = get_style_bits(**kwargs)
if 'user' in kwargs and kwargs['user']:
bits |= user_bit_mask
else:
bits &= (0xff ^ user_bit_mask)
return 0xff ^ bits | python | def get_style_mask(**kwargs):
"""Get the bit mask that, when anded with data, will turn off the
selected bits
"""
bits = get_style_bits(**kwargs)
if 'user' in kwargs and kwargs['user']:
bits |= user_bit_mask
else:
bits &= (0xff ^ user_bit_mask)
return 0xff ^ bits | [
"def",
"get_style_mask",
"(",
"*",
"*",
"kwargs",
")",
":",
"bits",
"=",
"get_style_bits",
"(",
"*",
"*",
"kwargs",
")",
"if",
"'user'",
"in",
"kwargs",
"and",
"kwargs",
"[",
"'user'",
"]",
":",
"bits",
"|=",
"user_bit_mask",
"else",
":",
"bits",
"&=",... | Get the bit mask that, when anded with data, will turn off the
selected bits | [
"Get",
"the",
"bit",
"mask",
"that",
"when",
"anded",
"with",
"data",
"will",
"turn",
"off",
"the",
"selected",
"bits"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L48-L57 | train | 48,796 |
robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.byte_bounds_offset | def byte_bounds_offset(self):
"""Return start and end offsets of this segment's data into the
base array's data.
This ignores the byte order index. Arrays using the byte order index
will have the entire base array's raw data.
"""
if self.data.base is None:
if self.is_indexed:
basearray = self.data.np_data
else:
basearray = self.data
return 0, len(basearray)
return int(self.data_start - self.base_start), int(self.data_end - self.base_start) | python | def byte_bounds_offset(self):
"""Return start and end offsets of this segment's data into the
base array's data.
This ignores the byte order index. Arrays using the byte order index
will have the entire base array's raw data.
"""
if self.data.base is None:
if self.is_indexed:
basearray = self.data.np_data
else:
basearray = self.data
return 0, len(basearray)
return int(self.data_start - self.base_start), int(self.data_end - self.base_start) | [
"def",
"byte_bounds_offset",
"(",
"self",
")",
":",
"if",
"self",
".",
"data",
".",
"base",
"is",
"None",
":",
"if",
"self",
".",
"is_indexed",
":",
"basearray",
"=",
"self",
".",
"data",
".",
"np_data",
"else",
":",
"basearray",
"=",
"self",
".",
"d... | Return start and end offsets of this segment's data into the
base array's data.
This ignores the byte order index. Arrays using the byte order index
will have the entire base array's raw data. | [
"Return",
"start",
"and",
"end",
"offsets",
"of",
"this",
"segment",
"s",
"data",
"into",
"the",
"base",
"array",
"s",
"data",
"."
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L268-L281 | train | 48,797 |
robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.get_raw_index | def get_raw_index(self, i):
"""Get index into base array's raw data, given the index into this
segment
"""
if self.is_indexed:
return int(self.order[i])
if self.data.base is None:
return int(i)
return int(self.data_start - self.base_start + i) | python | def get_raw_index(self, i):
"""Get index into base array's raw data, given the index into this
segment
"""
if self.is_indexed:
return int(self.order[i])
if self.data.base is None:
return int(i)
return int(self.data_start - self.base_start + i) | [
"def",
"get_raw_index",
"(",
"self",
",",
"i",
")",
":",
"if",
"self",
".",
"is_indexed",
":",
"return",
"int",
"(",
"self",
".",
"order",
"[",
"i",
"]",
")",
"if",
"self",
".",
"data",
".",
"base",
"is",
"None",
":",
"return",
"int",
"(",
"i",
... | Get index into base array's raw data, given the index into this
segment | [
"Get",
"index",
"into",
"base",
"array",
"s",
"raw",
"data",
"given",
"the",
"index",
"into",
"this",
"segment"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L283-L291 | train | 48,798 |
robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.get_indexes_from_base | def get_indexes_from_base(self):
"""Get array of indexes from the base array, as if this raw data were
indexed.
"""
if self.is_indexed:
return np.copy(self.order[i])
if self.data.base is None:
i = 0
else:
i = self.get_raw_index(0)
return np.arange(i, i + len(self), dtype=np.uint32) | python | def get_indexes_from_base(self):
"""Get array of indexes from the base array, as if this raw data were
indexed.
"""
if self.is_indexed:
return np.copy(self.order[i])
if self.data.base is None:
i = 0
else:
i = self.get_raw_index(0)
return np.arange(i, i + len(self), dtype=np.uint32) | [
"def",
"get_indexes_from_base",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_indexed",
":",
"return",
"np",
".",
"copy",
"(",
"self",
".",
"order",
"[",
"i",
"]",
")",
"if",
"self",
".",
"data",
".",
"base",
"is",
"None",
":",
"i",
"=",
"0",
"el... | Get array of indexes from the base array, as if this raw data were
indexed. | [
"Get",
"array",
"of",
"indexes",
"from",
"the",
"base",
"array",
"as",
"if",
"this",
"raw",
"data",
"were",
"indexed",
"."
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L293-L303 | train | 48,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.