code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def assert_page_source_contains(self, expected_value, failure_message='Expected page source to contain: "{}"'):
"""
Asserts that the page source contains the string passed in expected_value
"""
assertion = lambda: expected_value in self.driver_wrapper.page_source()
self.webdriver_assert(assertion, unicode(failure_message).format(expected_value)) | def function[assert_page_source_contains, parameter[self, expected_value, failure_message]]:
constant[
Asserts that the page source contains the string passed in expected_value
]
variable[assertion] assign[=] <ast.Lambda object at 0x7da1b10a4520>
call[name[self].webdriver_assert, parameter[name[assertion], call[call[name[unicode], parameter[name[failure_message]]].format, parameter[name[expected_value]]]]] | keyword[def] identifier[assert_page_source_contains] ( identifier[self] , identifier[expected_value] , identifier[failure_message] = literal[string] ):
literal[string]
identifier[assertion] = keyword[lambda] : identifier[expected_value] keyword[in] identifier[self] . identifier[driver_wrapper] . identifier[page_source] ()
identifier[self] . identifier[webdriver_assert] ( identifier[assertion] , identifier[unicode] ( identifier[failure_message] ). identifier[format] ( identifier[expected_value] )) | def assert_page_source_contains(self, expected_value, failure_message='Expected page source to contain: "{}"'):
"""
Asserts that the page source contains the string passed in expected_value
"""
assertion = lambda : expected_value in self.driver_wrapper.page_source()
self.webdriver_assert(assertion, unicode(failure_message).format(expected_value)) |
def SendKey(key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:
"""
Simulate typing a key.
key: int, a value in class `Keys`.
"""
keybd_event(key, 0, KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey, 0)
keybd_event(key, 0, KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey, 0)
time.sleep(waitTime) | def function[SendKey, parameter[key, waitTime]]:
constant[
Simulate typing a key.
key: int, a value in class `Keys`.
]
call[name[keybd_event], parameter[name[key], constant[0], binary_operation[name[KeyboardEventFlag].KeyDown <ast.BitOr object at 0x7da2590d6aa0> name[KeyboardEventFlag].ExtendedKey], constant[0]]]
call[name[keybd_event], parameter[name[key], constant[0], binary_operation[name[KeyboardEventFlag].KeyUp <ast.BitOr object at 0x7da2590d6aa0> name[KeyboardEventFlag].ExtendedKey], constant[0]]]
call[name[time].sleep, parameter[name[waitTime]]] | keyword[def] identifier[SendKey] ( identifier[key] : identifier[int] , identifier[waitTime] : identifier[float] = identifier[OPERATION_WAIT_TIME] )-> keyword[None] :
literal[string]
identifier[keybd_event] ( identifier[key] , literal[int] , identifier[KeyboardEventFlag] . identifier[KeyDown] | identifier[KeyboardEventFlag] . identifier[ExtendedKey] , literal[int] )
identifier[keybd_event] ( identifier[key] , literal[int] , identifier[KeyboardEventFlag] . identifier[KeyUp] | identifier[KeyboardEventFlag] . identifier[ExtendedKey] , literal[int] )
identifier[time] . identifier[sleep] ( identifier[waitTime] ) | def SendKey(key: int, waitTime: float=OPERATION_WAIT_TIME) -> None:
"""
Simulate typing a key.
key: int, a value in class `Keys`.
"""
keybd_event(key, 0, KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey, 0)
keybd_event(key, 0, KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey, 0)
time.sleep(waitTime) |
def get_report(time='hour', threshold='significant', online=False):
"""
Retrieves a new Report about recent earthquakes.
:param str time: A string indicating the time range of earthquakes to report. Must be either "hour" (only earthquakes in the past hour), "day" (only earthquakes that happened today), "week" (only earthquakes that happened in the past 7 days), or "month" (only earthquakes that happened in the past 30 days).
:param str threshold: A string indicating what kind of earthquakes to report. Must be either "significant" (only significant earthquakes), "all" (all earthquakes, regardless of significance), "4.5", "2.5", or "1.0". Note that for the last three, all earthquakes at and above that level will be reported.
:returns: :ref:`Report`
"""
if threshold not in THRESHOLDS:
raise USGSException('Unknown threshold: "{}" (must be either "significant", "all", "4.5", "2.5", or "1.0")'.format(threshold))
if time not in TIMES:
raise USGSException('Unknown time: "{}" (must be either "hour", "day", "week", "month")'.format(time))
try:
result = _get_report_string(time, threshold, online)
except HTTPError as e:
raise USGSException("Internet error ({}): {}".format(e.code, e.reason))
if result == "":
formatted_threshold = 'Magnitude {}+' if threshold not in ('significant', 'all') else threshold.title()
result = Report._from_json({'metadata': {'title': 'USGS {} Earthquakes, Past {}'.format(formatted_threshold, time.title())}})
if _USE_CLASSES:
return result
else:
return result._to_dict()
elif result:
try:
json_result = _from_json(result)
except ValueError:
raise USGSException("The response from the server didn't make any sense.")
if _USE_CLASSES:
return Report._from_json(json_result)
else:
return Report._from_json(json_result)._to_dict()
else:
if _CONNECTED or online:
raise USGSException("No response from the server.")
else:
raise USGSException("No data was in the cache for this time and threshold ('{}', '{}').".format(time, threshold)) | def function[get_report, parameter[time, threshold, online]]:
constant[
Retrieves a new Report about recent earthquakes.
:param str time: A string indicating the time range of earthquakes to report. Must be either "hour" (only earthquakes in the past hour), "day" (only earthquakes that happened today), "week" (only earthquakes that happened in the past 7 days), or "month" (only earthquakes that happened in the past 30 days).
:param str threshold: A string indicating what kind of earthquakes to report. Must be either "significant" (only significant earthquakes), "all" (all earthquakes, regardless of significance), "4.5", "2.5", or "1.0". Note that for the last three, all earthquakes at and above that level will be reported.
:returns: :ref:`Report`
]
if compare[name[threshold] <ast.NotIn object at 0x7da2590d7190> name[THRESHOLDS]] begin[:]
<ast.Raise object at 0x7da18f09d390>
if compare[name[time] <ast.NotIn object at 0x7da2590d7190> name[TIMES]] begin[:]
<ast.Raise object at 0x7da18f09d780>
<ast.Try object at 0x7da18f09d810>
if compare[name[result] equal[==] constant[]] begin[:]
variable[formatted_threshold] assign[=] <ast.IfExp object at 0x7da18f09d1e0>
variable[result] assign[=] call[name[Report]._from_json, parameter[dictionary[[<ast.Constant object at 0x7da18f09da50>], [<ast.Dict object at 0x7da18f09fe80>]]]]
if name[_USE_CLASSES] begin[:]
return[name[result]] | keyword[def] identifier[get_report] ( identifier[time] = literal[string] , identifier[threshold] = literal[string] , identifier[online] = keyword[False] ):
literal[string]
keyword[if] identifier[threshold] keyword[not] keyword[in] identifier[THRESHOLDS] :
keyword[raise] identifier[USGSException] ( literal[string] . identifier[format] ( identifier[threshold] ))
keyword[if] identifier[time] keyword[not] keyword[in] identifier[TIMES] :
keyword[raise] identifier[USGSException] ( literal[string] . identifier[format] ( identifier[time] ))
keyword[try] :
identifier[result] = identifier[_get_report_string] ( identifier[time] , identifier[threshold] , identifier[online] )
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
keyword[raise] identifier[USGSException] ( literal[string] . identifier[format] ( identifier[e] . identifier[code] , identifier[e] . identifier[reason] ))
keyword[if] identifier[result] == literal[string] :
identifier[formatted_threshold] = literal[string] keyword[if] identifier[threshold] keyword[not] keyword[in] ( literal[string] , literal[string] ) keyword[else] identifier[threshold] . identifier[title] ()
identifier[result] = identifier[Report] . identifier[_from_json] ({ literal[string] :{ literal[string] : literal[string] . identifier[format] ( identifier[formatted_threshold] , identifier[time] . identifier[title] ())}})
keyword[if] identifier[_USE_CLASSES] :
keyword[return] identifier[result]
keyword[else] :
keyword[return] identifier[result] . identifier[_to_dict] ()
keyword[elif] identifier[result] :
keyword[try] :
identifier[json_result] = identifier[_from_json] ( identifier[result] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[USGSException] ( literal[string] )
keyword[if] identifier[_USE_CLASSES] :
keyword[return] identifier[Report] . identifier[_from_json] ( identifier[json_result] )
keyword[else] :
keyword[return] identifier[Report] . identifier[_from_json] ( identifier[json_result] ). identifier[_to_dict] ()
keyword[else] :
keyword[if] identifier[_CONNECTED] keyword[or] identifier[online] :
keyword[raise] identifier[USGSException] ( literal[string] )
keyword[else] :
keyword[raise] identifier[USGSException] ( literal[string] . identifier[format] ( identifier[time] , identifier[threshold] )) | def get_report(time='hour', threshold='significant', online=False):
"""
Retrieves a new Report about recent earthquakes.
:param str time: A string indicating the time range of earthquakes to report. Must be either "hour" (only earthquakes in the past hour), "day" (only earthquakes that happened today), "week" (only earthquakes that happened in the past 7 days), or "month" (only earthquakes that happened in the past 30 days).
:param str threshold: A string indicating what kind of earthquakes to report. Must be either "significant" (only significant earthquakes), "all" (all earthquakes, regardless of significance), "4.5", "2.5", or "1.0". Note that for the last three, all earthquakes at and above that level will be reported.
:returns: :ref:`Report`
"""
if threshold not in THRESHOLDS:
raise USGSException('Unknown threshold: "{}" (must be either "significant", "all", "4.5", "2.5", or "1.0")'.format(threshold)) # depends on [control=['if'], data=['threshold']]
if time not in TIMES:
raise USGSException('Unknown time: "{}" (must be either "hour", "day", "week", "month")'.format(time)) # depends on [control=['if'], data=['time']]
try:
result = _get_report_string(time, threshold, online) # depends on [control=['try'], data=[]]
except HTTPError as e:
raise USGSException('Internet error ({}): {}'.format(e.code, e.reason)) # depends on [control=['except'], data=['e']]
if result == '':
formatted_threshold = 'Magnitude {}+' if threshold not in ('significant', 'all') else threshold.title()
result = Report._from_json({'metadata': {'title': 'USGS {} Earthquakes, Past {}'.format(formatted_threshold, time.title())}})
if _USE_CLASSES:
return result # depends on [control=['if'], data=[]]
else:
return result._to_dict() # depends on [control=['if'], data=['result']]
elif result:
try:
json_result = _from_json(result) # depends on [control=['try'], data=[]]
except ValueError:
raise USGSException("The response from the server didn't make any sense.") # depends on [control=['except'], data=[]]
if _USE_CLASSES:
return Report._from_json(json_result) # depends on [control=['if'], data=[]]
else:
return Report._from_json(json_result)._to_dict() # depends on [control=['if'], data=[]]
elif _CONNECTED or online:
raise USGSException('No response from the server.') # depends on [control=['if'], data=[]]
else:
raise USGSException("No data was in the cache for this time and threshold ('{}', '{}').".format(time, threshold)) |
def next_retrieve_group_item(self, last_item=None, entry=None):
"""Return the item to start from in next reviews group."""
next_item = None
gerrit_version = self.version
if gerrit_version[0] == 2 and gerrit_version[1] > 9:
if last_item is None:
next_item = 0
else:
next_item = last_item
elif gerrit_version[0] == 2 and gerrit_version[1] == 9:
# https://groups.google.com/forum/#!topic/repo-discuss/yQgRR5hlS3E
cause = "Gerrit 2.9.0 does not support pagination"
raise BackendError(cause=cause)
else:
if entry is not None:
next_item = entry['sortKey']
return next_item | def function[next_retrieve_group_item, parameter[self, last_item, entry]]:
constant[Return the item to start from in next reviews group.]
variable[next_item] assign[=] constant[None]
variable[gerrit_version] assign[=] name[self].version
if <ast.BoolOp object at 0x7da1b033ad40> begin[:]
if compare[name[last_item] is constant[None]] begin[:]
variable[next_item] assign[=] constant[0]
return[name[next_item]] | keyword[def] identifier[next_retrieve_group_item] ( identifier[self] , identifier[last_item] = keyword[None] , identifier[entry] = keyword[None] ):
literal[string]
identifier[next_item] = keyword[None]
identifier[gerrit_version] = identifier[self] . identifier[version]
keyword[if] identifier[gerrit_version] [ literal[int] ]== literal[int] keyword[and] identifier[gerrit_version] [ literal[int] ]> literal[int] :
keyword[if] identifier[last_item] keyword[is] keyword[None] :
identifier[next_item] = literal[int]
keyword[else] :
identifier[next_item] = identifier[last_item]
keyword[elif] identifier[gerrit_version] [ literal[int] ]== literal[int] keyword[and] identifier[gerrit_version] [ literal[int] ]== literal[int] :
identifier[cause] = literal[string]
keyword[raise] identifier[BackendError] ( identifier[cause] = identifier[cause] )
keyword[else] :
keyword[if] identifier[entry] keyword[is] keyword[not] keyword[None] :
identifier[next_item] = identifier[entry] [ literal[string] ]
keyword[return] identifier[next_item] | def next_retrieve_group_item(self, last_item=None, entry=None):
"""Return the item to start from in next reviews group."""
next_item = None
gerrit_version = self.version
if gerrit_version[0] == 2 and gerrit_version[1] > 9:
if last_item is None:
next_item = 0 # depends on [control=['if'], data=[]]
else:
next_item = last_item # depends on [control=['if'], data=[]]
elif gerrit_version[0] == 2 and gerrit_version[1] == 9:
# https://groups.google.com/forum/#!topic/repo-discuss/yQgRR5hlS3E
cause = 'Gerrit 2.9.0 does not support pagination'
raise BackendError(cause=cause) # depends on [control=['if'], data=[]]
elif entry is not None:
next_item = entry['sortKey'] # depends on [control=['if'], data=['entry']]
return next_item |
def build_jsonld(self, url=None, code_url=None, ci_url=None,
readme_url=None, license_id=None):
"""Create a JSON-LD representation of this LSST LaTeX document.
Parameters
----------
url : `str`, optional
URL where this document is published to the web. Prefer
the LSST the Docs URL if possible.
Example: ``'https://ldm-151.lsst.io'``.
code_url : `str`, optional
Path the the document's repository, typically on GitHub.
Example: ``'https://github.com/lsst/LDM-151'``.
ci_url : `str`, optional
Path to the continuous integration service dashboard for this
document's repository.
Example: ``'https://travis-ci.org/lsst/LDM-151'``.
readme_url : `str`, optional
URL to the document repository's README file. Example:
``https://raw.githubusercontent.com/lsst/LDM-151/master/README.rst``.
license_id : `str`, optional
License identifier, if known. The identifier should be from the
listing at https://spdx.org/licenses/. Example: ``CC-BY-4.0``.
Returns
-------
jsonld : `dict`
JSON-LD-formatted dictionary.
"""
jsonld = {
'@context': [
"https://raw.githubusercontent.com/codemeta/codemeta/2.0-rc/"
"codemeta.jsonld",
"http://schema.org"],
'@type': ['Report', 'SoftwareSourceCode'],
'language': 'TeX',
'reportNumber': self.handle,
'name': self.plain_title,
'description': self.plain_abstract,
'author': [{'@type': 'Person', 'name': author_name}
for author_name in self.plain_authors],
# This is a datetime.datetime; not a string. If writing to a file,
# Need to convert this to a ISO 8601 string.
'dateModified': self.revision_datetime
}
try:
jsonld['articleBody'] = self.plain_content
jsonld['fileFormat'] = 'text/plain' # MIME type of articleBody
except RuntimeError:
# raised by pypandoc when it can't convert the tex document
self._logger.exception('Could not convert latex body to plain '
'text for articleBody.')
self._logger.warning('Falling back to tex source for articleBody')
jsonld['articleBody'] = self._tex
jsonld['fileFormat'] = 'text/plain' # no mimetype for LaTeX?
if url is not None:
jsonld['@id'] = url
jsonld['url'] = url
else:
# Fallback to using the document handle as the ID. This isn't
# entirely ideal from a linked data perspective.
jsonld['@id'] = self.handle
if code_url is not None:
jsonld['codeRepository'] = code_url
if ci_url is not None:
jsonld['contIntegration'] = ci_url
if readme_url is not None:
jsonld['readme'] = readme_url
if license_id is not None:
jsonld['license_id'] = None
return jsonld | def function[build_jsonld, parameter[self, url, code_url, ci_url, readme_url, license_id]]:
constant[Create a JSON-LD representation of this LSST LaTeX document.
Parameters
----------
url : `str`, optional
URL where this document is published to the web. Prefer
the LSST the Docs URL if possible.
Example: ``'https://ldm-151.lsst.io'``.
code_url : `str`, optional
Path the the document's repository, typically on GitHub.
Example: ``'https://github.com/lsst/LDM-151'``.
ci_url : `str`, optional
Path to the continuous integration service dashboard for this
document's repository.
Example: ``'https://travis-ci.org/lsst/LDM-151'``.
readme_url : `str`, optional
URL to the document repository's README file. Example:
``https://raw.githubusercontent.com/lsst/LDM-151/master/README.rst``.
license_id : `str`, optional
License identifier, if known. The identifier should be from the
listing at https://spdx.org/licenses/. Example: ``CC-BY-4.0``.
Returns
-------
jsonld : `dict`
JSON-LD-formatted dictionary.
]
variable[jsonld] assign[=] dictionary[[<ast.Constant object at 0x7da1b0036800>, <ast.Constant object at 0x7da1b0037820>, <ast.Constant object at 0x7da1b0035750>, <ast.Constant object at 0x7da1b0035150>, <ast.Constant object at 0x7da1b0037f70>, <ast.Constant object at 0x7da1b00359c0>, <ast.Constant object at 0x7da1b0036980>, <ast.Constant object at 0x7da1b0035db0>], [<ast.List object at 0x7da1b0034eb0>, <ast.List object at 0x7da1b0037f10>, <ast.Constant object at 0x7da1b0036560>, <ast.Attribute object at 0x7da1b0036500>, <ast.Attribute object at 0x7da1b0034730>, <ast.Attribute object at 0x7da1b0036350>, <ast.ListComp object at 0x7da1b0035960>, <ast.Attribute object at 0x7da1b0037400>]]
<ast.Try object at 0x7da1b0034250>
if compare[name[url] is_not constant[None]] begin[:]
call[name[jsonld]][constant[@id]] assign[=] name[url]
call[name[jsonld]][constant[url]] assign[=] name[url]
if compare[name[code_url] is_not constant[None]] begin[:]
call[name[jsonld]][constant[codeRepository]] assign[=] name[code_url]
if compare[name[ci_url] is_not constant[None]] begin[:]
call[name[jsonld]][constant[contIntegration]] assign[=] name[ci_url]
if compare[name[readme_url] is_not constant[None]] begin[:]
call[name[jsonld]][constant[readme]] assign[=] name[readme_url]
if compare[name[license_id] is_not constant[None]] begin[:]
call[name[jsonld]][constant[license_id]] assign[=] constant[None]
return[name[jsonld]] | keyword[def] identifier[build_jsonld] ( identifier[self] , identifier[url] = keyword[None] , identifier[code_url] = keyword[None] , identifier[ci_url] = keyword[None] ,
identifier[readme_url] = keyword[None] , identifier[license_id] = keyword[None] ):
literal[string]
identifier[jsonld] ={
literal[string] :[
literal[string]
literal[string] ,
literal[string] ],
literal[string] :[ literal[string] , literal[string] ],
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[handle] ,
literal[string] : identifier[self] . identifier[plain_title] ,
literal[string] : identifier[self] . identifier[plain_abstract] ,
literal[string] :[{ literal[string] : literal[string] , literal[string] : identifier[author_name] }
keyword[for] identifier[author_name] keyword[in] identifier[self] . identifier[plain_authors] ],
literal[string] : identifier[self] . identifier[revision_datetime]
}
keyword[try] :
identifier[jsonld] [ literal[string] ]= identifier[self] . identifier[plain_content]
identifier[jsonld] [ literal[string] ]= literal[string]
keyword[except] identifier[RuntimeError] :
identifier[self] . identifier[_logger] . identifier[exception] ( literal[string]
literal[string] )
identifier[self] . identifier[_logger] . identifier[warning] ( literal[string] )
identifier[jsonld] [ literal[string] ]= identifier[self] . identifier[_tex]
identifier[jsonld] [ literal[string] ]= literal[string]
keyword[if] identifier[url] keyword[is] keyword[not] keyword[None] :
identifier[jsonld] [ literal[string] ]= identifier[url]
identifier[jsonld] [ literal[string] ]= identifier[url]
keyword[else] :
identifier[jsonld] [ literal[string] ]= identifier[self] . identifier[handle]
keyword[if] identifier[code_url] keyword[is] keyword[not] keyword[None] :
identifier[jsonld] [ literal[string] ]= identifier[code_url]
keyword[if] identifier[ci_url] keyword[is] keyword[not] keyword[None] :
identifier[jsonld] [ literal[string] ]= identifier[ci_url]
keyword[if] identifier[readme_url] keyword[is] keyword[not] keyword[None] :
identifier[jsonld] [ literal[string] ]= identifier[readme_url]
keyword[if] identifier[license_id] keyword[is] keyword[not] keyword[None] :
identifier[jsonld] [ literal[string] ]= keyword[None]
keyword[return] identifier[jsonld] | def build_jsonld(self, url=None, code_url=None, ci_url=None, readme_url=None, license_id=None):
"""Create a JSON-LD representation of this LSST LaTeX document.
Parameters
----------
url : `str`, optional
URL where this document is published to the web. Prefer
the LSST the Docs URL if possible.
Example: ``'https://ldm-151.lsst.io'``.
code_url : `str`, optional
Path the the document's repository, typically on GitHub.
Example: ``'https://github.com/lsst/LDM-151'``.
ci_url : `str`, optional
Path to the continuous integration service dashboard for this
document's repository.
Example: ``'https://travis-ci.org/lsst/LDM-151'``.
readme_url : `str`, optional
URL to the document repository's README file. Example:
``https://raw.githubusercontent.com/lsst/LDM-151/master/README.rst``.
license_id : `str`, optional
License identifier, if known. The identifier should be from the
listing at https://spdx.org/licenses/. Example: ``CC-BY-4.0``.
Returns
-------
jsonld : `dict`
JSON-LD-formatted dictionary.
"""
# This is a datetime.datetime; not a string. If writing to a file,
# Need to convert this to a ISO 8601 string.
jsonld = {'@context': ['https://raw.githubusercontent.com/codemeta/codemeta/2.0-rc/codemeta.jsonld', 'http://schema.org'], '@type': ['Report', 'SoftwareSourceCode'], 'language': 'TeX', 'reportNumber': self.handle, 'name': self.plain_title, 'description': self.plain_abstract, 'author': [{'@type': 'Person', 'name': author_name} for author_name in self.plain_authors], 'dateModified': self.revision_datetime}
try:
jsonld['articleBody'] = self.plain_content
jsonld['fileFormat'] = 'text/plain' # MIME type of articleBody # depends on [control=['try'], data=[]]
except RuntimeError:
# raised by pypandoc when it can't convert the tex document
self._logger.exception('Could not convert latex body to plain text for articleBody.')
self._logger.warning('Falling back to tex source for articleBody')
jsonld['articleBody'] = self._tex
jsonld['fileFormat'] = 'text/plain' # no mimetype for LaTeX? # depends on [control=['except'], data=[]]
if url is not None:
jsonld['@id'] = url
jsonld['url'] = url # depends on [control=['if'], data=['url']]
else:
# Fallback to using the document handle as the ID. This isn't
# entirely ideal from a linked data perspective.
jsonld['@id'] = self.handle
if code_url is not None:
jsonld['codeRepository'] = code_url # depends on [control=['if'], data=['code_url']]
if ci_url is not None:
jsonld['contIntegration'] = ci_url # depends on [control=['if'], data=['ci_url']]
if readme_url is not None:
jsonld['readme'] = readme_url # depends on [control=['if'], data=['readme_url']]
if license_id is not None:
jsonld['license_id'] = None # depends on [control=['if'], data=[]]
return jsonld |
def checkscript(self, content):
"""Check whether a script is valid
See MANAGESIEVE specifications, section 2.12
:param name: script's content
:rtype: boolean
"""
if "VERSION" not in self.__capabilities:
raise NotImplementedError(
"server does not support CHECKSCRIPT command")
content = tools.to_bytes(content)
content = tools.to_bytes("{%d+}" % len(content)) + CRLF + content
code, data = self.__send_command("CHECKSCRIPT", [content])
if code == "OK":
return True
return False | def function[checkscript, parameter[self, content]]:
constant[Check whether a script is valid
See MANAGESIEVE specifications, section 2.12
:param name: script's content
:rtype: boolean
]
if compare[constant[VERSION] <ast.NotIn object at 0x7da2590d7190> name[self].__capabilities] begin[:]
<ast.Raise object at 0x7da18f00e950>
variable[content] assign[=] call[name[tools].to_bytes, parameter[name[content]]]
variable[content] assign[=] binary_operation[binary_operation[call[name[tools].to_bytes, parameter[binary_operation[constant[{%d+}] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[content]]]]]] + name[CRLF]] + name[content]]
<ast.Tuple object at 0x7da18f721240> assign[=] call[name[self].__send_command, parameter[constant[CHECKSCRIPT], list[[<ast.Name object at 0x7da2054a7880>]]]]
if compare[name[code] equal[==] constant[OK]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[checkscript] ( identifier[self] , identifier[content] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[__capabilities] :
keyword[raise] identifier[NotImplementedError] (
literal[string] )
identifier[content] = identifier[tools] . identifier[to_bytes] ( identifier[content] )
identifier[content] = identifier[tools] . identifier[to_bytes] ( literal[string] % identifier[len] ( identifier[content] ))+ identifier[CRLF] + identifier[content]
identifier[code] , identifier[data] = identifier[self] . identifier[__send_command] ( literal[string] ,[ identifier[content] ])
keyword[if] identifier[code] == literal[string] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def checkscript(self, content):
"""Check whether a script is valid
See MANAGESIEVE specifications, section 2.12
:param name: script's content
:rtype: boolean
"""
if 'VERSION' not in self.__capabilities:
raise NotImplementedError('server does not support CHECKSCRIPT command') # depends on [control=['if'], data=[]]
content = tools.to_bytes(content)
content = tools.to_bytes('{%d+}' % len(content)) + CRLF + content
(code, data) = self.__send_command('CHECKSCRIPT', [content])
if code == 'OK':
return True # depends on [control=['if'], data=[]]
return False |
def write_json(self, date=(datetime.date.today()),
organization='llnl',dict_to_write={}, path_ending_type=''):
"""
Writes all traffic data to file in JSON form.
"""
for repo in dict_to_write:
if len(dict_to_write[repo]) != 0:#don't need to write out empty lists
path = ('../github-data/' + organization + '/' + repo + '/' +
path_ending_type + '/' + str(date) + '.json')
self.checkDir(path)
with open(path, 'w') as out:
out.write(json.dumps(dict_to_write[repo], sort_keys=True,
indent=4, separators=(',', ': ')))
out.close() | def function[write_json, parameter[self, date, organization, dict_to_write, path_ending_type]]:
constant[
Writes all traffic data to file in JSON form.
]
for taget[name[repo]] in starred[name[dict_to_write]] begin[:]
if compare[call[name[len], parameter[call[name[dict_to_write]][name[repo]]]] not_equal[!=] constant[0]] begin[:]
variable[path] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[../github-data/] + name[organization]] + constant[/]] + name[repo]] + constant[/]] + name[path_ending_type]] + constant[/]] + call[name[str], parameter[name[date]]]] + constant[.json]]
call[name[self].checkDir, parameter[name[path]]]
with call[name[open], parameter[name[path], constant[w]]] begin[:]
call[name[out].write, parameter[call[name[json].dumps, parameter[call[name[dict_to_write]][name[repo]]]]]]
call[name[out].close, parameter[]] | keyword[def] identifier[write_json] ( identifier[self] , identifier[date] =( identifier[datetime] . identifier[date] . identifier[today] ()),
identifier[organization] = literal[string] , identifier[dict_to_write] ={}, identifier[path_ending_type] = literal[string] ):
literal[string]
keyword[for] identifier[repo] keyword[in] identifier[dict_to_write] :
keyword[if] identifier[len] ( identifier[dict_to_write] [ identifier[repo] ])!= literal[int] :
identifier[path] =( literal[string] + identifier[organization] + literal[string] + identifier[repo] + literal[string] +
identifier[path_ending_type] + literal[string] + identifier[str] ( identifier[date] )+ literal[string] )
identifier[self] . identifier[checkDir] ( identifier[path] )
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[out] :
identifier[out] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[dict_to_write] [ identifier[repo] ], identifier[sort_keys] = keyword[True] ,
identifier[indent] = literal[int] , identifier[separators] =( literal[string] , literal[string] )))
identifier[out] . identifier[close] () | def write_json(self, date=datetime.date.today(), organization='llnl', dict_to_write={}, path_ending_type=''):
"""
Writes all traffic data to file in JSON form.
"""
for repo in dict_to_write:
if len(dict_to_write[repo]) != 0: #don't need to write out empty lists
path = '../github-data/' + organization + '/' + repo + '/' + path_ending_type + '/' + str(date) + '.json'
self.checkDir(path)
with open(path, 'w') as out:
out.write(json.dumps(dict_to_write[repo], sort_keys=True, indent=4, separators=(',', ': '))) # depends on [control=['with'], data=['out']]
out.close() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['repo']] |
def _Open(self, path_spec, mode='rb'):
"""Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
tsk_image_object = tsk_image.TSKFileSystemImage(file_object)
tsk_file_system = pytsk3.FS_Info(tsk_image_object)
except:
file_object.close()
raise
self._file_object = file_object
self._tsk_file_system = tsk_file_system | def function[_Open, parameter[self, path_spec, mode]]:
constant[Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
]
if <ast.UnaryOp object at 0x7da1b0721e10> begin[:]
<ast.Raise object at 0x7da1b0721540>
variable[file_object] assign[=] call[name[resolver].Resolver.OpenFileObject, parameter[name[path_spec].parent]]
<ast.Try object at 0x7da1b0723220>
name[self]._file_object assign[=] name[file_object]
name[self]._tsk_file_system assign[=] name[tsk_file_system] | keyword[def] identifier[_Open] ( identifier[self] , identifier[path_spec] , identifier[mode] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[path_spec] . identifier[HasParent] ():
keyword[raise] identifier[errors] . identifier[PathSpecError] (
literal[string] )
identifier[file_object] = identifier[resolver] . identifier[Resolver] . identifier[OpenFileObject] (
identifier[path_spec] . identifier[parent] , identifier[resolver_context] = identifier[self] . identifier[_resolver_context] )
keyword[try] :
identifier[tsk_image_object] = identifier[tsk_image] . identifier[TSKFileSystemImage] ( identifier[file_object] )
identifier[tsk_file_system] = identifier[pytsk3] . identifier[FS_Info] ( identifier[tsk_image_object] )
keyword[except] :
identifier[file_object] . identifier[close] ()
keyword[raise]
identifier[self] . identifier[_file_object] = identifier[file_object]
identifier[self] . identifier[_tsk_file_system] = identifier[tsk_file_system] | def _Open(self, path_spec, mode='rb'):
"""Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError('Unsupported path specification without parent.') # depends on [control=['if'], data=[]]
file_object = resolver.Resolver.OpenFileObject(path_spec.parent, resolver_context=self._resolver_context)
try:
tsk_image_object = tsk_image.TSKFileSystemImage(file_object)
tsk_file_system = pytsk3.FS_Info(tsk_image_object) # depends on [control=['try'], data=[]]
except:
file_object.close()
raise # depends on [control=['except'], data=[]]
self._file_object = file_object
self._tsk_file_system = tsk_file_system |
def format_and_annualise(self, raw_cov_array):
"""
Helper method which annualises the output of shrinkage calculations,
and formats the result into a dataframe
:param raw_cov_array: raw covariance matrix of daily returns
:type raw_cov_array: np.ndarray
:return: annualised covariance matrix
:rtype: pd.DataFrame
"""
assets = self.X.columns
return (
pd.DataFrame(raw_cov_array, index=assets, columns=assets) * self.frequency
) | def function[format_and_annualise, parameter[self, raw_cov_array]]:
constant[
Helper method which annualises the output of shrinkage calculations,
and formats the result into a dataframe
:param raw_cov_array: raw covariance matrix of daily returns
:type raw_cov_array: np.ndarray
:return: annualised covariance matrix
:rtype: pd.DataFrame
]
variable[assets] assign[=] name[self].X.columns
return[binary_operation[call[name[pd].DataFrame, parameter[name[raw_cov_array]]] * name[self].frequency]] | keyword[def] identifier[format_and_annualise] ( identifier[self] , identifier[raw_cov_array] ):
literal[string]
identifier[assets] = identifier[self] . identifier[X] . identifier[columns]
keyword[return] (
identifier[pd] . identifier[DataFrame] ( identifier[raw_cov_array] , identifier[index] = identifier[assets] , identifier[columns] = identifier[assets] )* identifier[self] . identifier[frequency]
) | def format_and_annualise(self, raw_cov_array):
"""
Helper method which annualises the output of shrinkage calculations,
and formats the result into a dataframe
:param raw_cov_array: raw covariance matrix of daily returns
:type raw_cov_array: np.ndarray
:return: annualised covariance matrix
:rtype: pd.DataFrame
"""
assets = self.X.columns
return pd.DataFrame(raw_cov_array, index=assets, columns=assets) * self.frequency |
def sg_init(sess):
r""" Initializes session variables.
Args:
sess: Session to initialize.
"""
# initialize variables
sess.run(tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())) | def function[sg_init, parameter[sess]]:
constant[ Initializes session variables.
Args:
sess: Session to initialize.
]
call[name[sess].run, parameter[call[name[tf].group, parameter[call[name[tf].global_variables_initializer, parameter[]], call[name[tf].local_variables_initializer, parameter[]]]]]] | keyword[def] identifier[sg_init] ( identifier[sess] ):
literal[string]
identifier[sess] . identifier[run] ( identifier[tf] . identifier[group] ( identifier[tf] . identifier[global_variables_initializer] (),
identifier[tf] . identifier[local_variables_initializer] ())) | def sg_init(sess):
""" Initializes session variables.
Args:
sess: Session to initialize.
"""
# initialize variables
sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())) |
def exit_with_error(msg):
'''
:param msg: string message to print before exiting
Print the error message, as well as a blurb on where to find the
job workspaces
'''
msg += '\n'
msg += 'Local job workspaces can be found in: ' + str(environ.get('DX_TEST_JOB_HOMEDIRS'))
sys.exit(msg) | def function[exit_with_error, parameter[msg]]:
constant[
:param msg: string message to print before exiting
Print the error message, as well as a blurb on where to find the
job workspaces
]
<ast.AugAssign object at 0x7da1b057ae60>
<ast.AugAssign object at 0x7da1b057aef0>
call[name[sys].exit, parameter[name[msg]]] | keyword[def] identifier[exit_with_error] ( identifier[msg] ):
literal[string]
identifier[msg] += literal[string]
identifier[msg] += literal[string] + identifier[str] ( identifier[environ] . identifier[get] ( literal[string] ))
identifier[sys] . identifier[exit] ( identifier[msg] ) | def exit_with_error(msg):
"""
:param msg: string message to print before exiting
Print the error message, as well as a blurb on where to find the
job workspaces
"""
msg += '\n'
msg += 'Local job workspaces can be found in: ' + str(environ.get('DX_TEST_JOB_HOMEDIRS'))
sys.exit(msg) |
def _trj_load_trajectory(self, traj, as_new, load_parameters, load_derived_parameters,
load_results, load_other_data, recursive, max_depth,
with_run_information, with_meta_data, force):
"""Loads a single trajectory from a given file.
:param traj: The trajectory
:param as_new: Whether to load trajectory as new
:param load_parameters: How to load parameters and config
:param load_derived_parameters: How to load derived parameters
:param load_results: How to load results
:param load_other_data: How to load anything not within the four subbranches
:param recursive: If data should be loaded recursively
:param max_depth: Maximum depth of loading
:param with_run_information:
If run information should be loaded
:param with_meta_data:
If meta data infor should be loaded
:param force: Force load in case there is a pypet version mismatch
You can specify how to load the parameters, derived parameters and results
as follows:
:const:`pypet.pypetconstants.LOAD_NOTHING`: (0)
Nothing is loaded
:const:`pypet.pypetconstants.LOAD_SKELETON`: (1)
The skeleton including annotations are loaded, i.e. the items are empty.
Non-empty items in RAM are left untouched.
:const:`pypet.pypetconstants.LOAD_DATA`: (2)
The whole data is loaded.
Only empty or in RAM non-existing instance are filled with the
data found on disk.
:const:`pypet.pypetconstants.OVERWRITE_DATA`: (3)
The whole data is loaded.
If items that are to be loaded are already in RAM and not empty,
they are emptied and new data is loaded from disk.
If `as_new=True` the old trajectory is loaded into the new one, only parameters can be
loaded. If `as_new=False` the current trajectory is completely replaced by the one
on disk, i.e. the name from disk, the timestamp, etc. are assigned to `traj`.
"""
# Some validity checks, if `as_new` is used correctly
if (as_new and (load_derived_parameters != pypetconstants.LOAD_NOTHING or
load_results != pypetconstants.LOAD_NOTHING or
load_other_data != pypetconstants.LOAD_NOTHING)):
raise ValueError('You cannot load a trajectory as new and load the derived '
'parameters and results. Only parameters are allowed.')
if as_new and load_parameters != pypetconstants.LOAD_DATA:
raise ValueError('You cannot load the trajectory as new and not load the data of '
'the parameters.')
loadconstants = (pypetconstants.LOAD_NOTHING, pypetconstants.LOAD_SKELETON,
pypetconstants.LOAD_DATA, pypetconstants.OVERWRITE_DATA)
if not (load_parameters in loadconstants and load_derived_parameters in loadconstants and
load_results in loadconstants and load_other_data in loadconstants):
raise ValueError('Please give a valid option on how to load data. Options for '
'`load_parameter`, `load_derived_parameters`, `load_results`, '
'and `load_other_data` are %s. See function documentation for '
'the semantics of the values.' % str(loadconstants))
traj._stored = not as_new
# Loads meta data like the name, timestamps etc.
# load_data is only used here to determine how to load the annotations
load_data = max(load_parameters, load_derived_parameters, load_results, load_other_data)
if with_meta_data:
self._trj_load_meta_data(traj, load_data, as_new, with_run_information, force)
if (load_parameters != pypetconstants.LOAD_NOTHING or
load_derived_parameters != pypetconstants.LOAD_NOTHING or
load_results != pypetconstants.LOAD_NOTHING or
load_other_data != pypetconstants.LOAD_NOTHING):
self._logger.info('Loading trajectory `%s`.' % traj.v_name)
else:
self._logger.info('Checked meta data of trajectory `%s`.' % traj.v_name)
return
maximum_display_other = 10
counter = 0
for children in [self._trajectory_group._v_groups, self._trajectory_group._v_links]:
for hdf5_group_name in children:
hdf5_group = children[hdf5_group_name]
child_name = hdf5_group._v_name
load_subbranch = True
if child_name == 'config':
if as_new:
loading = pypetconstants.LOAD_NOTHING
else:
# If the trajectory is loaded as new, we don't care about old config stuff
# and only load the parameters
loading = load_parameters
elif child_name == 'parameters':
loading = load_parameters
elif child_name == 'results':
loading = load_results
elif child_name == 'derived_parameters':
loading = load_derived_parameters
elif child_name == 'overview':
continue
else:
loading = load_other_data
load_subbranch = False
if loading == pypetconstants.LOAD_NOTHING:
continue
if load_subbranch:
# Load the subbranches recursively
self._logger.info('Loading branch `%s` in mode `%s`.' %
(child_name, str(loading)))
else:
if counter < maximum_display_other:
self._logger.info(
'Loading branch/node `%s` in mode `%s`.' % (child_name, str(loading)))
elif counter == maximum_display_other:
self._logger.info('To many branchs or nodes at root for display. '
'I will not inform you about loading anymore. '
'Branches are loaded silently '
'in the background. Do not worry, '
'I will not freeze! Pinky promise!!!')
counter += 1
self._tree_load_sub_branch(traj, child_name, load_data=loading, with_links=True,
recursive=recursive,
max_depth=max_depth,
_trajectory=traj, _as_new=as_new,
_hdf5_group=self._trajectory_group) | def function[_trj_load_trajectory, parameter[self, traj, as_new, load_parameters, load_derived_parameters, load_results, load_other_data, recursive, max_depth, with_run_information, with_meta_data, force]]:
constant[Loads a single trajectory from a given file.
:param traj: The trajectory
:param as_new: Whether to load trajectory as new
:param load_parameters: How to load parameters and config
:param load_derived_parameters: How to load derived parameters
:param load_results: How to load results
:param load_other_data: How to load anything not within the four subbranches
:param recursive: If data should be loaded recursively
:param max_depth: Maximum depth of loading
:param with_run_information:
If run information should be loaded
:param with_meta_data:
If meta data infor should be loaded
:param force: Force load in case there is a pypet version mismatch
You can specify how to load the parameters, derived parameters and results
as follows:
:const:`pypet.pypetconstants.LOAD_NOTHING`: (0)
Nothing is loaded
:const:`pypet.pypetconstants.LOAD_SKELETON`: (1)
The skeleton including annotations are loaded, i.e. the items are empty.
Non-empty items in RAM are left untouched.
:const:`pypet.pypetconstants.LOAD_DATA`: (2)
The whole data is loaded.
Only empty or in RAM non-existing instance are filled with the
data found on disk.
:const:`pypet.pypetconstants.OVERWRITE_DATA`: (3)
The whole data is loaded.
If items that are to be loaded are already in RAM and not empty,
they are emptied and new data is loaded from disk.
If `as_new=True` the old trajectory is loaded into the new one, only parameters can be
loaded. If `as_new=False` the current trajectory is completely replaced by the one
on disk, i.e. the name from disk, the timestamp, etc. are assigned to `traj`.
]
if <ast.BoolOp object at 0x7da1b03b9240> begin[:]
<ast.Raise object at 0x7da1b03b9ff0>
if <ast.BoolOp object at 0x7da1b03b9fc0> begin[:]
<ast.Raise object at 0x7da1b03ba860>
variable[loadconstants] assign[=] tuple[[<ast.Attribute object at 0x7da1b03ba290>, <ast.Attribute object at 0x7da1b03bba90>, <ast.Attribute object at 0x7da1b03bba00>, <ast.Attribute object at 0x7da1b03bb940>]]
if <ast.UnaryOp object at 0x7da1b03bb9d0> begin[:]
<ast.Raise object at 0x7da1b03bbd60>
name[traj]._stored assign[=] <ast.UnaryOp object at 0x7da1b03b9120>
variable[load_data] assign[=] call[name[max], parameter[name[load_parameters], name[load_derived_parameters], name[load_results], name[load_other_data]]]
if name[with_meta_data] begin[:]
call[name[self]._trj_load_meta_data, parameter[name[traj], name[load_data], name[as_new], name[with_run_information], name[force]]]
if <ast.BoolOp object at 0x7da1b03ba5c0> begin[:]
call[name[self]._logger.info, parameter[binary_operation[constant[Loading trajectory `%s`.] <ast.Mod object at 0x7da2590d6920> name[traj].v_name]]]
variable[maximum_display_other] assign[=] constant[10]
variable[counter] assign[=] constant[0]
for taget[name[children]] in starred[list[[<ast.Attribute object at 0x7da1b03ba9e0>, <ast.Attribute object at 0x7da1b03bacb0>]]] begin[:]
for taget[name[hdf5_group_name]] in starred[name[children]] begin[:]
variable[hdf5_group] assign[=] call[name[children]][name[hdf5_group_name]]
variable[child_name] assign[=] name[hdf5_group]._v_name
variable[load_subbranch] assign[=] constant[True]
if compare[name[child_name] equal[==] constant[config]] begin[:]
if name[as_new] begin[:]
variable[loading] assign[=] name[pypetconstants].LOAD_NOTHING
if compare[name[loading] equal[==] name[pypetconstants].LOAD_NOTHING] begin[:]
continue
if name[load_subbranch] begin[:]
call[name[self]._logger.info, parameter[binary_operation[constant[Loading branch `%s` in mode `%s`.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b03b85b0>, <ast.Call object at 0x7da1b03b8520>]]]]]
call[name[self]._tree_load_sub_branch, parameter[name[traj], name[child_name]]] | keyword[def] identifier[_trj_load_trajectory] ( identifier[self] , identifier[traj] , identifier[as_new] , identifier[load_parameters] , identifier[load_derived_parameters] ,
identifier[load_results] , identifier[load_other_data] , identifier[recursive] , identifier[max_depth] ,
identifier[with_run_information] , identifier[with_meta_data] , identifier[force] ):
literal[string]
keyword[if] ( identifier[as_new] keyword[and] ( identifier[load_derived_parameters] != identifier[pypetconstants] . identifier[LOAD_NOTHING] keyword[or]
identifier[load_results] != identifier[pypetconstants] . identifier[LOAD_NOTHING] keyword[or]
identifier[load_other_data] != identifier[pypetconstants] . identifier[LOAD_NOTHING] )):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[as_new] keyword[and] identifier[load_parameters] != identifier[pypetconstants] . identifier[LOAD_DATA] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[loadconstants] =( identifier[pypetconstants] . identifier[LOAD_NOTHING] , identifier[pypetconstants] . identifier[LOAD_SKELETON] ,
identifier[pypetconstants] . identifier[LOAD_DATA] , identifier[pypetconstants] . identifier[OVERWRITE_DATA] )
keyword[if] keyword[not] ( identifier[load_parameters] keyword[in] identifier[loadconstants] keyword[and] identifier[load_derived_parameters] keyword[in] identifier[loadconstants] keyword[and]
identifier[load_results] keyword[in] identifier[loadconstants] keyword[and] identifier[load_other_data] keyword[in] identifier[loadconstants] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string]
literal[string] % identifier[str] ( identifier[loadconstants] ))
identifier[traj] . identifier[_stored] = keyword[not] identifier[as_new]
identifier[load_data] = identifier[max] ( identifier[load_parameters] , identifier[load_derived_parameters] , identifier[load_results] , identifier[load_other_data] )
keyword[if] identifier[with_meta_data] :
identifier[self] . identifier[_trj_load_meta_data] ( identifier[traj] , identifier[load_data] , identifier[as_new] , identifier[with_run_information] , identifier[force] )
keyword[if] ( identifier[load_parameters] != identifier[pypetconstants] . identifier[LOAD_NOTHING] keyword[or]
identifier[load_derived_parameters] != identifier[pypetconstants] . identifier[LOAD_NOTHING] keyword[or]
identifier[load_results] != identifier[pypetconstants] . identifier[LOAD_NOTHING] keyword[or]
identifier[load_other_data] != identifier[pypetconstants] . identifier[LOAD_NOTHING] ):
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] % identifier[traj] . identifier[v_name] )
keyword[else] :
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] % identifier[traj] . identifier[v_name] )
keyword[return]
identifier[maximum_display_other] = literal[int]
identifier[counter] = literal[int]
keyword[for] identifier[children] keyword[in] [ identifier[self] . identifier[_trajectory_group] . identifier[_v_groups] , identifier[self] . identifier[_trajectory_group] . identifier[_v_links] ]:
keyword[for] identifier[hdf5_group_name] keyword[in] identifier[children] :
identifier[hdf5_group] = identifier[children] [ identifier[hdf5_group_name] ]
identifier[child_name] = identifier[hdf5_group] . identifier[_v_name]
identifier[load_subbranch] = keyword[True]
keyword[if] identifier[child_name] == literal[string] :
keyword[if] identifier[as_new] :
identifier[loading] = identifier[pypetconstants] . identifier[LOAD_NOTHING]
keyword[else] :
identifier[loading] = identifier[load_parameters]
keyword[elif] identifier[child_name] == literal[string] :
identifier[loading] = identifier[load_parameters]
keyword[elif] identifier[child_name] == literal[string] :
identifier[loading] = identifier[load_results]
keyword[elif] identifier[child_name] == literal[string] :
identifier[loading] = identifier[load_derived_parameters]
keyword[elif] identifier[child_name] == literal[string] :
keyword[continue]
keyword[else] :
identifier[loading] = identifier[load_other_data]
identifier[load_subbranch] = keyword[False]
keyword[if] identifier[loading] == identifier[pypetconstants] . identifier[LOAD_NOTHING] :
keyword[continue]
keyword[if] identifier[load_subbranch] :
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] %
( identifier[child_name] , identifier[str] ( identifier[loading] )))
keyword[else] :
keyword[if] identifier[counter] < identifier[maximum_display_other] :
identifier[self] . identifier[_logger] . identifier[info] (
literal[string] %( identifier[child_name] , identifier[str] ( identifier[loading] )))
keyword[elif] identifier[counter] == identifier[maximum_display_other] :
identifier[self] . identifier[_logger] . identifier[info] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[counter] += literal[int]
identifier[self] . identifier[_tree_load_sub_branch] ( identifier[traj] , identifier[child_name] , identifier[load_data] = identifier[loading] , identifier[with_links] = keyword[True] ,
identifier[recursive] = identifier[recursive] ,
identifier[max_depth] = identifier[max_depth] ,
identifier[_trajectory] = identifier[traj] , identifier[_as_new] = identifier[as_new] ,
identifier[_hdf5_group] = identifier[self] . identifier[_trajectory_group] ) | def _trj_load_trajectory(self, traj, as_new, load_parameters, load_derived_parameters, load_results, load_other_data, recursive, max_depth, with_run_information, with_meta_data, force):
"""Loads a single trajectory from a given file.
:param traj: The trajectory
:param as_new: Whether to load trajectory as new
:param load_parameters: How to load parameters and config
:param load_derived_parameters: How to load derived parameters
:param load_results: How to load results
:param load_other_data: How to load anything not within the four subbranches
:param recursive: If data should be loaded recursively
:param max_depth: Maximum depth of loading
:param with_run_information:
If run information should be loaded
:param with_meta_data:
If meta data infor should be loaded
:param force: Force load in case there is a pypet version mismatch
You can specify how to load the parameters, derived parameters and results
as follows:
:const:`pypet.pypetconstants.LOAD_NOTHING`: (0)
Nothing is loaded
:const:`pypet.pypetconstants.LOAD_SKELETON`: (1)
The skeleton including annotations are loaded, i.e. the items are empty.
Non-empty items in RAM are left untouched.
:const:`pypet.pypetconstants.LOAD_DATA`: (2)
The whole data is loaded.
Only empty or in RAM non-existing instance are filled with the
data found on disk.
:const:`pypet.pypetconstants.OVERWRITE_DATA`: (3)
The whole data is loaded.
If items that are to be loaded are already in RAM and not empty,
they are emptied and new data is loaded from disk.
If `as_new=True` the old trajectory is loaded into the new one, only parameters can be
loaded. If `as_new=False` the current trajectory is completely replaced by the one
on disk, i.e. the name from disk, the timestamp, etc. are assigned to `traj`.
"""
# Some validity checks, if `as_new` is used correctly
if as_new and (load_derived_parameters != pypetconstants.LOAD_NOTHING or load_results != pypetconstants.LOAD_NOTHING or load_other_data != pypetconstants.LOAD_NOTHING):
raise ValueError('You cannot load a trajectory as new and load the derived parameters and results. Only parameters are allowed.') # depends on [control=['if'], data=[]]
if as_new and load_parameters != pypetconstants.LOAD_DATA:
raise ValueError('You cannot load the trajectory as new and not load the data of the parameters.') # depends on [control=['if'], data=[]]
loadconstants = (pypetconstants.LOAD_NOTHING, pypetconstants.LOAD_SKELETON, pypetconstants.LOAD_DATA, pypetconstants.OVERWRITE_DATA)
if not (load_parameters in loadconstants and load_derived_parameters in loadconstants and (load_results in loadconstants) and (load_other_data in loadconstants)):
raise ValueError('Please give a valid option on how to load data. Options for `load_parameter`, `load_derived_parameters`, `load_results`, and `load_other_data` are %s. See function documentation for the semantics of the values.' % str(loadconstants)) # depends on [control=['if'], data=[]]
traj._stored = not as_new
# Loads meta data like the name, timestamps etc.
# load_data is only used here to determine how to load the annotations
load_data = max(load_parameters, load_derived_parameters, load_results, load_other_data)
if with_meta_data:
self._trj_load_meta_data(traj, load_data, as_new, with_run_information, force) # depends on [control=['if'], data=[]]
if load_parameters != pypetconstants.LOAD_NOTHING or load_derived_parameters != pypetconstants.LOAD_NOTHING or load_results != pypetconstants.LOAD_NOTHING or (load_other_data != pypetconstants.LOAD_NOTHING):
self._logger.info('Loading trajectory `%s`.' % traj.v_name) # depends on [control=['if'], data=[]]
else:
self._logger.info('Checked meta data of trajectory `%s`.' % traj.v_name)
return
maximum_display_other = 10
counter = 0
for children in [self._trajectory_group._v_groups, self._trajectory_group._v_links]:
for hdf5_group_name in children:
hdf5_group = children[hdf5_group_name]
child_name = hdf5_group._v_name
load_subbranch = True
if child_name == 'config':
if as_new:
loading = pypetconstants.LOAD_NOTHING # depends on [control=['if'], data=[]]
else:
# If the trajectory is loaded as new, we don't care about old config stuff
# and only load the parameters
loading = load_parameters # depends on [control=['if'], data=[]]
elif child_name == 'parameters':
loading = load_parameters # depends on [control=['if'], data=[]]
elif child_name == 'results':
loading = load_results # depends on [control=['if'], data=[]]
elif child_name == 'derived_parameters':
loading = load_derived_parameters # depends on [control=['if'], data=[]]
elif child_name == 'overview':
continue # depends on [control=['if'], data=[]]
else:
loading = load_other_data
load_subbranch = False
if loading == pypetconstants.LOAD_NOTHING:
continue # depends on [control=['if'], data=[]]
if load_subbranch:
# Load the subbranches recursively
self._logger.info('Loading branch `%s` in mode `%s`.' % (child_name, str(loading))) # depends on [control=['if'], data=[]]
else:
if counter < maximum_display_other:
self._logger.info('Loading branch/node `%s` in mode `%s`.' % (child_name, str(loading))) # depends on [control=['if'], data=[]]
elif counter == maximum_display_other:
self._logger.info('To many branchs or nodes at root for display. I will not inform you about loading anymore. Branches are loaded silently in the background. Do not worry, I will not freeze! Pinky promise!!!') # depends on [control=['if'], data=[]]
counter += 1
self._tree_load_sub_branch(traj, child_name, load_data=loading, with_links=True, recursive=recursive, max_depth=max_depth, _trajectory=traj, _as_new=as_new, _hdf5_group=self._trajectory_group) # depends on [control=['for'], data=['hdf5_group_name']] # depends on [control=['for'], data=['children']] |
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = collections.OrderedDict()
for name, value in meta.items():
name = name.lower()
if name in formatted_fields:
rendered = self._render(value).strip()
output[name] = self.process_metadata(name, rendered)
else:
output[name] = self.process_metadata(name, value)
return output | def function[_parse_metadata, parameter[self, meta]]:
constant[Return the dict containing document metadata]
variable[formatted_fields] assign[=] call[name[self].settings][constant[FORMATTED_FIELDS]]
variable[output] assign[=] call[name[collections].OrderedDict, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18c4cffd0>, <ast.Name object at 0x7da18c4cc580>]]] in starred[call[name[meta].items, parameter[]]] begin[:]
variable[name] assign[=] call[name[name].lower, parameter[]]
if compare[name[name] in name[formatted_fields]] begin[:]
variable[rendered] assign[=] call[call[name[self]._render, parameter[name[value]]].strip, parameter[]]
call[name[output]][name[name]] assign[=] call[name[self].process_metadata, parameter[name[name], name[rendered]]]
return[name[output]] | keyword[def] identifier[_parse_metadata] ( identifier[self] , identifier[meta] ):
literal[string]
identifier[formatted_fields] = identifier[self] . identifier[settings] [ literal[string] ]
identifier[output] = identifier[collections] . identifier[OrderedDict] ()
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[meta] . identifier[items] ():
identifier[name] = identifier[name] . identifier[lower] ()
keyword[if] identifier[name] keyword[in] identifier[formatted_fields] :
identifier[rendered] = identifier[self] . identifier[_render] ( identifier[value] ). identifier[strip] ()
identifier[output] [ identifier[name] ]= identifier[self] . identifier[process_metadata] ( identifier[name] , identifier[rendered] )
keyword[else] :
identifier[output] [ identifier[name] ]= identifier[self] . identifier[process_metadata] ( identifier[name] , identifier[value] )
keyword[return] identifier[output] | def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = collections.OrderedDict()
for (name, value) in meta.items():
name = name.lower()
if name in formatted_fields:
rendered = self._render(value).strip()
output[name] = self.process_metadata(name, rendered) # depends on [control=['if'], data=['name']]
else:
output[name] = self.process_metadata(name, value) # depends on [control=['for'], data=[]]
return output |
def groups(self) -> typing.Iterator['Group']:
"""
Returns: generator of all groups in this country
"""
for group_category in Mission.valid_group_categories:
if group_category in self._section_this_country.keys():
for group_index in self._section_this_country[group_category]['group']:
if group_index not in self.__groups[group_category]:
self.__groups[group_category][group_index] = Group(self.d, self.l10n, self.coa_color,
self.country_index, group_category,
group_index)
yield self.__groups[group_category][group_index] | def function[groups, parameter[self]]:
constant[
Returns: generator of all groups in this country
]
for taget[name[group_category]] in starred[name[Mission].valid_group_categories] begin[:]
if compare[name[group_category] in call[name[self]._section_this_country.keys, parameter[]]] begin[:]
for taget[name[group_index]] in starred[call[call[name[self]._section_this_country][name[group_category]]][constant[group]]] begin[:]
if compare[name[group_index] <ast.NotIn object at 0x7da2590d7190> call[name[self].__groups][name[group_category]]] begin[:]
call[call[name[self].__groups][name[group_category]]][name[group_index]] assign[=] call[name[Group], parameter[name[self].d, name[self].l10n, name[self].coa_color, name[self].country_index, name[group_category], name[group_index]]]
<ast.Yield object at 0x7da1b144d930> | keyword[def] identifier[groups] ( identifier[self] )-> identifier[typing] . identifier[Iterator] [ literal[string] ]:
literal[string]
keyword[for] identifier[group_category] keyword[in] identifier[Mission] . identifier[valid_group_categories] :
keyword[if] identifier[group_category] keyword[in] identifier[self] . identifier[_section_this_country] . identifier[keys] ():
keyword[for] identifier[group_index] keyword[in] identifier[self] . identifier[_section_this_country] [ identifier[group_category] ][ literal[string] ]:
keyword[if] identifier[group_index] keyword[not] keyword[in] identifier[self] . identifier[__groups] [ identifier[group_category] ]:
identifier[self] . identifier[__groups] [ identifier[group_category] ][ identifier[group_index] ]= identifier[Group] ( identifier[self] . identifier[d] , identifier[self] . identifier[l10n] , identifier[self] . identifier[coa_color] ,
identifier[self] . identifier[country_index] , identifier[group_category] ,
identifier[group_index] )
keyword[yield] identifier[self] . identifier[__groups] [ identifier[group_category] ][ identifier[group_index] ] | def groups(self) -> typing.Iterator['Group']:
"""
Returns: generator of all groups in this country
"""
for group_category in Mission.valid_group_categories:
if group_category in self._section_this_country.keys():
for group_index in self._section_this_country[group_category]['group']:
if group_index not in self.__groups[group_category]:
self.__groups[group_category][group_index] = Group(self.d, self.l10n, self.coa_color, self.country_index, group_category, group_index) # depends on [control=['if'], data=['group_index']]
yield self.__groups[group_category][group_index] # depends on [control=['for'], data=['group_index']] # depends on [control=['if'], data=['group_category']] # depends on [control=['for'], data=['group_category']] |
def atan2(y, x, context=None):
"""
Return ``atan(y / x)`` with the appropriate choice of function branch.
If ``x > 0``, then ``atan2(y, x)`` is mathematically equivalent to ``atan(y
/ x)``. If ``x < 0`` and ``y > 0``, ``atan(y, x)`` is equivalent to ``π +
atan(y, x)``. If ``x < 0`` and ``y < 0``, the result is ``-π + atan(y,
x)``.
Geometrically, ``atan2(y, x)`` is the angle (measured counterclockwise, in
radians) from the positive x-axis to the line segment joining (0, 0) to (x,
y), in the usual representation of the x-y plane.
Special values are handled as described in the ISO C99 and IEEE 754-2008
standards for the atan2 function. The following examples illustrate the
rules for positive y; for negative y, apply the symmetry ``atan(-y, x) ==
-atan(y, x)``.
>>> finite = positive = 2.3
>>> negative = -2.3
>>> inf = BigFloat('inf')
>>> print(atan2(+0.0, -0.0)) # pi
3.1415926535897931
>>> print(atan2(+0.0, +0.0)) # 0
0
>>> print(atan2(+0.0, negative)) # pi
3.1415926535897931
>>> print(atan2(+0.0, positive)) # 0
0
>>> print(atan2(positive, 0.0)) # pi / 2
1.5707963267948966
>>> print(atan2(inf, -inf)) # 3*pi / 4
2.3561944901923448
>>> print(atan2(inf, inf)) # pi / 4
0.78539816339744828
>>> print(atan2(inf, finite)) # pi / 2
1.5707963267948966
>>> print(atan2(positive, -inf)) # pi
3.1415926535897931
>>> print(atan2(positive, +inf)) # 0
0
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_atan2,
(
BigFloat._implicit_convert(y),
BigFloat._implicit_convert(x),
),
context,
) | def function[atan2, parameter[y, x, context]]:
constant[
Return ``atan(y / x)`` with the appropriate choice of function branch.
If ``x > 0``, then ``atan2(y, x)`` is mathematically equivalent to ``atan(y
/ x)``. If ``x < 0`` and ``y > 0``, ``atan(y, x)`` is equivalent to ``π +
atan(y, x)``. If ``x < 0`` and ``y < 0``, the result is ``-π + atan(y,
x)``.
Geometrically, ``atan2(y, x)`` is the angle (measured counterclockwise, in
radians) from the positive x-axis to the line segment joining (0, 0) to (x,
y), in the usual representation of the x-y plane.
Special values are handled as described in the ISO C99 and IEEE 754-2008
standards for the atan2 function. The following examples illustrate the
rules for positive y; for negative y, apply the symmetry ``atan(-y, x) ==
-atan(y, x)``.
>>> finite = positive = 2.3
>>> negative = -2.3
>>> inf = BigFloat('inf')
>>> print(atan2(+0.0, -0.0)) # pi
3.1415926535897931
>>> print(atan2(+0.0, +0.0)) # 0
0
>>> print(atan2(+0.0, negative)) # pi
3.1415926535897931
>>> print(atan2(+0.0, positive)) # 0
0
>>> print(atan2(positive, 0.0)) # pi / 2
1.5707963267948966
>>> print(atan2(inf, -inf)) # 3*pi / 4
2.3561944901923448
>>> print(atan2(inf, inf)) # pi / 4
0.78539816339744828
>>> print(atan2(inf, finite)) # pi / 2
1.5707963267948966
>>> print(atan2(positive, -inf)) # pi
3.1415926535897931
>>> print(atan2(positive, +inf)) # 0
0
]
return[call[name[_apply_function_in_current_context], parameter[name[BigFloat], name[mpfr].mpfr_atan2, tuple[[<ast.Call object at 0x7da207f98c10>, <ast.Call object at 0x7da207f9b4f0>]], name[context]]]] | keyword[def] identifier[atan2] ( identifier[y] , identifier[x] , identifier[context] = keyword[None] ):
literal[string]
keyword[return] identifier[_apply_function_in_current_context] (
identifier[BigFloat] ,
identifier[mpfr] . identifier[mpfr_atan2] ,
(
identifier[BigFloat] . identifier[_implicit_convert] ( identifier[y] ),
identifier[BigFloat] . identifier[_implicit_convert] ( identifier[x] ),
),
identifier[context] ,
) | def atan2(y, x, context=None):
"""
Return ``atan(y / x)`` with the appropriate choice of function branch.
If ``x > 0``, then ``atan2(y, x)`` is mathematically equivalent to ``atan(y
/ x)``. If ``x < 0`` and ``y > 0``, ``atan(y, x)`` is equivalent to ``π +
atan(y, x)``. If ``x < 0`` and ``y < 0``, the result is ``-π + atan(y,
x)``.
Geometrically, ``atan2(y, x)`` is the angle (measured counterclockwise, in
radians) from the positive x-axis to the line segment joining (0, 0) to (x,
y), in the usual representation of the x-y plane.
Special values are handled as described in the ISO C99 and IEEE 754-2008
standards for the atan2 function. The following examples illustrate the
rules for positive y; for negative y, apply the symmetry ``atan(-y, x) ==
-atan(y, x)``.
>>> finite = positive = 2.3
>>> negative = -2.3
>>> inf = BigFloat('inf')
>>> print(atan2(+0.0, -0.0)) # pi
3.1415926535897931
>>> print(atan2(+0.0, +0.0)) # 0
0
>>> print(atan2(+0.0, negative)) # pi
3.1415926535897931
>>> print(atan2(+0.0, positive)) # 0
0
>>> print(atan2(positive, 0.0)) # pi / 2
1.5707963267948966
>>> print(atan2(inf, -inf)) # 3*pi / 4
2.3561944901923448
>>> print(atan2(inf, inf)) # pi / 4
0.78539816339744828
>>> print(atan2(inf, finite)) # pi / 2
1.5707963267948966
>>> print(atan2(positive, -inf)) # pi
3.1415926535897931
>>> print(atan2(positive, +inf)) # 0
0
"""
return _apply_function_in_current_context(BigFloat, mpfr.mpfr_atan2, (BigFloat._implicit_convert(y), BigFloat._implicit_convert(x)), context) |
def _init_display(self):
"""!
\~english
Initialize the SSD1306 display chip
\~chinese
初始化SSD1306显示芯片
"""
self._command([
# 0xAE
self.CMD_SSD1306_DISPLAY_OFF,
#Stop Scroll
self.CMD_SSD1306_SET_SCROLL_DEACTIVE,
# 0xA8 SET MULTIPLEX 0x3F
self.CMD_SSD1306_SET_MULTIPLEX_RATIO,
0x3F,
# 0xD3 SET DISPLAY OFFSET
self.CMD_SSD1306_SET_DISPLAY_OFFSET,
0x00,
# 0x40 Set Mapping RAM Display Start Line (0x00~0x3F)
self.CMD_SSD1306_SET_DISPLAY_START_LINE,
# 0xDA Set COM Pins hardware configuration, (0x00/0x01/0x02)
self.CMD_SSD1306_SET_COM_PINS,
(0x02 | 0x10),
self.CMD_SSD1306_SET_CONTRAST,
0x7F,
# 0xA4 Disable Entire Display On
self.CMD_SSD1306_ENTIRE_DISPLAY_ON_0,
# 0xA6 Set normal display
self.CMD_SSD1306_NORMAL_DISPLAY,
# 0xA7 Set inverse display
# CMD_SSD1306_INVERSE_DISPLAY,
# 0xD5 Set osc frequency 0x80
self.CMD_SSD1306_SET_CLOCK_DIVIDE_RATIO,
0x80,
# 0x8D Enable DC/DC charge pump regulator 0x14
self.CMD_SSD1306_CHARGE_PUMP,
0x14,
# 0x20 Set Page Addressing Mode (0x00/0x01/0x02)
self.CMD_SSD1306_SET_MEM_ADDR_MODE,
0x01,
# 0xC0 / 0xC8 Set COM Output Scan Direction
#CMD_SSD1306_SCAN_DIRECTION_INC,
#CMD_SSD1306_SCAN_DIRECTION_DEC,
self.CMD_SSD1306_SCAN_DIRECTION_INC if self._mirror_v else self.CMD_SSD1306_SCAN_DIRECTION_DEC,
# 0xA0 / oxA1 Set Segment re-map
# 0xA0 left to right
# 0xA1 right to left
self.CMD_SSD1306_SET_SEGMENT_REMAP_0 if self._mirror_h else self.CMD_SSD1306_SET_SEGMENT_REMAP_1,
]) | def function[_init_display, parameter[self]]:
constant[!
\~english
Initialize the SSD1306 display chip
\~chinese
初始化SSD1306显示芯片
]
call[name[self]._command, parameter[list[[<ast.Attribute object at 0x7da2054a5d50>, <ast.Attribute object at 0x7da2054a68c0>, <ast.Attribute object at 0x7da2054a6fb0>, <ast.Constant object at 0x7da2054a7700>, <ast.Attribute object at 0x7da2054a6a70>, <ast.Constant object at 0x7da2054a4af0>, <ast.Attribute object at 0x7da2054a70d0>, <ast.Attribute object at 0x7da2054a46a0>, <ast.BinOp object at 0x7da2054a73d0>, <ast.Attribute object at 0x7da2054a4d30>, <ast.Constant object at 0x7da2054a7d90>, <ast.Attribute object at 0x7da2054a7550>, <ast.Attribute object at 0x7da2054a4ca0>, <ast.Attribute object at 0x7da2054a4c40>, <ast.Constant object at 0x7da2054a52d0>, <ast.Attribute object at 0x7da2054a5ed0>, <ast.Constant object at 0x7da2054a6d70>, <ast.Attribute object at 0x7da2054a7f10>, <ast.Constant object at 0x7da2054a5930>, <ast.IfExp object at 0x7da2054a5150>, <ast.IfExp object at 0x7da2054a6290>]]]] | keyword[def] identifier[_init_display] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_command] ([
identifier[self] . identifier[CMD_SSD1306_DISPLAY_OFF] ,
identifier[self] . identifier[CMD_SSD1306_SET_SCROLL_DEACTIVE] ,
identifier[self] . identifier[CMD_SSD1306_SET_MULTIPLEX_RATIO] ,
literal[int] ,
identifier[self] . identifier[CMD_SSD1306_SET_DISPLAY_OFFSET] ,
literal[int] ,
identifier[self] . identifier[CMD_SSD1306_SET_DISPLAY_START_LINE] ,
identifier[self] . identifier[CMD_SSD1306_SET_COM_PINS] ,
( literal[int] | literal[int] ),
identifier[self] . identifier[CMD_SSD1306_SET_CONTRAST] ,
literal[int] ,
identifier[self] . identifier[CMD_SSD1306_ENTIRE_DISPLAY_ON_0] ,
identifier[self] . identifier[CMD_SSD1306_NORMAL_DISPLAY] ,
identifier[self] . identifier[CMD_SSD1306_SET_CLOCK_DIVIDE_RATIO] ,
literal[int] ,
identifier[self] . identifier[CMD_SSD1306_CHARGE_PUMP] ,
literal[int] ,
identifier[self] . identifier[CMD_SSD1306_SET_MEM_ADDR_MODE] ,
literal[int] ,
identifier[self] . identifier[CMD_SSD1306_SCAN_DIRECTION_INC] keyword[if] identifier[self] . identifier[_mirror_v] keyword[else] identifier[self] . identifier[CMD_SSD1306_SCAN_DIRECTION_DEC] ,
identifier[self] . identifier[CMD_SSD1306_SET_SEGMENT_REMAP_0] keyword[if] identifier[self] . identifier[_mirror_h] keyword[else] identifier[self] . identifier[CMD_SSD1306_SET_SEGMENT_REMAP_1] ,
]) | def _init_display(self):
"""!
\\~english
Initialize the SSD1306 display chip
\\~chinese
初始化SSD1306显示芯片
"""
# 0xAE
#Stop Scroll
# 0xA8 SET MULTIPLEX 0x3F
# 0xD3 SET DISPLAY OFFSET
# 0x40 Set Mapping RAM Display Start Line (0x00~0x3F)
# 0xDA Set COM Pins hardware configuration, (0x00/0x01/0x02)
# 0xA4 Disable Entire Display On
# 0xA6 Set normal display
# 0xA7 Set inverse display
# CMD_SSD1306_INVERSE_DISPLAY,
# 0xD5 Set osc frequency 0x80
# 0x8D Enable DC/DC charge pump regulator 0x14
# 0x20 Set Page Addressing Mode (0x00/0x01/0x02)
# 0xC0 / 0xC8 Set COM Output Scan Direction
#CMD_SSD1306_SCAN_DIRECTION_INC,
#CMD_SSD1306_SCAN_DIRECTION_DEC,
# 0xA0 / oxA1 Set Segment re-map
# 0xA0 left to right
# 0xA1 right to left
self._command([self.CMD_SSD1306_DISPLAY_OFF, self.CMD_SSD1306_SET_SCROLL_DEACTIVE, self.CMD_SSD1306_SET_MULTIPLEX_RATIO, 63, self.CMD_SSD1306_SET_DISPLAY_OFFSET, 0, self.CMD_SSD1306_SET_DISPLAY_START_LINE, self.CMD_SSD1306_SET_COM_PINS, 2 | 16, self.CMD_SSD1306_SET_CONTRAST, 127, self.CMD_SSD1306_ENTIRE_DISPLAY_ON_0, self.CMD_SSD1306_NORMAL_DISPLAY, self.CMD_SSD1306_SET_CLOCK_DIVIDE_RATIO, 128, self.CMD_SSD1306_CHARGE_PUMP, 20, self.CMD_SSD1306_SET_MEM_ADDR_MODE, 1, self.CMD_SSD1306_SCAN_DIRECTION_INC if self._mirror_v else self.CMD_SSD1306_SCAN_DIRECTION_DEC, self.CMD_SSD1306_SET_SEGMENT_REMAP_0 if self._mirror_h else self.CMD_SSD1306_SET_SEGMENT_REMAP_1]) |
def get_portchannel_info_by_intf_output_lacp_system_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
system_priority = ET.SubElement(lacp, "system-priority")
system_priority.text = kwargs.pop('system_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_portchannel_info_by_intf_output_lacp_system_priority, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_portchannel_info_by_intf] assign[=] call[name[ET].Element, parameter[constant[get_portchannel_info_by_intf]]]
variable[config] assign[=] name[get_portchannel_info_by_intf]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_portchannel_info_by_intf], constant[output]]]
variable[lacp] assign[=] call[name[ET].SubElement, parameter[name[output], constant[lacp]]]
variable[system_priority] assign[=] call[name[ET].SubElement, parameter[name[lacp], constant[system-priority]]]
name[system_priority].text assign[=] call[name[kwargs].pop, parameter[constant[system_priority]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_portchannel_info_by_intf_output_lacp_system_priority] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_portchannel_info_by_intf] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_portchannel_info_by_intf]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_portchannel_info_by_intf] , literal[string] )
identifier[lacp] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[system_priority] = identifier[ET] . identifier[SubElement] ( identifier[lacp] , literal[string] )
identifier[system_priority] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_portchannel_info_by_intf_output_lacp_system_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_portchannel_info_by_intf = ET.Element('get_portchannel_info_by_intf')
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, 'output')
lacp = ET.SubElement(output, 'lacp')
system_priority = ET.SubElement(lacp, 'system-priority')
system_priority.text = kwargs.pop('system_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _get_signing_jwk_key_set(jwt_issuer):
"""
Returns a JWK Keyset containing all active keys that are configured
for verifying signatures.
"""
key_set = KEYS()
# asymmetric keys
signing_jwk_set = settings.JWT_AUTH.get('JWT_PUBLIC_SIGNING_JWK_SET')
if signing_jwk_set:
key_set.load_jwks(signing_jwk_set)
# symmetric key
key_set.add({'key': jwt_issuer['SECRET_KEY'], 'kty': 'oct'})
return key_set | def function[_get_signing_jwk_key_set, parameter[jwt_issuer]]:
constant[
Returns a JWK Keyset containing all active keys that are configured
for verifying signatures.
]
variable[key_set] assign[=] call[name[KEYS], parameter[]]
variable[signing_jwk_set] assign[=] call[name[settings].JWT_AUTH.get, parameter[constant[JWT_PUBLIC_SIGNING_JWK_SET]]]
if name[signing_jwk_set] begin[:]
call[name[key_set].load_jwks, parameter[name[signing_jwk_set]]]
call[name[key_set].add, parameter[dictionary[[<ast.Constant object at 0x7da1b04d5d50>, <ast.Constant object at 0x7da1b04d61a0>], [<ast.Subscript object at 0x7da1b04d46d0>, <ast.Constant object at 0x7da1b04d4a30>]]]]
return[name[key_set]] | keyword[def] identifier[_get_signing_jwk_key_set] ( identifier[jwt_issuer] ):
literal[string]
identifier[key_set] = identifier[KEYS] ()
identifier[signing_jwk_set] = identifier[settings] . identifier[JWT_AUTH] . identifier[get] ( literal[string] )
keyword[if] identifier[signing_jwk_set] :
identifier[key_set] . identifier[load_jwks] ( identifier[signing_jwk_set] )
identifier[key_set] . identifier[add] ({ literal[string] : identifier[jwt_issuer] [ literal[string] ], literal[string] : literal[string] })
keyword[return] identifier[key_set] | def _get_signing_jwk_key_set(jwt_issuer):
"""
Returns a JWK Keyset containing all active keys that are configured
for verifying signatures.
"""
key_set = KEYS()
# asymmetric keys
signing_jwk_set = settings.JWT_AUTH.get('JWT_PUBLIC_SIGNING_JWK_SET')
if signing_jwk_set:
key_set.load_jwks(signing_jwk_set) # depends on [control=['if'], data=[]]
# symmetric key
key_set.add({'key': jwt_issuer['SECRET_KEY'], 'kty': 'oct'})
return key_set |
def list_virtual_networks(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
List input endpoints associated with the deployment
CLI Example:
.. code-block:: bash
salt-cloud -f list_virtual_networks my-azure service=myservice deployment=mydeployment
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_virtual_networks function must be called with -f or --function.'
)
path = 'services/networking/virtualnetwork'
data = query(path)
return data | def function[list_virtual_networks, parameter[kwargs, conn, call]]:
constant[
.. versionadded:: 2015.8.0
List input endpoints associated with the deployment
CLI Example:
.. code-block:: bash
salt-cloud -f list_virtual_networks my-azure service=myservice deployment=mydeployment
]
if compare[name[call] not_equal[!=] constant[function]] begin[:]
<ast.Raise object at 0x7da20c76de70>
variable[path] assign[=] constant[services/networking/virtualnetwork]
variable[data] assign[=] call[name[query], parameter[name[path]]]
return[name[data]] | keyword[def] identifier[list_virtual_networks] ( identifier[kwargs] = keyword[None] , identifier[conn] = keyword[None] , identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] != literal[string] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
)
identifier[path] = literal[string]
identifier[data] = identifier[query] ( identifier[path] )
keyword[return] identifier[data] | def list_virtual_networks(kwargs=None, conn=None, call=None):
"""
.. versionadded:: 2015.8.0
List input endpoints associated with the deployment
CLI Example:
.. code-block:: bash
salt-cloud -f list_virtual_networks my-azure service=myservice deployment=mydeployment
"""
if call != 'function':
raise SaltCloudSystemExit('The list_virtual_networks function must be called with -f or --function.') # depends on [control=['if'], data=[]]
path = 'services/networking/virtualnetwork'
data = query(path)
return data |
def set_state(self, state):
"""
:param state: a boolean of true (on) or false ('off')
:return: nothing
"""
values = {"desired_state": {"locked": state}}
response = self.api_interface.local_set_state(self, values)
self._update_state_from_response(response) | def function[set_state, parameter[self, state]]:
constant[
:param state: a boolean of true (on) or false ('off')
:return: nothing
]
variable[values] assign[=] dictionary[[<ast.Constant object at 0x7da1b2632320>], [<ast.Dict object at 0x7da1b2631210>]]
variable[response] assign[=] call[name[self].api_interface.local_set_state, parameter[name[self], name[values]]]
call[name[self]._update_state_from_response, parameter[name[response]]] | keyword[def] identifier[set_state] ( identifier[self] , identifier[state] ):
literal[string]
identifier[values] ={ literal[string] :{ literal[string] : identifier[state] }}
identifier[response] = identifier[self] . identifier[api_interface] . identifier[local_set_state] ( identifier[self] , identifier[values] )
identifier[self] . identifier[_update_state_from_response] ( identifier[response] ) | def set_state(self, state):
"""
:param state: a boolean of true (on) or false ('off')
:return: nothing
"""
values = {'desired_state': {'locked': state}}
response = self.api_interface.local_set_state(self, values)
self._update_state_from_response(response) |
def _collect_conflicts_within(
context, # type: ValidationContext
conflicts, # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
compared_fragments, # type: PairSet
field_map, # type: Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]]
):
# type: (...) -> None
"""Collect all Conflicts "within" one collection of fields."""
# field map is a keyed collection, where each key represents a response
# name and the value at that key is a list of all fields which provide that
# response name. For every response name, if there are multiple fields, they
# must be compared to find a potential conflict.
for response_name, fields in list(field_map.items()):
# This compares every field in the list to every other field in this list
# (except to itself). If the list only has one item, nothing needs to
# be compared.
for i, field in enumerate(fields):
for other_field in fields[i + 1 :]:
# within one collection is never mutually exclusive
conflict = _find_conflict(
context,
cached_fields_and_fragment_names,
compared_fragments,
False,
response_name,
field,
other_field,
)
if conflict:
conflicts.append(conflict) | def function[_collect_conflicts_within, parameter[context, conflicts, cached_fields_and_fragment_names, compared_fragments, field_map]]:
constant[Collect all Conflicts "within" one collection of fields.]
for taget[tuple[[<ast.Name object at 0x7da1b1986e60>, <ast.Name object at 0x7da1b1987f70>]]] in starred[call[name[list], parameter[call[name[field_map].items, parameter[]]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1987310>, <ast.Name object at 0x7da1b19861a0>]]] in starred[call[name[enumerate], parameter[name[fields]]]] begin[:]
for taget[name[other_field]] in starred[call[name[fields]][<ast.Slice object at 0x7da1b19b8fa0>]] begin[:]
variable[conflict] assign[=] call[name[_find_conflict], parameter[name[context], name[cached_fields_and_fragment_names], name[compared_fragments], constant[False], name[response_name], name[field], name[other_field]]]
if name[conflict] begin[:]
call[name[conflicts].append, parameter[name[conflict]]] | keyword[def] identifier[_collect_conflicts_within] (
identifier[context] ,
identifier[conflicts] ,
identifier[cached_fields_and_fragment_names] ,
identifier[compared_fragments] ,
identifier[field_map] ,
):
literal[string]
keyword[for] identifier[response_name] , identifier[fields] keyword[in] identifier[list] ( identifier[field_map] . identifier[items] ()):
keyword[for] identifier[i] , identifier[field] keyword[in] identifier[enumerate] ( identifier[fields] ):
keyword[for] identifier[other_field] keyword[in] identifier[fields] [ identifier[i] + literal[int] :]:
identifier[conflict] = identifier[_find_conflict] (
identifier[context] ,
identifier[cached_fields_and_fragment_names] ,
identifier[compared_fragments] ,
keyword[False] ,
identifier[response_name] ,
identifier[field] ,
identifier[other_field] ,
)
keyword[if] identifier[conflict] :
identifier[conflicts] . identifier[append] ( identifier[conflict] ) | def _collect_conflicts_within(context, conflicts, cached_fields_and_fragment_names, compared_fragments, field_map): # type: ValidationContext
# type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]
# type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
# type: PairSet
# type: Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]]
# type: (...) -> None
'Collect all Conflicts "within" one collection of fields.'
# field map is a keyed collection, where each key represents a response
# name and the value at that key is a list of all fields which provide that
# response name. For every response name, if there are multiple fields, they
# must be compared to find a potential conflict.
for (response_name, fields) in list(field_map.items()):
# This compares every field in the list to every other field in this list
# (except to itself). If the list only has one item, nothing needs to
# be compared.
for (i, field) in enumerate(fields):
for other_field in fields[i + 1:]:
# within one collection is never mutually exclusive
conflict = _find_conflict(context, cached_fields_and_fragment_names, compared_fragments, False, response_name, field, other_field)
if conflict:
conflicts.append(conflict) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['other_field']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] |
def gill_king(mat, eps=1e-16):
"""
Gill-King algorithm for modified cholesky decomposition.
Args:
mat (numpy.ndarray):
Must be a non-singular and symmetric matrix. If sparse, the result
will also be sparse.
eps (float):
Error tolerance used in algorithm.
Returns:
(numpy.ndarray):
Lower triangular Cholesky factor.
Examples:
>>> mat = [[4, 2, 1], [2, 6, 3], [1, 3, -.004]]
>>> lowtri = gill_king(mat)
>>> print(numpy.around(lowtri, 4))
[[2. 0. 0. ]
[1. 2.2361 0. ]
[0.5 1.118 1.2264]]
>>> print(numpy.around(numpy.dot(lowtri, lowtri.T), 4))
[[4. 2. 1. ]
[2. 6. 3. ]
[1. 3. 3.004]]
"""
if not scipy.sparse.issparse(mat):
mat = numpy.asfarray(mat)
assert numpy.allclose(mat, mat.T)
size = mat.shape[0]
mat_diag = mat.diagonal()
gamma = abs(mat_diag).max()
off_diag = abs(mat - numpy.diag(mat_diag)).max()
delta = eps*max(gamma + off_diag, 1)
beta = numpy.sqrt(max(gamma, off_diag/size, eps))
lowtri = _gill_king(mat, beta, delta)
return lowtri | def function[gill_king, parameter[mat, eps]]:
constant[
Gill-King algorithm for modified cholesky decomposition.
Args:
mat (numpy.ndarray):
Must be a non-singular and symmetric matrix. If sparse, the result
will also be sparse.
eps (float):
Error tolerance used in algorithm.
Returns:
(numpy.ndarray):
Lower triangular Cholesky factor.
Examples:
>>> mat = [[4, 2, 1], [2, 6, 3], [1, 3, -.004]]
>>> lowtri = gill_king(mat)
>>> print(numpy.around(lowtri, 4))
[[2. 0. 0. ]
[1. 2.2361 0. ]
[0.5 1.118 1.2264]]
>>> print(numpy.around(numpy.dot(lowtri, lowtri.T), 4))
[[4. 2. 1. ]
[2. 6. 3. ]
[1. 3. 3.004]]
]
if <ast.UnaryOp object at 0x7da18f09d420> begin[:]
variable[mat] assign[=] call[name[numpy].asfarray, parameter[name[mat]]]
assert[call[name[numpy].allclose, parameter[name[mat], name[mat].T]]]
variable[size] assign[=] call[name[mat].shape][constant[0]]
variable[mat_diag] assign[=] call[name[mat].diagonal, parameter[]]
variable[gamma] assign[=] call[call[name[abs], parameter[name[mat_diag]]].max, parameter[]]
variable[off_diag] assign[=] call[call[name[abs], parameter[binary_operation[name[mat] - call[name[numpy].diag, parameter[name[mat_diag]]]]]].max, parameter[]]
variable[delta] assign[=] binary_operation[name[eps] * call[name[max], parameter[binary_operation[name[gamma] + name[off_diag]], constant[1]]]]
variable[beta] assign[=] call[name[numpy].sqrt, parameter[call[name[max], parameter[name[gamma], binary_operation[name[off_diag] / name[size]], name[eps]]]]]
variable[lowtri] assign[=] call[name[_gill_king], parameter[name[mat], name[beta], name[delta]]]
return[name[lowtri]] | keyword[def] identifier[gill_king] ( identifier[mat] , identifier[eps] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[scipy] . identifier[sparse] . identifier[issparse] ( identifier[mat] ):
identifier[mat] = identifier[numpy] . identifier[asfarray] ( identifier[mat] )
keyword[assert] identifier[numpy] . identifier[allclose] ( identifier[mat] , identifier[mat] . identifier[T] )
identifier[size] = identifier[mat] . identifier[shape] [ literal[int] ]
identifier[mat_diag] = identifier[mat] . identifier[diagonal] ()
identifier[gamma] = identifier[abs] ( identifier[mat_diag] ). identifier[max] ()
identifier[off_diag] = identifier[abs] ( identifier[mat] - identifier[numpy] . identifier[diag] ( identifier[mat_diag] )). identifier[max] ()
identifier[delta] = identifier[eps] * identifier[max] ( identifier[gamma] + identifier[off_diag] , literal[int] )
identifier[beta] = identifier[numpy] . identifier[sqrt] ( identifier[max] ( identifier[gamma] , identifier[off_diag] / identifier[size] , identifier[eps] ))
identifier[lowtri] = identifier[_gill_king] ( identifier[mat] , identifier[beta] , identifier[delta] )
keyword[return] identifier[lowtri] | def gill_king(mat, eps=1e-16):
"""
Gill-King algorithm for modified cholesky decomposition.
Args:
mat (numpy.ndarray):
Must be a non-singular and symmetric matrix. If sparse, the result
will also be sparse.
eps (float):
Error tolerance used in algorithm.
Returns:
(numpy.ndarray):
Lower triangular Cholesky factor.
Examples:
>>> mat = [[4, 2, 1], [2, 6, 3], [1, 3, -.004]]
>>> lowtri = gill_king(mat)
>>> print(numpy.around(lowtri, 4))
[[2. 0. 0. ]
[1. 2.2361 0. ]
[0.5 1.118 1.2264]]
>>> print(numpy.around(numpy.dot(lowtri, lowtri.T), 4))
[[4. 2. 1. ]
[2. 6. 3. ]
[1. 3. 3.004]]
"""
if not scipy.sparse.issparse(mat):
mat = numpy.asfarray(mat) # depends on [control=['if'], data=[]]
assert numpy.allclose(mat, mat.T)
size = mat.shape[0]
mat_diag = mat.diagonal()
gamma = abs(mat_diag).max()
off_diag = abs(mat - numpy.diag(mat_diag)).max()
delta = eps * max(gamma + off_diag, 1)
beta = numpy.sqrt(max(gamma, off_diag / size, eps))
lowtri = _gill_king(mat, beta, delta)
return lowtri |
def _expanded_sql(self):
"""Get the expanded BigQuery SQL string of this UDF
Returns
The expanded SQL string of this UDF
"""
if not self._sql:
self._sql = UDF._build_udf(self._name, self._code, self._return_type, self._params,
self._language, self._imports)
return self._sql | def function[_expanded_sql, parameter[self]]:
constant[Get the expanded BigQuery SQL string of this UDF
Returns
The expanded SQL string of this UDF
]
if <ast.UnaryOp object at 0x7da18ede6c50> begin[:]
name[self]._sql assign[=] call[name[UDF]._build_udf, parameter[name[self]._name, name[self]._code, name[self]._return_type, name[self]._params, name[self]._language, name[self]._imports]]
return[name[self]._sql] | keyword[def] identifier[_expanded_sql] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_sql] :
identifier[self] . identifier[_sql] = identifier[UDF] . identifier[_build_udf] ( identifier[self] . identifier[_name] , identifier[self] . identifier[_code] , identifier[self] . identifier[_return_type] , identifier[self] . identifier[_params] ,
identifier[self] . identifier[_language] , identifier[self] . identifier[_imports] )
keyword[return] identifier[self] . identifier[_sql] | def _expanded_sql(self):
"""Get the expanded BigQuery SQL string of this UDF
Returns
The expanded SQL string of this UDF
"""
if not self._sql:
self._sql = UDF._build_udf(self._name, self._code, self._return_type, self._params, self._language, self._imports) # depends on [control=['if'], data=[]]
return self._sql |
def load(language_dir, filename, encoding):
''' Open and return the supplied json file '''
global _DICTIONARY
try:
json_file = filename + '.json'
with io.open(os.path.join(language_dir, json_file), 'r', encoding=encoding) as f:
_DICTIONARY = json.load(f)
except IOError:
raise IOError('{0} Language file not found at location {1}. '
'Make sure that your translation file is in the '
'listed language directory'.format(filename.title(), language_dir)) | def function[load, parameter[language_dir, filename, encoding]]:
constant[ Open and return the supplied json file ]
<ast.Global object at 0x7da1b1f96e60>
<ast.Try object at 0x7da1b1f95000> | keyword[def] identifier[load] ( identifier[language_dir] , identifier[filename] , identifier[encoding] ):
literal[string]
keyword[global] identifier[_DICTIONARY]
keyword[try] :
identifier[json_file] = identifier[filename] + literal[string]
keyword[with] identifier[io] . identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[language_dir] , identifier[json_file] ), literal[string] , identifier[encoding] = identifier[encoding] ) keyword[as] identifier[f] :
identifier[_DICTIONARY] = identifier[json] . identifier[load] ( identifier[f] )
keyword[except] identifier[IOError] :
keyword[raise] identifier[IOError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[filename] . identifier[title] (), identifier[language_dir] )) | def load(language_dir, filename, encoding):
""" Open and return the supplied json file """
global _DICTIONARY
try:
json_file = filename + '.json'
with io.open(os.path.join(language_dir, json_file), 'r', encoding=encoding) as f:
_DICTIONARY = json.load(f) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except IOError:
raise IOError('{0} Language file not found at location {1}. Make sure that your translation file is in the listed language directory'.format(filename.title(), language_dir)) # depends on [control=['except'], data=[]] |
def get_object_or_404(queryset, *args, **kwargs):
""" replacement of rest_framework.generics and django.shrtcuts analogues """
try:
return queryset.get(*args, **kwargs)
except (ValueError, TypeError, DoesNotExist, ValidationError):
raise Http404() | def function[get_object_or_404, parameter[queryset]]:
constant[ replacement of rest_framework.generics and django.shrtcuts analogues ]
<ast.Try object at 0x7da1b08e4ee0> | keyword[def] identifier[get_object_or_404] ( identifier[queryset] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[return] identifier[queryset] . identifier[get] (* identifier[args] ,** identifier[kwargs] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] , identifier[DoesNotExist] , identifier[ValidationError] ):
keyword[raise] identifier[Http404] () | def get_object_or_404(queryset, *args, **kwargs):
""" replacement of rest_framework.generics and django.shrtcuts analogues """
try:
return queryset.get(*args, **kwargs) # depends on [control=['try'], data=[]]
except (ValueError, TypeError, DoesNotExist, ValidationError):
raise Http404() # depends on [control=['except'], data=[]] |
def get_items(self, sequence_id, gt=None, gte=None, lt=None, lte=None, limit=None,
query_ascending=True, results_ascending=True):
"""
Returns sequenced item generator.
"""
records = self.get_records(
sequence_id=sequence_id,
gt=gt,
gte=gte,
lt=lt,
lte=lte,
limit=limit,
query_ascending=query_ascending,
results_ascending=results_ascending,
)
for item in map(self.from_record, records):
yield item | def function[get_items, parameter[self, sequence_id, gt, gte, lt, lte, limit, query_ascending, results_ascending]]:
constant[
Returns sequenced item generator.
]
variable[records] assign[=] call[name[self].get_records, parameter[]]
for taget[name[item]] in starred[call[name[map], parameter[name[self].from_record, name[records]]]] begin[:]
<ast.Yield object at 0x7da1b1628d90> | keyword[def] identifier[get_items] ( identifier[self] , identifier[sequence_id] , identifier[gt] = keyword[None] , identifier[gte] = keyword[None] , identifier[lt] = keyword[None] , identifier[lte] = keyword[None] , identifier[limit] = keyword[None] ,
identifier[query_ascending] = keyword[True] , identifier[results_ascending] = keyword[True] ):
literal[string]
identifier[records] = identifier[self] . identifier[get_records] (
identifier[sequence_id] = identifier[sequence_id] ,
identifier[gt] = identifier[gt] ,
identifier[gte] = identifier[gte] ,
identifier[lt] = identifier[lt] ,
identifier[lte] = identifier[lte] ,
identifier[limit] = identifier[limit] ,
identifier[query_ascending] = identifier[query_ascending] ,
identifier[results_ascending] = identifier[results_ascending] ,
)
keyword[for] identifier[item] keyword[in] identifier[map] ( identifier[self] . identifier[from_record] , identifier[records] ):
keyword[yield] identifier[item] | def get_items(self, sequence_id, gt=None, gte=None, lt=None, lte=None, limit=None, query_ascending=True, results_ascending=True):
"""
Returns sequenced item generator.
"""
records = self.get_records(sequence_id=sequence_id, gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, query_ascending=query_ascending, results_ascending=results_ascending)
for item in map(self.from_record, records):
yield item # depends on [control=['for'], data=['item']] |
def configure_settings(settings, environment_settings=True):
'''
Given a settings object, run automatic configuration of all
the apps in INSTALLED_APPS.
'''
changes = 1
iterations = 0
while changes:
changes = 0
app_names = ['django_autoconfig'] + list(settings['INSTALLED_APPS'])
if environment_settings:
app_names.append('django_autoconfig.environment_settings')
for app_name in app_names:
import django_autoconfig.contrib
if autoconfig_module_exists(app_name):
module = importlib.import_module("%s.autoconfig" % (app_name,))
elif app_name in django_autoconfig.contrib.CONTRIB_CONFIGS:
module = django_autoconfig.contrib.CONTRIB_CONFIGS[app_name]
else:
continue
changes += merge_dictionaries(
settings,
getattr(module, 'SETTINGS', {}),
template_special_case=True,
)
changes += merge_dictionaries(
settings,
getattr(module, 'DEFAULT_SETTINGS', {}),
only_defaults=True,
)
for relationship in getattr(module, 'RELATIONSHIPS', []):
changes += relationship.apply_changes(settings)
if iterations >= MAX_ITERATIONS:
raise ImproperlyConfigured(
'Autoconfiguration could not reach a consistent state'
)
iterations += 1
LOGGER.debug("Autoconfiguration took %d iterations.", iterations) | def function[configure_settings, parameter[settings, environment_settings]]:
constant[
Given a settings object, run automatic configuration of all
the apps in INSTALLED_APPS.
]
variable[changes] assign[=] constant[1]
variable[iterations] assign[=] constant[0]
while name[changes] begin[:]
variable[changes] assign[=] constant[0]
variable[app_names] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18f811c00>]] + call[name[list], parameter[call[name[settings]][constant[INSTALLED_APPS]]]]]
if name[environment_settings] begin[:]
call[name[app_names].append, parameter[constant[django_autoconfig.environment_settings]]]
for taget[name[app_name]] in starred[name[app_names]] begin[:]
import module[django_autoconfig.contrib]
if call[name[autoconfig_module_exists], parameter[name[app_name]]] begin[:]
variable[module] assign[=] call[name[importlib].import_module, parameter[binary_operation[constant[%s.autoconfig] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f813400>]]]]]
<ast.AugAssign object at 0x7da18f8119f0>
<ast.AugAssign object at 0x7da18f810f10>
for taget[name[relationship]] in starred[call[name[getattr], parameter[name[module], constant[RELATIONSHIPS], list[[]]]]] begin[:]
<ast.AugAssign object at 0x7da1b0c499f0>
if compare[name[iterations] greater_or_equal[>=] name[MAX_ITERATIONS]] begin[:]
<ast.Raise object at 0x7da1b0c49e10>
<ast.AugAssign object at 0x7da1b0c49810>
call[name[LOGGER].debug, parameter[constant[Autoconfiguration took %d iterations.], name[iterations]]] | keyword[def] identifier[configure_settings] ( identifier[settings] , identifier[environment_settings] = keyword[True] ):
literal[string]
identifier[changes] = literal[int]
identifier[iterations] = literal[int]
keyword[while] identifier[changes] :
identifier[changes] = literal[int]
identifier[app_names] =[ literal[string] ]+ identifier[list] ( identifier[settings] [ literal[string] ])
keyword[if] identifier[environment_settings] :
identifier[app_names] . identifier[append] ( literal[string] )
keyword[for] identifier[app_name] keyword[in] identifier[app_names] :
keyword[import] identifier[django_autoconfig] . identifier[contrib]
keyword[if] identifier[autoconfig_module_exists] ( identifier[app_name] ):
identifier[module] = identifier[importlib] . identifier[import_module] ( literal[string] %( identifier[app_name] ,))
keyword[elif] identifier[app_name] keyword[in] identifier[django_autoconfig] . identifier[contrib] . identifier[CONTRIB_CONFIGS] :
identifier[module] = identifier[django_autoconfig] . identifier[contrib] . identifier[CONTRIB_CONFIGS] [ identifier[app_name] ]
keyword[else] :
keyword[continue]
identifier[changes] += identifier[merge_dictionaries] (
identifier[settings] ,
identifier[getattr] ( identifier[module] , literal[string] ,{}),
identifier[template_special_case] = keyword[True] ,
)
identifier[changes] += identifier[merge_dictionaries] (
identifier[settings] ,
identifier[getattr] ( identifier[module] , literal[string] ,{}),
identifier[only_defaults] = keyword[True] ,
)
keyword[for] identifier[relationship] keyword[in] identifier[getattr] ( identifier[module] , literal[string] ,[]):
identifier[changes] += identifier[relationship] . identifier[apply_changes] ( identifier[settings] )
keyword[if] identifier[iterations] >= identifier[MAX_ITERATIONS] :
keyword[raise] identifier[ImproperlyConfigured] (
literal[string]
)
identifier[iterations] += literal[int]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[iterations] ) | def configure_settings(settings, environment_settings=True):
"""
Given a settings object, run automatic configuration of all
the apps in INSTALLED_APPS.
"""
changes = 1
iterations = 0
while changes:
changes = 0
app_names = ['django_autoconfig'] + list(settings['INSTALLED_APPS'])
if environment_settings:
app_names.append('django_autoconfig.environment_settings') # depends on [control=['if'], data=[]]
for app_name in app_names:
import django_autoconfig.contrib
if autoconfig_module_exists(app_name):
module = importlib.import_module('%s.autoconfig' % (app_name,)) # depends on [control=['if'], data=[]]
elif app_name in django_autoconfig.contrib.CONTRIB_CONFIGS:
module = django_autoconfig.contrib.CONTRIB_CONFIGS[app_name] # depends on [control=['if'], data=['app_name']]
else:
continue
changes += merge_dictionaries(settings, getattr(module, 'SETTINGS', {}), template_special_case=True)
changes += merge_dictionaries(settings, getattr(module, 'DEFAULT_SETTINGS', {}), only_defaults=True)
for relationship in getattr(module, 'RELATIONSHIPS', []):
changes += relationship.apply_changes(settings) # depends on [control=['for'], data=['relationship']] # depends on [control=['for'], data=['app_name']]
if iterations >= MAX_ITERATIONS:
raise ImproperlyConfigured('Autoconfiguration could not reach a consistent state') # depends on [control=['if'], data=[]]
iterations += 1 # depends on [control=['while'], data=[]]
LOGGER.debug('Autoconfiguration took %d iterations.', iterations) |
def _validate_unarmor(self, certs, var_name):
"""
Takes a list of byte strings or asn1crypto.x509.Certificates objects,
validates and loads them while unarmoring any PEM-encoded contents
:param certs:
A list of byte strings or asn1crypto.x509.Certificate objects
:param var_name:
A unicode variable name to use in any TypeError exceptions
:return:
A list of asn1crypto.x509.Certificate objects
"""
output = []
for cert in certs:
if isinstance(cert, x509.Certificate):
output.append(cert)
else:
if not isinstance(cert, byte_cls):
raise TypeError(pretty_message(
'''
%s must contain only byte strings or
asn1crypto.x509.Certificate objects, not %s
''',
var_name,
type_name(cert)
))
if pem.detect(cert):
_, _, cert = pem.unarmor(cert)
output.append(x509.Certificate.load(cert))
return output | def function[_validate_unarmor, parameter[self, certs, var_name]]:
constant[
Takes a list of byte strings or asn1crypto.x509.Certificates objects,
validates and loads them while unarmoring any PEM-encoded contents
:param certs:
A list of byte strings or asn1crypto.x509.Certificate objects
:param var_name:
A unicode variable name to use in any TypeError exceptions
:return:
A list of asn1crypto.x509.Certificate objects
]
variable[output] assign[=] list[[]]
for taget[name[cert]] in starred[name[certs]] begin[:]
if call[name[isinstance], parameter[name[cert], name[x509].Certificate]] begin[:]
call[name[output].append, parameter[name[cert]]]
return[name[output]] | keyword[def] identifier[_validate_unarmor] ( identifier[self] , identifier[certs] , identifier[var_name] ):
literal[string]
identifier[output] =[]
keyword[for] identifier[cert] keyword[in] identifier[certs] :
keyword[if] identifier[isinstance] ( identifier[cert] , identifier[x509] . identifier[Certificate] ):
identifier[output] . identifier[append] ( identifier[cert] )
keyword[else] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[cert] , identifier[byte_cls] ):
keyword[raise] identifier[TypeError] ( identifier[pretty_message] (
literal[string] ,
identifier[var_name] ,
identifier[type_name] ( identifier[cert] )
))
keyword[if] identifier[pem] . identifier[detect] ( identifier[cert] ):
identifier[_] , identifier[_] , identifier[cert] = identifier[pem] . identifier[unarmor] ( identifier[cert] )
identifier[output] . identifier[append] ( identifier[x509] . identifier[Certificate] . identifier[load] ( identifier[cert] ))
keyword[return] identifier[output] | def _validate_unarmor(self, certs, var_name):
"""
Takes a list of byte strings or asn1crypto.x509.Certificates objects,
validates and loads them while unarmoring any PEM-encoded contents
:param certs:
A list of byte strings or asn1crypto.x509.Certificate objects
:param var_name:
A unicode variable name to use in any TypeError exceptions
:return:
A list of asn1crypto.x509.Certificate objects
"""
output = []
for cert in certs:
if isinstance(cert, x509.Certificate):
output.append(cert) # depends on [control=['if'], data=[]]
else:
if not isinstance(cert, byte_cls):
raise TypeError(pretty_message('\n %s must contain only byte strings or\n asn1crypto.x509.Certificate objects, not %s\n ', var_name, type_name(cert))) # depends on [control=['if'], data=[]]
if pem.detect(cert):
(_, _, cert) = pem.unarmor(cert) # depends on [control=['if'], data=[]]
output.append(x509.Certificate.load(cert)) # depends on [control=['for'], data=['cert']]
return output |
def get_flair_choices(self, *args, **kwargs):
"""Return available link flair choices and current flair.
Convenience function for
:meth:`~.AuthenticatedReddit.get_flair_choices` populating both the
`subreddit` and `link` parameters.
:returns: The json response from the server.
"""
return self.subreddit.get_flair_choices(self.fullname, *args, **kwargs) | def function[get_flair_choices, parameter[self]]:
constant[Return available link flair choices and current flair.
Convenience function for
:meth:`~.AuthenticatedReddit.get_flair_choices` populating both the
`subreddit` and `link` parameters.
:returns: The json response from the server.
]
return[call[name[self].subreddit.get_flair_choices, parameter[name[self].fullname, <ast.Starred object at 0x7da20c76ca00>]]] | keyword[def] identifier[get_flair_choices] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[subreddit] . identifier[get_flair_choices] ( identifier[self] . identifier[fullname] ,* identifier[args] ,** identifier[kwargs] ) | def get_flair_choices(self, *args, **kwargs):
"""Return available link flair choices and current flair.
Convenience function for
:meth:`~.AuthenticatedReddit.get_flair_choices` populating both the
`subreddit` and `link` parameters.
:returns: The json response from the server.
"""
return self.subreddit.get_flair_choices(self.fullname, *args, **kwargs) |
def facility(self, column=None, value=None, **kwargs):
"""
Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA')
"""
return self._resolve_call('RAD_FACILITY', column, value, **kwargs) | def function[facility, parameter[self, column, value]]:
constant[
Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA')
]
return[call[name[self]._resolve_call, parameter[constant[RAD_FACILITY], name[column], name[value]]]] | keyword[def] identifier[facility] ( identifier[self] , identifier[column] = keyword[None] , identifier[value] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_resolve_call] ( literal[string] , identifier[column] , identifier[value] ,** identifier[kwargs] ) | def facility(self, column=None, value=None, **kwargs):
"""
Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA')
"""
return self._resolve_call('RAD_FACILITY', column, value, **kwargs) |
def l2n(l, c):
"host to network long"
c = []
c.append(int((l >> 24) & U32(0xFF)))
c.append(int((l >> 16) & U32(0xFF)))
c.append(int((l >> 8) & U32(0xFF)))
c.append(int((l ) & U32(0xFF)))
return c | def function[l2n, parameter[l, c]]:
constant[host to network long]
variable[c] assign[=] list[[]]
call[name[c].append, parameter[call[name[int], parameter[binary_operation[binary_operation[name[l] <ast.RShift object at 0x7da2590d6a40> constant[24]] <ast.BitAnd object at 0x7da2590d6b60> call[name[U32], parameter[constant[255]]]]]]]]
call[name[c].append, parameter[call[name[int], parameter[binary_operation[binary_operation[name[l] <ast.RShift object at 0x7da2590d6a40> constant[16]] <ast.BitAnd object at 0x7da2590d6b60> call[name[U32], parameter[constant[255]]]]]]]]
call[name[c].append, parameter[call[name[int], parameter[binary_operation[binary_operation[name[l] <ast.RShift object at 0x7da2590d6a40> constant[8]] <ast.BitAnd object at 0x7da2590d6b60> call[name[U32], parameter[constant[255]]]]]]]]
call[name[c].append, parameter[call[name[int], parameter[binary_operation[name[l] <ast.BitAnd object at 0x7da2590d6b60> call[name[U32], parameter[constant[255]]]]]]]]
return[name[c]] | keyword[def] identifier[l2n] ( identifier[l] , identifier[c] ):
literal[string]
identifier[c] =[]
identifier[c] . identifier[append] ( identifier[int] (( identifier[l] >> literal[int] )& identifier[U32] ( literal[int] )))
identifier[c] . identifier[append] ( identifier[int] (( identifier[l] >> literal[int] )& identifier[U32] ( literal[int] )))
identifier[c] . identifier[append] ( identifier[int] (( identifier[l] >> literal[int] )& identifier[U32] ( literal[int] )))
identifier[c] . identifier[append] ( identifier[int] (( identifier[l] )& identifier[U32] ( literal[int] )))
keyword[return] identifier[c] | def l2n(l, c):
"""host to network long"""
c = []
c.append(int(l >> 24 & U32(255)))
c.append(int(l >> 16 & U32(255)))
c.append(int(l >> 8 & U32(255)))
c.append(int(l & U32(255)))
return c |
def p_program_def(t):
"""program_def : PROGRAM ID LBRACE version_def version_def_list RBRACE EQUALS constant SEMI"""
print("Ignoring program {0:s} = {1:s}".format(t[2], t[8]))
global name_dict
id = t[2]
value = t[8]
lineno = t.lineno(1)
if id_unique(id, 'program', lineno):
name_dict[id] = const_info(id, value, lineno) | def function[p_program_def, parameter[t]]:
constant[program_def : PROGRAM ID LBRACE version_def version_def_list RBRACE EQUALS constant SEMI]
call[name[print], parameter[call[constant[Ignoring program {0:s} = {1:s}].format, parameter[call[name[t]][constant[2]], call[name[t]][constant[8]]]]]]
<ast.Global object at 0x7da18dc9aef0>
variable[id] assign[=] call[name[t]][constant[2]]
variable[value] assign[=] call[name[t]][constant[8]]
variable[lineno] assign[=] call[name[t].lineno, parameter[constant[1]]]
if call[name[id_unique], parameter[name[id], constant[program], name[lineno]]] begin[:]
call[name[name_dict]][name[id]] assign[=] call[name[const_info], parameter[name[id], name[value], name[lineno]]] | keyword[def] identifier[p_program_def] ( identifier[t] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] ( identifier[t] [ literal[int] ], identifier[t] [ literal[int] ]))
keyword[global] identifier[name_dict]
identifier[id] = identifier[t] [ literal[int] ]
identifier[value] = identifier[t] [ literal[int] ]
identifier[lineno] = identifier[t] . identifier[lineno] ( literal[int] )
keyword[if] identifier[id_unique] ( identifier[id] , literal[string] , identifier[lineno] ):
identifier[name_dict] [ identifier[id] ]= identifier[const_info] ( identifier[id] , identifier[value] , identifier[lineno] ) | def p_program_def(t):
"""program_def : PROGRAM ID LBRACE version_def version_def_list RBRACE EQUALS constant SEMI"""
print('Ignoring program {0:s} = {1:s}'.format(t[2], t[8]))
global name_dict
id = t[2]
value = t[8]
lineno = t.lineno(1)
if id_unique(id, 'program', lineno):
name_dict[id] = const_info(id, value, lineno) # depends on [control=['if'], data=[]] |
def export(self, class_name, method_name, export_data=False,
export_dir='.', export_filename='data.json',
export_append_checksum=False, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
# Basic parameters:
self.metric = est.metric
self.n_classes = len(est.classes_)
self.n_templates = len(est._fit_X) # pylint: disable=W0212
self.n_features = len(est._fit_X[0]) # pylint: disable=W0212
self.n_neighbors = est.n_neighbors
self.algorithm = est.algorithm
self.power_param = est.p
if self.algorithm != 'brute':
from sklearn.neighbors.kd_tree import KDTree # pylint: disable-msg=E0611
from sklearn.neighbors.ball_tree import BallTree # pylint: disable-msg=E0611
tree = est._tree # pylint: disable=W0212
if isinstance(tree, (KDTree, BallTree)):
self.tree = tree
if self.target_method == 'predict':
# Exported:
if export_data and os.path.isdir(export_dir):
self.export_data(export_dir, export_filename,
export_append_checksum)
return self.predict('exported')
# Separated:
return self.predict('separated') | def function[export, parameter[self, class_name, method_name, export_data, export_dir, export_filename, export_append_checksum]]:
constant[
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
]
name[self].class_name assign[=] name[class_name]
name[self].method_name assign[=] name[method_name]
variable[est] assign[=] name[self].estimator
name[self].metric assign[=] name[est].metric
name[self].n_classes assign[=] call[name[len], parameter[name[est].classes_]]
name[self].n_templates assign[=] call[name[len], parameter[name[est]._fit_X]]
name[self].n_features assign[=] call[name[len], parameter[call[name[est]._fit_X][constant[0]]]]
name[self].n_neighbors assign[=] name[est].n_neighbors
name[self].algorithm assign[=] name[est].algorithm
name[self].power_param assign[=] name[est].p
if compare[name[self].algorithm not_equal[!=] constant[brute]] begin[:]
from relative_module[sklearn.neighbors.kd_tree] import module[KDTree]
from relative_module[sklearn.neighbors.ball_tree] import module[BallTree]
variable[tree] assign[=] name[est]._tree
if call[name[isinstance], parameter[name[tree], tuple[[<ast.Name object at 0x7da1b19800d0>, <ast.Name object at 0x7da1b19821a0>]]]] begin[:]
name[self].tree assign[=] name[tree]
if compare[name[self].target_method equal[==] constant[predict]] begin[:]
if <ast.BoolOp object at 0x7da1b19824a0> begin[:]
call[name[self].export_data, parameter[name[export_dir], name[export_filename], name[export_append_checksum]]]
return[call[name[self].predict, parameter[constant[exported]]]]
return[call[name[self].predict, parameter[constant[separated]]]] | keyword[def] identifier[export] ( identifier[self] , identifier[class_name] , identifier[method_name] , identifier[export_data] = keyword[False] ,
identifier[export_dir] = literal[string] , identifier[export_filename] = literal[string] ,
identifier[export_append_checksum] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[class_name] = identifier[class_name]
identifier[self] . identifier[method_name] = identifier[method_name]
identifier[est] = identifier[self] . identifier[estimator]
identifier[self] . identifier[metric] = identifier[est] . identifier[metric]
identifier[self] . identifier[n_classes] = identifier[len] ( identifier[est] . identifier[classes_] )
identifier[self] . identifier[n_templates] = identifier[len] ( identifier[est] . identifier[_fit_X] )
identifier[self] . identifier[n_features] = identifier[len] ( identifier[est] . identifier[_fit_X] [ literal[int] ])
identifier[self] . identifier[n_neighbors] = identifier[est] . identifier[n_neighbors]
identifier[self] . identifier[algorithm] = identifier[est] . identifier[algorithm]
identifier[self] . identifier[power_param] = identifier[est] . identifier[p]
keyword[if] identifier[self] . identifier[algorithm] != literal[string] :
keyword[from] identifier[sklearn] . identifier[neighbors] . identifier[kd_tree] keyword[import] identifier[KDTree]
keyword[from] identifier[sklearn] . identifier[neighbors] . identifier[ball_tree] keyword[import] identifier[BallTree]
identifier[tree] = identifier[est] . identifier[_tree]
keyword[if] identifier[isinstance] ( identifier[tree] ,( identifier[KDTree] , identifier[BallTree] )):
identifier[self] . identifier[tree] = identifier[tree]
keyword[if] identifier[self] . identifier[target_method] == literal[string] :
keyword[if] identifier[export_data] keyword[and] identifier[os] . identifier[path] . identifier[isdir] ( identifier[export_dir] ):
identifier[self] . identifier[export_data] ( identifier[export_dir] , identifier[export_filename] ,
identifier[export_append_checksum] )
keyword[return] identifier[self] . identifier[predict] ( literal[string] )
keyword[return] identifier[self] . identifier[predict] ( literal[string] ) | def export(self, class_name, method_name, export_data=False, export_dir='.', export_filename='data.json', export_append_checksum=False, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
# Basic parameters:
self.metric = est.metric
self.n_classes = len(est.classes_)
self.n_templates = len(est._fit_X) # pylint: disable=W0212
self.n_features = len(est._fit_X[0]) # pylint: disable=W0212
self.n_neighbors = est.n_neighbors
self.algorithm = est.algorithm
self.power_param = est.p
if self.algorithm != 'brute':
from sklearn.neighbors.kd_tree import KDTree # pylint: disable-msg=E0611
from sklearn.neighbors.ball_tree import BallTree # pylint: disable-msg=E0611
tree = est._tree # pylint: disable=W0212
if isinstance(tree, (KDTree, BallTree)):
self.tree = tree # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.target_method == 'predict':
# Exported:
if export_data and os.path.isdir(export_dir):
self.export_data(export_dir, export_filename, export_append_checksum)
return self.predict('exported') # depends on [control=['if'], data=[]]
# Separated:
return self.predict('separated') # depends on [control=['if'], data=[]] |
def squad(self, squad_id=0, persona_id=None):
"""Return a squad.
:params squad_id: Squad id.
"""
method = 'GET'
url = 'squad/%s/user/%s' % (squad_id, persona_id or self.persona_id)
# pinEvents
events = [self.pin.event('page_view', 'Hub - Squads')]
self.pin.send(events)
# TODO: ability to return other info than players only
rc = self.__request__(method, url)
# pinEvents
events = [self.pin.event('page_view', 'Squad Details'), self.pin.event('page_view', 'Squads - Squad Overview')]
self.pin.send(events)
return [itemParse(i) for i in rc.get('players', ())] | def function[squad, parameter[self, squad_id, persona_id]]:
constant[Return a squad.
:params squad_id: Squad id.
]
variable[method] assign[=] constant[GET]
variable[url] assign[=] binary_operation[constant[squad/%s/user/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2049635b0>, <ast.BoolOp object at 0x7da204962950>]]]
variable[events] assign[=] list[[<ast.Call object at 0x7da204961cf0>]]
call[name[self].pin.send, parameter[name[events]]]
variable[rc] assign[=] call[name[self].__request__, parameter[name[method], name[url]]]
variable[events] assign[=] list[[<ast.Call object at 0x7da1b014e710>, <ast.Call object at 0x7da1b014c1f0>]]
call[name[self].pin.send, parameter[name[events]]]
return[<ast.ListComp object at 0x7da1b014e9b0>] | keyword[def] identifier[squad] ( identifier[self] , identifier[squad_id] = literal[int] , identifier[persona_id] = keyword[None] ):
literal[string]
identifier[method] = literal[string]
identifier[url] = literal[string] %( identifier[squad_id] , identifier[persona_id] keyword[or] identifier[self] . identifier[persona_id] )
identifier[events] =[ identifier[self] . identifier[pin] . identifier[event] ( literal[string] , literal[string] )]
identifier[self] . identifier[pin] . identifier[send] ( identifier[events] )
identifier[rc] = identifier[self] . identifier[__request__] ( identifier[method] , identifier[url] )
identifier[events] =[ identifier[self] . identifier[pin] . identifier[event] ( literal[string] , literal[string] ), identifier[self] . identifier[pin] . identifier[event] ( literal[string] , literal[string] )]
identifier[self] . identifier[pin] . identifier[send] ( identifier[events] )
keyword[return] [ identifier[itemParse] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[rc] . identifier[get] ( literal[string] ,())] | def squad(self, squad_id=0, persona_id=None):
"""Return a squad.
:params squad_id: Squad id.
"""
method = 'GET'
url = 'squad/%s/user/%s' % (squad_id, persona_id or self.persona_id)
# pinEvents
events = [self.pin.event('page_view', 'Hub - Squads')]
self.pin.send(events)
# TODO: ability to return other info than players only
rc = self.__request__(method, url)
# pinEvents
events = [self.pin.event('page_view', 'Squad Details'), self.pin.event('page_view', 'Squads - Squad Overview')]
self.pin.send(events)
return [itemParse(i) for i in rc.get('players', ())] |
def spawn(self, *targets: Callable, count: int = 1, **process_kwargs):
r"""
Produce one or many child process(s) bound to this context.
:param \*targets:
Passed on to the :py:class:`Process` constructor, one at a time.
:param count:
The number of processes to spawn for each item in ``targets``.
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return:
A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
"""
if not targets:
def wrapper(target: Callable):
return self.spawn(target, count=count, **process_kwargs)
return wrapper
if len(targets) * count == 1:
return self._process(targets[0], **process_kwargs)
return ProcessList(
self._process(target, **process_kwargs)
for _ in range(count)
for target in targets
) | def function[spawn, parameter[self]]:
constant[
Produce one or many child process(s) bound to this context.
:param \*targets:
Passed on to the :py:class:`Process` constructor, one at a time.
:param count:
The number of processes to spawn for each item in ``targets``.
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return:
A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
]
if <ast.UnaryOp object at 0x7da1b0504ee0> begin[:]
def function[wrapper, parameter[target]]:
return[call[name[self].spawn, parameter[name[target]]]]
return[name[wrapper]]
if compare[binary_operation[call[name[len], parameter[name[targets]]] * name[count]] equal[==] constant[1]] begin[:]
return[call[name[self]._process, parameter[call[name[targets]][constant[0]]]]]
return[call[name[ProcessList], parameter[<ast.GeneratorExp object at 0x7da1b0505e70>]]] | keyword[def] identifier[spawn] ( identifier[self] ,* identifier[targets] : identifier[Callable] , identifier[count] : identifier[int] = literal[int] ,** identifier[process_kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[targets] :
keyword[def] identifier[wrapper] ( identifier[target] : identifier[Callable] ):
keyword[return] identifier[self] . identifier[spawn] ( identifier[target] , identifier[count] = identifier[count] ,** identifier[process_kwargs] )
keyword[return] identifier[wrapper]
keyword[if] identifier[len] ( identifier[targets] )* identifier[count] == literal[int] :
keyword[return] identifier[self] . identifier[_process] ( identifier[targets] [ literal[int] ],** identifier[process_kwargs] )
keyword[return] identifier[ProcessList] (
identifier[self] . identifier[_process] ( identifier[target] ,** identifier[process_kwargs] )
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[count] )
keyword[for] identifier[target] keyword[in] identifier[targets]
) | def spawn(self, *targets: Callable, count: int=1, **process_kwargs):
"""
Produce one or many child process(s) bound to this context.
:param \\*targets:
Passed on to the :py:class:`Process` constructor, one at a time.
:param count:
The number of processes to spawn for each item in ``targets``.
:param \\*\\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return:
A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
"""
if not targets:
def wrapper(target: Callable):
return self.spawn(target, count=count, **process_kwargs)
return wrapper # depends on [control=['if'], data=[]]
if len(targets) * count == 1:
return self._process(targets[0], **process_kwargs) # depends on [control=['if'], data=[]]
return ProcessList((self._process(target, **process_kwargs) for _ in range(count) for target in targets)) |
def make_cidr(gw, mask):
"""Create network address in CIDR format.
Return network address for a given gateway address and netmask.
"""
try:
int_mask = (0xFFFFFFFF << (32 - int(mask))) & 0xFFFFFFFF
gw_addr_int = struct.unpack('>L', socket.inet_aton(gw))[0] & int_mask
return (socket.inet_ntoa(struct.pack("!I", gw_addr_int)) +
'/' + str(mask))
except (socket.error, struct.error, ValueError, TypeError):
return | def function[make_cidr, parameter[gw, mask]]:
constant[Create network address in CIDR format.
Return network address for a given gateway address and netmask.
]
<ast.Try object at 0x7da1b1b35e70> | keyword[def] identifier[make_cidr] ( identifier[gw] , identifier[mask] ):
literal[string]
keyword[try] :
identifier[int_mask] =( literal[int] <<( literal[int] - identifier[int] ( identifier[mask] )))& literal[int]
identifier[gw_addr_int] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[socket] . identifier[inet_aton] ( identifier[gw] ))[ literal[int] ]& identifier[int_mask]
keyword[return] ( identifier[socket] . identifier[inet_ntoa] ( identifier[struct] . identifier[pack] ( literal[string] , identifier[gw_addr_int] ))+
literal[string] + identifier[str] ( identifier[mask] ))
keyword[except] ( identifier[socket] . identifier[error] , identifier[struct] . identifier[error] , identifier[ValueError] , identifier[TypeError] ):
keyword[return] | def make_cidr(gw, mask):
"""Create network address in CIDR format.
Return network address for a given gateway address and netmask.
"""
try:
int_mask = 4294967295 << 32 - int(mask) & 4294967295
gw_addr_int = struct.unpack('>L', socket.inet_aton(gw))[0] & int_mask
return socket.inet_ntoa(struct.pack('!I', gw_addr_int)) + '/' + str(mask) # depends on [control=['try'], data=[]]
except (socket.error, struct.error, ValueError, TypeError):
return # depends on [control=['except'], data=[]] |
def has_device_info(self, key):
"""Return true iff cache has information about the device."""
if _debug: DeviceInfoCache._debug("has_device_info %r", key)
return key in self.cache | def function[has_device_info, parameter[self, key]]:
constant[Return true iff cache has information about the device.]
if name[_debug] begin[:]
call[name[DeviceInfoCache]._debug, parameter[constant[has_device_info %r], name[key]]]
return[compare[name[key] in name[self].cache]] | keyword[def] identifier[has_device_info] ( identifier[self] , identifier[key] ):
literal[string]
keyword[if] identifier[_debug] : identifier[DeviceInfoCache] . identifier[_debug] ( literal[string] , identifier[key] )
keyword[return] identifier[key] keyword[in] identifier[self] . identifier[cache] | def has_device_info(self, key):
"""Return true iff cache has information about the device."""
if _debug:
DeviceInfoCache._debug('has_device_info %r', key) # depends on [control=['if'], data=[]]
return key in self.cache |
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results | def function[is_ready, parameter[self, key]]:
constant[Returns true if a result is available for ``key``.]
if <ast.BoolOp object at 0x7da2041dafb0> begin[:]
<ast.Raise object at 0x7da2041d83d0>
return[compare[name[key] in name[self].results]] | keyword[def] identifier[is_ready] ( identifier[self] , identifier[key] ):
literal[string]
keyword[if] identifier[self] . identifier[pending_callbacks] keyword[is] keyword[None] keyword[or] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[pending_callbacks] :
keyword[raise] identifier[UnknownKeyError] ( literal[string] %( identifier[key] ,))
keyword[return] identifier[key] keyword[in] identifier[self] . identifier[results] | def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError('key %r is not pending' % (key,)) # depends on [control=['if'], data=[]]
return key in self.results |
def _get_attribute_info(self):
""" Preprocess the training dataset to identify which features/attributes are discrete vs. continuous valued. Ignores missing values in this determination."""
attr = dict()
d = 0
limit = self.discrete_threshold
w = self._X.transpose()
for idx in range(len(w)):
h = self._headers[idx]
z = w[idx]
if self._missing_data_count > 0:
z = z[np.logical_not(np.isnan(z))] # Exclude any missing values from consideration
zlen = len(np.unique(z))
if zlen <= limit:
attr[h] = ('discrete', 0, 0, 0, 0)
d += 1
else:
mx = np.max(z)
mn = np.min(z)
sd = np.std(z)
attr[h] = ('continuous', mx, mn, mx - mn, sd)
# For each feature/attribute we store (type, max value, min value, max min difference, average, standard deviation) - the latter three values are set to zero if feature is discrete.
return attr | def function[_get_attribute_info, parameter[self]]:
constant[ Preprocess the training dataset to identify which features/attributes are discrete vs. continuous valued. Ignores missing values in this determination.]
variable[attr] assign[=] call[name[dict], parameter[]]
variable[d] assign[=] constant[0]
variable[limit] assign[=] name[self].discrete_threshold
variable[w] assign[=] call[name[self]._X.transpose, parameter[]]
for taget[name[idx]] in starred[call[name[range], parameter[call[name[len], parameter[name[w]]]]]] begin[:]
variable[h] assign[=] call[name[self]._headers][name[idx]]
variable[z] assign[=] call[name[w]][name[idx]]
if compare[name[self]._missing_data_count greater[>] constant[0]] begin[:]
variable[z] assign[=] call[name[z]][call[name[np].logical_not, parameter[call[name[np].isnan, parameter[name[z]]]]]]
variable[zlen] assign[=] call[name[len], parameter[call[name[np].unique, parameter[name[z]]]]]
if compare[name[zlen] less_or_equal[<=] name[limit]] begin[:]
call[name[attr]][name[h]] assign[=] tuple[[<ast.Constant object at 0x7da1b0c500a0>, <ast.Constant object at 0x7da1b0c51150>, <ast.Constant object at 0x7da1b0c50760>, <ast.Constant object at 0x7da1b0c528c0>, <ast.Constant object at 0x7da1b0c52b00>]]
<ast.AugAssign object at 0x7da1b0c52d70>
return[name[attr]] | keyword[def] identifier[_get_attribute_info] ( identifier[self] ):
literal[string]
identifier[attr] = identifier[dict] ()
identifier[d] = literal[int]
identifier[limit] = identifier[self] . identifier[discrete_threshold]
identifier[w] = identifier[self] . identifier[_X] . identifier[transpose] ()
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[len] ( identifier[w] )):
identifier[h] = identifier[self] . identifier[_headers] [ identifier[idx] ]
identifier[z] = identifier[w] [ identifier[idx] ]
keyword[if] identifier[self] . identifier[_missing_data_count] > literal[int] :
identifier[z] = identifier[z] [ identifier[np] . identifier[logical_not] ( identifier[np] . identifier[isnan] ( identifier[z] ))]
identifier[zlen] = identifier[len] ( identifier[np] . identifier[unique] ( identifier[z] ))
keyword[if] identifier[zlen] <= identifier[limit] :
identifier[attr] [ identifier[h] ]=( literal[string] , literal[int] , literal[int] , literal[int] , literal[int] )
identifier[d] += literal[int]
keyword[else] :
identifier[mx] = identifier[np] . identifier[max] ( identifier[z] )
identifier[mn] = identifier[np] . identifier[min] ( identifier[z] )
identifier[sd] = identifier[np] . identifier[std] ( identifier[z] )
identifier[attr] [ identifier[h] ]=( literal[string] , identifier[mx] , identifier[mn] , identifier[mx] - identifier[mn] , identifier[sd] )
keyword[return] identifier[attr] | def _get_attribute_info(self):
""" Preprocess the training dataset to identify which features/attributes are discrete vs. continuous valued. Ignores missing values in this determination."""
attr = dict()
d = 0
limit = self.discrete_threshold
w = self._X.transpose()
for idx in range(len(w)):
h = self._headers[idx]
z = w[idx]
if self._missing_data_count > 0:
z = z[np.logical_not(np.isnan(z))] # Exclude any missing values from consideration # depends on [control=['if'], data=[]]
zlen = len(np.unique(z))
if zlen <= limit:
attr[h] = ('discrete', 0, 0, 0, 0)
d += 1 # depends on [control=['if'], data=[]]
else:
mx = np.max(z)
mn = np.min(z)
sd = np.std(z)
attr[h] = ('continuous', mx, mn, mx - mn, sd) # depends on [control=['for'], data=['idx']]
# For each feature/attribute we store (type, max value, min value, max min difference, average, standard deviation) - the latter three values are set to zero if feature is discrete.
return attr |
def GetDateRangeWithOrigins(self):
"""Returns a tuple of (earliest, latest, earliest_origin, latest_origin)
dates on which the service periods in the schedule define service, in
YYYYMMDD form.
The origins specify where the earliest or latest dates come from. In
particular, whether the date is a regular ServicePeriod start_date or
end_date in calendar.txt, a service exception of type add in
calendar_dates.txt, or feed start/end date defined in feed_info.txt.
"""
period_list = self.GetServicePeriodList()
ranges = [period.GetDateRange() for period in period_list]
starts = filter(lambda x: x, [item[0] for item in ranges])
ends = filter(lambda x: x, [item[1] for item in ranges])
if not starts or not ends:
return (None, None, None, None)
minvalue, minindex = min(itertools.izip(starts, itertools.count()))
maxvalue, maxindex = max(itertools.izip(ends, itertools.count()))
minreason = (period_list[minindex].HasDateExceptionOn(minvalue) and
"earliest service exception date in calendar_dates.txt" or
"earliest service date in calendar.txt")
maxreason = (period_list[maxindex].HasDateExceptionOn(maxvalue) and
"last service exception date in calendar_dates.txt" or
"last service date in calendar.txt")
# Override with feed_info.txt feed_start_date and feed_end_date values, if
# defined
if self.feed_info and self.feed_info.feed_start_date:
minvalue = self.feed_info.feed_start_date
minreason = "feed_start_date in feed_info.txt"
if self.feed_info and self.feed_info.feed_end_date:
maxvalue = self.feed_info.feed_end_date
maxreason = "feed_end_date in feed_info.txt"
return (minvalue, maxvalue, minreason, maxreason) | def function[GetDateRangeWithOrigins, parameter[self]]:
constant[Returns a tuple of (earliest, latest, earliest_origin, latest_origin)
dates on which the service periods in the schedule define service, in
YYYYMMDD form.
The origins specify where the earliest or latest dates come from. In
particular, whether the date is a regular ServicePeriod start_date or
end_date in calendar.txt, a service exception of type add in
calendar_dates.txt, or feed start/end date defined in feed_info.txt.
]
variable[period_list] assign[=] call[name[self].GetServicePeriodList, parameter[]]
variable[ranges] assign[=] <ast.ListComp object at 0x7da1b17e4c10>
variable[starts] assign[=] call[name[filter], parameter[<ast.Lambda object at 0x7da1b17e4520>, <ast.ListComp object at 0x7da1b17e5d80>]]
variable[ends] assign[=] call[name[filter], parameter[<ast.Lambda object at 0x7da1b17e46d0>, <ast.ListComp object at 0x7da1b17e65c0>]]
if <ast.BoolOp object at 0x7da1b17e6bf0> begin[:]
return[tuple[[<ast.Constant object at 0x7da1b17e5900>, <ast.Constant object at 0x7da1b17e7eb0>, <ast.Constant object at 0x7da1b17e6ec0>, <ast.Constant object at 0x7da1b17e6ef0>]]]
<ast.Tuple object at 0x7da1b17e4fa0> assign[=] call[name[min], parameter[call[name[itertools].izip, parameter[name[starts], call[name[itertools].count, parameter[]]]]]]
<ast.Tuple object at 0x7da1b1783010> assign[=] call[name[max], parameter[call[name[itertools].izip, parameter[name[ends], call[name[itertools].count, parameter[]]]]]]
variable[minreason] assign[=] <ast.BoolOp object at 0x7da1b17d9c00>
variable[maxreason] assign[=] <ast.BoolOp object at 0x7da1b17db730>
if <ast.BoolOp object at 0x7da1b17da3e0> begin[:]
variable[minvalue] assign[=] name[self].feed_info.feed_start_date
variable[minreason] assign[=] constant[feed_start_date in feed_info.txt]
if <ast.BoolOp object at 0x7da1b17d9d50> begin[:]
variable[maxvalue] assign[=] name[self].feed_info.feed_end_date
variable[maxreason] assign[=] constant[feed_end_date in feed_info.txt]
return[tuple[[<ast.Name object at 0x7da1b17d92d0>, <ast.Name object at 0x7da1b17dac50>, <ast.Name object at 0x7da1b17db430>, <ast.Name object at 0x7da1b17da770>]]] | keyword[def] identifier[GetDateRangeWithOrigins] ( identifier[self] ):
literal[string]
identifier[period_list] = identifier[self] . identifier[GetServicePeriodList] ()
identifier[ranges] =[ identifier[period] . identifier[GetDateRange] () keyword[for] identifier[period] keyword[in] identifier[period_list] ]
identifier[starts] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] ,[ identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[ranges] ])
identifier[ends] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] ,[ identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[ranges] ])
keyword[if] keyword[not] identifier[starts] keyword[or] keyword[not] identifier[ends] :
keyword[return] ( keyword[None] , keyword[None] , keyword[None] , keyword[None] )
identifier[minvalue] , identifier[minindex] = identifier[min] ( identifier[itertools] . identifier[izip] ( identifier[starts] , identifier[itertools] . identifier[count] ()))
identifier[maxvalue] , identifier[maxindex] = identifier[max] ( identifier[itertools] . identifier[izip] ( identifier[ends] , identifier[itertools] . identifier[count] ()))
identifier[minreason] =( identifier[period_list] [ identifier[minindex] ]. identifier[HasDateExceptionOn] ( identifier[minvalue] ) keyword[and]
literal[string] keyword[or]
literal[string] )
identifier[maxreason] =( identifier[period_list] [ identifier[maxindex] ]. identifier[HasDateExceptionOn] ( identifier[maxvalue] ) keyword[and]
literal[string] keyword[or]
literal[string] )
keyword[if] identifier[self] . identifier[feed_info] keyword[and] identifier[self] . identifier[feed_info] . identifier[feed_start_date] :
identifier[minvalue] = identifier[self] . identifier[feed_info] . identifier[feed_start_date]
identifier[minreason] = literal[string]
keyword[if] identifier[self] . identifier[feed_info] keyword[and] identifier[self] . identifier[feed_info] . identifier[feed_end_date] :
identifier[maxvalue] = identifier[self] . identifier[feed_info] . identifier[feed_end_date]
identifier[maxreason] = literal[string]
keyword[return] ( identifier[minvalue] , identifier[maxvalue] , identifier[minreason] , identifier[maxreason] ) | def GetDateRangeWithOrigins(self):
"""Returns a tuple of (earliest, latest, earliest_origin, latest_origin)
dates on which the service periods in the schedule define service, in
YYYYMMDD form.
The origins specify where the earliest or latest dates come from. In
particular, whether the date is a regular ServicePeriod start_date or
end_date in calendar.txt, a service exception of type add in
calendar_dates.txt, or feed start/end date defined in feed_info.txt.
"""
period_list = self.GetServicePeriodList()
ranges = [period.GetDateRange() for period in period_list]
starts = filter(lambda x: x, [item[0] for item in ranges])
ends = filter(lambda x: x, [item[1] for item in ranges])
if not starts or not ends:
return (None, None, None, None) # depends on [control=['if'], data=[]]
(minvalue, minindex) = min(itertools.izip(starts, itertools.count()))
(maxvalue, maxindex) = max(itertools.izip(ends, itertools.count()))
minreason = period_list[minindex].HasDateExceptionOn(minvalue) and 'earliest service exception date in calendar_dates.txt' or 'earliest service date in calendar.txt'
maxreason = period_list[maxindex].HasDateExceptionOn(maxvalue) and 'last service exception date in calendar_dates.txt' or 'last service date in calendar.txt'
# Override with feed_info.txt feed_start_date and feed_end_date values, if
# defined
if self.feed_info and self.feed_info.feed_start_date:
minvalue = self.feed_info.feed_start_date
minreason = 'feed_start_date in feed_info.txt' # depends on [control=['if'], data=[]]
if self.feed_info and self.feed_info.feed_end_date:
maxvalue = self.feed_info.feed_end_date
maxreason = 'feed_end_date in feed_info.txt' # depends on [control=['if'], data=[]]
return (minvalue, maxvalue, minreason, maxreason) |
def read_value(self):
"""Reads the raw red, green, blue and clear channel values. Will return
a 4-tuple with the red, green, blue, clear color values (unsigned 16-bit
numbers).
"""
while not self._valid():
time.sleep((self._integration_time + 0.9)/1000.0)
# Read each color register.
r = self._readU16LE(TCS34725_RDATAL)
g = self._readU16LE(TCS34725_GDATAL)
b = self._readU16LE(TCS34725_BDATAL)
c = self._readU16LE(TCS34725_CDATAL)
# Delay for the integration time to allow for next reading immediately.
red = int(pow((int((r/c) * 256) / 255), 2.5) * 255)
green = int(pow((int((g/c) * 256) / 255), 2.5) * 255)
blue = int(pow((int((b/c) * 256) / 255), 2.5) * 255)
return [r, g, b] | def function[read_value, parameter[self]]:
constant[Reads the raw red, green, blue and clear channel values. Will return
a 4-tuple with the red, green, blue, clear color values (unsigned 16-bit
numbers).
]
while <ast.UnaryOp object at 0x7da18c4ce770> begin[:]
call[name[time].sleep, parameter[binary_operation[binary_operation[name[self]._integration_time + constant[0.9]] / constant[1000.0]]]]
variable[r] assign[=] call[name[self]._readU16LE, parameter[name[TCS34725_RDATAL]]]
variable[g] assign[=] call[name[self]._readU16LE, parameter[name[TCS34725_GDATAL]]]
variable[b] assign[=] call[name[self]._readU16LE, parameter[name[TCS34725_BDATAL]]]
variable[c] assign[=] call[name[self]._readU16LE, parameter[name[TCS34725_CDATAL]]]
variable[red] assign[=] call[name[int], parameter[binary_operation[call[name[pow], parameter[binary_operation[call[name[int], parameter[binary_operation[binary_operation[name[r] / name[c]] * constant[256]]]] / constant[255]], constant[2.5]]] * constant[255]]]]
variable[green] assign[=] call[name[int], parameter[binary_operation[call[name[pow], parameter[binary_operation[call[name[int], parameter[binary_operation[binary_operation[name[g] / name[c]] * constant[256]]]] / constant[255]], constant[2.5]]] * constant[255]]]]
variable[blue] assign[=] call[name[int], parameter[binary_operation[call[name[pow], parameter[binary_operation[call[name[int], parameter[binary_operation[binary_operation[name[b] / name[c]] * constant[256]]]] / constant[255]], constant[2.5]]] * constant[255]]]]
return[list[[<ast.Name object at 0x7da20c992ec0>, <ast.Name object at 0x7da20c991ff0>, <ast.Name object at 0x7da20c990be0>]]] | keyword[def] identifier[read_value] ( identifier[self] ):
literal[string]
keyword[while] keyword[not] identifier[self] . identifier[_valid] ():
identifier[time] . identifier[sleep] (( identifier[self] . identifier[_integration_time] + literal[int] )/ literal[int] )
identifier[r] = identifier[self] . identifier[_readU16LE] ( identifier[TCS34725_RDATAL] )
identifier[g] = identifier[self] . identifier[_readU16LE] ( identifier[TCS34725_GDATAL] )
identifier[b] = identifier[self] . identifier[_readU16LE] ( identifier[TCS34725_BDATAL] )
identifier[c] = identifier[self] . identifier[_readU16LE] ( identifier[TCS34725_CDATAL] )
identifier[red] = identifier[int] ( identifier[pow] (( identifier[int] (( identifier[r] / identifier[c] )* literal[int] )/ literal[int] ), literal[int] )* literal[int] )
identifier[green] = identifier[int] ( identifier[pow] (( identifier[int] (( identifier[g] / identifier[c] )* literal[int] )/ literal[int] ), literal[int] )* literal[int] )
identifier[blue] = identifier[int] ( identifier[pow] (( identifier[int] (( identifier[b] / identifier[c] )* literal[int] )/ literal[int] ), literal[int] )* literal[int] )
keyword[return] [ identifier[r] , identifier[g] , identifier[b] ] | def read_value(self):
"""Reads the raw red, green, blue and clear channel values. Will return
a 4-tuple with the red, green, blue, clear color values (unsigned 16-bit
numbers).
"""
while not self._valid():
time.sleep((self._integration_time + 0.9) / 1000.0) # depends on [control=['while'], data=[]]
# Read each color register.
r = self._readU16LE(TCS34725_RDATAL)
g = self._readU16LE(TCS34725_GDATAL)
b = self._readU16LE(TCS34725_BDATAL)
c = self._readU16LE(TCS34725_CDATAL)
# Delay for the integration time to allow for next reading immediately.
red = int(pow(int(r / c * 256) / 255, 2.5) * 255)
green = int(pow(int(g / c * 256) / 255, 2.5) * 255)
blue = int(pow(int(b / c * 256) / 255, 2.5) * 255)
return [r, g, b] |
def validate_public_key(value):
"""
Check that the given value is a valid RSA Public key in either PEM or OpenSSH format. If it is invalid,
raises ``django.core.exceptions.ValidationError``.
"""
is_valid = False
exc = None
for load in (load_pem_public_key, load_ssh_public_key):
if not is_valid:
try:
load(value.encode('utf-8'), default_backend())
is_valid = True
except Exception as e:
exc = e
if not is_valid:
raise ValidationError('Public key is invalid: %s' % exc) | def function[validate_public_key, parameter[value]]:
constant[
Check that the given value is a valid RSA Public key in either PEM or OpenSSH format. If it is invalid,
raises ``django.core.exceptions.ValidationError``.
]
variable[is_valid] assign[=] constant[False]
variable[exc] assign[=] constant[None]
for taget[name[load]] in starred[tuple[[<ast.Name object at 0x7da1b1b7e290>, <ast.Name object at 0x7da1b1b7e1a0>]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1b7cc70> begin[:]
<ast.Try object at 0x7da1b1b7fe80>
if <ast.UnaryOp object at 0x7da18c4ce890> begin[:]
<ast.Raise object at 0x7da18dc99c60> | keyword[def] identifier[validate_public_key] ( identifier[value] ):
literal[string]
identifier[is_valid] = keyword[False]
identifier[exc] = keyword[None]
keyword[for] identifier[load] keyword[in] ( identifier[load_pem_public_key] , identifier[load_ssh_public_key] ):
keyword[if] keyword[not] identifier[is_valid] :
keyword[try] :
identifier[load] ( identifier[value] . identifier[encode] ( literal[string] ), identifier[default_backend] ())
identifier[is_valid] = keyword[True]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[exc] = identifier[e]
keyword[if] keyword[not] identifier[is_valid] :
keyword[raise] identifier[ValidationError] ( literal[string] % identifier[exc] ) | def validate_public_key(value):
"""
Check that the given value is a valid RSA Public key in either PEM or OpenSSH format. If it is invalid,
raises ``django.core.exceptions.ValidationError``.
"""
is_valid = False
exc = None
for load in (load_pem_public_key, load_ssh_public_key):
if not is_valid:
try:
load(value.encode('utf-8'), default_backend())
is_valid = True # depends on [control=['try'], data=[]]
except Exception as e:
exc = e # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['load']]
if not is_valid:
raise ValidationError('Public key is invalid: %s' % exc) # depends on [control=['if'], data=[]] |
def cli_frontend(ctx, verbose):
"""
Boussole is a commandline interface to build Sass projects using libsass.
Every project will need a settings file containing all needed settings to
build it.
"""
printout = True
if verbose == 0:
verbose = 1
printout = False
# Verbosity is the inverse of logging levels
levels = [item for item in BOUSSOLE_LOGGER_CONF]
levels.reverse()
# Init the logger config
root_logger = init_logger(levels[verbose], printout=printout)
# Init the default context that will be passed to commands
ctx.obj = {
'verbosity': verbose,
'logger': root_logger,
} | def function[cli_frontend, parameter[ctx, verbose]]:
constant[
Boussole is a commandline interface to build Sass projects using libsass.
Every project will need a settings file containing all needed settings to
build it.
]
variable[printout] assign[=] constant[True]
if compare[name[verbose] equal[==] constant[0]] begin[:]
variable[verbose] assign[=] constant[1]
variable[printout] assign[=] constant[False]
variable[levels] assign[=] <ast.ListComp object at 0x7da1b0a20490>
call[name[levels].reverse, parameter[]]
variable[root_logger] assign[=] call[name[init_logger], parameter[call[name[levels]][name[verbose]]]]
name[ctx].obj assign[=] dictionary[[<ast.Constant object at 0x7da1b092ded0>, <ast.Constant object at 0x7da1b092d540>], [<ast.Name object at 0x7da1b092f280>, <ast.Name object at 0x7da1b092c250>]] | keyword[def] identifier[cli_frontend] ( identifier[ctx] , identifier[verbose] ):
literal[string]
identifier[printout] = keyword[True]
keyword[if] identifier[verbose] == literal[int] :
identifier[verbose] = literal[int]
identifier[printout] = keyword[False]
identifier[levels] =[ identifier[item] keyword[for] identifier[item] keyword[in] identifier[BOUSSOLE_LOGGER_CONF] ]
identifier[levels] . identifier[reverse] ()
identifier[root_logger] = identifier[init_logger] ( identifier[levels] [ identifier[verbose] ], identifier[printout] = identifier[printout] )
identifier[ctx] . identifier[obj] ={
literal[string] : identifier[verbose] ,
literal[string] : identifier[root_logger] ,
} | def cli_frontend(ctx, verbose):
"""
Boussole is a commandline interface to build Sass projects using libsass.
Every project will need a settings file containing all needed settings to
build it.
"""
printout = True
if verbose == 0:
verbose = 1
printout = False # depends on [control=['if'], data=['verbose']]
# Verbosity is the inverse of logging levels
levels = [item for item in BOUSSOLE_LOGGER_CONF]
levels.reverse()
# Init the logger config
root_logger = init_logger(levels[verbose], printout=printout)
# Init the default context that will be passed to commands
ctx.obj = {'verbosity': verbose, 'logger': root_logger} |
def feed(self, data):
"""
added this check as sometimes we are getting the data in integer format instead of string
"""
try:
self.rawdata = self.rawdata + data
except TypeError:
data = unicode(data)
self.rawdata = self.rawdata + data
self.goahead(0) | def function[feed, parameter[self, data]]:
constant[
added this check as sometimes we are getting the data in integer format instead of string
]
<ast.Try object at 0x7da1b1197490>
call[name[self].goahead, parameter[constant[0]]] | keyword[def] identifier[feed] ( identifier[self] , identifier[data] ):
literal[string]
keyword[try] :
identifier[self] . identifier[rawdata] = identifier[self] . identifier[rawdata] + identifier[data]
keyword[except] identifier[TypeError] :
identifier[data] = identifier[unicode] ( identifier[data] )
identifier[self] . identifier[rawdata] = identifier[self] . identifier[rawdata] + identifier[data]
identifier[self] . identifier[goahead] ( literal[int] ) | def feed(self, data):
"""
added this check as sometimes we are getting the data in integer format instead of string
"""
try:
self.rawdata = self.rawdata + data # depends on [control=['try'], data=[]]
except TypeError:
data = unicode(data)
self.rawdata = self.rawdata + data # depends on [control=['except'], data=[]]
self.goahead(0) |
def finalize(self):
"""Output the number of instances that contained dead code."""
if self.total_instances > 1:
print('{} of {} instances contained dead code.'
.format(self.dead_code_instances, self.total_instances)) | def function[finalize, parameter[self]]:
constant[Output the number of instances that contained dead code.]
if compare[name[self].total_instances greater[>] constant[1]] begin[:]
call[name[print], parameter[call[constant[{} of {} instances contained dead code.].format, parameter[name[self].dead_code_instances, name[self].total_instances]]]] | keyword[def] identifier[finalize] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[total_instances] > literal[int] :
identifier[print] ( literal[string]
. identifier[format] ( identifier[self] . identifier[dead_code_instances] , identifier[self] . identifier[total_instances] )) | def finalize(self):
"""Output the number of instances that contained dead code."""
if self.total_instances > 1:
print('{} of {} instances contained dead code.'.format(self.dead_code_instances, self.total_instances)) # depends on [control=['if'], data=[]] |
def _ecc_public_numbers_from_compressed_point(curve, compressed_point):
"""Decodes a compressed elliptic curve point
as described in SEC-1 v2 section 2.3.3
and returns a PublicNumbers instance
based on the decoded point.
http://www.secg.org/sec1-v2.pdf
:param curve: Elliptic curve type to generate
:type curve: cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve
:param bytes compressed_point: Encoded compressed elliptic curve point
:returns: EllipticCurvePublicNumbers instance generated from compressed point and curve
:rtype: cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicNumbers
"""
x, y = _ecc_decode_compressed_point(curve, compressed_point)
return ec.EllipticCurvePublicNumbers(x=x, y=y, curve=curve) | def function[_ecc_public_numbers_from_compressed_point, parameter[curve, compressed_point]]:
constant[Decodes a compressed elliptic curve point
as described in SEC-1 v2 section 2.3.3
and returns a PublicNumbers instance
based on the decoded point.
http://www.secg.org/sec1-v2.pdf
:param curve: Elliptic curve type to generate
:type curve: cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve
:param bytes compressed_point: Encoded compressed elliptic curve point
:returns: EllipticCurvePublicNumbers instance generated from compressed point and curve
:rtype: cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicNumbers
]
<ast.Tuple object at 0x7da18f00ea40> assign[=] call[name[_ecc_decode_compressed_point], parameter[name[curve], name[compressed_point]]]
return[call[name[ec].EllipticCurvePublicNumbers, parameter[]]] | keyword[def] identifier[_ecc_public_numbers_from_compressed_point] ( identifier[curve] , identifier[compressed_point] ):
literal[string]
identifier[x] , identifier[y] = identifier[_ecc_decode_compressed_point] ( identifier[curve] , identifier[compressed_point] )
keyword[return] identifier[ec] . identifier[EllipticCurvePublicNumbers] ( identifier[x] = identifier[x] , identifier[y] = identifier[y] , identifier[curve] = identifier[curve] ) | def _ecc_public_numbers_from_compressed_point(curve, compressed_point):
"""Decodes a compressed elliptic curve point
as described in SEC-1 v2 section 2.3.3
and returns a PublicNumbers instance
based on the decoded point.
http://www.secg.org/sec1-v2.pdf
:param curve: Elliptic curve type to generate
:type curve: cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve
:param bytes compressed_point: Encoded compressed elliptic curve point
:returns: EllipticCurvePublicNumbers instance generated from compressed point and curve
:rtype: cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicNumbers
"""
(x, y) = _ecc_decode_compressed_point(curve, compressed_point)
return ec.EllipticCurvePublicNumbers(x=x, y=y, curve=curve) |
def update_matches(self, other):
"""
Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure.
"""
for match in self.error_matches.all():
other_matches = TextLogErrorMatch.objects.filter(
classified_failure=other,
text_log_error=match.text_log_error,
)
if not other_matches:
match.classified_failure = other
match.save(update_fields=['classified_failure'])
continue
# if any of our matches have higher scores than other's matches,
# overwrite with our score.
other_matches.filter(score__lt=match.score).update(score=match.score)
yield match.id | def function[update_matches, parameter[self, other]]:
constant[
Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure.
]
for taget[name[match]] in starred[call[name[self].error_matches.all, parameter[]]] begin[:]
variable[other_matches] assign[=] call[name[TextLogErrorMatch].objects.filter, parameter[]]
if <ast.UnaryOp object at 0x7da1b0868b20> begin[:]
name[match].classified_failure assign[=] name[other]
call[name[match].save, parameter[]]
continue
call[call[name[other_matches].filter, parameter[]].update, parameter[]]
<ast.Yield object at 0x7da1b086bf10> | keyword[def] identifier[update_matches] ( identifier[self] , identifier[other] ):
literal[string]
keyword[for] identifier[match] keyword[in] identifier[self] . identifier[error_matches] . identifier[all] ():
identifier[other_matches] = identifier[TextLogErrorMatch] . identifier[objects] . identifier[filter] (
identifier[classified_failure] = identifier[other] ,
identifier[text_log_error] = identifier[match] . identifier[text_log_error] ,
)
keyword[if] keyword[not] identifier[other_matches] :
identifier[match] . identifier[classified_failure] = identifier[other]
identifier[match] . identifier[save] ( identifier[update_fields] =[ literal[string] ])
keyword[continue]
identifier[other_matches] . identifier[filter] ( identifier[score__lt] = identifier[match] . identifier[score] ). identifier[update] ( identifier[score] = identifier[match] . identifier[score] )
keyword[yield] identifier[match] . identifier[id] | def update_matches(self, other):
"""
Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure.
"""
for match in self.error_matches.all():
other_matches = TextLogErrorMatch.objects.filter(classified_failure=other, text_log_error=match.text_log_error)
if not other_matches:
match.classified_failure = other
match.save(update_fields=['classified_failure'])
continue # depends on [control=['if'], data=[]]
# if any of our matches have higher scores than other's matches,
# overwrite with our score.
other_matches.filter(score__lt=match.score).update(score=match.score)
yield match.id # depends on [control=['for'], data=['match']] |
def _mass(self,R,z=0.,t=0.):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2013-XX-XX - Written - Bovy (IAS)
"""
if z is None: r= R
else: r= nu.sqrt(R**2.+z**2.)
return 2.*nu.pi*self.rc**(3.-self.alpha)*special.gammainc(1.5-self.alpha/2.,(r/self.rc)**2.)*special.gamma(1.5-self.alpha/2.) | def function[_mass, parameter[self, R, z, t]]:
constant[
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2013-XX-XX - Written - Bovy (IAS)
]
if compare[name[z] is constant[None]] begin[:]
variable[r] assign[=] name[R]
return[binary_operation[binary_operation[binary_operation[binary_operation[constant[2.0] * name[nu].pi] * binary_operation[name[self].rc ** binary_operation[constant[3.0] - name[self].alpha]]] * call[name[special].gammainc, parameter[binary_operation[constant[1.5] - binary_operation[name[self].alpha / constant[2.0]]], binary_operation[binary_operation[name[r] / name[self].rc] ** constant[2.0]]]]] * call[name[special].gamma, parameter[binary_operation[constant[1.5] - binary_operation[name[self].alpha / constant[2.0]]]]]]] | keyword[def] identifier[_mass] ( identifier[self] , identifier[R] , identifier[z] = literal[int] , identifier[t] = literal[int] ):
literal[string]
keyword[if] identifier[z] keyword[is] keyword[None] : identifier[r] = identifier[R]
keyword[else] : identifier[r] = identifier[nu] . identifier[sqrt] ( identifier[R] ** literal[int] + identifier[z] ** literal[int] )
keyword[return] literal[int] * identifier[nu] . identifier[pi] * identifier[self] . identifier[rc] **( literal[int] - identifier[self] . identifier[alpha] )* identifier[special] . identifier[gammainc] ( literal[int] - identifier[self] . identifier[alpha] / literal[int] ,( identifier[r] / identifier[self] . identifier[rc] )** literal[int] )* identifier[special] . identifier[gamma] ( literal[int] - identifier[self] . identifier[alpha] / literal[int] ) | def _mass(self, R, z=0.0, t=0.0):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2013-XX-XX - Written - Bovy (IAS)
"""
if z is None:
r = R # depends on [control=['if'], data=[]]
else:
r = nu.sqrt(R ** 2.0 + z ** 2.0)
return 2.0 * nu.pi * self.rc ** (3.0 - self.alpha) * special.gammainc(1.5 - self.alpha / 2.0, (r / self.rc) ** 2.0) * special.gamma(1.5 - self.alpha / 2.0) |
def new_type(cls, **kwargs) -> typing.Type:
"""Create a user defined type.
The new type will contain all attributes of the `cls` type passed in.
Any attribute's value can be overwritten using kwargs.
:param kwargs: Can include any attribute defined in
the provided user defined type.
"""
props = dict(cls.__dict__)
props.update(kwargs)
return type(cls.__name__, (cls,), props) | def function[new_type, parameter[cls]]:
constant[Create a user defined type.
The new type will contain all attributes of the `cls` type passed in.
Any attribute's value can be overwritten using kwargs.
:param kwargs: Can include any attribute defined in
the provided user defined type.
]
variable[props] assign[=] call[name[dict], parameter[name[cls].__dict__]]
call[name[props].update, parameter[name[kwargs]]]
return[call[name[type], parameter[name[cls].__name__, tuple[[<ast.Name object at 0x7da20c7ca350>]], name[props]]]] | keyword[def] identifier[new_type] ( identifier[cls] ,** identifier[kwargs] )-> identifier[typing] . identifier[Type] :
literal[string]
identifier[props] = identifier[dict] ( identifier[cls] . identifier[__dict__] )
identifier[props] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[type] ( identifier[cls] . identifier[__name__] ,( identifier[cls] ,), identifier[props] ) | def new_type(cls, **kwargs) -> typing.Type:
"""Create a user defined type.
The new type will contain all attributes of the `cls` type passed in.
Any attribute's value can be overwritten using kwargs.
:param kwargs: Can include any attribute defined in
the provided user defined type.
"""
props = dict(cls.__dict__)
props.update(kwargs)
return type(cls.__name__, (cls,), props) |
def open_zarr(store, group=None, synchronizer=None, chunks='auto',
decode_cf=True, mask_and_scale=True, decode_times=True,
concat_characters=True, decode_coords=True,
drop_variables=None, consolidated=False,
overwrite_encoded_chunks=False, **kwargs):
"""Load and decode a dataset from a Zarr store.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, obtional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int or dict or tuple or {None, 'auto'}, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created
based on the variable's zarr chunks. If `chunks=None`, zarr array
data will lazily convert to numpy arrays upon access. This accepts
all the chunk specifications as Dask does.
overwrite_encoded_chunks: bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
References
----------
http://zarr.readthedocs.io/
"""
if 'auto_chunk' in kwargs:
auto_chunk = kwargs.pop('auto_chunk')
if auto_chunk:
chunks = 'auto' # maintain backwards compatibility
else:
chunks = None
warnings.warn("auto_chunk is deprecated. Use chunks='auto' instead.",
FutureWarning, stacklevel=2)
if kwargs:
raise TypeError("open_zarr() got unexpected keyword arguments " +
",".join(kwargs.keys()))
if not isinstance(chunks, (int, dict)):
if chunks != 'auto' and chunks is not None:
raise ValueError("chunks must be an int, dict, 'auto', or None. "
"Instead found %s. " % chunks)
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store, mask_and_scale=mask_and_scale, decode_times=decode_times,
concat_characters=concat_characters, decode_coords=decode_coords,
drop_variables=drop_variables)
# TODO: this is where we would apply caching
return ds
# Zarr supports a wide range of access modes, but for now xarray either
# reads or writes from a store, never both. For open_zarr, we only read
mode = 'r'
zarr_store = ZarrStore.open_group(store, mode=mode,
synchronizer=synchronizer,
group=group, consolidated=consolidated)
ds = maybe_decode_store(zarr_store)
# auto chunking needs to be here and not in ZarrStore because variable
# chunks do not survive decode_cf
# return trivial case
if not chunks:
return ds
# adapted from Dataset.Chunk()
if isinstance(chunks, int):
chunks = dict.fromkeys(ds.dims, chunks)
if isinstance(chunks, tuple) and len(chunks) == len(ds.dims):
chunks = dict(zip(ds.dims, chunks))
def get_chunk(name, var, chunks):
chunk_spec = dict(zip(var.dims, var.encoding.get('chunks')))
# Coordinate labels aren't chunked
if var.ndim == 1 and var.dims[0] == name:
return chunk_spec
if chunks == 'auto':
return chunk_spec
for dim in var.dims:
if dim in chunks:
spec = chunks[dim]
if isinstance(spec, int):
spec = (spec,)
if isinstance(spec, (tuple, list)) and chunk_spec[dim]:
if any(s % chunk_spec[dim] for s in spec):
warnings.warn("Specified Dask chunks %r would "
"separate Zarr chunk shape %r for "
"dimension %r. This significantly "
"degrades performance. Consider "
"rechunking after loading instead."
% (chunks[dim], chunk_spec[dim], dim),
stacklevel=2)
chunk_spec[dim] = chunks[dim]
return chunk_spec
def maybe_chunk(name, var, chunks):
from dask.base import tokenize
chunk_spec = get_chunk(name, var, chunks)
if (var.ndim > 0) and (chunk_spec is not None):
# does this cause any data to be read?
token2 = tokenize(name, var._data)
name2 = 'zarr-%s' % token2
var = var.chunk(chunk_spec, name=name2, lock=None)
if overwrite_encoded_chunks and var.chunks is not None:
var.encoding['chunks'] = tuple(x[0] for x in var.chunks)
return var
else:
return var
variables = OrderedDict([(k, maybe_chunk(k, v, chunks))
for k, v in ds.variables.items()])
return ds._replace_vars_and_dims(variables) | def function[open_zarr, parameter[store, group, synchronizer, chunks, decode_cf, mask_and_scale, decode_times, concat_characters, decode_coords, drop_variables, consolidated, overwrite_encoded_chunks]]:
constant[Load and decode a dataset from a Zarr store.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, obtional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int or dict or tuple or {None, 'auto'}, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created
based on the variable's zarr chunks. If `chunks=None`, zarr array
data will lazily convert to numpy arrays upon access. This accepts
all the chunk specifications as Dask does.
overwrite_encoded_chunks: bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
References
----------
http://zarr.readthedocs.io/
]
if compare[constant[auto_chunk] in name[kwargs]] begin[:]
variable[auto_chunk] assign[=] call[name[kwargs].pop, parameter[constant[auto_chunk]]]
if name[auto_chunk] begin[:]
variable[chunks] assign[=] constant[auto]
call[name[warnings].warn, parameter[constant[auto_chunk is deprecated. Use chunks='auto' instead.], name[FutureWarning]]]
if name[kwargs] begin[:]
<ast.Raise object at 0x7da18ede5360>
if <ast.UnaryOp object at 0x7da18ede5900> begin[:]
if <ast.BoolOp object at 0x7da18ede6b90> begin[:]
<ast.Raise object at 0x7da18ede4cd0>
if <ast.UnaryOp object at 0x7da18ede6cb0> begin[:]
variable[mask_and_scale] assign[=] constant[False]
variable[decode_times] assign[=] constant[False]
variable[concat_characters] assign[=] constant[False]
variable[decode_coords] assign[=] constant[False]
def function[maybe_decode_store, parameter[store, lock]]:
variable[ds] assign[=] call[name[conventions].decode_cf, parameter[name[store]]]
return[name[ds]]
variable[mode] assign[=] constant[r]
variable[zarr_store] assign[=] call[name[ZarrStore].open_group, parameter[name[store]]]
variable[ds] assign[=] call[name[maybe_decode_store], parameter[name[zarr_store]]]
if <ast.UnaryOp object at 0x7da18dc99960> begin[:]
return[name[ds]]
if call[name[isinstance], parameter[name[chunks], name[int]]] begin[:]
variable[chunks] assign[=] call[name[dict].fromkeys, parameter[name[ds].dims, name[chunks]]]
if <ast.BoolOp object at 0x7da18dc9be80> begin[:]
variable[chunks] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[ds].dims, name[chunks]]]]]
def function[get_chunk, parameter[name, var, chunks]]:
variable[chunk_spec] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[var].dims, call[name[var].encoding.get, parameter[constant[chunks]]]]]]]
if <ast.BoolOp object at 0x7da18dc9b820> begin[:]
return[name[chunk_spec]]
if compare[name[chunks] equal[==] constant[auto]] begin[:]
return[name[chunk_spec]]
for taget[name[dim]] in starred[name[var].dims] begin[:]
if compare[name[dim] in name[chunks]] begin[:]
variable[spec] assign[=] call[name[chunks]][name[dim]]
if call[name[isinstance], parameter[name[spec], name[int]]] begin[:]
variable[spec] assign[=] tuple[[<ast.Name object at 0x7da18dc99360>]]
if <ast.BoolOp object at 0x7da18dc9ab90> begin[:]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da18dc99a80>]] begin[:]
call[name[warnings].warn, parameter[binary_operation[constant[Specified Dask chunks %r would separate Zarr chunk shape %r for dimension %r. This significantly degrades performance. Consider rechunking after loading instead.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18dc9b250>, <ast.Subscript object at 0x7da18dc989d0>, <ast.Name object at 0x7da18dc99c30>]]]]]
call[name[chunk_spec]][name[dim]] assign[=] call[name[chunks]][name[dim]]
return[name[chunk_spec]]
def function[maybe_chunk, parameter[name, var, chunks]]:
from relative_module[dask.base] import module[tokenize]
variable[chunk_spec] assign[=] call[name[get_chunk], parameter[name[name], name[var], name[chunks]]]
if <ast.BoolOp object at 0x7da18dc998a0> begin[:]
variable[token2] assign[=] call[name[tokenize], parameter[name[name], name[var]._data]]
variable[name2] assign[=] binary_operation[constant[zarr-%s] <ast.Mod object at 0x7da2590d6920> name[token2]]
variable[var] assign[=] call[name[var].chunk, parameter[name[chunk_spec]]]
if <ast.BoolOp object at 0x7da18dc994b0> begin[:]
call[name[var].encoding][constant[chunks]] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18dc99d80>]]
return[name[var]]
variable[variables] assign[=] call[name[OrderedDict], parameter[<ast.ListComp object at 0x7da1b1f964d0>]]
return[call[name[ds]._replace_vars_and_dims, parameter[name[variables]]]] | keyword[def] identifier[open_zarr] ( identifier[store] , identifier[group] = keyword[None] , identifier[synchronizer] = keyword[None] , identifier[chunks] = literal[string] ,
identifier[decode_cf] = keyword[True] , identifier[mask_and_scale] = keyword[True] , identifier[decode_times] = keyword[True] ,
identifier[concat_characters] = keyword[True] , identifier[decode_coords] = keyword[True] ,
identifier[drop_variables] = keyword[None] , identifier[consolidated] = keyword[False] ,
identifier[overwrite_encoded_chunks] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[auto_chunk] = identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[if] identifier[auto_chunk] :
identifier[chunks] = literal[string]
keyword[else] :
identifier[chunks] = keyword[None]
identifier[warnings] . identifier[warn] ( literal[string] ,
identifier[FutureWarning] , identifier[stacklevel] = literal[int] )
keyword[if] identifier[kwargs] :
keyword[raise] identifier[TypeError] ( literal[string] +
literal[string] . identifier[join] ( identifier[kwargs] . identifier[keys] ()))
keyword[if] keyword[not] identifier[isinstance] ( identifier[chunks] ,( identifier[int] , identifier[dict] )):
keyword[if] identifier[chunks] != literal[string] keyword[and] identifier[chunks] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[chunks] )
keyword[if] keyword[not] identifier[decode_cf] :
identifier[mask_and_scale] = keyword[False]
identifier[decode_times] = keyword[False]
identifier[concat_characters] = keyword[False]
identifier[decode_coords] = keyword[False]
keyword[def] identifier[maybe_decode_store] ( identifier[store] , identifier[lock] = keyword[False] ):
identifier[ds] = identifier[conventions] . identifier[decode_cf] (
identifier[store] , identifier[mask_and_scale] = identifier[mask_and_scale] , identifier[decode_times] = identifier[decode_times] ,
identifier[concat_characters] = identifier[concat_characters] , identifier[decode_coords] = identifier[decode_coords] ,
identifier[drop_variables] = identifier[drop_variables] )
keyword[return] identifier[ds]
identifier[mode] = literal[string]
identifier[zarr_store] = identifier[ZarrStore] . identifier[open_group] ( identifier[store] , identifier[mode] = identifier[mode] ,
identifier[synchronizer] = identifier[synchronizer] ,
identifier[group] = identifier[group] , identifier[consolidated] = identifier[consolidated] )
identifier[ds] = identifier[maybe_decode_store] ( identifier[zarr_store] )
keyword[if] keyword[not] identifier[chunks] :
keyword[return] identifier[ds]
keyword[if] identifier[isinstance] ( identifier[chunks] , identifier[int] ):
identifier[chunks] = identifier[dict] . identifier[fromkeys] ( identifier[ds] . identifier[dims] , identifier[chunks] )
keyword[if] identifier[isinstance] ( identifier[chunks] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[chunks] )== identifier[len] ( identifier[ds] . identifier[dims] ):
identifier[chunks] = identifier[dict] ( identifier[zip] ( identifier[ds] . identifier[dims] , identifier[chunks] ))
keyword[def] identifier[get_chunk] ( identifier[name] , identifier[var] , identifier[chunks] ):
identifier[chunk_spec] = identifier[dict] ( identifier[zip] ( identifier[var] . identifier[dims] , identifier[var] . identifier[encoding] . identifier[get] ( literal[string] )))
keyword[if] identifier[var] . identifier[ndim] == literal[int] keyword[and] identifier[var] . identifier[dims] [ literal[int] ]== identifier[name] :
keyword[return] identifier[chunk_spec]
keyword[if] identifier[chunks] == literal[string] :
keyword[return] identifier[chunk_spec]
keyword[for] identifier[dim] keyword[in] identifier[var] . identifier[dims] :
keyword[if] identifier[dim] keyword[in] identifier[chunks] :
identifier[spec] = identifier[chunks] [ identifier[dim] ]
keyword[if] identifier[isinstance] ( identifier[spec] , identifier[int] ):
identifier[spec] =( identifier[spec] ,)
keyword[if] identifier[isinstance] ( identifier[spec] ,( identifier[tuple] , identifier[list] )) keyword[and] identifier[chunk_spec] [ identifier[dim] ]:
keyword[if] identifier[any] ( identifier[s] % identifier[chunk_spec] [ identifier[dim] ] keyword[for] identifier[s] keyword[in] identifier[spec] ):
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
%( identifier[chunks] [ identifier[dim] ], identifier[chunk_spec] [ identifier[dim] ], identifier[dim] ),
identifier[stacklevel] = literal[int] )
identifier[chunk_spec] [ identifier[dim] ]= identifier[chunks] [ identifier[dim] ]
keyword[return] identifier[chunk_spec]
keyword[def] identifier[maybe_chunk] ( identifier[name] , identifier[var] , identifier[chunks] ):
keyword[from] identifier[dask] . identifier[base] keyword[import] identifier[tokenize]
identifier[chunk_spec] = identifier[get_chunk] ( identifier[name] , identifier[var] , identifier[chunks] )
keyword[if] ( identifier[var] . identifier[ndim] > literal[int] ) keyword[and] ( identifier[chunk_spec] keyword[is] keyword[not] keyword[None] ):
identifier[token2] = identifier[tokenize] ( identifier[name] , identifier[var] . identifier[_data] )
identifier[name2] = literal[string] % identifier[token2]
identifier[var] = identifier[var] . identifier[chunk] ( identifier[chunk_spec] , identifier[name] = identifier[name2] , identifier[lock] = keyword[None] )
keyword[if] identifier[overwrite_encoded_chunks] keyword[and] identifier[var] . identifier[chunks] keyword[is] keyword[not] keyword[None] :
identifier[var] . identifier[encoding] [ literal[string] ]= identifier[tuple] ( identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[var] . identifier[chunks] )
keyword[return] identifier[var]
keyword[else] :
keyword[return] identifier[var]
identifier[variables] = identifier[OrderedDict] ([( identifier[k] , identifier[maybe_chunk] ( identifier[k] , identifier[v] , identifier[chunks] ))
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[ds] . identifier[variables] . identifier[items] ()])
keyword[return] identifier[ds] . identifier[_replace_vars_and_dims] ( identifier[variables] ) | def open_zarr(store, group=None, synchronizer=None, chunks='auto', decode_cf=True, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables=None, consolidated=False, overwrite_encoded_chunks=False, **kwargs):
"""Load and decode a dataset from a Zarr store.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, obtional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int or dict or tuple or {None, 'auto'}, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created
based on the variable's zarr chunks. If `chunks=None`, zarr array
data will lazily convert to numpy arrays upon access. This accepts
all the chunk specifications as Dask does.
overwrite_encoded_chunks: bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
References
----------
http://zarr.readthedocs.io/
"""
if 'auto_chunk' in kwargs:
auto_chunk = kwargs.pop('auto_chunk')
if auto_chunk:
chunks = 'auto' # maintain backwards compatibility # depends on [control=['if'], data=[]]
else:
chunks = None
warnings.warn("auto_chunk is deprecated. Use chunks='auto' instead.", FutureWarning, stacklevel=2) # depends on [control=['if'], data=['kwargs']]
if kwargs:
raise TypeError('open_zarr() got unexpected keyword arguments ' + ','.join(kwargs.keys())) # depends on [control=['if'], data=[]]
if not isinstance(chunks, (int, dict)):
if chunks != 'auto' and chunks is not None:
raise ValueError("chunks must be an int, dict, 'auto', or None. Instead found %s. " % chunks) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False # depends on [control=['if'], data=[]]
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables)
# TODO: this is where we would apply caching
return ds
# Zarr supports a wide range of access modes, but for now xarray either
# reads or writes from a store, never both. For open_zarr, we only read
mode = 'r'
zarr_store = ZarrStore.open_group(store, mode=mode, synchronizer=synchronizer, group=group, consolidated=consolidated)
ds = maybe_decode_store(zarr_store)
# auto chunking needs to be here and not in ZarrStore because variable
# chunks do not survive decode_cf
# return trivial case
if not chunks:
return ds # depends on [control=['if'], data=[]]
# adapted from Dataset.Chunk()
if isinstance(chunks, int):
chunks = dict.fromkeys(ds.dims, chunks) # depends on [control=['if'], data=[]]
if isinstance(chunks, tuple) and len(chunks) == len(ds.dims):
chunks = dict(zip(ds.dims, chunks)) # depends on [control=['if'], data=[]]
def get_chunk(name, var, chunks):
chunk_spec = dict(zip(var.dims, var.encoding.get('chunks')))
# Coordinate labels aren't chunked
if var.ndim == 1 and var.dims[0] == name:
return chunk_spec # depends on [control=['if'], data=[]]
if chunks == 'auto':
return chunk_spec # depends on [control=['if'], data=[]]
for dim in var.dims:
if dim in chunks:
spec = chunks[dim]
if isinstance(spec, int):
spec = (spec,) # depends on [control=['if'], data=[]]
if isinstance(spec, (tuple, list)) and chunk_spec[dim]:
if any((s % chunk_spec[dim] for s in spec)):
warnings.warn('Specified Dask chunks %r would separate Zarr chunk shape %r for dimension %r. This significantly degrades performance. Consider rechunking after loading instead.' % (chunks[dim], chunk_spec[dim], dim), stacklevel=2) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
chunk_spec[dim] = chunks[dim] # depends on [control=['if'], data=['dim', 'chunks']] # depends on [control=['for'], data=['dim']]
return chunk_spec
def maybe_chunk(name, var, chunks):
from dask.base import tokenize
chunk_spec = get_chunk(name, var, chunks)
if var.ndim > 0 and chunk_spec is not None:
# does this cause any data to be read?
token2 = tokenize(name, var._data)
name2 = 'zarr-%s' % token2
var = var.chunk(chunk_spec, name=name2, lock=None)
if overwrite_encoded_chunks and var.chunks is not None:
var.encoding['chunks'] = tuple((x[0] for x in var.chunks)) # depends on [control=['if'], data=[]]
return var # depends on [control=['if'], data=[]]
else:
return var
variables = OrderedDict([(k, maybe_chunk(k, v, chunks)) for (k, v) in ds.variables.items()])
return ds._replace_vars_and_dims(variables) |
def get_active_course_runs(course, users_all_enrolled_courses):
"""
Return active course runs (user is enrolled in) of the given course.
This function will return the course_runs of 'course' which have
active enrollment by looking into 'users_all_enrolled_courses'
"""
# User's all course_run ids in which he has enrolled.
enrolled_course_run_ids = [
enrolled_course_run['course_details']['course_id'] for enrolled_course_run in users_all_enrolled_courses
if enrolled_course_run['is_active'] and enrolled_course_run.get('course_details')
]
return [course_run for course_run in course['course_runs'] if course_run['key'] in enrolled_course_run_ids] | def function[get_active_course_runs, parameter[course, users_all_enrolled_courses]]:
constant[
Return active course runs (user is enrolled in) of the given course.
This function will return the course_runs of 'course' which have
active enrollment by looking into 'users_all_enrolled_courses'
]
variable[enrolled_course_run_ids] assign[=] <ast.ListComp object at 0x7da1b00556f0>
return[<ast.ListComp object at 0x7da1b00559f0>] | keyword[def] identifier[get_active_course_runs] ( identifier[course] , identifier[users_all_enrolled_courses] ):
literal[string]
identifier[enrolled_course_run_ids] =[
identifier[enrolled_course_run] [ literal[string] ][ literal[string] ] keyword[for] identifier[enrolled_course_run] keyword[in] identifier[users_all_enrolled_courses]
keyword[if] identifier[enrolled_course_run] [ literal[string] ] keyword[and] identifier[enrolled_course_run] . identifier[get] ( literal[string] )
]
keyword[return] [ identifier[course_run] keyword[for] identifier[course_run] keyword[in] identifier[course] [ literal[string] ] keyword[if] identifier[course_run] [ literal[string] ] keyword[in] identifier[enrolled_course_run_ids] ] | def get_active_course_runs(course, users_all_enrolled_courses):
"""
Return active course runs (user is enrolled in) of the given course.
This function will return the course_runs of 'course' which have
active enrollment by looking into 'users_all_enrolled_courses'
"""
# User's all course_run ids in which he has enrolled.
enrolled_course_run_ids = [enrolled_course_run['course_details']['course_id'] for enrolled_course_run in users_all_enrolled_courses if enrolled_course_run['is_active'] and enrolled_course_run.get('course_details')]
return [course_run for course_run in course['course_runs'] if course_run['key'] in enrolled_course_run_ids] |
def systemInformationType17():
"""SYSTEM INFORMATION TYPE 17 Section 9.1.43e"""
a = L2PseudoLength(l2pLength=0x01)
b = TpPd(pd=0x6)
c = MessageType(mesType=0x3e) # 00111110
d = Si17RestOctets()
packet = a / b / c / d
return packet | def function[systemInformationType17, parameter[]]:
constant[SYSTEM INFORMATION TYPE 17 Section 9.1.43e]
variable[a] assign[=] call[name[L2PseudoLength], parameter[]]
variable[b] assign[=] call[name[TpPd], parameter[]]
variable[c] assign[=] call[name[MessageType], parameter[]]
variable[d] assign[=] call[name[Si17RestOctets], parameter[]]
variable[packet] assign[=] binary_operation[binary_operation[binary_operation[name[a] / name[b]] / name[c]] / name[d]]
return[name[packet]] | keyword[def] identifier[systemInformationType17] ():
literal[string]
identifier[a] = identifier[L2PseudoLength] ( identifier[l2pLength] = literal[int] )
identifier[b] = identifier[TpPd] ( identifier[pd] = literal[int] )
identifier[c] = identifier[MessageType] ( identifier[mesType] = literal[int] )
identifier[d] = identifier[Si17RestOctets] ()
identifier[packet] = identifier[a] / identifier[b] / identifier[c] / identifier[d]
keyword[return] identifier[packet] | def systemInformationType17():
"""SYSTEM INFORMATION TYPE 17 Section 9.1.43e"""
a = L2PseudoLength(l2pLength=1)
b = TpPd(pd=6)
c = MessageType(mesType=62) # 00111110
d = Si17RestOctets()
packet = a / b / c / d
return packet |
def stop_pipeline(self, status=COMPLETE_FLAG):
"""
Terminate the pipeline.
This is the "healthy" pipeline completion function.
The normal pipeline completion function, to be run by the pipeline
at the end of the script. It sets status flag to completed and records
some time and memory statistics to the log file.
"""
self._set_status_flag(status)
self._cleanup()
self.report_result("Time", str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
self.report_result("Success", time.strftime("%m-%d-%H:%M:%S"))
print("\n##### [Epilogue:]")
print("* " + "Total elapsed time".rjust(20) + ": " + str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
# print("Peak memory used: " + str(memory_usage()["peak"]) + "kb")
print("* " + "Peak memory used".rjust(20) + ": " + str(round(self.peak_memory, 2)) + " GB")
if self.halted:
return
self.timestamp("* Pipeline completed at: ".rjust(20)) | def function[stop_pipeline, parameter[self, status]]:
constant[
Terminate the pipeline.
This is the "healthy" pipeline completion function.
The normal pipeline completion function, to be run by the pipeline
at the end of the script. It sets status flag to completed and records
some time and memory statistics to the log file.
]
call[name[self]._set_status_flag, parameter[name[status]]]
call[name[self]._cleanup, parameter[]]
call[name[self].report_result, parameter[constant[Time], call[name[str], parameter[call[name[datetime].timedelta, parameter[]]]]]]
call[name[self].report_result, parameter[constant[Success], call[name[time].strftime, parameter[constant[%m-%d-%H:%M:%S]]]]]
call[name[print], parameter[constant[
##### [Epilogue:]]]]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[constant[* ] + call[constant[Total elapsed time].rjust, parameter[constant[20]]]] + constant[: ]] + call[name[str], parameter[call[name[datetime].timedelta, parameter[]]]]]]]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[* ] + call[constant[Peak memory used].rjust, parameter[constant[20]]]] + constant[: ]] + call[name[str], parameter[call[name[round], parameter[name[self].peak_memory, constant[2]]]]]] + constant[ GB]]]]
if name[self].halted begin[:]
return[None]
call[name[self].timestamp, parameter[call[constant[* Pipeline completed at: ].rjust, parameter[constant[20]]]]] | keyword[def] identifier[stop_pipeline] ( identifier[self] , identifier[status] = identifier[COMPLETE_FLAG] ):
literal[string]
identifier[self] . identifier[_set_status_flag] ( identifier[status] )
identifier[self] . identifier[_cleanup] ()
identifier[self] . identifier[report_result] ( literal[string] , identifier[str] ( identifier[datetime] . identifier[timedelta] ( identifier[seconds] = identifier[self] . identifier[time_elapsed] ( identifier[self] . identifier[starttime] ))))
identifier[self] . identifier[report_result] ( literal[string] , identifier[time] . identifier[strftime] ( literal[string] ))
identifier[print] ( literal[string] )
identifier[print] ( literal[string] + literal[string] . identifier[rjust] ( literal[int] )+ literal[string] + identifier[str] ( identifier[datetime] . identifier[timedelta] ( identifier[seconds] = identifier[self] . identifier[time_elapsed] ( identifier[self] . identifier[starttime] ))))
identifier[print] ( literal[string] + literal[string] . identifier[rjust] ( literal[int] )+ literal[string] + identifier[str] ( identifier[round] ( identifier[self] . identifier[peak_memory] , literal[int] ))+ literal[string] )
keyword[if] identifier[self] . identifier[halted] :
keyword[return]
identifier[self] . identifier[timestamp] ( literal[string] . identifier[rjust] ( literal[int] )) | def stop_pipeline(self, status=COMPLETE_FLAG):
"""
Terminate the pipeline.
This is the "healthy" pipeline completion function.
The normal pipeline completion function, to be run by the pipeline
at the end of the script. It sets status flag to completed and records
some time and memory statistics to the log file.
"""
self._set_status_flag(status)
self._cleanup()
self.report_result('Time', str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
self.report_result('Success', time.strftime('%m-%d-%H:%M:%S'))
print('\n##### [Epilogue:]')
print('* ' + 'Total elapsed time'.rjust(20) + ': ' + str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
# print("Peak memory used: " + str(memory_usage()["peak"]) + "kb")
print('* ' + 'Peak memory used'.rjust(20) + ': ' + str(round(self.peak_memory, 2)) + ' GB')
if self.halted:
return # depends on [control=['if'], data=[]]
self.timestamp('* Pipeline completed at: '.rjust(20)) |
def _get_smma(cls, df, column, windows):
""" get smoothed moving average.
:param df: data
:param windows: range
:return: result series
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_smma'.format(column, window)
smma = df[column].ewm(
ignore_na=False, alpha=1.0 / window,
min_periods=0, adjust=True).mean()
df[column_name] = smma
return smma | def function[_get_smma, parameter[cls, df, column, windows]]:
constant[ get smoothed moving average.
:param df: data
:param windows: range
:return: result series
]
variable[window] assign[=] call[name[cls].get_only_one_positive_int, parameter[name[windows]]]
variable[column_name] assign[=] call[constant[{}_{}_smma].format, parameter[name[column], name[window]]]
variable[smma] assign[=] call[call[call[name[df]][name[column]].ewm, parameter[]].mean, parameter[]]
call[name[df]][name[column_name]] assign[=] name[smma]
return[name[smma]] | keyword[def] identifier[_get_smma] ( identifier[cls] , identifier[df] , identifier[column] , identifier[windows] ):
literal[string]
identifier[window] = identifier[cls] . identifier[get_only_one_positive_int] ( identifier[windows] )
identifier[column_name] = literal[string] . identifier[format] ( identifier[column] , identifier[window] )
identifier[smma] = identifier[df] [ identifier[column] ]. identifier[ewm] (
identifier[ignore_na] = keyword[False] , identifier[alpha] = literal[int] / identifier[window] ,
identifier[min_periods] = literal[int] , identifier[adjust] = keyword[True] ). identifier[mean] ()
identifier[df] [ identifier[column_name] ]= identifier[smma]
keyword[return] identifier[smma] | def _get_smma(cls, df, column, windows):
""" get smoothed moving average.
:param df: data
:param windows: range
:return: result series
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_smma'.format(column, window)
smma = df[column].ewm(ignore_na=False, alpha=1.0 / window, min_periods=0, adjust=True).mean()
df[column_name] = smma
return smma |
def initFeatureWriters(self, featureWriters=None):
""" Initialize feature writer classes as specified in the UFO lib.
If none are defined in the UFO, the default feature writers are used:
currently, KernFeatureWriter and MarkFeatureWriter.
The 'featureWriters' argument can be used to override these.
The method sets the `self.featureWriters` attribute with the list of
writers.
Note that the writers that generate GSUB features are placed first in
this list, before all others. This is because the GSUB table may be
used in the subsequent feature writers to resolve substitutions from
glyphs with unicodes to their alternates.
"""
if featureWriters is None:
featureWriters = loadFeatureWriters(self.ufo)
if featureWriters is None:
featureWriters = self.defaultFeatureWriters
gsubWriters = []
others = []
for writer in featureWriters:
if isclass(writer):
writer = writer()
if writer.tableTag == "GSUB":
gsubWriters.append(writer)
else:
others.append(writer)
self.featureWriters = gsubWriters + others | def function[initFeatureWriters, parameter[self, featureWriters]]:
constant[ Initialize feature writer classes as specified in the UFO lib.
If none are defined in the UFO, the default feature writers are used:
currently, KernFeatureWriter and MarkFeatureWriter.
The 'featureWriters' argument can be used to override these.
The method sets the `self.featureWriters` attribute with the list of
writers.
Note that the writers that generate GSUB features are placed first in
this list, before all others. This is because the GSUB table may be
used in the subsequent feature writers to resolve substitutions from
glyphs with unicodes to their alternates.
]
if compare[name[featureWriters] is constant[None]] begin[:]
variable[featureWriters] assign[=] call[name[loadFeatureWriters], parameter[name[self].ufo]]
if compare[name[featureWriters] is constant[None]] begin[:]
variable[featureWriters] assign[=] name[self].defaultFeatureWriters
variable[gsubWriters] assign[=] list[[]]
variable[others] assign[=] list[[]]
for taget[name[writer]] in starred[name[featureWriters]] begin[:]
if call[name[isclass], parameter[name[writer]]] begin[:]
variable[writer] assign[=] call[name[writer], parameter[]]
if compare[name[writer].tableTag equal[==] constant[GSUB]] begin[:]
call[name[gsubWriters].append, parameter[name[writer]]]
name[self].featureWriters assign[=] binary_operation[name[gsubWriters] + name[others]] | keyword[def] identifier[initFeatureWriters] ( identifier[self] , identifier[featureWriters] = keyword[None] ):
literal[string]
keyword[if] identifier[featureWriters] keyword[is] keyword[None] :
identifier[featureWriters] = identifier[loadFeatureWriters] ( identifier[self] . identifier[ufo] )
keyword[if] identifier[featureWriters] keyword[is] keyword[None] :
identifier[featureWriters] = identifier[self] . identifier[defaultFeatureWriters]
identifier[gsubWriters] =[]
identifier[others] =[]
keyword[for] identifier[writer] keyword[in] identifier[featureWriters] :
keyword[if] identifier[isclass] ( identifier[writer] ):
identifier[writer] = identifier[writer] ()
keyword[if] identifier[writer] . identifier[tableTag] == literal[string] :
identifier[gsubWriters] . identifier[append] ( identifier[writer] )
keyword[else] :
identifier[others] . identifier[append] ( identifier[writer] )
identifier[self] . identifier[featureWriters] = identifier[gsubWriters] + identifier[others] | def initFeatureWriters(self, featureWriters=None):
""" Initialize feature writer classes as specified in the UFO lib.
If none are defined in the UFO, the default feature writers are used:
currently, KernFeatureWriter and MarkFeatureWriter.
The 'featureWriters' argument can be used to override these.
The method sets the `self.featureWriters` attribute with the list of
writers.
Note that the writers that generate GSUB features are placed first in
this list, before all others. This is because the GSUB table may be
used in the subsequent feature writers to resolve substitutions from
glyphs with unicodes to their alternates.
"""
if featureWriters is None:
featureWriters = loadFeatureWriters(self.ufo)
if featureWriters is None:
featureWriters = self.defaultFeatureWriters # depends on [control=['if'], data=['featureWriters']] # depends on [control=['if'], data=['featureWriters']]
gsubWriters = []
others = []
for writer in featureWriters:
if isclass(writer):
writer = writer() # depends on [control=['if'], data=[]]
if writer.tableTag == 'GSUB':
gsubWriters.append(writer) # depends on [control=['if'], data=[]]
else:
others.append(writer) # depends on [control=['for'], data=['writer']]
self.featureWriters = gsubWriters + others |
def listen(self, port=None):
"""Make the fake executable listen to the specified port.
Possible values for 'port' are:
- None: Allocate immediately a free port and instruct the fake
executable to use it when it's invoked. This is subject to
a race condition, if that port that was free when listen() was
invoked later becomes used before the fake executable had chance
to bind to it. However it has the advantage of exposing the
free port as FakeExecutable.port instance variable, that can easily
be consumed by tests.
- An integer: Listen to this specific port.
"""
if port is None:
port = allocate_port()
self.port = port
self.line("import socket")
self.line("sock = socket.socket()")
self.line("sock.bind(('localhost', {}))".format(self.port))
self.log("listening: %d" % self.port)
self.line("sock.listen(0)") | def function[listen, parameter[self, port]]:
constant[Make the fake executable listen to the specified port.
Possible values for 'port' are:
- None: Allocate immediately a free port and instruct the fake
executable to use it when it's invoked. This is subject to
a race condition, if that port that was free when listen() was
invoked later becomes used before the fake executable had chance
to bind to it. However it has the advantage of exposing the
free port as FakeExecutable.port instance variable, that can easily
be consumed by tests.
- An integer: Listen to this specific port.
]
if compare[name[port] is constant[None]] begin[:]
variable[port] assign[=] call[name[allocate_port], parameter[]]
name[self].port assign[=] name[port]
call[name[self].line, parameter[constant[import socket]]]
call[name[self].line, parameter[constant[sock = socket.socket()]]]
call[name[self].line, parameter[call[constant[sock.bind(('localhost', {}))].format, parameter[name[self].port]]]]
call[name[self].log, parameter[binary_operation[constant[listening: %d] <ast.Mod object at 0x7da2590d6920> name[self].port]]]
call[name[self].line, parameter[constant[sock.listen(0)]]] | keyword[def] identifier[listen] ( identifier[self] , identifier[port] = keyword[None] ):
literal[string]
keyword[if] identifier[port] keyword[is] keyword[None] :
identifier[port] = identifier[allocate_port] ()
identifier[self] . identifier[port] = identifier[port]
identifier[self] . identifier[line] ( literal[string] )
identifier[self] . identifier[line] ( literal[string] )
identifier[self] . identifier[line] ( literal[string] . identifier[format] ( identifier[self] . identifier[port] ))
identifier[self] . identifier[log] ( literal[string] % identifier[self] . identifier[port] )
identifier[self] . identifier[line] ( literal[string] ) | def listen(self, port=None):
"""Make the fake executable listen to the specified port.
Possible values for 'port' are:
- None: Allocate immediately a free port and instruct the fake
executable to use it when it's invoked. This is subject to
a race condition, if that port that was free when listen() was
invoked later becomes used before the fake executable had chance
to bind to it. However it has the advantage of exposing the
free port as FakeExecutable.port instance variable, that can easily
be consumed by tests.
- An integer: Listen to this specific port.
"""
if port is None:
port = allocate_port() # depends on [control=['if'], data=['port']]
self.port = port
self.line('import socket')
self.line('sock = socket.socket()')
self.line("sock.bind(('localhost', {}))".format(self.port))
self.log('listening: %d' % self.port)
self.line('sock.listen(0)') |
def _get_handler(self, namespace, remove_handler=False):
"""Get a handler (if present) from a namespace.
Returns
-------
function or None
The handler defined in the namespace.
"""
if hasattr(namespace, '_func'):
_func = namespace._func
if remove_handler:
del namespace._func
return _func | def function[_get_handler, parameter[self, namespace, remove_handler]]:
constant[Get a handler (if present) from a namespace.
Returns
-------
function or None
The handler defined in the namespace.
]
if call[name[hasattr], parameter[name[namespace], constant[_func]]] begin[:]
variable[_func] assign[=] name[namespace]._func
if name[remove_handler] begin[:]
<ast.Delete object at 0x7da1b157a980>
return[name[_func]] | keyword[def] identifier[_get_handler] ( identifier[self] , identifier[namespace] , identifier[remove_handler] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[namespace] , literal[string] ):
identifier[_func] = identifier[namespace] . identifier[_func]
keyword[if] identifier[remove_handler] :
keyword[del] identifier[namespace] . identifier[_func]
keyword[return] identifier[_func] | def _get_handler(self, namespace, remove_handler=False):
"""Get a handler (if present) from a namespace.
Returns
-------
function or None
The handler defined in the namespace.
"""
if hasattr(namespace, '_func'):
_func = namespace._func
if remove_handler:
del namespace._func # depends on [control=['if'], data=[]]
return _func # depends on [control=['if'], data=[]] |
def _get_connection(self):
"""
Returns our cached LDAPObject, which may or may not be bound.
"""
if self._connection is None:
uri = self.settings.SERVER_URI
if callable(uri):
if func_supports_parameter(uri, "request"):
uri = uri(self._request)
else:
warnings.warn(
"Update AUTH_LDAP_SERVER_URI callable %s.%s to accept "
"a positional `request` argument. Support for callables "
"accepting no arguments will be removed in a future "
"version." % (uri.__module__, uri.__name__),
DeprecationWarning,
)
uri = uri()
self._connection = self.backend.ldap.initialize(uri, bytes_mode=False)
for opt, value in self.settings.CONNECTION_OPTIONS.items():
self._connection.set_option(opt, value)
if self.settings.START_TLS:
logger.debug("Initiating TLS")
self._connection.start_tls_s()
return self._connection | def function[_get_connection, parameter[self]]:
constant[
Returns our cached LDAPObject, which may or may not be bound.
]
if compare[name[self]._connection is constant[None]] begin[:]
variable[uri] assign[=] name[self].settings.SERVER_URI
if call[name[callable], parameter[name[uri]]] begin[:]
if call[name[func_supports_parameter], parameter[name[uri], constant[request]]] begin[:]
variable[uri] assign[=] call[name[uri], parameter[name[self]._request]]
name[self]._connection assign[=] call[name[self].backend.ldap.initialize, parameter[name[uri]]]
for taget[tuple[[<ast.Name object at 0x7da1b1609de0>, <ast.Name object at 0x7da1b160b2b0>]]] in starred[call[name[self].settings.CONNECTION_OPTIONS.items, parameter[]]] begin[:]
call[name[self]._connection.set_option, parameter[name[opt], name[value]]]
if name[self].settings.START_TLS begin[:]
call[name[logger].debug, parameter[constant[Initiating TLS]]]
call[name[self]._connection.start_tls_s, parameter[]]
return[name[self]._connection] | keyword[def] identifier[_get_connection] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_connection] keyword[is] keyword[None] :
identifier[uri] = identifier[self] . identifier[settings] . identifier[SERVER_URI]
keyword[if] identifier[callable] ( identifier[uri] ):
keyword[if] identifier[func_supports_parameter] ( identifier[uri] , literal[string] ):
identifier[uri] = identifier[uri] ( identifier[self] . identifier[_request] )
keyword[else] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string]
literal[string] %( identifier[uri] . identifier[__module__] , identifier[uri] . identifier[__name__] ),
identifier[DeprecationWarning] ,
)
identifier[uri] = identifier[uri] ()
identifier[self] . identifier[_connection] = identifier[self] . identifier[backend] . identifier[ldap] . identifier[initialize] ( identifier[uri] , identifier[bytes_mode] = keyword[False] )
keyword[for] identifier[opt] , identifier[value] keyword[in] identifier[self] . identifier[settings] . identifier[CONNECTION_OPTIONS] . identifier[items] ():
identifier[self] . identifier[_connection] . identifier[set_option] ( identifier[opt] , identifier[value] )
keyword[if] identifier[self] . identifier[settings] . identifier[START_TLS] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_connection] . identifier[start_tls_s] ()
keyword[return] identifier[self] . identifier[_connection] | def _get_connection(self):
"""
Returns our cached LDAPObject, which may or may not be bound.
"""
if self._connection is None:
uri = self.settings.SERVER_URI
if callable(uri):
if func_supports_parameter(uri, 'request'):
uri = uri(self._request) # depends on [control=['if'], data=[]]
else:
warnings.warn('Update AUTH_LDAP_SERVER_URI callable %s.%s to accept a positional `request` argument. Support for callables accepting no arguments will be removed in a future version.' % (uri.__module__, uri.__name__), DeprecationWarning)
uri = uri() # depends on [control=['if'], data=[]]
self._connection = self.backend.ldap.initialize(uri, bytes_mode=False)
for (opt, value) in self.settings.CONNECTION_OPTIONS.items():
self._connection.set_option(opt, value) # depends on [control=['for'], data=[]]
if self.settings.START_TLS:
logger.debug('Initiating TLS')
self._connection.start_tls_s() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self._connection |
def configure_doc_job(
config_url, rosdistro_name, doc_build_name,
repo_name, os_name, os_code_name, arch,
config=None, build_file=None,
index=None, dist_file=None, dist_cache=None,
jenkins=None, views=None,
is_disabled=False,
groovy_script=None,
doc_repository=None,
dry_run=False):
"""
Configure a single Jenkins doc job.
This includes the following steps:
- clone the doc repository to use
- clone the ros_buildfarm repository
- write the distribution repository keys into files
- invoke the run_doc_job.py script
"""
if config is None:
config = get_config_index(config_url)
if build_file is None:
build_files = get_doc_build_files(config, rosdistro_name)
build_file = build_files[doc_build_name]
if index is None:
index = get_index(config.rosdistro_index_url)
if dist_file is None:
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
raise JobValidationError(
'No distribution file matches the build file')
repo_names = dist_file.repositories.keys()
if repo_name is not None:
if repo_name not in repo_names:
raise JobValidationError(
"Invalid repository name '%s' " % repo_name +
'choose one of the following: %s' %
', '.join(sorted(repo_names)))
repo = dist_file.repositories[repo_name]
if not repo.doc_repository:
raise JobValidationError(
"Repository '%s' has no doc section" % repo_name)
if not repo.doc_repository.version:
raise JobValidationError(
"Repository '%s' has no doc version" % repo_name)
doc_repository = repo.doc_repository
if os_name not in build_file.targets.keys():
raise JobValidationError(
"Invalid OS name '%s' " % os_name +
'choose one of the following: ' +
', '.join(sorted(build_file.targets.keys())))
if os_code_name not in build_file.targets[os_name].keys():
raise JobValidationError(
"Invalid OS code name '%s' " % os_code_name +
'choose one of the following: ' +
', '.join(sorted(build_file.targets[os_name].keys())))
if arch not in build_file.targets[os_name][os_code_name]:
raise JobValidationError(
"Invalid architecture '%s' " % arch +
'choose one of the following: %s' % ', '.join(sorted(
build_file.targets[os_name][os_code_name])))
if dist_cache is None and build_file.notify_maintainers:
dist_cache = get_distribution_cache(index, rosdistro_name)
if jenkins is None:
from ros_buildfarm.jenkins import connect
jenkins = connect(config.jenkins_url)
if views is None:
view_name = get_doc_view_name(
rosdistro_name, doc_build_name)
configure_doc_view(jenkins, view_name, dry_run=dry_run)
job_name = get_doc_job_name(
rosdistro_name, doc_build_name,
repo_name, os_name, os_code_name, arch)
job_config = _get_doc_job_config(
config, config_url, rosdistro_name, doc_build_name,
build_file, os_name, os_code_name, arch, doc_repository,
repo_name, dist_cache=dist_cache, is_disabled=is_disabled)
# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
if isinstance(jenkins, object) and jenkins is not False:
from ros_buildfarm.jenkins import configure_job
configure_job(jenkins, job_name, job_config, dry_run=dry_run)
return job_name, job_config | def function[configure_doc_job, parameter[config_url, rosdistro_name, doc_build_name, repo_name, os_name, os_code_name, arch, config, build_file, index, dist_file, dist_cache, jenkins, views, is_disabled, groovy_script, doc_repository, dry_run]]:
constant[
Configure a single Jenkins doc job.
This includes the following steps:
- clone the doc repository to use
- clone the ros_buildfarm repository
- write the distribution repository keys into files
- invoke the run_doc_job.py script
]
if compare[name[config] is constant[None]] begin[:]
variable[config] assign[=] call[name[get_config_index], parameter[name[config_url]]]
if compare[name[build_file] is constant[None]] begin[:]
variable[build_files] assign[=] call[name[get_doc_build_files], parameter[name[config], name[rosdistro_name]]]
variable[build_file] assign[=] call[name[build_files]][name[doc_build_name]]
if compare[name[index] is constant[None]] begin[:]
variable[index] assign[=] call[name[get_index], parameter[name[config].rosdistro_index_url]]
if compare[name[dist_file] is constant[None]] begin[:]
variable[dist_file] assign[=] call[name[get_distribution_file], parameter[name[index], name[rosdistro_name], name[build_file]]]
if <ast.UnaryOp object at 0x7da1b009bcd0> begin[:]
<ast.Raise object at 0x7da1b009bd30>
variable[repo_names] assign[=] call[name[dist_file].repositories.keys, parameter[]]
if compare[name[repo_name] is_not constant[None]] begin[:]
if compare[name[repo_name] <ast.NotIn object at 0x7da2590d7190> name[repo_names]] begin[:]
<ast.Raise object at 0x7da1b009ba90>
variable[repo] assign[=] call[name[dist_file].repositories][name[repo_name]]
if <ast.UnaryOp object at 0x7da1b0098160> begin[:]
<ast.Raise object at 0x7da1b0098070>
if <ast.UnaryOp object at 0x7da1b0098340> begin[:]
<ast.Raise object at 0x7da1b0098a90>
variable[doc_repository] assign[=] name[repo].doc_repository
if compare[name[os_name] <ast.NotIn object at 0x7da2590d7190> call[name[build_file].targets.keys, parameter[]]] begin[:]
<ast.Raise object at 0x7da1b00985e0>
if compare[name[os_code_name] <ast.NotIn object at 0x7da2590d7190> call[call[name[build_file].targets][name[os_name]].keys, parameter[]]] begin[:]
<ast.Raise object at 0x7da1b0098b20>
if compare[name[arch] <ast.NotIn object at 0x7da2590d7190> call[call[name[build_file].targets][name[os_name]]][name[os_code_name]]] begin[:]
<ast.Raise object at 0x7da1b009a620>
if <ast.BoolOp object at 0x7da1b009a9b0> begin[:]
variable[dist_cache] assign[=] call[name[get_distribution_cache], parameter[name[index], name[rosdistro_name]]]
if compare[name[jenkins] is constant[None]] begin[:]
from relative_module[ros_buildfarm.jenkins] import module[connect]
variable[jenkins] assign[=] call[name[connect], parameter[name[config].jenkins_url]]
if compare[name[views] is constant[None]] begin[:]
variable[view_name] assign[=] call[name[get_doc_view_name], parameter[name[rosdistro_name], name[doc_build_name]]]
call[name[configure_doc_view], parameter[name[jenkins], name[view_name]]]
variable[job_name] assign[=] call[name[get_doc_job_name], parameter[name[rosdistro_name], name[doc_build_name], name[repo_name], name[os_name], name[os_code_name], name[arch]]]
variable[job_config] assign[=] call[name[_get_doc_job_config], parameter[name[config], name[config_url], name[rosdistro_name], name[doc_build_name], name[build_file], name[os_name], name[os_code_name], name[arch], name[doc_repository], name[repo_name]]]
if <ast.BoolOp object at 0x7da1b009b640> begin[:]
from relative_module[ros_buildfarm.jenkins] import module[configure_job]
call[name[configure_job], parameter[name[jenkins], name[job_name], name[job_config]]]
return[tuple[[<ast.Name object at 0x7da1b009b7c0>, <ast.Name object at 0x7da1b009b880>]]] | keyword[def] identifier[configure_doc_job] (
identifier[config_url] , identifier[rosdistro_name] , identifier[doc_build_name] ,
identifier[repo_name] , identifier[os_name] , identifier[os_code_name] , identifier[arch] ,
identifier[config] = keyword[None] , identifier[build_file] = keyword[None] ,
identifier[index] = keyword[None] , identifier[dist_file] = keyword[None] , identifier[dist_cache] = keyword[None] ,
identifier[jenkins] = keyword[None] , identifier[views] = keyword[None] ,
identifier[is_disabled] = keyword[False] ,
identifier[groovy_script] = keyword[None] ,
identifier[doc_repository] = keyword[None] ,
identifier[dry_run] = keyword[False] ):
literal[string]
keyword[if] identifier[config] keyword[is] keyword[None] :
identifier[config] = identifier[get_config_index] ( identifier[config_url] )
keyword[if] identifier[build_file] keyword[is] keyword[None] :
identifier[build_files] = identifier[get_doc_build_files] ( identifier[config] , identifier[rosdistro_name] )
identifier[build_file] = identifier[build_files] [ identifier[doc_build_name] ]
keyword[if] identifier[index] keyword[is] keyword[None] :
identifier[index] = identifier[get_index] ( identifier[config] . identifier[rosdistro_index_url] )
keyword[if] identifier[dist_file] keyword[is] keyword[None] :
identifier[dist_file] = identifier[get_distribution_file] ( identifier[index] , identifier[rosdistro_name] , identifier[build_file] )
keyword[if] keyword[not] identifier[dist_file] :
keyword[raise] identifier[JobValidationError] (
literal[string] )
identifier[repo_names] = identifier[dist_file] . identifier[repositories] . identifier[keys] ()
keyword[if] identifier[repo_name] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[repo_name] keyword[not] keyword[in] identifier[repo_names] :
keyword[raise] identifier[JobValidationError] (
literal[string] % identifier[repo_name] +
literal[string] %
literal[string] . identifier[join] ( identifier[sorted] ( identifier[repo_names] )))
identifier[repo] = identifier[dist_file] . identifier[repositories] [ identifier[repo_name] ]
keyword[if] keyword[not] identifier[repo] . identifier[doc_repository] :
keyword[raise] identifier[JobValidationError] (
literal[string] % identifier[repo_name] )
keyword[if] keyword[not] identifier[repo] . identifier[doc_repository] . identifier[version] :
keyword[raise] identifier[JobValidationError] (
literal[string] % identifier[repo_name] )
identifier[doc_repository] = identifier[repo] . identifier[doc_repository]
keyword[if] identifier[os_name] keyword[not] keyword[in] identifier[build_file] . identifier[targets] . identifier[keys] ():
keyword[raise] identifier[JobValidationError] (
literal[string] % identifier[os_name] +
literal[string] +
literal[string] . identifier[join] ( identifier[sorted] ( identifier[build_file] . identifier[targets] . identifier[keys] ())))
keyword[if] identifier[os_code_name] keyword[not] keyword[in] identifier[build_file] . identifier[targets] [ identifier[os_name] ]. identifier[keys] ():
keyword[raise] identifier[JobValidationError] (
literal[string] % identifier[os_code_name] +
literal[string] +
literal[string] . identifier[join] ( identifier[sorted] ( identifier[build_file] . identifier[targets] [ identifier[os_name] ]. identifier[keys] ())))
keyword[if] identifier[arch] keyword[not] keyword[in] identifier[build_file] . identifier[targets] [ identifier[os_name] ][ identifier[os_code_name] ]:
keyword[raise] identifier[JobValidationError] (
literal[string] % identifier[arch] +
literal[string] % literal[string] . identifier[join] ( identifier[sorted] (
identifier[build_file] . identifier[targets] [ identifier[os_name] ][ identifier[os_code_name] ])))
keyword[if] identifier[dist_cache] keyword[is] keyword[None] keyword[and] identifier[build_file] . identifier[notify_maintainers] :
identifier[dist_cache] = identifier[get_distribution_cache] ( identifier[index] , identifier[rosdistro_name] )
keyword[if] identifier[jenkins] keyword[is] keyword[None] :
keyword[from] identifier[ros_buildfarm] . identifier[jenkins] keyword[import] identifier[connect]
identifier[jenkins] = identifier[connect] ( identifier[config] . identifier[jenkins_url] )
keyword[if] identifier[views] keyword[is] keyword[None] :
identifier[view_name] = identifier[get_doc_view_name] (
identifier[rosdistro_name] , identifier[doc_build_name] )
identifier[configure_doc_view] ( identifier[jenkins] , identifier[view_name] , identifier[dry_run] = identifier[dry_run] )
identifier[job_name] = identifier[get_doc_job_name] (
identifier[rosdistro_name] , identifier[doc_build_name] ,
identifier[repo_name] , identifier[os_name] , identifier[os_code_name] , identifier[arch] )
identifier[job_config] = identifier[_get_doc_job_config] (
identifier[config] , identifier[config_url] , identifier[rosdistro_name] , identifier[doc_build_name] ,
identifier[build_file] , identifier[os_name] , identifier[os_code_name] , identifier[arch] , identifier[doc_repository] ,
identifier[repo_name] , identifier[dist_cache] = identifier[dist_cache] , identifier[is_disabled] = identifier[is_disabled] )
keyword[if] identifier[isinstance] ( identifier[jenkins] , identifier[object] ) keyword[and] identifier[jenkins] keyword[is] keyword[not] keyword[False] :
keyword[from] identifier[ros_buildfarm] . identifier[jenkins] keyword[import] identifier[configure_job]
identifier[configure_job] ( identifier[jenkins] , identifier[job_name] , identifier[job_config] , identifier[dry_run] = identifier[dry_run] )
keyword[return] identifier[job_name] , identifier[job_config] | def configure_doc_job(config_url, rosdistro_name, doc_build_name, repo_name, os_name, os_code_name, arch, config=None, build_file=None, index=None, dist_file=None, dist_cache=None, jenkins=None, views=None, is_disabled=False, groovy_script=None, doc_repository=None, dry_run=False):
"""
Configure a single Jenkins doc job.
This includes the following steps:
- clone the doc repository to use
- clone the ros_buildfarm repository
- write the distribution repository keys into files
- invoke the run_doc_job.py script
"""
if config is None:
config = get_config_index(config_url) # depends on [control=['if'], data=['config']]
if build_file is None:
build_files = get_doc_build_files(config, rosdistro_name)
build_file = build_files[doc_build_name] # depends on [control=['if'], data=['build_file']]
if index is None:
index = get_index(config.rosdistro_index_url) # depends on [control=['if'], data=['index']]
if dist_file is None:
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
raise JobValidationError('No distribution file matches the build file') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['dist_file']]
repo_names = dist_file.repositories.keys()
if repo_name is not None:
if repo_name not in repo_names:
raise JobValidationError("Invalid repository name '%s' " % repo_name + 'choose one of the following: %s' % ', '.join(sorted(repo_names))) # depends on [control=['if'], data=['repo_name', 'repo_names']]
repo = dist_file.repositories[repo_name]
if not repo.doc_repository:
raise JobValidationError("Repository '%s' has no doc section" % repo_name) # depends on [control=['if'], data=[]]
if not repo.doc_repository.version:
raise JobValidationError("Repository '%s' has no doc version" % repo_name) # depends on [control=['if'], data=[]]
doc_repository = repo.doc_repository # depends on [control=['if'], data=['repo_name']]
if os_name not in build_file.targets.keys():
raise JobValidationError("Invalid OS name '%s' " % os_name + 'choose one of the following: ' + ', '.join(sorted(build_file.targets.keys()))) # depends on [control=['if'], data=['os_name']]
if os_code_name not in build_file.targets[os_name].keys():
raise JobValidationError("Invalid OS code name '%s' " % os_code_name + 'choose one of the following: ' + ', '.join(sorted(build_file.targets[os_name].keys()))) # depends on [control=['if'], data=['os_code_name']]
if arch not in build_file.targets[os_name][os_code_name]:
raise JobValidationError("Invalid architecture '%s' " % arch + 'choose one of the following: %s' % ', '.join(sorted(build_file.targets[os_name][os_code_name]))) # depends on [control=['if'], data=['arch']]
if dist_cache is None and build_file.notify_maintainers:
dist_cache = get_distribution_cache(index, rosdistro_name) # depends on [control=['if'], data=[]]
if jenkins is None:
from ros_buildfarm.jenkins import connect
jenkins = connect(config.jenkins_url) # depends on [control=['if'], data=['jenkins']]
if views is None:
view_name = get_doc_view_name(rosdistro_name, doc_build_name)
configure_doc_view(jenkins, view_name, dry_run=dry_run) # depends on [control=['if'], data=[]]
job_name = get_doc_job_name(rosdistro_name, doc_build_name, repo_name, os_name, os_code_name, arch)
job_config = _get_doc_job_config(config, config_url, rosdistro_name, doc_build_name, build_file, os_name, os_code_name, arch, doc_repository, repo_name, dist_cache=dist_cache, is_disabled=is_disabled)
# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
if isinstance(jenkins, object) and jenkins is not False:
from ros_buildfarm.jenkins import configure_job
configure_job(jenkins, job_name, job_config, dry_run=dry_run) # depends on [control=['if'], data=[]]
return (job_name, job_config) |
def disassemble(self):
"""Disassemble serialized protocol buffers file.
"""
ser_pb = open(self.input_file, 'rb').read() # Read serialized pb file
fd = FileDescriptorProto()
fd.ParseFromString(ser_pb)
self.name = fd.name
self._print('// Reversed by pbd (https://github.com/rsc-dev/pbd)')
self._print('syntax = "proto2";')
self._print('')
if len(fd.package) > 0:
self._print('package {};'.format(fd.package))
self.package = fd.package
else:
self._print('// Package not defined')
self._walk(fd) | def function[disassemble, parameter[self]]:
constant[Disassemble serialized protocol buffers file.
]
variable[ser_pb] assign[=] call[call[name[open], parameter[name[self].input_file, constant[rb]]].read, parameter[]]
variable[fd] assign[=] call[name[FileDescriptorProto], parameter[]]
call[name[fd].ParseFromString, parameter[name[ser_pb]]]
name[self].name assign[=] name[fd].name
call[name[self]._print, parameter[constant[// Reversed by pbd (https://github.com/rsc-dev/pbd)]]]
call[name[self]._print, parameter[constant[syntax = "proto2";]]]
call[name[self]._print, parameter[constant[]]]
if compare[call[name[len], parameter[name[fd].package]] greater[>] constant[0]] begin[:]
call[name[self]._print, parameter[call[constant[package {};].format, parameter[name[fd].package]]]]
name[self].package assign[=] name[fd].package
call[name[self]._walk, parameter[name[fd]]] | keyword[def] identifier[disassemble] ( identifier[self] ):
literal[string]
identifier[ser_pb] = identifier[open] ( identifier[self] . identifier[input_file] , literal[string] ). identifier[read] ()
identifier[fd] = identifier[FileDescriptorProto] ()
identifier[fd] . identifier[ParseFromString] ( identifier[ser_pb] )
identifier[self] . identifier[name] = identifier[fd] . identifier[name]
identifier[self] . identifier[_print] ( literal[string] )
identifier[self] . identifier[_print] ( literal[string] )
identifier[self] . identifier[_print] ( literal[string] )
keyword[if] identifier[len] ( identifier[fd] . identifier[package] )> literal[int] :
identifier[self] . identifier[_print] ( literal[string] . identifier[format] ( identifier[fd] . identifier[package] ))
identifier[self] . identifier[package] = identifier[fd] . identifier[package]
keyword[else] :
identifier[self] . identifier[_print] ( literal[string] )
identifier[self] . identifier[_walk] ( identifier[fd] ) | def disassemble(self):
"""Disassemble serialized protocol buffers file.
"""
ser_pb = open(self.input_file, 'rb').read() # Read serialized pb file
fd = FileDescriptorProto()
fd.ParseFromString(ser_pb)
self.name = fd.name
self._print('// Reversed by pbd (https://github.com/rsc-dev/pbd)')
self._print('syntax = "proto2";')
self._print('')
if len(fd.package) > 0:
self._print('package {};'.format(fd.package))
self.package = fd.package # depends on [control=['if'], data=[]]
else:
self._print('// Package not defined')
self._walk(fd) |
def _train_model(self, train_set, train_ref=None, validation_set=None,
Validation_ref=None):
"""Train the model.
:param train_set: training set
:param validation_set: validation set. optional, default None
:return: self
"""
pbar = tqdm(range(self.num_epochs))
for i in pbar:
self._run_train_step(train_set)
if validation_set is not None:
feed = self._create_feed_dict(validation_set)
err = tf_utils.run_summaries(
self.tf_session, self.tf_merged_summaries,
self.tf_summary_writer, i, feed, self.cost)
pbar.set_description("Reconstruction loss: %s" % (err)) | def function[_train_model, parameter[self, train_set, train_ref, validation_set, Validation_ref]]:
constant[Train the model.
:param train_set: training set
:param validation_set: validation set. optional, default None
:return: self
]
variable[pbar] assign[=] call[name[tqdm], parameter[call[name[range], parameter[name[self].num_epochs]]]]
for taget[name[i]] in starred[name[pbar]] begin[:]
call[name[self]._run_train_step, parameter[name[train_set]]]
if compare[name[validation_set] is_not constant[None]] begin[:]
variable[feed] assign[=] call[name[self]._create_feed_dict, parameter[name[validation_set]]]
variable[err] assign[=] call[name[tf_utils].run_summaries, parameter[name[self].tf_session, name[self].tf_merged_summaries, name[self].tf_summary_writer, name[i], name[feed], name[self].cost]]
call[name[pbar].set_description, parameter[binary_operation[constant[Reconstruction loss: %s] <ast.Mod object at 0x7da2590d6920> name[err]]]] | keyword[def] identifier[_train_model] ( identifier[self] , identifier[train_set] , identifier[train_ref] = keyword[None] , identifier[validation_set] = keyword[None] ,
identifier[Validation_ref] = keyword[None] ):
literal[string]
identifier[pbar] = identifier[tqdm] ( identifier[range] ( identifier[self] . identifier[num_epochs] ))
keyword[for] identifier[i] keyword[in] identifier[pbar] :
identifier[self] . identifier[_run_train_step] ( identifier[train_set] )
keyword[if] identifier[validation_set] keyword[is] keyword[not] keyword[None] :
identifier[feed] = identifier[self] . identifier[_create_feed_dict] ( identifier[validation_set] )
identifier[err] = identifier[tf_utils] . identifier[run_summaries] (
identifier[self] . identifier[tf_session] , identifier[self] . identifier[tf_merged_summaries] ,
identifier[self] . identifier[tf_summary_writer] , identifier[i] , identifier[feed] , identifier[self] . identifier[cost] )
identifier[pbar] . identifier[set_description] ( literal[string] %( identifier[err] )) | def _train_model(self, train_set, train_ref=None, validation_set=None, Validation_ref=None):
"""Train the model.
:param train_set: training set
:param validation_set: validation set. optional, default None
:return: self
"""
pbar = tqdm(range(self.num_epochs))
for i in pbar:
self._run_train_step(train_set)
if validation_set is not None:
feed = self._create_feed_dict(validation_set)
err = tf_utils.run_summaries(self.tf_session, self.tf_merged_summaries, self.tf_summary_writer, i, feed, self.cost)
pbar.set_description('Reconstruction loss: %s' % err) # depends on [control=['if'], data=['validation_set']] # depends on [control=['for'], data=['i']] |
def compose_containerized_launch_cmd(self, filepath, engine_dir, container_image):
"""Reads the json contents from filepath and uses that to compose the engine launch command.
Notes: Add this to the ipengine launch for debug logs :
--log-to-file --debug
Args:
filepath (str): Path to the engine file
engine_dir (str): CWD for the engines .
container_image (str): The container to be used to launch workers
"""
self.engine_file = os.path.expanduser(filepath)
uid = str(uuid.uuid4())
engine_json = None
try:
with open(self.engine_file, 'r') as f:
engine_json = f.read()
except OSError as e:
logger.error("Could not open engine_json : ", self.engine_file)
raise e
return """mkdir -p {0}
cd {0}
cat <<EOF > ipengine.{uid}.json
{1}
EOF
DOCKER_ID=$(docker create --network host {2} ipengine --file=/tmp/ipengine.{uid}.json) {debug_option}
docker cp ipengine.{uid}.json $DOCKER_ID:/tmp/ipengine.{uid}.json
# Copy current dir to the working directory
DOCKER_CWD=$(docker image inspect --format='{{{{.Config.WorkingDir}}}}' {2})
docker cp -a . $DOCKER_ID:$DOCKER_CWD
docker start $DOCKER_ID
at_exit() {{
echo "Caught SIGTERM/SIGINT signal!"
docker stop $DOCKER_ID
}}
trap at_exit SIGTERM SIGINT
sleep infinity
""".format(engine_dir, engine_json, container_image, debug_option=self.debug_option, uid=uid) | def function[compose_containerized_launch_cmd, parameter[self, filepath, engine_dir, container_image]]:
constant[Reads the json contents from filepath and uses that to compose the engine launch command.
Notes: Add this to the ipengine launch for debug logs :
--log-to-file --debug
Args:
filepath (str): Path to the engine file
engine_dir (str): CWD for the engines .
container_image (str): The container to be used to launch workers
]
name[self].engine_file assign[=] call[name[os].path.expanduser, parameter[name[filepath]]]
variable[uid] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]]
variable[engine_json] assign[=] constant[None]
<ast.Try object at 0x7da207f027d0>
return[call[constant[mkdir -p {0}
cd {0}
cat <<EOF > ipengine.{uid}.json
{1}
EOF
DOCKER_ID=$(docker create --network host {2} ipengine --file=/tmp/ipengine.{uid}.json) {debug_option}
docker cp ipengine.{uid}.json $DOCKER_ID:/tmp/ipengine.{uid}.json
# Copy current dir to the working directory
DOCKER_CWD=$(docker image inspect --format='{{{{.Config.WorkingDir}}}}' {2})
docker cp -a . $DOCKER_ID:$DOCKER_CWD
docker start $DOCKER_ID
at_exit() {{
echo "Caught SIGTERM/SIGINT signal!"
docker stop $DOCKER_ID
}}
trap at_exit SIGTERM SIGINT
sleep infinity
].format, parameter[name[engine_dir], name[engine_json], name[container_image]]]] | keyword[def] identifier[compose_containerized_launch_cmd] ( identifier[self] , identifier[filepath] , identifier[engine_dir] , identifier[container_image] ):
literal[string]
identifier[self] . identifier[engine_file] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[filepath] )
identifier[uid] = identifier[str] ( identifier[uuid] . identifier[uuid4] ())
identifier[engine_json] = keyword[None]
keyword[try] :
keyword[with] identifier[open] ( identifier[self] . identifier[engine_file] , literal[string] ) keyword[as] identifier[f] :
identifier[engine_json] = identifier[f] . identifier[read] ()
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] , identifier[self] . identifier[engine_file] )
keyword[raise] identifier[e]
keyword[return] literal[string] . identifier[format] ( identifier[engine_dir] , identifier[engine_json] , identifier[container_image] , identifier[debug_option] = identifier[self] . identifier[debug_option] , identifier[uid] = identifier[uid] ) | def compose_containerized_launch_cmd(self, filepath, engine_dir, container_image):
"""Reads the json contents from filepath and uses that to compose the engine launch command.
Notes: Add this to the ipengine launch for debug logs :
--log-to-file --debug
Args:
filepath (str): Path to the engine file
engine_dir (str): CWD for the engines .
container_image (str): The container to be used to launch workers
"""
self.engine_file = os.path.expanduser(filepath)
uid = str(uuid.uuid4())
engine_json = None
try:
with open(self.engine_file, 'r') as f:
engine_json = f.read() # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except OSError as e:
logger.error('Could not open engine_json : ', self.engine_file)
raise e # depends on [control=['except'], data=['e']]
return 'mkdir -p {0}\ncd {0}\ncat <<EOF > ipengine.{uid}.json\n{1}\nEOF\n\nDOCKER_ID=$(docker create --network host {2} ipengine --file=/tmp/ipengine.{uid}.json) {debug_option}\ndocker cp ipengine.{uid}.json $DOCKER_ID:/tmp/ipengine.{uid}.json\n\n# Copy current dir to the working directory\nDOCKER_CWD=$(docker image inspect --format=\'{{{{.Config.WorkingDir}}}}\' {2})\ndocker cp -a . $DOCKER_ID:$DOCKER_CWD\ndocker start $DOCKER_ID\n\nat_exit() {{\n echo "Caught SIGTERM/SIGINT signal!"\n docker stop $DOCKER_ID\n}}\n\ntrap at_exit SIGTERM SIGINT\nsleep infinity\n'.format(engine_dir, engine_json, container_image, debug_option=self.debug_option, uid=uid) |
def update_user(email, profile="splunk", **kwargs):
'''
Create a splunk user by email
CLI Example:
salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User"
'''
client = _get_splunk(profile)
email = email.lower()
user = list_users(profile).get(email)
if not user:
log.error('Failed to retrieve user %s', email)
return False
property_map = {}
for field in ALLOWED_FIELDS_FOR_MODIFICATION:
if kwargs.get(field):
property_map[field] = kwargs.get(field)
# update
kwargs = {}
roles = [role.name for role in user.role_entities]
for k, v in property_map.items():
resource_value = user[k]
if resource_value is not None:
# you can't update the username in update api call
if k.lower() == 'name':
continue
if k.lower() == 'roles':
if isinstance(v, six.string_types):
v = v.split(',')
if set(roles) != set(v):
kwargs['roles'] = list(set(v))
elif resource_value != v:
kwargs[k] = v
if kwargs:
user.update(**kwargs).refresh()
fields_modified = {}
for field in ALLOWED_FIELDS_FOR_MODIFICATION:
fields_modified[field] = user[field]
else:
#succeeded, no change
return True | def function[update_user, parameter[email, profile]]:
constant[
Create a splunk user by email
CLI Example:
salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User"
]
variable[client] assign[=] call[name[_get_splunk], parameter[name[profile]]]
variable[email] assign[=] call[name[email].lower, parameter[]]
variable[user] assign[=] call[call[name[list_users], parameter[name[profile]]].get, parameter[name[email]]]
if <ast.UnaryOp object at 0x7da18f00c4f0> begin[:]
call[name[log].error, parameter[constant[Failed to retrieve user %s], name[email]]]
return[constant[False]]
variable[property_map] assign[=] dictionary[[], []]
for taget[name[field]] in starred[name[ALLOWED_FIELDS_FOR_MODIFICATION]] begin[:]
if call[name[kwargs].get, parameter[name[field]]] begin[:]
call[name[property_map]][name[field]] assign[=] call[name[kwargs].get, parameter[name[field]]]
variable[kwargs] assign[=] dictionary[[], []]
variable[roles] assign[=] <ast.ListComp object at 0x7da18f00ccd0>
for taget[tuple[[<ast.Name object at 0x7da18f00eb30>, <ast.Name object at 0x7da18f00dae0>]]] in starred[call[name[property_map].items, parameter[]]] begin[:]
variable[resource_value] assign[=] call[name[user]][name[k]]
if compare[name[resource_value] is_not constant[None]] begin[:]
if compare[call[name[k].lower, parameter[]] equal[==] constant[name]] begin[:]
continue
if compare[call[name[k].lower, parameter[]] equal[==] constant[roles]] begin[:]
if call[name[isinstance], parameter[name[v], name[six].string_types]] begin[:]
variable[v] assign[=] call[name[v].split, parameter[constant[,]]]
if compare[call[name[set], parameter[name[roles]]] not_equal[!=] call[name[set], parameter[name[v]]]] begin[:]
call[name[kwargs]][constant[roles]] assign[=] call[name[list], parameter[call[name[set], parameter[name[v]]]]]
if name[kwargs] begin[:]
call[call[name[user].update, parameter[]].refresh, parameter[]]
variable[fields_modified] assign[=] dictionary[[], []]
for taget[name[field]] in starred[name[ALLOWED_FIELDS_FOR_MODIFICATION]] begin[:]
call[name[fields_modified]][name[field]] assign[=] call[name[user]][name[field]] | keyword[def] identifier[update_user] ( identifier[email] , identifier[profile] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[client] = identifier[_get_splunk] ( identifier[profile] )
identifier[email] = identifier[email] . identifier[lower] ()
identifier[user] = identifier[list_users] ( identifier[profile] ). identifier[get] ( identifier[email] )
keyword[if] keyword[not] identifier[user] :
identifier[log] . identifier[error] ( literal[string] , identifier[email] )
keyword[return] keyword[False]
identifier[property_map] ={}
keyword[for] identifier[field] keyword[in] identifier[ALLOWED_FIELDS_FOR_MODIFICATION] :
keyword[if] identifier[kwargs] . identifier[get] ( identifier[field] ):
identifier[property_map] [ identifier[field] ]= identifier[kwargs] . identifier[get] ( identifier[field] )
identifier[kwargs] ={}
identifier[roles] =[ identifier[role] . identifier[name] keyword[for] identifier[role] keyword[in] identifier[user] . identifier[role_entities] ]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[property_map] . identifier[items] ():
identifier[resource_value] = identifier[user] [ identifier[k] ]
keyword[if] identifier[resource_value] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[k] . identifier[lower] ()== literal[string] :
keyword[continue]
keyword[if] identifier[k] . identifier[lower] ()== literal[string] :
keyword[if] identifier[isinstance] ( identifier[v] , identifier[six] . identifier[string_types] ):
identifier[v] = identifier[v] . identifier[split] ( literal[string] )
keyword[if] identifier[set] ( identifier[roles] )!= identifier[set] ( identifier[v] ):
identifier[kwargs] [ literal[string] ]= identifier[list] ( identifier[set] ( identifier[v] ))
keyword[elif] identifier[resource_value] != identifier[v] :
identifier[kwargs] [ identifier[k] ]= identifier[v]
keyword[if] identifier[kwargs] :
identifier[user] . identifier[update] (** identifier[kwargs] ). identifier[refresh] ()
identifier[fields_modified] ={}
keyword[for] identifier[field] keyword[in] identifier[ALLOWED_FIELDS_FOR_MODIFICATION] :
identifier[fields_modified] [ identifier[field] ]= identifier[user] [ identifier[field] ]
keyword[else] :
keyword[return] keyword[True] | def update_user(email, profile='splunk', **kwargs):
"""
Create a splunk user by email
CLI Example:
salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User"
"""
client = _get_splunk(profile)
email = email.lower()
user = list_users(profile).get(email)
if not user:
log.error('Failed to retrieve user %s', email)
return False # depends on [control=['if'], data=[]]
property_map = {}
for field in ALLOWED_FIELDS_FOR_MODIFICATION:
if kwargs.get(field):
property_map[field] = kwargs.get(field) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
# update
kwargs = {}
roles = [role.name for role in user.role_entities]
for (k, v) in property_map.items():
resource_value = user[k]
if resource_value is not None:
# you can't update the username in update api call
if k.lower() == 'name':
continue # depends on [control=['if'], data=[]]
if k.lower() == 'roles':
if isinstance(v, six.string_types):
v = v.split(',') # depends on [control=['if'], data=[]]
if set(roles) != set(v):
kwargs['roles'] = list(set(v)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif resource_value != v:
kwargs[k] = v # depends on [control=['if'], data=['v']] # depends on [control=['if'], data=['resource_value']] # depends on [control=['for'], data=[]]
if kwargs:
user.update(**kwargs).refresh()
fields_modified = {}
for field in ALLOWED_FIELDS_FOR_MODIFICATION:
fields_modified[field] = user[field] # depends on [control=['for'], data=['field']] # depends on [control=['if'], data=[]]
else:
#succeeded, no change
return True |
def get_runner(worker_type, max_workers=None, workers_window=None):
"""returns a runner callable.
:param str worker_type: one of `simple` or `thread`.
:param int max_workers: max workers the runner can spawn in parallel.
:param in workers_window: max number of jobs waiting to be done by the
workers at any given time.
:return:
"""
worker_func = _runners_mapping[worker_type]
return partial(
worker_func, max_workers=max_workers, workers_window=workers_window
) | def function[get_runner, parameter[worker_type, max_workers, workers_window]]:
constant[returns a runner callable.
:param str worker_type: one of `simple` or `thread`.
:param int max_workers: max workers the runner can spawn in parallel.
:param in workers_window: max number of jobs waiting to be done by the
workers at any given time.
:return:
]
variable[worker_func] assign[=] call[name[_runners_mapping]][name[worker_type]]
return[call[name[partial], parameter[name[worker_func]]]] | keyword[def] identifier[get_runner] ( identifier[worker_type] , identifier[max_workers] = keyword[None] , identifier[workers_window] = keyword[None] ):
literal[string]
identifier[worker_func] = identifier[_runners_mapping] [ identifier[worker_type] ]
keyword[return] identifier[partial] (
identifier[worker_func] , identifier[max_workers] = identifier[max_workers] , identifier[workers_window] = identifier[workers_window]
) | def get_runner(worker_type, max_workers=None, workers_window=None):
"""returns a runner callable.
:param str worker_type: one of `simple` or `thread`.
:param int max_workers: max workers the runner can spawn in parallel.
:param in workers_window: max number of jobs waiting to be done by the
workers at any given time.
:return:
"""
worker_func = _runners_mapping[worker_type]
return partial(worker_func, max_workers=max_workers, workers_window=workers_window) |
def get_span_char_height(span, row_heights):
"""
Get the height of a span in the number of newlines it fills.
Parameters
----------
span : list of list of int
A list of [row, column] pairs that make up the span
row_heights : list of int
A list of the number of newlines for each row in the table
Returns
-------
total_height : int
The height of the span in number of newlines
"""
start_row = span[0][0]
row_count = get_span_row_count(span)
total_height = 0
for i in range(start_row, start_row + row_count):
total_height += row_heights[i]
total_height += row_count - 1
return total_height | def function[get_span_char_height, parameter[span, row_heights]]:
constant[
Get the height of a span in the number of newlines it fills.
Parameters
----------
span : list of list of int
A list of [row, column] pairs that make up the span
row_heights : list of int
A list of the number of newlines for each row in the table
Returns
-------
total_height : int
The height of the span in number of newlines
]
variable[start_row] assign[=] call[call[name[span]][constant[0]]][constant[0]]
variable[row_count] assign[=] call[name[get_span_row_count], parameter[name[span]]]
variable[total_height] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[start_row], binary_operation[name[start_row] + name[row_count]]]]] begin[:]
<ast.AugAssign object at 0x7da18c4ceb00>
<ast.AugAssign object at 0x7da18c4cebf0>
return[name[total_height]] | keyword[def] identifier[get_span_char_height] ( identifier[span] , identifier[row_heights] ):
literal[string]
identifier[start_row] = identifier[span] [ literal[int] ][ literal[int] ]
identifier[row_count] = identifier[get_span_row_count] ( identifier[span] )
identifier[total_height] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[start_row] , identifier[start_row] + identifier[row_count] ):
identifier[total_height] += identifier[row_heights] [ identifier[i] ]
identifier[total_height] += identifier[row_count] - literal[int]
keyword[return] identifier[total_height] | def get_span_char_height(span, row_heights):
"""
Get the height of a span in the number of newlines it fills.
Parameters
----------
span : list of list of int
A list of [row, column] pairs that make up the span
row_heights : list of int
A list of the number of newlines for each row in the table
Returns
-------
total_height : int
The height of the span in number of newlines
"""
start_row = span[0][0]
row_count = get_span_row_count(span)
total_height = 0
for i in range(start_row, start_row + row_count):
total_height += row_heights[i] # depends on [control=['for'], data=['i']]
total_height += row_count - 1
return total_height |
def sort(self, search):
"""
Add sorting information to the request.
"""
if self._sort:
search = search.sort(*self._sort)
return search | def function[sort, parameter[self, search]]:
constant[
Add sorting information to the request.
]
if name[self]._sort begin[:]
variable[search] assign[=] call[name[search].sort, parameter[<ast.Starred object at 0x7da1b2114520>]]
return[name[search]] | keyword[def] identifier[sort] ( identifier[self] , identifier[search] ):
literal[string]
keyword[if] identifier[self] . identifier[_sort] :
identifier[search] = identifier[search] . identifier[sort] (* identifier[self] . identifier[_sort] )
keyword[return] identifier[search] | def sort(self, search):
"""
Add sorting information to the request.
"""
if self._sort:
search = search.sort(*self._sort) # depends on [control=['if'], data=[]]
return search |
def getLinkerFlags(self, engineRoot, fmt, includeLibs=True):
"""
Constructs the linker flags string for building against this library
"""
components = self.resolveRoot(self.ldFlags, engineRoot)
if includeLibs == True:
components.extend(self.prefixedStrings(self.linkerDirPrefix, self.linkDirs, engineRoot))
components.extend(self.resolveRoot(self.libs, engineRoot))
return Utility.join(fmt.delim, components, fmt.quotes) | def function[getLinkerFlags, parameter[self, engineRoot, fmt, includeLibs]]:
constant[
Constructs the linker flags string for building against this library
]
variable[components] assign[=] call[name[self].resolveRoot, parameter[name[self].ldFlags, name[engineRoot]]]
if compare[name[includeLibs] equal[==] constant[True]] begin[:]
call[name[components].extend, parameter[call[name[self].prefixedStrings, parameter[name[self].linkerDirPrefix, name[self].linkDirs, name[engineRoot]]]]]
call[name[components].extend, parameter[call[name[self].resolveRoot, parameter[name[self].libs, name[engineRoot]]]]]
return[call[name[Utility].join, parameter[name[fmt].delim, name[components], name[fmt].quotes]]] | keyword[def] identifier[getLinkerFlags] ( identifier[self] , identifier[engineRoot] , identifier[fmt] , identifier[includeLibs] = keyword[True] ):
literal[string]
identifier[components] = identifier[self] . identifier[resolveRoot] ( identifier[self] . identifier[ldFlags] , identifier[engineRoot] )
keyword[if] identifier[includeLibs] == keyword[True] :
identifier[components] . identifier[extend] ( identifier[self] . identifier[prefixedStrings] ( identifier[self] . identifier[linkerDirPrefix] , identifier[self] . identifier[linkDirs] , identifier[engineRoot] ))
identifier[components] . identifier[extend] ( identifier[self] . identifier[resolveRoot] ( identifier[self] . identifier[libs] , identifier[engineRoot] ))
keyword[return] identifier[Utility] . identifier[join] ( identifier[fmt] . identifier[delim] , identifier[components] , identifier[fmt] . identifier[quotes] ) | def getLinkerFlags(self, engineRoot, fmt, includeLibs=True):
"""
Constructs the linker flags string for building against this library
"""
components = self.resolveRoot(self.ldFlags, engineRoot)
if includeLibs == True:
components.extend(self.prefixedStrings(self.linkerDirPrefix, self.linkDirs, engineRoot))
components.extend(self.resolveRoot(self.libs, engineRoot)) # depends on [control=['if'], data=[]]
return Utility.join(fmt.delim, components, fmt.quotes) |
def parse_pr_numbers(git_log_lines):
"""
Parse PR numbers from commit messages. At GitHub those have the format:
`here is the message (#1234)`
being `1234` the PR number.
"""
prs = []
for line in git_log_lines:
pr_number = parse_pr_number(line)
if pr_number:
prs.append(pr_number)
return prs | def function[parse_pr_numbers, parameter[git_log_lines]]:
constant[
Parse PR numbers from commit messages. At GitHub those have the format:
`here is the message (#1234)`
being `1234` the PR number.
]
variable[prs] assign[=] list[[]]
for taget[name[line]] in starred[name[git_log_lines]] begin[:]
variable[pr_number] assign[=] call[name[parse_pr_number], parameter[name[line]]]
if name[pr_number] begin[:]
call[name[prs].append, parameter[name[pr_number]]]
return[name[prs]] | keyword[def] identifier[parse_pr_numbers] ( identifier[git_log_lines] ):
literal[string]
identifier[prs] =[]
keyword[for] identifier[line] keyword[in] identifier[git_log_lines] :
identifier[pr_number] = identifier[parse_pr_number] ( identifier[line] )
keyword[if] identifier[pr_number] :
identifier[prs] . identifier[append] ( identifier[pr_number] )
keyword[return] identifier[prs] | def parse_pr_numbers(git_log_lines):
"""
Parse PR numbers from commit messages. At GitHub those have the format:
`here is the message (#1234)`
being `1234` the PR number.
"""
prs = []
for line in git_log_lines:
pr_number = parse_pr_number(line)
if pr_number:
prs.append(pr_number) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return prs |
def get_file(self, *args, **kwargs):
"""See :func:`get_file`"""
return get_file(*args, **self._merge_overrides(**kwargs)).run() | def function[get_file, parameter[self]]:
constant[See :func:`get_file`]
return[call[call[name[get_file], parameter[<ast.Starred object at 0x7da18dc99d50>]].run, parameter[]]] | keyword[def] identifier[get_file] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[get_file] (* identifier[args] ,** identifier[self] . identifier[_merge_overrides] (** identifier[kwargs] )). identifier[run] () | def get_file(self, *args, **kwargs):
"""See :func:`get_file`"""
return get_file(*args, **self._merge_overrides(**kwargs)).run() |
def from_tibiadata(cls, content):
"""Parses the content of the World Overview section from TibiaData.com into an object of this class.
Notes
-----
Due to TibiaData limitations, :py:attr:`record_count` and :py:attr:`record_date` are unavailable
object.
Additionally, the listed worlds in :py:attr:`worlds` lack some information when obtained from TibiaData.
The following attributes are unavailable:
- :py:attr:`ListedWorld.status` is always ``Online``.
- :py:attr:`ListedWorld.battleye_protected` is always ``False``
- :py:attr:`ListedWorld.battleye_date` is always ``None``.
Parameters
----------
content: :class:`str`
The JSON response of the worlds section in TibiaData.com
Returns
-------
:class:`WorldOverview`
An instance of this class containing only the available worlds.
Raises
------
InvalidContent
If the provided content is the json content of the world section in TibiaData.com
"""
json_data = parse_json(content)
try:
worlds_json = json_data["worlds"]["allworlds"]
world_overview = cls()
for world_json in worlds_json:
world = ListedWorld(world_json["name"], world_json["location"], world_json["worldtype"])
world._parse_additional_info(world_json["additional"])
world.online_count = world_json["online"]
world_overview.worlds.append(world)
return world_overview
except KeyError:
raise InvalidContent("content is not a worlds json response from TibiaData.com.") | def function[from_tibiadata, parameter[cls, content]]:
constant[Parses the content of the World Overview section from TibiaData.com into an object of this class.
Notes
-----
Due to TibiaData limitations, :py:attr:`record_count` and :py:attr:`record_date` are unavailable
object.
Additionally, the listed worlds in :py:attr:`worlds` lack some information when obtained from TibiaData.
The following attributes are unavailable:
- :py:attr:`ListedWorld.status` is always ``Online``.
- :py:attr:`ListedWorld.battleye_protected` is always ``False``
- :py:attr:`ListedWorld.battleye_date` is always ``None``.
Parameters
----------
content: :class:`str`
The JSON response of the worlds section in TibiaData.com
Returns
-------
:class:`WorldOverview`
An instance of this class containing only the available worlds.
Raises
------
InvalidContent
If the provided content is the json content of the world section in TibiaData.com
]
variable[json_data] assign[=] call[name[parse_json], parameter[name[content]]]
<ast.Try object at 0x7da207f03190> | keyword[def] identifier[from_tibiadata] ( identifier[cls] , identifier[content] ):
literal[string]
identifier[json_data] = identifier[parse_json] ( identifier[content] )
keyword[try] :
identifier[worlds_json] = identifier[json_data] [ literal[string] ][ literal[string] ]
identifier[world_overview] = identifier[cls] ()
keyword[for] identifier[world_json] keyword[in] identifier[worlds_json] :
identifier[world] = identifier[ListedWorld] ( identifier[world_json] [ literal[string] ], identifier[world_json] [ literal[string] ], identifier[world_json] [ literal[string] ])
identifier[world] . identifier[_parse_additional_info] ( identifier[world_json] [ literal[string] ])
identifier[world] . identifier[online_count] = identifier[world_json] [ literal[string] ]
identifier[world_overview] . identifier[worlds] . identifier[append] ( identifier[world] )
keyword[return] identifier[world_overview]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[InvalidContent] ( literal[string] ) | def from_tibiadata(cls, content):
"""Parses the content of the World Overview section from TibiaData.com into an object of this class.
Notes
-----
Due to TibiaData limitations, :py:attr:`record_count` and :py:attr:`record_date` are unavailable
object.
Additionally, the listed worlds in :py:attr:`worlds` lack some information when obtained from TibiaData.
The following attributes are unavailable:
- :py:attr:`ListedWorld.status` is always ``Online``.
- :py:attr:`ListedWorld.battleye_protected` is always ``False``
- :py:attr:`ListedWorld.battleye_date` is always ``None``.
Parameters
----------
content: :class:`str`
The JSON response of the worlds section in TibiaData.com
Returns
-------
:class:`WorldOverview`
An instance of this class containing only the available worlds.
Raises
------
InvalidContent
If the provided content is the json content of the world section in TibiaData.com
"""
json_data = parse_json(content)
try:
worlds_json = json_data['worlds']['allworlds']
world_overview = cls()
for world_json in worlds_json:
world = ListedWorld(world_json['name'], world_json['location'], world_json['worldtype'])
world._parse_additional_info(world_json['additional'])
world.online_count = world_json['online']
world_overview.worlds.append(world) # depends on [control=['for'], data=['world_json']]
return world_overview # depends on [control=['try'], data=[]]
except KeyError:
raise InvalidContent('content is not a worlds json response from TibiaData.com.') # depends on [control=['except'], data=[]] |
def set_feature(dev, feature, recipient = None):
r"""Set/enable a specific feature.
dev is the Device object to which the request will be
sent to.
feature is the feature you want to enable.
The recipient can be None (on which the status will be queried
from the device), an Interface or Endpoint descriptors.
"""
bmRequestType, wIndex = _parse_recipient(recipient, util.CTRL_OUT)
dev.ctrl_transfer(bmRequestType = bmRequestType,
bRequest = 0x03,
wIndex = wIndex,
wValue = feature) | def function[set_feature, parameter[dev, feature, recipient]]:
constant[Set/enable a specific feature.
dev is the Device object to which the request will be
sent to.
feature is the feature you want to enable.
The recipient can be None (on which the status will be queried
from the device), an Interface or Endpoint descriptors.
]
<ast.Tuple object at 0x7da18ede4ca0> assign[=] call[name[_parse_recipient], parameter[name[recipient], name[util].CTRL_OUT]]
call[name[dev].ctrl_transfer, parameter[]] | keyword[def] identifier[set_feature] ( identifier[dev] , identifier[feature] , identifier[recipient] = keyword[None] ):
literal[string]
identifier[bmRequestType] , identifier[wIndex] = identifier[_parse_recipient] ( identifier[recipient] , identifier[util] . identifier[CTRL_OUT] )
identifier[dev] . identifier[ctrl_transfer] ( identifier[bmRequestType] = identifier[bmRequestType] ,
identifier[bRequest] = literal[int] ,
identifier[wIndex] = identifier[wIndex] ,
identifier[wValue] = identifier[feature] ) | def set_feature(dev, feature, recipient=None):
"""Set/enable a specific feature.
dev is the Device object to which the request will be
sent to.
feature is the feature you want to enable.
The recipient can be None (on which the status will be queried
from the device), an Interface or Endpoint descriptors.
"""
(bmRequestType, wIndex) = _parse_recipient(recipient, util.CTRL_OUT)
dev.ctrl_transfer(bmRequestType=bmRequestType, bRequest=3, wIndex=wIndex, wValue=feature) |
def add_attribute_listener(self, attr_name, *args, **kwargs):
"""
Add a listener callback on a particular parameter.
The callback can be removed using :py:func:`remove_attribute_listener`.
.. note::
The :py:func:`on_attribute` decorator performs the same operation as this method, but with
a more elegant syntax. Use ``add_attribute_listener`` only if you will need to remove
the observer.
The callback function is invoked only when the parameter changes.
The callback arguments are:
* ``self`` - the associated :py:class:`Parameters`.
* ``attr_name`` - the parameter name. This can be used to infer which parameter has triggered
if the same callback is used for watching multiple parameters.
* ``msg`` - the new parameter value (so you don't need to re-query the vehicle object).
The example below shows how to get callbacks for the ``THR_MIN`` parameter:
.. code:: python
#Callback function for the THR_MIN parameter
def thr_min_callback(self, attr_name, value):
print " PARAMETER CALLBACK: %s changed to: %s" % (attr_name, value)
#Add observer for the vehicle's THR_MIN parameter
vehicle.parameters.add_attribute_listener('THR_MIN', thr_min_callback)
See :ref:`vehicle_state_observing_parameters` for more information.
:param String attr_name: The name of the parameter to watch (or '*' to watch all parameters).
:param args: The callback to invoke when a change in the parameter is detected.
"""
attr_name = attr_name.upper()
return super(Parameters, self).add_attribute_listener(attr_name, *args, **kwargs) | def function[add_attribute_listener, parameter[self, attr_name]]:
constant[
Add a listener callback on a particular parameter.
The callback can be removed using :py:func:`remove_attribute_listener`.
.. note::
The :py:func:`on_attribute` decorator performs the same operation as this method, but with
a more elegant syntax. Use ``add_attribute_listener`` only if you will need to remove
the observer.
The callback function is invoked only when the parameter changes.
The callback arguments are:
* ``self`` - the associated :py:class:`Parameters`.
* ``attr_name`` - the parameter name. This can be used to infer which parameter has triggered
if the same callback is used for watching multiple parameters.
* ``msg`` - the new parameter value (so you don't need to re-query the vehicle object).
The example below shows how to get callbacks for the ``THR_MIN`` parameter:
.. code:: python
#Callback function for the THR_MIN parameter
def thr_min_callback(self, attr_name, value):
print " PARAMETER CALLBACK: %s changed to: %s" % (attr_name, value)
#Add observer for the vehicle's THR_MIN parameter
vehicle.parameters.add_attribute_listener('THR_MIN', thr_min_callback)
See :ref:`vehicle_state_observing_parameters` for more information.
:param String attr_name: The name of the parameter to watch (or '*' to watch all parameters).
:param args: The callback to invoke when a change in the parameter is detected.
]
variable[attr_name] assign[=] call[name[attr_name].upper, parameter[]]
return[call[call[name[super], parameter[name[Parameters], name[self]]].add_attribute_listener, parameter[name[attr_name], <ast.Starred object at 0x7da1b1acfdf0>]]] | keyword[def] identifier[add_attribute_listener] ( identifier[self] , identifier[attr_name] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[attr_name] = identifier[attr_name] . identifier[upper] ()
keyword[return] identifier[super] ( identifier[Parameters] , identifier[self] ). identifier[add_attribute_listener] ( identifier[attr_name] ,* identifier[args] ,** identifier[kwargs] ) | def add_attribute_listener(self, attr_name, *args, **kwargs):
"""
Add a listener callback on a particular parameter.
The callback can be removed using :py:func:`remove_attribute_listener`.
.. note::
The :py:func:`on_attribute` decorator performs the same operation as this method, but with
a more elegant syntax. Use ``add_attribute_listener`` only if you will need to remove
the observer.
The callback function is invoked only when the parameter changes.
The callback arguments are:
* ``self`` - the associated :py:class:`Parameters`.
* ``attr_name`` - the parameter name. This can be used to infer which parameter has triggered
if the same callback is used for watching multiple parameters.
* ``msg`` - the new parameter value (so you don't need to re-query the vehicle object).
The example below shows how to get callbacks for the ``THR_MIN`` parameter:
.. code:: python
#Callback function for the THR_MIN parameter
def thr_min_callback(self, attr_name, value):
print " PARAMETER CALLBACK: %s changed to: %s" % (attr_name, value)
#Add observer for the vehicle's THR_MIN parameter
vehicle.parameters.add_attribute_listener('THR_MIN', thr_min_callback)
See :ref:`vehicle_state_observing_parameters` for more information.
:param String attr_name: The name of the parameter to watch (or '*' to watch all parameters).
:param args: The callback to invoke when a change in the parameter is detected.
"""
attr_name = attr_name.upper()
return super(Parameters, self).add_attribute_listener(attr_name, *args, **kwargs) |
def windowed_run_count_ufunc(x, window):
"""Dask-parallel version of windowed_run_count_1d, ie the number of consecutive true values in
array for runs at least as long as given duration.
Parameters
----------
x : bool array
Input array
window : int
Minimum duration of consecutive run to accumulate values.
Returns
-------
out : func
A function operating along the time dimension of a dask-array.
"""
return xr.apply_ufunc(windowed_run_count_1d,
x,
input_core_dims=[['time'], ],
vectorize=True,
dask='parallelized',
output_dtypes=[np.int, ],
keep_attrs=True,
kwargs={'window': window}) | def function[windowed_run_count_ufunc, parameter[x, window]]:
constant[Dask-parallel version of windowed_run_count_1d, ie the number of consecutive true values in
array for runs at least as long as given duration.
Parameters
----------
x : bool array
Input array
window : int
Minimum duration of consecutive run to accumulate values.
Returns
-------
out : func
A function operating along the time dimension of a dask-array.
]
return[call[name[xr].apply_ufunc, parameter[name[windowed_run_count_1d], name[x]]]] | keyword[def] identifier[windowed_run_count_ufunc] ( identifier[x] , identifier[window] ):
literal[string]
keyword[return] identifier[xr] . identifier[apply_ufunc] ( identifier[windowed_run_count_1d] ,
identifier[x] ,
identifier[input_core_dims] =[[ literal[string] ],],
identifier[vectorize] = keyword[True] ,
identifier[dask] = literal[string] ,
identifier[output_dtypes] =[ identifier[np] . identifier[int] ,],
identifier[keep_attrs] = keyword[True] ,
identifier[kwargs] ={ literal[string] : identifier[window] }) | def windowed_run_count_ufunc(x, window):
"""Dask-parallel version of windowed_run_count_1d, ie the number of consecutive true values in
array for runs at least as long as given duration.
Parameters
----------
x : bool array
Input array
window : int
Minimum duration of consecutive run to accumulate values.
Returns
-------
out : func
A function operating along the time dimension of a dask-array.
"""
return xr.apply_ufunc(windowed_run_count_1d, x, input_core_dims=[['time']], vectorize=True, dask='parallelized', output_dtypes=[np.int], keep_attrs=True, kwargs={'window': window}) |
def download_apk(self, path='.'):
"""
Download Android .apk
@type path:
@param path:
"""
apk_fd, apk_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
os.close(apk_fd)
try:
_download_url(self.artifact_url('apk'), apk_fn)
shutil.copy(apk_fn, os.path.join(path, 'target.apk'))
finally:
os.unlink(apk_fn) | def function[download_apk, parameter[self, path]]:
constant[
Download Android .apk
@type path:
@param path:
]
<ast.Tuple object at 0x7da1b1969f00> assign[=] call[name[tempfile].mkstemp, parameter[]]
call[name[os].close, parameter[name[apk_fd]]]
<ast.Try object at 0x7da1b196aaa0> | keyword[def] identifier[download_apk] ( identifier[self] , identifier[path] = literal[string] ):
literal[string]
identifier[apk_fd] , identifier[apk_fn] = identifier[tempfile] . identifier[mkstemp] ( identifier[prefix] = literal[string] , identifier[suffix] = literal[string] )
identifier[os] . identifier[close] ( identifier[apk_fd] )
keyword[try] :
identifier[_download_url] ( identifier[self] . identifier[artifact_url] ( literal[string] ), identifier[apk_fn] )
identifier[shutil] . identifier[copy] ( identifier[apk_fn] , identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] ))
keyword[finally] :
identifier[os] . identifier[unlink] ( identifier[apk_fn] ) | def download_apk(self, path='.'):
"""
Download Android .apk
@type path:
@param path:
"""
(apk_fd, apk_fn) = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
os.close(apk_fd)
try:
_download_url(self.artifact_url('apk'), apk_fn)
shutil.copy(apk_fn, os.path.join(path, 'target.apk')) # depends on [control=['try'], data=[]]
finally:
os.unlink(apk_fn) |
def consumer_group(self, group, keys, consumer=None):
"""
Create a named :py:class:`ConsumerGroup` instance for the given key(s).
:param group: name of consumer group
:param keys: stream identifier(s) to monitor. May be a single stream
key, a list of stream keys, or a key-to-minimum id mapping. The
minimum id for each stream should be considered an exclusive
lower-bound. The '$' value can also be used to only read values
added *after* our command started blocking.
:param consumer: name for consumer within group
:returns: a :py:class:`ConsumerGroup` instance
"""
return ConsumerGroup(self, group, keys, consumer=consumer) | def function[consumer_group, parameter[self, group, keys, consumer]]:
constant[
Create a named :py:class:`ConsumerGroup` instance for the given key(s).
:param group: name of consumer group
:param keys: stream identifier(s) to monitor. May be a single stream
key, a list of stream keys, or a key-to-minimum id mapping. The
minimum id for each stream should be considered an exclusive
lower-bound. The '$' value can also be used to only read values
added *after* our command started blocking.
:param consumer: name for consumer within group
:returns: a :py:class:`ConsumerGroup` instance
]
return[call[name[ConsumerGroup], parameter[name[self], name[group], name[keys]]]] | keyword[def] identifier[consumer_group] ( identifier[self] , identifier[group] , identifier[keys] , identifier[consumer] = keyword[None] ):
literal[string]
keyword[return] identifier[ConsumerGroup] ( identifier[self] , identifier[group] , identifier[keys] , identifier[consumer] = identifier[consumer] ) | def consumer_group(self, group, keys, consumer=None):
"""
Create a named :py:class:`ConsumerGroup` instance for the given key(s).
:param group: name of consumer group
:param keys: stream identifier(s) to monitor. May be a single stream
key, a list of stream keys, or a key-to-minimum id mapping. The
minimum id for each stream should be considered an exclusive
lower-bound. The '$' value can also be used to only read values
added *after* our command started blocking.
:param consumer: name for consumer within group
:returns: a :py:class:`ConsumerGroup` instance
"""
return ConsumerGroup(self, group, keys, consumer=consumer) |
def drain(self, cycles=None):
"""Execute _drain while trapping KeyboardInterrupt"""
if not self._check_service_requirements():
self.init_timer.stop()
return self.finish()
if self.anybar: self.anybar.change("orange")
self.init_timer.stop()
log.info("Trapping CTRL+C and starting to drain.")
signal.signal(signal.SIGINT, self._handle_ctrl_c)
with ignored(KeyboardInterrupt):
return self._drain(cycles) | def function[drain, parameter[self, cycles]]:
constant[Execute _drain while trapping KeyboardInterrupt]
if <ast.UnaryOp object at 0x7da1b23870a0> begin[:]
call[name[self].init_timer.stop, parameter[]]
return[call[name[self].finish, parameter[]]]
if name[self].anybar begin[:]
call[name[self].anybar.change, parameter[constant[orange]]]
call[name[self].init_timer.stop, parameter[]]
call[name[log].info, parameter[constant[Trapping CTRL+C and starting to drain.]]]
call[name[signal].signal, parameter[name[signal].SIGINT, name[self]._handle_ctrl_c]]
with call[name[ignored], parameter[name[KeyboardInterrupt]]] begin[:]
return[call[name[self]._drain, parameter[name[cycles]]]] | keyword[def] identifier[drain] ( identifier[self] , identifier[cycles] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_check_service_requirements] ():
identifier[self] . identifier[init_timer] . identifier[stop] ()
keyword[return] identifier[self] . identifier[finish] ()
keyword[if] identifier[self] . identifier[anybar] : identifier[self] . identifier[anybar] . identifier[change] ( literal[string] )
identifier[self] . identifier[init_timer] . identifier[stop] ()
identifier[log] . identifier[info] ( literal[string] )
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGINT] , identifier[self] . identifier[_handle_ctrl_c] )
keyword[with] identifier[ignored] ( identifier[KeyboardInterrupt] ):
keyword[return] identifier[self] . identifier[_drain] ( identifier[cycles] ) | def drain(self, cycles=None):
"""Execute _drain while trapping KeyboardInterrupt"""
if not self._check_service_requirements():
self.init_timer.stop()
return self.finish() # depends on [control=['if'], data=[]]
if self.anybar:
self.anybar.change('orange') # depends on [control=['if'], data=[]]
self.init_timer.stop()
log.info('Trapping CTRL+C and starting to drain.')
signal.signal(signal.SIGINT, self._handle_ctrl_c)
with ignored(KeyboardInterrupt):
return self._drain(cycles) # depends on [control=['with'], data=[]] |
def window_riesz(N):
r"""Riesz tapering window
:param N: window length
.. math:: w(n) = 1 - \left| \frac{n}{N/2} \right|^2
with :math:`-N/2 \leq n \leq N/2`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'riesz')
.. seealso:: :func:`create_window`, :class:`Window`
"""
n = linspace(-N/2., (N)/2., N)
w = 1 - abs(n/(N/2.))**2.
return w | def function[window_riesz, parameter[N]]:
constant[Riesz tapering window
:param N: window length
.. math:: w(n) = 1 - \left| \frac{n}{N/2} \right|^2
with :math:`-N/2 \leq n \leq N/2`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'riesz')
.. seealso:: :func:`create_window`, :class:`Window`
]
variable[n] assign[=] call[name[linspace], parameter[binary_operation[<ast.UnaryOp object at 0x7da1b021c520> / constant[2.0]], binary_operation[name[N] / constant[2.0]], name[N]]]
variable[w] assign[=] binary_operation[constant[1] - binary_operation[call[name[abs], parameter[binary_operation[name[n] / binary_operation[name[N] / constant[2.0]]]]] ** constant[2.0]]]
return[name[w]] | keyword[def] identifier[window_riesz] ( identifier[N] ):
literal[string]
identifier[n] = identifier[linspace] (- identifier[N] / literal[int] ,( identifier[N] )/ literal[int] , identifier[N] )
identifier[w] = literal[int] - identifier[abs] ( identifier[n] /( identifier[N] / literal[int] ))** literal[int]
keyword[return] identifier[w] | def window_riesz(N):
"""Riesz tapering window
:param N: window length
.. math:: w(n) = 1 - \\left| \\frac{n}{N/2} \\right|^2
with :math:`-N/2 \\leq n \\leq N/2`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'riesz')
.. seealso:: :func:`create_window`, :class:`Window`
"""
n = linspace(-N / 2.0, N / 2.0, N)
w = 1 - abs(n / (N / 2.0)) ** 2.0
return w |
def config(self, **kwargs):
"""
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
See :meth:`~Balloon.__init__` for a description of the widget specific option.
"""
self.__headertext = kwargs.pop("headertext", self.__headertext)
self.__text = kwargs.pop("text", self.__text)
self.__width = kwargs.pop("width", self.__width)
self._timeout = kwargs.pop("timeout", self._timeout)
self.__background = kwargs.pop("background", self.__background)
if self._toplevel:
self._on_leave(None)
self.show()
ttk.Frame.config(self, **kwargs) | def function[config, parameter[self]]:
constant[
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
See :meth:`~Balloon.__init__` for a description of the widget specific option.
]
name[self].__headertext assign[=] call[name[kwargs].pop, parameter[constant[headertext], name[self].__headertext]]
name[self].__text assign[=] call[name[kwargs].pop, parameter[constant[text], name[self].__text]]
name[self].__width assign[=] call[name[kwargs].pop, parameter[constant[width], name[self].__width]]
name[self]._timeout assign[=] call[name[kwargs].pop, parameter[constant[timeout], name[self]._timeout]]
name[self].__background assign[=] call[name[kwargs].pop, parameter[constant[background], name[self].__background]]
if name[self]._toplevel begin[:]
call[name[self]._on_leave, parameter[constant[None]]]
call[name[self].show, parameter[]]
call[name[ttk].Frame.config, parameter[name[self]]] | keyword[def] identifier[config] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[__headertext] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[__headertext] )
identifier[self] . identifier[__text] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[__text] )
identifier[self] . identifier[__width] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[__width] )
identifier[self] . identifier[_timeout] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_timeout] )
identifier[self] . identifier[__background] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[__background] )
keyword[if] identifier[self] . identifier[_toplevel] :
identifier[self] . identifier[_on_leave] ( keyword[None] )
identifier[self] . identifier[show] ()
identifier[ttk] . identifier[Frame] . identifier[config] ( identifier[self] ,** identifier[kwargs] ) | def config(self, **kwargs):
"""
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
See :meth:`~Balloon.__init__` for a description of the widget specific option.
"""
self.__headertext = kwargs.pop('headertext', self.__headertext)
self.__text = kwargs.pop('text', self.__text)
self.__width = kwargs.pop('width', self.__width)
self._timeout = kwargs.pop('timeout', self._timeout)
self.__background = kwargs.pop('background', self.__background)
if self._toplevel:
self._on_leave(None)
self.show() # depends on [control=['if'], data=[]]
ttk.Frame.config(self, **kwargs) |
async def _async_forward(async_chunks: collections.AsyncIterable,
out: Optional[Union[TeeCapture, IO[str]]]
) -> Optional[str]:
"""Prints/captures output from the given asynchronous iterable.
Args:
async_chunks: An asynchronous source of bytes or str.
out: Where to put the chunks.
Returns:
The complete captured output, or else None if the out argument wasn't a
TeeCapture instance.
"""
capture = isinstance(out, TeeCapture)
out_pipe = out.out_pipe if isinstance(out, TeeCapture) else out
chunks = [] if capture else None # type: Optional[List[str]]
async for chunk in async_chunks:
if not isinstance(chunk, str):
chunk = chunk.decode()
if out_pipe:
print(chunk, file=out_pipe, end='')
if chunks is not None:
chunks.append(chunk)
return ''.join(chunks) if chunks is not None else None | <ast.AsyncFunctionDef object at 0x7da1b1c3cc10> | keyword[async] keyword[def] identifier[_async_forward] ( identifier[async_chunks] : identifier[collections] . identifier[AsyncIterable] ,
identifier[out] : identifier[Optional] [ identifier[Union] [ identifier[TeeCapture] , identifier[IO] [ identifier[str] ]]]
)-> identifier[Optional] [ identifier[str] ]:
literal[string]
identifier[capture] = identifier[isinstance] ( identifier[out] , identifier[TeeCapture] )
identifier[out_pipe] = identifier[out] . identifier[out_pipe] keyword[if] identifier[isinstance] ( identifier[out] , identifier[TeeCapture] ) keyword[else] identifier[out]
identifier[chunks] =[] keyword[if] identifier[capture] keyword[else] keyword[None]
keyword[async] keyword[for] identifier[chunk] keyword[in] identifier[async_chunks] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[chunk] , identifier[str] ):
identifier[chunk] = identifier[chunk] . identifier[decode] ()
keyword[if] identifier[out_pipe] :
identifier[print] ( identifier[chunk] , identifier[file] = identifier[out_pipe] , identifier[end] = literal[string] )
keyword[if] identifier[chunks] keyword[is] keyword[not] keyword[None] :
identifier[chunks] . identifier[append] ( identifier[chunk] )
keyword[return] literal[string] . identifier[join] ( identifier[chunks] ) keyword[if] identifier[chunks] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] | async def _async_forward(async_chunks: collections.AsyncIterable, out: Optional[Union[TeeCapture, IO[str]]]) -> Optional[str]:
"""Prints/captures output from the given asynchronous iterable.
Args:
async_chunks: An asynchronous source of bytes or str.
out: Where to put the chunks.
Returns:
The complete captured output, or else None if the out argument wasn't a
TeeCapture instance.
"""
capture = isinstance(out, TeeCapture)
out_pipe = out.out_pipe if isinstance(out, TeeCapture) else out
chunks = [] if capture else None # type: Optional[List[str]]
async for chunk in async_chunks:
if not isinstance(chunk, str):
chunk = chunk.decode() # depends on [control=['if'], data=[]]
if out_pipe:
print(chunk, file=out_pipe, end='') # depends on [control=['if'], data=[]]
if chunks is not None:
chunks.append(chunk) # depends on [control=['if'], data=['chunks']]
return ''.join(chunks) if chunks is not None else None |
def get_help_usage(command):
# type: (str) -> None
"""Print out a help message and exit the program.
Args:
command: If a command value is supplied then print the help message for
the command module if available. If the command is '-a' or '--all',
then print the standard help message but with a full list of
available commands.
Raises:
ValueError: Raised if the help message is requested for an invalid
command or an unrecognized option is passed to help.
"""
if not command:
doc = get_primary_command_usage()
elif command in ('-a', '--all'):
subcommands = [k for k in settings.subcommands if k is not None]
available_commands = subcommands + ['help']
command_doc = '\nAvailable commands:\n{}\n'.format(
'\n'.join(' {}'.format(c) for c in sorted(available_commands)))
doc = get_primary_command_usage(command_doc)
elif command.startswith('-'):
raise ValueError("Unrecognized option '{}'.".format(command))
elif command in settings.subcommands:
subcommand = settings.subcommands[command]
doc = format_usage(subcommand.__doc__)
docopt.docopt(doc, argv=('--help',)) | def function[get_help_usage, parameter[command]]:
constant[Print out a help message and exit the program.
Args:
command: If a command value is supplied then print the help message for
the command module if available. If the command is '-a' or '--all',
then print the standard help message but with a full list of
available commands.
Raises:
ValueError: Raised if the help message is requested for an invalid
command or an unrecognized option is passed to help.
]
if <ast.UnaryOp object at 0x7da1b28ac8e0> begin[:]
variable[doc] assign[=] call[name[get_primary_command_usage], parameter[]]
call[name[docopt].docopt, parameter[name[doc]]] | keyword[def] identifier[get_help_usage] ( identifier[command] ):
literal[string]
keyword[if] keyword[not] identifier[command] :
identifier[doc] = identifier[get_primary_command_usage] ()
keyword[elif] identifier[command] keyword[in] ( literal[string] , literal[string] ):
identifier[subcommands] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[settings] . identifier[subcommands] keyword[if] identifier[k] keyword[is] keyword[not] keyword[None] ]
identifier[available_commands] = identifier[subcommands] +[ literal[string] ]
identifier[command_doc] = literal[string] . identifier[format] (
literal[string] . identifier[join] ( literal[string] . identifier[format] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[sorted] ( identifier[available_commands] )))
identifier[doc] = identifier[get_primary_command_usage] ( identifier[command_doc] )
keyword[elif] identifier[command] . identifier[startswith] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[command] ))
keyword[elif] identifier[command] keyword[in] identifier[settings] . identifier[subcommands] :
identifier[subcommand] = identifier[settings] . identifier[subcommands] [ identifier[command] ]
identifier[doc] = identifier[format_usage] ( identifier[subcommand] . identifier[__doc__] )
identifier[docopt] . identifier[docopt] ( identifier[doc] , identifier[argv] =( literal[string] ,)) | def get_help_usage(command):
# type: (str) -> None
"Print out a help message and exit the program.\n\n Args:\n command: If a command value is supplied then print the help message for\n the command module if available. If the command is '-a' or '--all',\n then print the standard help message but with a full list of\n available commands.\n\n Raises:\n ValueError: Raised if the help message is requested for an invalid\n command or an unrecognized option is passed to help.\n "
if not command:
doc = get_primary_command_usage() # depends on [control=['if'], data=[]]
elif command in ('-a', '--all'):
subcommands = [k for k in settings.subcommands if k is not None]
available_commands = subcommands + ['help']
command_doc = '\nAvailable commands:\n{}\n'.format('\n'.join((' {}'.format(c) for c in sorted(available_commands))))
doc = get_primary_command_usage(command_doc) # depends on [control=['if'], data=[]]
elif command.startswith('-'):
raise ValueError("Unrecognized option '{}'.".format(command)) # depends on [control=['if'], data=[]]
elif command in settings.subcommands:
subcommand = settings.subcommands[command]
doc = format_usage(subcommand.__doc__) # depends on [control=['if'], data=['command']]
docopt.docopt(doc, argv=('--help',)) |
def results(self, times='all', t_precision=12, **kwargs):
r"""
Fetches the calculated quantity from the algorithm and returns it as
an array.
Parameters
----------
times : scalar or list
Time steps to be returned. The default value is 'all' which results
in returning all time steps. If a scalar is given, only the
corresponding time step is returned. If a range is given
(e.g., 'range(0, 1, 1e-3)'), time steps in this range are returned.
t_precision : integer
The time precision (number of decimal places). Default value is 12.
Notes
-----
The keyword steps is interpreted in the same way as times.
"""
if 'steps' in kwargs.keys():
times = kwargs['steps']
t_pre = t_precision
quantity = self.settings['quantity']
q = [k for k in list(self.keys()) if quantity in k]
if times == 'all':
t = q
elif type(times) in [float, int]:
n = int(-dc(str(round(times, t_pre))).as_tuple().exponent *
(round(times, t_pre) != int(times)))
t_str = (str(int(round(times, t_pre)*10**n)) +
('e-'+str(n))*(n != 0))
t = [k for k in q if t_str == k.split('@')[-1]]
elif 'range' in times:
t = times.replace(' ', '')
t = t[6:-1]
t = t.split(',')
out = np.arange(float(t[0]), float(t[1]), float(t[2]))
out = np.append(out, float(t[1]))
out = np.unique(out)
out = np.around(out, decimals=t_pre)
t = []
for i in out:
n = int(-dc(str(round(i, t_pre))).as_tuple().exponent *
(round(i, t_pre) != int(i)))
j = (str(int(round(i, t_pre)*10**n))+('e-'+str(n))*(n != 0))
t_str = [k for k in q if j == k.split('@')[-1]]
t += (t_str)
d = {k: self[k] for k in t}
return d | def function[results, parameter[self, times, t_precision]]:
constant[
Fetches the calculated quantity from the algorithm and returns it as
an array.
Parameters
----------
times : scalar or list
Time steps to be returned. The default value is 'all' which results
in returning all time steps. If a scalar is given, only the
corresponding time step is returned. If a range is given
(e.g., 'range(0, 1, 1e-3)'), time steps in this range are returned.
t_precision : integer
The time precision (number of decimal places). Default value is 12.
Notes
-----
The keyword steps is interpreted in the same way as times.
]
if compare[constant[steps] in call[name[kwargs].keys, parameter[]]] begin[:]
variable[times] assign[=] call[name[kwargs]][constant[steps]]
variable[t_pre] assign[=] name[t_precision]
variable[quantity] assign[=] call[name[self].settings][constant[quantity]]
variable[q] assign[=] <ast.ListComp object at 0x7da18f58ebf0>
if compare[name[times] equal[==] constant[all]] begin[:]
variable[t] assign[=] name[q]
variable[d] assign[=] <ast.DictComp object at 0x7da18eb571c0>
return[name[d]] | keyword[def] identifier[results] ( identifier[self] , identifier[times] = literal[string] , identifier[t_precision] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] . identifier[keys] ():
identifier[times] = identifier[kwargs] [ literal[string] ]
identifier[t_pre] = identifier[t_precision]
identifier[quantity] = identifier[self] . identifier[settings] [ literal[string] ]
identifier[q] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[list] ( identifier[self] . identifier[keys] ()) keyword[if] identifier[quantity] keyword[in] identifier[k] ]
keyword[if] identifier[times] == literal[string] :
identifier[t] = identifier[q]
keyword[elif] identifier[type] ( identifier[times] ) keyword[in] [ identifier[float] , identifier[int] ]:
identifier[n] = identifier[int] (- identifier[dc] ( identifier[str] ( identifier[round] ( identifier[times] , identifier[t_pre] ))). identifier[as_tuple] (). identifier[exponent] *
( identifier[round] ( identifier[times] , identifier[t_pre] )!= identifier[int] ( identifier[times] )))
identifier[t_str] =( identifier[str] ( identifier[int] ( identifier[round] ( identifier[times] , identifier[t_pre] )* literal[int] ** identifier[n] ))+
( literal[string] + identifier[str] ( identifier[n] ))*( identifier[n] != literal[int] ))
identifier[t] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[q] keyword[if] identifier[t_str] == identifier[k] . identifier[split] ( literal[string] )[- literal[int] ]]
keyword[elif] literal[string] keyword[in] identifier[times] :
identifier[t] = identifier[times] . identifier[replace] ( literal[string] , literal[string] )
identifier[t] = identifier[t] [ literal[int] :- literal[int] ]
identifier[t] = identifier[t] . identifier[split] ( literal[string] )
identifier[out] = identifier[np] . identifier[arange] ( identifier[float] ( identifier[t] [ literal[int] ]), identifier[float] ( identifier[t] [ literal[int] ]), identifier[float] ( identifier[t] [ literal[int] ]))
identifier[out] = identifier[np] . identifier[append] ( identifier[out] , identifier[float] ( identifier[t] [ literal[int] ]))
identifier[out] = identifier[np] . identifier[unique] ( identifier[out] )
identifier[out] = identifier[np] . identifier[around] ( identifier[out] , identifier[decimals] = identifier[t_pre] )
identifier[t] =[]
keyword[for] identifier[i] keyword[in] identifier[out] :
identifier[n] = identifier[int] (- identifier[dc] ( identifier[str] ( identifier[round] ( identifier[i] , identifier[t_pre] ))). identifier[as_tuple] (). identifier[exponent] *
( identifier[round] ( identifier[i] , identifier[t_pre] )!= identifier[int] ( identifier[i] )))
identifier[j] =( identifier[str] ( identifier[int] ( identifier[round] ( identifier[i] , identifier[t_pre] )* literal[int] ** identifier[n] ))+( literal[string] + identifier[str] ( identifier[n] ))*( identifier[n] != literal[int] ))
identifier[t_str] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[q] keyword[if] identifier[j] == identifier[k] . identifier[split] ( literal[string] )[- literal[int] ]]
identifier[t] +=( identifier[t_str] )
identifier[d] ={ identifier[k] : identifier[self] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[t] }
keyword[return] identifier[d] | def results(self, times='all', t_precision=12, **kwargs):
"""
Fetches the calculated quantity from the algorithm and returns it as
an array.
Parameters
----------
times : scalar or list
Time steps to be returned. The default value is 'all' which results
in returning all time steps. If a scalar is given, only the
corresponding time step is returned. If a range is given
(e.g., 'range(0, 1, 1e-3)'), time steps in this range are returned.
t_precision : integer
The time precision (number of decimal places). Default value is 12.
Notes
-----
The keyword steps is interpreted in the same way as times.
"""
if 'steps' in kwargs.keys():
times = kwargs['steps'] # depends on [control=['if'], data=[]]
t_pre = t_precision
quantity = self.settings['quantity']
q = [k for k in list(self.keys()) if quantity in k]
if times == 'all':
t = q # depends on [control=['if'], data=[]]
elif type(times) in [float, int]:
n = int(-dc(str(round(times, t_pre))).as_tuple().exponent * (round(times, t_pre) != int(times)))
t_str = str(int(round(times, t_pre) * 10 ** n)) + ('e-' + str(n)) * (n != 0)
t = [k for k in q if t_str == k.split('@')[-1]] # depends on [control=['if'], data=[]]
elif 'range' in times:
t = times.replace(' ', '')
t = t[6:-1]
t = t.split(',')
out = np.arange(float(t[0]), float(t[1]), float(t[2]))
out = np.append(out, float(t[1]))
out = np.unique(out)
out = np.around(out, decimals=t_pre)
t = []
for i in out:
n = int(-dc(str(round(i, t_pre))).as_tuple().exponent * (round(i, t_pre) != int(i)))
j = str(int(round(i, t_pre) * 10 ** n)) + ('e-' + str(n)) * (n != 0)
t_str = [k for k in q if j == k.split('@')[-1]]
t += t_str # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=['times']]
d = {k: self[k] for k in t}
return d |
def _dev_api(cls):
"""Get a developer instance for GitHub API access."""
gh = github3.GitHub()
gh.set_client_id(cls.remote.consumer_key, cls.remote.consumer_secret)
return gh | def function[_dev_api, parameter[cls]]:
constant[Get a developer instance for GitHub API access.]
variable[gh] assign[=] call[name[github3].GitHub, parameter[]]
call[name[gh].set_client_id, parameter[name[cls].remote.consumer_key, name[cls].remote.consumer_secret]]
return[name[gh]] | keyword[def] identifier[_dev_api] ( identifier[cls] ):
literal[string]
identifier[gh] = identifier[github3] . identifier[GitHub] ()
identifier[gh] . identifier[set_client_id] ( identifier[cls] . identifier[remote] . identifier[consumer_key] , identifier[cls] . identifier[remote] . identifier[consumer_secret] )
keyword[return] identifier[gh] | def _dev_api(cls):
"""Get a developer instance for GitHub API access."""
gh = github3.GitHub()
gh.set_client_id(cls.remote.consumer_key, cls.remote.consumer_secret)
return gh |
def search(self, text, lookup=None):
'''Returns a new :class:`Query` for :attr:`Manager.model` with
a full text search value.'''
return self.query().search(text, lookup=lookup) | def function[search, parameter[self, text, lookup]]:
constant[Returns a new :class:`Query` for :attr:`Manager.model` with
a full text search value.]
return[call[call[name[self].query, parameter[]].search, parameter[name[text]]]] | keyword[def] identifier[search] ( identifier[self] , identifier[text] , identifier[lookup] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[query] (). identifier[search] ( identifier[text] , identifier[lookup] = identifier[lookup] ) | def search(self, text, lookup=None):
"""Returns a new :class:`Query` for :attr:`Manager.model` with
a full text search value."""
return self.query().search(text, lookup=lookup) |
def get_securitygroup(vm_):
'''
Return the security group
'''
sgs = list_securitygroup()
securitygroup = config.get_cloud_config_value(
'securitygroup', vm_, __opts__, search_global=False
)
if not securitygroup:
raise SaltCloudNotFound('No securitygroup ID specified for this VM.')
if securitygroup and six.text_type(securitygroup) in sgs:
return sgs[securitygroup]['SecurityGroupId']
raise SaltCloudNotFound(
'The specified security group, \'{0}\', could not be found.'.format(
securitygroup)
) | def function[get_securitygroup, parameter[vm_]]:
constant[
Return the security group
]
variable[sgs] assign[=] call[name[list_securitygroup], parameter[]]
variable[securitygroup] assign[=] call[name[config].get_cloud_config_value, parameter[constant[securitygroup], name[vm_], name[__opts__]]]
if <ast.UnaryOp object at 0x7da18c4cd900> begin[:]
<ast.Raise object at 0x7da18c4cc4f0>
if <ast.BoolOp object at 0x7da18c4cc400> begin[:]
return[call[call[name[sgs]][name[securitygroup]]][constant[SecurityGroupId]]]
<ast.Raise object at 0x7da18c4cc070> | keyword[def] identifier[get_securitygroup] ( identifier[vm_] ):
literal[string]
identifier[sgs] = identifier[list_securitygroup] ()
identifier[securitygroup] = identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[search_global] = keyword[False]
)
keyword[if] keyword[not] identifier[securitygroup] :
keyword[raise] identifier[SaltCloudNotFound] ( literal[string] )
keyword[if] identifier[securitygroup] keyword[and] identifier[six] . identifier[text_type] ( identifier[securitygroup] ) keyword[in] identifier[sgs] :
keyword[return] identifier[sgs] [ identifier[securitygroup] ][ literal[string] ]
keyword[raise] identifier[SaltCloudNotFound] (
literal[string] . identifier[format] (
identifier[securitygroup] )
) | def get_securitygroup(vm_):
"""
Return the security group
"""
sgs = list_securitygroup()
securitygroup = config.get_cloud_config_value('securitygroup', vm_, __opts__, search_global=False)
if not securitygroup:
raise SaltCloudNotFound('No securitygroup ID specified for this VM.') # depends on [control=['if'], data=[]]
if securitygroup and six.text_type(securitygroup) in sgs:
return sgs[securitygroup]['SecurityGroupId'] # depends on [control=['if'], data=[]]
raise SaltCloudNotFound("The specified security group, '{0}', could not be found.".format(securitygroup)) |
def find(ellipsname, crstype, strict=False):
"""
Search for a ellipsoid name located in this module.
Arguments:
- **ellipsname**: The ellipsoid name to search for.
- **crstype**: Which CRS naming convention to search (different
CRS formats have different names for the same ellipsoid).
- **strict** (optional): If False, ignores minor name mismatches
such as underscore or character casing, otherwise must be exact
match (defaults to False).
"""
if not strict:
ellipsname = ellipsname.lower().replace(" ","_")
for itemname,item in globals().items():
if itemname.startswith("_") or itemname == 'Ellipsoid':
continue
try:
if hasattr(item.name, crstype):
itemname = getattr(item.name, crstype)
if not strict:
itemname = itemname.lower().replace(" ","_")
if ellipsname == itemname:
return item
except:
pass
else:
return None | def function[find, parameter[ellipsname, crstype, strict]]:
constant[
Search for a ellipsoid name located in this module.
Arguments:
- **ellipsname**: The ellipsoid name to search for.
- **crstype**: Which CRS naming convention to search (different
CRS formats have different names for the same ellipsoid).
- **strict** (optional): If False, ignores minor name mismatches
such as underscore or character casing, otherwise must be exact
match (defaults to False).
]
if <ast.UnaryOp object at 0x7da20c991e40> begin[:]
variable[ellipsname] assign[=] call[call[name[ellipsname].lower, parameter[]].replace, parameter[constant[ ], constant[_]]]
for taget[tuple[[<ast.Name object at 0x7da20c9901f0>, <ast.Name object at 0x7da20c993160>]]] in starred[call[call[name[globals], parameter[]].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da20c9902b0> begin[:]
continue
<ast.Try object at 0x7da18c4ce140> | keyword[def] identifier[find] ( identifier[ellipsname] , identifier[crstype] , identifier[strict] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[strict] :
identifier[ellipsname] = identifier[ellipsname] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[itemname] , identifier[item] keyword[in] identifier[globals] (). identifier[items] ():
keyword[if] identifier[itemname] . identifier[startswith] ( literal[string] ) keyword[or] identifier[itemname] == literal[string] :
keyword[continue]
keyword[try] :
keyword[if] identifier[hasattr] ( identifier[item] . identifier[name] , identifier[crstype] ):
identifier[itemname] = identifier[getattr] ( identifier[item] . identifier[name] , identifier[crstype] )
keyword[if] keyword[not] identifier[strict] :
identifier[itemname] = identifier[itemname] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[ellipsname] == identifier[itemname] :
keyword[return] identifier[item]
keyword[except] :
keyword[pass]
keyword[else] :
keyword[return] keyword[None] | def find(ellipsname, crstype, strict=False):
"""
Search for a ellipsoid name located in this module.
Arguments:
- **ellipsname**: The ellipsoid name to search for.
- **crstype**: Which CRS naming convention to search (different
CRS formats have different names for the same ellipsoid).
- **strict** (optional): If False, ignores minor name mismatches
such as underscore or character casing, otherwise must be exact
match (defaults to False).
"""
if not strict:
ellipsname = ellipsname.lower().replace(' ', '_') # depends on [control=['if'], data=[]]
for (itemname, item) in globals().items():
if itemname.startswith('_') or itemname == 'Ellipsoid':
continue # depends on [control=['if'], data=[]]
try:
if hasattr(item.name, crstype):
itemname = getattr(item.name, crstype)
if not strict:
itemname = itemname.lower().replace(' ', '_') # depends on [control=['if'], data=[]]
if ellipsname == itemname:
return item # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
else:
return None |
def open_dialog(self, verbose=False):
"""
The command line dialog provides a field to enter commands and view
results. It also provides the help command to display namespaces,
commands, and arguments.
:param verbose: print more
"""
response=api(url=self.__url+"/open dialog", verbose=verbose)
return response | def function[open_dialog, parameter[self, verbose]]:
constant[
The command line dialog provides a field to enter commands and view
results. It also provides the help command to display namespaces,
commands, and arguments.
:param verbose: print more
]
variable[response] assign[=] call[name[api], parameter[]]
return[name[response]] | keyword[def] identifier[open_dialog] ( identifier[self] , identifier[verbose] = keyword[False] ):
literal[string]
identifier[response] = identifier[api] ( identifier[url] = identifier[self] . identifier[__url] + literal[string] , identifier[verbose] = identifier[verbose] )
keyword[return] identifier[response] | def open_dialog(self, verbose=False):
"""
The command line dialog provides a field to enter commands and view
results. It also provides the help command to display namespaces,
commands, and arguments.
:param verbose: print more
"""
response = api(url=self.__url + '/open dialog', verbose=verbose)
return response |
def init_app(self, app):
'''
Configures this extension with the given app. This registers an
``teardown_appcontext`` call, and attaches this ``LDAP3LoginManager``
to it as ``app.ldap3_login_manager``.
Args:
app (flask.Flask): The flask app to initialise with
'''
app.ldap3_login_manager = self
servers = list(self._server_pool)
for s in servers:
self._server_pool.remove(s)
self.init_config(app.config)
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown)
else: # pragma: no cover
app.teardown_request(self.teardown)
self.app = app | def function[init_app, parameter[self, app]]:
constant[
Configures this extension with the given app. This registers an
``teardown_appcontext`` call, and attaches this ``LDAP3LoginManager``
to it as ``app.ldap3_login_manager``.
Args:
app (flask.Flask): The flask app to initialise with
]
name[app].ldap3_login_manager assign[=] name[self]
variable[servers] assign[=] call[name[list], parameter[name[self]._server_pool]]
for taget[name[s]] in starred[name[servers]] begin[:]
call[name[self]._server_pool.remove, parameter[name[s]]]
call[name[self].init_config, parameter[name[app].config]]
if call[name[hasattr], parameter[name[app], constant[teardown_appcontext]]] begin[:]
call[name[app].teardown_appcontext, parameter[name[self].teardown]]
name[self].app assign[=] name[app] | keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ):
literal[string]
identifier[app] . identifier[ldap3_login_manager] = identifier[self]
identifier[servers] = identifier[list] ( identifier[self] . identifier[_server_pool] )
keyword[for] identifier[s] keyword[in] identifier[servers] :
identifier[self] . identifier[_server_pool] . identifier[remove] ( identifier[s] )
identifier[self] . identifier[init_config] ( identifier[app] . identifier[config] )
keyword[if] identifier[hasattr] ( identifier[app] , literal[string] ):
identifier[app] . identifier[teardown_appcontext] ( identifier[self] . identifier[teardown] )
keyword[else] :
identifier[app] . identifier[teardown_request] ( identifier[self] . identifier[teardown] )
identifier[self] . identifier[app] = identifier[app] | def init_app(self, app):
"""
Configures this extension with the given app. This registers an
``teardown_appcontext`` call, and attaches this ``LDAP3LoginManager``
to it as ``app.ldap3_login_manager``.
Args:
app (flask.Flask): The flask app to initialise with
"""
app.ldap3_login_manager = self
servers = list(self._server_pool)
for s in servers:
self._server_pool.remove(s) # depends on [control=['for'], data=['s']]
self.init_config(app.config)
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown) # depends on [control=['if'], data=[]]
else: # pragma: no cover
app.teardown_request(self.teardown)
self.app = app |
def update(self, data):
"""Add data to running digest, increasing the accumulators for 0-8
triplets formed by this char and the previous 0-3 chars."""
for character in data:
if PY3:
ch = character
else:
ch = ord(character)
self.count += 1
# incr accumulators for triplets
if self.lastch[1] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[1], 0)] +=1
if self.lastch[2] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[2], 1)] +=1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[2], 2)] +=1
if self.lastch[3] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[3], 3)] +=1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[3], 4)] +=1
self.acc[self.tran3(ch, self.lastch[2], self.lastch[3], 5)] +=1
self.acc[self.tran3(self.lastch[3], self.lastch[0], ch, 6)] +=1
self.acc[self.tran3(self.lastch[3], self.lastch[2], ch, 7)] +=1
# adjust last seen chars
self.lastch = [ch] + self.lastch[:3] | def function[update, parameter[self, data]]:
constant[Add data to running digest, increasing the accumulators for 0-8
triplets formed by this char and the previous 0-3 chars.]
for taget[name[character]] in starred[name[data]] begin[:]
if name[PY3] begin[:]
variable[ch] assign[=] name[character]
<ast.AugAssign object at 0x7da1b10a7b50>
if compare[call[name[self].lastch][constant[1]] greater[>] <ast.UnaryOp object at 0x7da1b10a6200>] begin[:]
<ast.AugAssign object at 0x7da1b10a6290>
if compare[call[name[self].lastch][constant[2]] greater[>] <ast.UnaryOp object at 0x7da1b10a5150>] begin[:]
<ast.AugAssign object at 0x7da1b10a6920>
<ast.AugAssign object at 0x7da1b0fb1ff0>
if compare[call[name[self].lastch][constant[3]] greater[>] <ast.UnaryOp object at 0x7da1b0fb1150>] begin[:]
<ast.AugAssign object at 0x7da1b0fb3ca0>
<ast.AugAssign object at 0x7da1b0fb2200>
<ast.AugAssign object at 0x7da1b0f2e740>
<ast.AugAssign object at 0x7da1b0f2e620>
<ast.AugAssign object at 0x7da1b0f2f820>
name[self].lastch assign[=] binary_operation[list[[<ast.Name object at 0x7da1b0fb2f20>]] + call[name[self].lastch][<ast.Slice object at 0x7da1b0fb2ef0>]] | keyword[def] identifier[update] ( identifier[self] , identifier[data] ):
literal[string]
keyword[for] identifier[character] keyword[in] identifier[data] :
keyword[if] identifier[PY3] :
identifier[ch] = identifier[character]
keyword[else] :
identifier[ch] = identifier[ord] ( identifier[character] )
identifier[self] . identifier[count] += literal[int]
keyword[if] identifier[self] . identifier[lastch] [ literal[int] ]>- literal[int] :
identifier[self] . identifier[acc] [ identifier[self] . identifier[tran3] ( identifier[ch] , identifier[self] . identifier[lastch] [ literal[int] ], identifier[self] . identifier[lastch] [ literal[int] ], literal[int] )]+= literal[int]
keyword[if] identifier[self] . identifier[lastch] [ literal[int] ]>- literal[int] :
identifier[self] . identifier[acc] [ identifier[self] . identifier[tran3] ( identifier[ch] , identifier[self] . identifier[lastch] [ literal[int] ], identifier[self] . identifier[lastch] [ literal[int] ], literal[int] )]+= literal[int]
identifier[self] . identifier[acc] [ identifier[self] . identifier[tran3] ( identifier[ch] , identifier[self] . identifier[lastch] [ literal[int] ], identifier[self] . identifier[lastch] [ literal[int] ], literal[int] )]+= literal[int]
keyword[if] identifier[self] . identifier[lastch] [ literal[int] ]>- literal[int] :
identifier[self] . identifier[acc] [ identifier[self] . identifier[tran3] ( identifier[ch] , identifier[self] . identifier[lastch] [ literal[int] ], identifier[self] . identifier[lastch] [ literal[int] ], literal[int] )]+= literal[int]
identifier[self] . identifier[acc] [ identifier[self] . identifier[tran3] ( identifier[ch] , identifier[self] . identifier[lastch] [ literal[int] ], identifier[self] . identifier[lastch] [ literal[int] ], literal[int] )]+= literal[int]
identifier[self] . identifier[acc] [ identifier[self] . identifier[tran3] ( identifier[ch] , identifier[self] . identifier[lastch] [ literal[int] ], identifier[self] . identifier[lastch] [ literal[int] ], literal[int] )]+= literal[int]
identifier[self] . identifier[acc] [ identifier[self] . identifier[tran3] ( identifier[self] . identifier[lastch] [ literal[int] ], identifier[self] . identifier[lastch] [ literal[int] ], identifier[ch] , literal[int] )]+= literal[int]
identifier[self] . identifier[acc] [ identifier[self] . identifier[tran3] ( identifier[self] . identifier[lastch] [ literal[int] ], identifier[self] . identifier[lastch] [ literal[int] ], identifier[ch] , literal[int] )]+= literal[int]
identifier[self] . identifier[lastch] =[ identifier[ch] ]+ identifier[self] . identifier[lastch] [: literal[int] ] | def update(self, data):
"""Add data to running digest, increasing the accumulators for 0-8
triplets formed by this char and the previous 0-3 chars."""
for character in data:
if PY3:
ch = character # depends on [control=['if'], data=[]]
else:
ch = ord(character)
self.count += 1
# incr accumulators for triplets
if self.lastch[1] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[1], 0)] += 1 # depends on [control=['if'], data=[]]
if self.lastch[2] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[2], 1)] += 1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[2], 2)] += 1 # depends on [control=['if'], data=[]]
if self.lastch[3] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[3], 3)] += 1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[3], 4)] += 1
self.acc[self.tran3(ch, self.lastch[2], self.lastch[3], 5)] += 1
self.acc[self.tran3(self.lastch[3], self.lastch[0], ch, 6)] += 1
self.acc[self.tran3(self.lastch[3], self.lastch[2], ch, 7)] += 1 # depends on [control=['if'], data=[]]
# adjust last seen chars
self.lastch = [ch] + self.lastch[:3] # depends on [control=['for'], data=['character']] |
def generator(name):
""" Return generator by its name
:param name: name of hash-generator
:return: WHashGeneratorProto class
"""
name = name.upper()
if name not in WHash.__hash_map__.keys():
raise ValueError('Hash generator "%s" not available' % name)
return WHash.__hash_map__[name] | def function[generator, parameter[name]]:
constant[ Return generator by its name
:param name: name of hash-generator
:return: WHashGeneratorProto class
]
variable[name] assign[=] call[name[name].upper, parameter[]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> call[name[WHash].__hash_map__.keys, parameter[]]] begin[:]
<ast.Raise object at 0x7da20e9551e0>
return[call[name[WHash].__hash_map__][name[name]]] | keyword[def] identifier[generator] ( identifier[name] ):
literal[string]
identifier[name] = identifier[name] . identifier[upper] ()
keyword[if] identifier[name] keyword[not] keyword[in] identifier[WHash] . identifier[__hash_map__] . identifier[keys] ():
keyword[raise] identifier[ValueError] ( literal[string] % identifier[name] )
keyword[return] identifier[WHash] . identifier[__hash_map__] [ identifier[name] ] | def generator(name):
""" Return generator by its name
:param name: name of hash-generator
:return: WHashGeneratorProto class
"""
name = name.upper()
if name not in WHash.__hash_map__.keys():
raise ValueError('Hash generator "%s" not available' % name) # depends on [control=['if'], data=['name']]
return WHash.__hash_map__[name] |
def stats(path, fmt, nocolor, timezones, utc, noprogress, most_common, resolve, length):
"""Analysis of Facebook chat history."""
with colorize_output(nocolor):
try:
chat_history = _process_history(
path=path, thread='', timezones=timezones,
utc=utc, noprogress=noprogress, resolve=resolve)
except ProcessingFailure:
return
statistics = ChatHistoryStatistics(
chat_history, most_common=None if most_common < 0 else most_common)
if fmt == 'text':
statistics.write_text(sys.stdout, -1 if length < 0 else length)
elif fmt == 'json':
statistics.write_json(sys.stdout)
elif fmt == 'pretty-json':
statistics.write_json(sys.stdout, pretty=True)
elif fmt == 'yaml':
statistics.write_yaml(sys.stdout) | def function[stats, parameter[path, fmt, nocolor, timezones, utc, noprogress, most_common, resolve, length]]:
constant[Analysis of Facebook chat history.]
with call[name[colorize_output], parameter[name[nocolor]]] begin[:]
<ast.Try object at 0x7da18ede5f60>
variable[statistics] assign[=] call[name[ChatHistoryStatistics], parameter[name[chat_history]]]
if compare[name[fmt] equal[==] constant[text]] begin[:]
call[name[statistics].write_text, parameter[name[sys].stdout, <ast.IfExp object at 0x7da18ede44c0>]] | keyword[def] identifier[stats] ( identifier[path] , identifier[fmt] , identifier[nocolor] , identifier[timezones] , identifier[utc] , identifier[noprogress] , identifier[most_common] , identifier[resolve] , identifier[length] ):
literal[string]
keyword[with] identifier[colorize_output] ( identifier[nocolor] ):
keyword[try] :
identifier[chat_history] = identifier[_process_history] (
identifier[path] = identifier[path] , identifier[thread] = literal[string] , identifier[timezones] = identifier[timezones] ,
identifier[utc] = identifier[utc] , identifier[noprogress] = identifier[noprogress] , identifier[resolve] = identifier[resolve] )
keyword[except] identifier[ProcessingFailure] :
keyword[return]
identifier[statistics] = identifier[ChatHistoryStatistics] (
identifier[chat_history] , identifier[most_common] = keyword[None] keyword[if] identifier[most_common] < literal[int] keyword[else] identifier[most_common] )
keyword[if] identifier[fmt] == literal[string] :
identifier[statistics] . identifier[write_text] ( identifier[sys] . identifier[stdout] ,- literal[int] keyword[if] identifier[length] < literal[int] keyword[else] identifier[length] )
keyword[elif] identifier[fmt] == literal[string] :
identifier[statistics] . identifier[write_json] ( identifier[sys] . identifier[stdout] )
keyword[elif] identifier[fmt] == literal[string] :
identifier[statistics] . identifier[write_json] ( identifier[sys] . identifier[stdout] , identifier[pretty] = keyword[True] )
keyword[elif] identifier[fmt] == literal[string] :
identifier[statistics] . identifier[write_yaml] ( identifier[sys] . identifier[stdout] ) | def stats(path, fmt, nocolor, timezones, utc, noprogress, most_common, resolve, length):
"""Analysis of Facebook chat history."""
with colorize_output(nocolor):
try:
chat_history = _process_history(path=path, thread='', timezones=timezones, utc=utc, noprogress=noprogress, resolve=resolve) # depends on [control=['try'], data=[]]
except ProcessingFailure:
return # depends on [control=['except'], data=[]]
statistics = ChatHistoryStatistics(chat_history, most_common=None if most_common < 0 else most_common)
if fmt == 'text':
statistics.write_text(sys.stdout, -1 if length < 0 else length) # depends on [control=['if'], data=[]]
elif fmt == 'json':
statistics.write_json(sys.stdout) # depends on [control=['if'], data=[]]
elif fmt == 'pretty-json':
statistics.write_json(sys.stdout, pretty=True) # depends on [control=['if'], data=[]]
elif fmt == 'yaml':
statistics.write_yaml(sys.stdout) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] |
def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None):
""" Figure out based on the possible row inputs which rows to keep.
Args:
gctoo (GCToo object):
rid (list of strings):
row_bool (boolean array):
ridx (list of integers):
exclude_rid (list of strings):
Returns:
rows_to_keep (list of strings): row ids to be kept
"""
# Use rid if provided
if rid is not None:
assert type(rid) == list, "rid must be a list. rid: {}".format(rid)
rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if gctoo_row in rid]
# Tell user if some rids not found
num_missing_rids = len(rid) - len(rows_to_keep)
if num_missing_rids != 0:
logger.info("{} rids were not found in the GCT.".format(num_missing_rids))
# Use row_bool if provided
elif row_bool is not None:
assert len(row_bool) == gctoo.data_df.shape[0], (
"row_bool must have length equal to gctoo.data_df.shape[0]. " +
"len(row_bool): {}, gctoo.data_df.shape[0]: {}".format(
len(row_bool), gctoo.data_df.shape[0]))
rows_to_keep = gctoo.data_df.index[row_bool].values
# Use ridx if provided
elif ridx is not None:
assert type(ridx[0]) is int, (
"ridx must be a list of integers. ridx[0]: {}, " +
"type(ridx[0]): {}").format(ridx[0], type(ridx[0]))
assert max(ridx) <= gctoo.data_df.shape[0], (
"ridx contains an integer larger than the number of rows in " +
"the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}").format(
max(ridx), gctoo.data_df.shape[0])
rows_to_keep = gctoo.data_df.index[ridx].values
# If rid, row_bool, and ridx are all None, return all rows
else:
rows_to_keep = gctoo.data_df.index.values
# Use exclude_rid if provided
if exclude_rid is not None:
# Keep only those rows that are not in exclude_rid
rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid]
return rows_to_keep | def function[get_rows_to_keep, parameter[gctoo, rid, row_bool, ridx, exclude_rid]]:
constant[ Figure out based on the possible row inputs which rows to keep.
Args:
gctoo (GCToo object):
rid (list of strings):
row_bool (boolean array):
ridx (list of integers):
exclude_rid (list of strings):
Returns:
rows_to_keep (list of strings): row ids to be kept
]
if compare[name[rid] is_not constant[None]] begin[:]
assert[compare[call[name[type], parameter[name[rid]]] equal[==] name[list]]]
variable[rows_to_keep] assign[=] <ast.ListComp object at 0x7da20e9579a0>
variable[num_missing_rids] assign[=] binary_operation[call[name[len], parameter[name[rid]]] - call[name[len], parameter[name[rows_to_keep]]]]
if compare[name[num_missing_rids] not_equal[!=] constant[0]] begin[:]
call[name[logger].info, parameter[call[constant[{} rids were not found in the GCT.].format, parameter[name[num_missing_rids]]]]]
if compare[name[exclude_rid] is_not constant[None]] begin[:]
variable[rows_to_keep] assign[=] <ast.ListComp object at 0x7da20c76df00>
return[name[rows_to_keep]] | keyword[def] identifier[get_rows_to_keep] ( identifier[gctoo] , identifier[rid] = keyword[None] , identifier[row_bool] = keyword[None] , identifier[ridx] = keyword[None] , identifier[exclude_rid] = keyword[None] ):
literal[string]
keyword[if] identifier[rid] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[type] ( identifier[rid] )== identifier[list] , literal[string] . identifier[format] ( identifier[rid] )
identifier[rows_to_keep] =[ identifier[gctoo_row] keyword[for] identifier[gctoo_row] keyword[in] identifier[gctoo] . identifier[data_df] . identifier[index] keyword[if] identifier[gctoo_row] keyword[in] identifier[rid] ]
identifier[num_missing_rids] = identifier[len] ( identifier[rid] )- identifier[len] ( identifier[rows_to_keep] )
keyword[if] identifier[num_missing_rids] != literal[int] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[num_missing_rids] ))
keyword[elif] identifier[row_bool] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[len] ( identifier[row_bool] )== identifier[gctoo] . identifier[data_df] . identifier[shape] [ literal[int] ],(
literal[string] +
literal[string] . identifier[format] (
identifier[len] ( identifier[row_bool] ), identifier[gctoo] . identifier[data_df] . identifier[shape] [ literal[int] ]))
identifier[rows_to_keep] = identifier[gctoo] . identifier[data_df] . identifier[index] [ identifier[row_bool] ]. identifier[values]
keyword[elif] identifier[ridx] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[type] ( identifier[ridx] [ literal[int] ]) keyword[is] identifier[int] ,(
literal[string] +
literal[string] ). identifier[format] ( identifier[ridx] [ literal[int] ], identifier[type] ( identifier[ridx] [ literal[int] ]))
keyword[assert] identifier[max] ( identifier[ridx] )<= identifier[gctoo] . identifier[data_df] . identifier[shape] [ literal[int] ],(
literal[string] +
literal[string] ). identifier[format] (
identifier[max] ( identifier[ridx] ), identifier[gctoo] . identifier[data_df] . identifier[shape] [ literal[int] ])
identifier[rows_to_keep] = identifier[gctoo] . identifier[data_df] . identifier[index] [ identifier[ridx] ]. identifier[values]
keyword[else] :
identifier[rows_to_keep] = identifier[gctoo] . identifier[data_df] . identifier[index] . identifier[values]
keyword[if] identifier[exclude_rid] keyword[is] keyword[not] keyword[None] :
identifier[rows_to_keep] =[ identifier[row_to_keep] keyword[for] identifier[row_to_keep] keyword[in] identifier[rows_to_keep] keyword[if] identifier[row_to_keep] keyword[not] keyword[in] identifier[exclude_rid] ]
keyword[return] identifier[rows_to_keep] | def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None):
""" Figure out based on the possible row inputs which rows to keep.
Args:
gctoo (GCToo object):
rid (list of strings):
row_bool (boolean array):
ridx (list of integers):
exclude_rid (list of strings):
Returns:
rows_to_keep (list of strings): row ids to be kept
"""
# Use rid if provided
if rid is not None:
assert type(rid) == list, 'rid must be a list. rid: {}'.format(rid)
rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if gctoo_row in rid]
# Tell user if some rids not found
num_missing_rids = len(rid) - len(rows_to_keep)
if num_missing_rids != 0:
logger.info('{} rids were not found in the GCT.'.format(num_missing_rids)) # depends on [control=['if'], data=['num_missing_rids']] # depends on [control=['if'], data=['rid']]
# Use row_bool if provided
elif row_bool is not None:
assert len(row_bool) == gctoo.data_df.shape[0], 'row_bool must have length equal to gctoo.data_df.shape[0]. ' + 'len(row_bool): {}, gctoo.data_df.shape[0]: {}'.format(len(row_bool), gctoo.data_df.shape[0])
rows_to_keep = gctoo.data_df.index[row_bool].values # depends on [control=['if'], data=['row_bool']]
# Use ridx if provided
elif ridx is not None:
assert type(ridx[0]) is int, ('ridx must be a list of integers. ridx[0]: {}, ' + 'type(ridx[0]): {}').format(ridx[0], type(ridx[0]))
assert max(ridx) <= gctoo.data_df.shape[0], ('ridx contains an integer larger than the number of rows in ' + 'the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}').format(max(ridx), gctoo.data_df.shape[0])
rows_to_keep = gctoo.data_df.index[ridx].values # depends on [control=['if'], data=['ridx']]
else:
# If rid, row_bool, and ridx are all None, return all rows
rows_to_keep = gctoo.data_df.index.values
# Use exclude_rid if provided
if exclude_rid is not None:
# Keep only those rows that are not in exclude_rid
rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid] # depends on [control=['if'], data=['exclude_rid']]
return rows_to_keep |
def maybe_convert_indices(indices, n):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
The array of indices that we are to convert.
n : int
The number of elements in the array that we are indexing.
Returns
-------
valid_indices : array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError : one of the converted indices either exceeded the number
of elements (specified by `n`) OR was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices | def function[maybe_convert_indices, parameter[indices, n]]:
constant[
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
The array of indices that we are to convert.
n : int
The number of elements in the array that we are indexing.
Returns
-------
valid_indices : array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError : one of the converted indices either exceeded the number
of elements (specified by `n`) OR was still negative.
]
if call[name[isinstance], parameter[name[indices], name[list]]] begin[:]
variable[indices] assign[=] call[name[np].array, parameter[name[indices]]]
if compare[call[name[len], parameter[name[indices]]] equal[==] constant[0]] begin[:]
return[call[name[np].empty, parameter[constant[0]]]]
variable[mask] assign[=] compare[name[indices] less[<] constant[0]]
if call[name[mask].any, parameter[]] begin[:]
variable[indices] assign[=] call[name[indices].copy, parameter[]]
<ast.AugAssign object at 0x7da207f01ed0>
variable[mask] assign[=] binary_operation[compare[name[indices] greater_or_equal[>=] name[n]] <ast.BitOr object at 0x7da2590d6aa0> compare[name[indices] less[<] constant[0]]]
if call[name[mask].any, parameter[]] begin[:]
<ast.Raise object at 0x7da207f038b0>
return[name[indices]] | keyword[def] identifier[maybe_convert_indices] ( identifier[indices] , identifier[n] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[indices] , identifier[list] ):
identifier[indices] = identifier[np] . identifier[array] ( identifier[indices] )
keyword[if] identifier[len] ( identifier[indices] )== literal[int] :
keyword[return] identifier[np] . identifier[empty] ( literal[int] , identifier[dtype] = identifier[np] . identifier[intp] )
identifier[mask] = identifier[indices] < literal[int]
keyword[if] identifier[mask] . identifier[any] ():
identifier[indices] = identifier[indices] . identifier[copy] ()
identifier[indices] [ identifier[mask] ]+= identifier[n]
identifier[mask] =( identifier[indices] >= identifier[n] )|( identifier[indices] < literal[int] )
keyword[if] identifier[mask] . identifier[any] ():
keyword[raise] identifier[IndexError] ( literal[string] )
keyword[return] identifier[indices] | def maybe_convert_indices(indices, n):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
The array of indices that we are to convert.
n : int
The number of elements in the array that we are indexing.
Returns
-------
valid_indices : array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError : one of the converted indices either exceeded the number
of elements (specified by `n`) OR was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.intp) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n # depends on [control=['if'], data=[]]
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError('indices are out-of-bounds') # depends on [control=['if'], data=[]]
return indices |
def setup_cluster(self, cluster, extra_args=tuple()):
"""
Configure the cluster by running an Ansible playbook.
The ElastiCluster configuration attribute `<kind>_groups`
determines, for each node kind, what Ansible groups nodes of
that kind are assigned to.
:param cluster: cluster to configure
:type cluster: :py:class:`elasticluster.cluster.Cluster`
:param list extra_args:
List of additional command-line arguments
that are appended to each invocation of the setup program.
:return: ``True`` on success, ``False`` otherwise. Please note, if nothing
has to be configured, then ``True`` is returned.
:raises: `ConfigurationError` if the playbook can not be found
or is corrupt.
"""
return self._run_playbook(cluster, self._playbook_path, extra_args) | def function[setup_cluster, parameter[self, cluster, extra_args]]:
constant[
Configure the cluster by running an Ansible playbook.
The ElastiCluster configuration attribute `<kind>_groups`
determines, for each node kind, what Ansible groups nodes of
that kind are assigned to.
:param cluster: cluster to configure
:type cluster: :py:class:`elasticluster.cluster.Cluster`
:param list extra_args:
List of additional command-line arguments
that are appended to each invocation of the setup program.
:return: ``True`` on success, ``False`` otherwise. Please note, if nothing
has to be configured, then ``True`` is returned.
:raises: `ConfigurationError` if the playbook can not be found
or is corrupt.
]
return[call[name[self]._run_playbook, parameter[name[cluster], name[self]._playbook_path, name[extra_args]]]] | keyword[def] identifier[setup_cluster] ( identifier[self] , identifier[cluster] , identifier[extra_args] = identifier[tuple] ()):
literal[string]
keyword[return] identifier[self] . identifier[_run_playbook] ( identifier[cluster] , identifier[self] . identifier[_playbook_path] , identifier[extra_args] ) | def setup_cluster(self, cluster, extra_args=tuple()):
"""
Configure the cluster by running an Ansible playbook.
The ElastiCluster configuration attribute `<kind>_groups`
determines, for each node kind, what Ansible groups nodes of
that kind are assigned to.
:param cluster: cluster to configure
:type cluster: :py:class:`elasticluster.cluster.Cluster`
:param list extra_args:
List of additional command-line arguments
that are appended to each invocation of the setup program.
:return: ``True`` on success, ``False`` otherwise. Please note, if nothing
has to be configured, then ``True`` is returned.
:raises: `ConfigurationError` if the playbook can not be found
or is corrupt.
"""
return self._run_playbook(cluster, self._playbook_path, extra_args) |
def mpi_weighted_mean(comm, local_name2valcount):
"""
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val))
else:
name2sum[name] += val * count
name2count[name] += count
return {name : name2sum[name] / name2count[name] for name in name2sum}
else:
return {} | def function[mpi_weighted_mean, parameter[comm, local_name2valcount]]:
constant[
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
]
variable[all_name2valcount] assign[=] call[name[comm].gather, parameter[name[local_name2valcount]]]
if compare[name[comm].rank equal[==] constant[0]] begin[:]
variable[name2sum] assign[=] call[name[defaultdict], parameter[name[float]]]
variable[name2count] assign[=] call[name[defaultdict], parameter[name[float]]]
for taget[name[n2vc]] in starred[name[all_name2valcount]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18c4ccc10>, <ast.Tuple object at 0x7da18c4cc640>]]] in starred[call[name[n2vc].items, parameter[]]] begin[:]
<ast.Try object at 0x7da2045660b0>
return[<ast.DictComp object at 0x7da204567c70>] | keyword[def] identifier[mpi_weighted_mean] ( identifier[comm] , identifier[local_name2valcount] ):
literal[string]
identifier[all_name2valcount] = identifier[comm] . identifier[gather] ( identifier[local_name2valcount] )
keyword[if] identifier[comm] . identifier[rank] == literal[int] :
identifier[name2sum] = identifier[defaultdict] ( identifier[float] )
identifier[name2count] = identifier[defaultdict] ( identifier[float] )
keyword[for] identifier[n2vc] keyword[in] identifier[all_name2valcount] :
keyword[for] ( identifier[name] ,( identifier[val] , identifier[count] )) keyword[in] identifier[n2vc] . identifier[items] ():
keyword[try] :
identifier[val] = identifier[float] ( identifier[val] )
keyword[except] identifier[ValueError] :
keyword[if] identifier[comm] . identifier[rank] == literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] . identifier[format] ( identifier[name] , identifier[val] ))
keyword[else] :
identifier[name2sum] [ identifier[name] ]+= identifier[val] * identifier[count]
identifier[name2count] [ identifier[name] ]+= identifier[count]
keyword[return] { identifier[name] : identifier[name2sum] [ identifier[name] ]/ identifier[name2count] [ identifier[name] ] keyword[for] identifier[name] keyword[in] identifier[name2sum] }
keyword[else] :
keyword[return] {} | def mpi_weighted_mean(comm, local_name2valcount):
"""
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val) # depends on [control=['try'], data=[]]
except ValueError:
if comm.rank == 0:
warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val)) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
else:
name2sum[name] += val * count
name2count[name] += count # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['n2vc']]
return {name: name2sum[name] / name2count[name] for name in name2sum} # depends on [control=['if'], data=[]]
else:
return {} |
def _get_item_position(self, idx):
"""Return a tuple of (start, end) indices of an item from its index."""
start = 0 if idx == 0 else self._index[idx - 1] + 1
end = self._index[idx]
return start, end | def function[_get_item_position, parameter[self, idx]]:
constant[Return a tuple of (start, end) indices of an item from its index.]
variable[start] assign[=] <ast.IfExp object at 0x7da1b0c89360>
variable[end] assign[=] call[name[self]._index][name[idx]]
return[tuple[[<ast.Name object at 0x7da1b0c8bee0>, <ast.Name object at 0x7da1b0c8b790>]]] | keyword[def] identifier[_get_item_position] ( identifier[self] , identifier[idx] ):
literal[string]
identifier[start] = literal[int] keyword[if] identifier[idx] == literal[int] keyword[else] identifier[self] . identifier[_index] [ identifier[idx] - literal[int] ]+ literal[int]
identifier[end] = identifier[self] . identifier[_index] [ identifier[idx] ]
keyword[return] identifier[start] , identifier[end] | def _get_item_position(self, idx):
"""Return a tuple of (start, end) indices of an item from its index."""
start = 0 if idx == 0 else self._index[idx - 1] + 1
end = self._index[idx]
return (start, end) |
def from_file(filename, srcLang, destLang, serverEndpoint=ServerEndpoint):
'''
Traslates the content of source file to destination language
:param filename: file whose contents needs translation
:param srcLang: name of language of input file
:param destLang: name of language of desired language
:param serverEndpoint: Tika server end point (Optional)
:return: translated content
'''
jsonOutput = doTranslate1(srcLang+':'+destLang, filename, serverEndpoint)
return jsonOutput[1] | def function[from_file, parameter[filename, srcLang, destLang, serverEndpoint]]:
constant[
Traslates the content of source file to destination language
:param filename: file whose contents needs translation
:param srcLang: name of language of input file
:param destLang: name of language of desired language
:param serverEndpoint: Tika server end point (Optional)
:return: translated content
]
variable[jsonOutput] assign[=] call[name[doTranslate1], parameter[binary_operation[binary_operation[name[srcLang] + constant[:]] + name[destLang]], name[filename], name[serverEndpoint]]]
return[call[name[jsonOutput]][constant[1]]] | keyword[def] identifier[from_file] ( identifier[filename] , identifier[srcLang] , identifier[destLang] , identifier[serverEndpoint] = identifier[ServerEndpoint] ):
literal[string]
identifier[jsonOutput] = identifier[doTranslate1] ( identifier[srcLang] + literal[string] + identifier[destLang] , identifier[filename] , identifier[serverEndpoint] )
keyword[return] identifier[jsonOutput] [ literal[int] ] | def from_file(filename, srcLang, destLang, serverEndpoint=ServerEndpoint):
"""
Traslates the content of source file to destination language
:param filename: file whose contents needs translation
:param srcLang: name of language of input file
:param destLang: name of language of desired language
:param serverEndpoint: Tika server end point (Optional)
:return: translated content
"""
jsonOutput = doTranslate1(srcLang + ':' + destLang, filename, serverEndpoint)
return jsonOutput[1] |
def _biotype_lookup_fn(gtf):
"""
return a function that will look up the biotype of a feature
this checks for either gene_biotype or biotype being set or for the source
column to have biotype information
"""
db = get_gtf_db(gtf)
sources = set([feature.source for feature in db.all_features()])
gene_biotypes = set([feature.attributes.get("gene_biotype", [None])[0]
for feature in db.all_features()])
biotypes = set([feature.attributes.get("biotype", [None])[0]
for feature in db.all_features()])
if "protein_coding" in sources:
return lambda feature: feature.source
elif "protein_coding" in biotypes:
return lambda feature: feature.attributes.get("biotype", [None])[0]
elif "protein_coding" in gene_biotypes:
return lambda feature: feature.attributes.get("gene_biotype", [None])[0]
else:
return None | def function[_biotype_lookup_fn, parameter[gtf]]:
constant[
return a function that will look up the biotype of a feature
this checks for either gene_biotype or biotype being set or for the source
column to have biotype information
]
variable[db] assign[=] call[name[get_gtf_db], parameter[name[gtf]]]
variable[sources] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b1845060>]]
variable[gene_biotypes] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b1845900>]]
variable[biotypes] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b18465c0>]]
if compare[constant[protein_coding] in name[sources]] begin[:]
return[<ast.Lambda object at 0x7da1b18454e0>] | keyword[def] identifier[_biotype_lookup_fn] ( identifier[gtf] ):
literal[string]
identifier[db] = identifier[get_gtf_db] ( identifier[gtf] )
identifier[sources] = identifier[set] ([ identifier[feature] . identifier[source] keyword[for] identifier[feature] keyword[in] identifier[db] . identifier[all_features] ()])
identifier[gene_biotypes] = identifier[set] ([ identifier[feature] . identifier[attributes] . identifier[get] ( literal[string] ,[ keyword[None] ])[ literal[int] ]
keyword[for] identifier[feature] keyword[in] identifier[db] . identifier[all_features] ()])
identifier[biotypes] = identifier[set] ([ identifier[feature] . identifier[attributes] . identifier[get] ( literal[string] ,[ keyword[None] ])[ literal[int] ]
keyword[for] identifier[feature] keyword[in] identifier[db] . identifier[all_features] ()])
keyword[if] literal[string] keyword[in] identifier[sources] :
keyword[return] keyword[lambda] identifier[feature] : identifier[feature] . identifier[source]
keyword[elif] literal[string] keyword[in] identifier[biotypes] :
keyword[return] keyword[lambda] identifier[feature] : identifier[feature] . identifier[attributes] . identifier[get] ( literal[string] ,[ keyword[None] ])[ literal[int] ]
keyword[elif] literal[string] keyword[in] identifier[gene_biotypes] :
keyword[return] keyword[lambda] identifier[feature] : identifier[feature] . identifier[attributes] . identifier[get] ( literal[string] ,[ keyword[None] ])[ literal[int] ]
keyword[else] :
keyword[return] keyword[None] | def _biotype_lookup_fn(gtf):
"""
return a function that will look up the biotype of a feature
this checks for either gene_biotype or biotype being set or for the source
column to have biotype information
"""
db = get_gtf_db(gtf)
sources = set([feature.source for feature in db.all_features()])
gene_biotypes = set([feature.attributes.get('gene_biotype', [None])[0] for feature in db.all_features()])
biotypes = set([feature.attributes.get('biotype', [None])[0] for feature in db.all_features()])
if 'protein_coding' in sources:
return lambda feature: feature.source # depends on [control=['if'], data=[]]
elif 'protein_coding' in biotypes:
return lambda feature: feature.attributes.get('biotype', [None])[0] # depends on [control=['if'], data=[]]
elif 'protein_coding' in gene_biotypes:
return lambda feature: feature.attributes.get('gene_biotype', [None])[0] # depends on [control=['if'], data=[]]
else:
return None |
def add_arguments(self, parser):
"""Adds the information commands to the parser.
Args:
self (InfoCommand): the ``InfoCommand`` instance
parser (argparse.ArgumentParser): the parser to add the arguments to
Returns:
``None``
"""
parser.add_argument('-p', '--product', action='store_true',
help='print the production information')
parser.add_argument('-j', '--jtag', action='store_true',
help='print the JTAG pin status')
return self.add_common_arguments(parser, False) | def function[add_arguments, parameter[self, parser]]:
constant[Adds the information commands to the parser.
Args:
self (InfoCommand): the ``InfoCommand`` instance
parser (argparse.ArgumentParser): the parser to add the arguments to
Returns:
``None``
]
call[name[parser].add_argument, parameter[constant[-p], constant[--product]]]
call[name[parser].add_argument, parameter[constant[-j], constant[--jtag]]]
return[call[name[self].add_common_arguments, parameter[name[parser], constant[False]]]] | keyword[def] identifier[add_arguments] ( identifier[self] , identifier[parser] ):
literal[string]
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
keyword[return] identifier[self] . identifier[add_common_arguments] ( identifier[parser] , keyword[False] ) | def add_arguments(self, parser):
"""Adds the information commands to the parser.
Args:
self (InfoCommand): the ``InfoCommand`` instance
parser (argparse.ArgumentParser): the parser to add the arguments to
Returns:
``None``
"""
parser.add_argument('-p', '--product', action='store_true', help='print the production information')
parser.add_argument('-j', '--jtag', action='store_true', help='print the JTAG pin status')
return self.add_common_arguments(parser, False) |
def from_optimize_result(cls, result, n, m, index=None):
"""Construct a Projection from the output of an optimization.
Args:
result (:py:class:`scipy.optimize.OptimizeResult`): Object
returned by :py:func:`scipy.optimize.minimize`.
n (`int`): Number of dimensions.
m (`int`): Number of samples.
index (`list-like`): Names of samples. (Optional).
Returns:
:py:class:`pymds.Projection`
"""
coords = pd.DataFrame(result.x.reshape((m, n)), index=index)
projection = cls(coords)
projection.stress = result.fun
return projection | def function[from_optimize_result, parameter[cls, result, n, m, index]]:
constant[Construct a Projection from the output of an optimization.
Args:
result (:py:class:`scipy.optimize.OptimizeResult`): Object
returned by :py:func:`scipy.optimize.minimize`.
n (`int`): Number of dimensions.
m (`int`): Number of samples.
index (`list-like`): Names of samples. (Optional).
Returns:
:py:class:`pymds.Projection`
]
variable[coords] assign[=] call[name[pd].DataFrame, parameter[call[name[result].x.reshape, parameter[tuple[[<ast.Name object at 0x7da1b0a2d540>, <ast.Name object at 0x7da1b0a2f6d0>]]]]]]
variable[projection] assign[=] call[name[cls], parameter[name[coords]]]
name[projection].stress assign[=] name[result].fun
return[name[projection]] | keyword[def] identifier[from_optimize_result] ( identifier[cls] , identifier[result] , identifier[n] , identifier[m] , identifier[index] = keyword[None] ):
literal[string]
identifier[coords] = identifier[pd] . identifier[DataFrame] ( identifier[result] . identifier[x] . identifier[reshape] (( identifier[m] , identifier[n] )), identifier[index] = identifier[index] )
identifier[projection] = identifier[cls] ( identifier[coords] )
identifier[projection] . identifier[stress] = identifier[result] . identifier[fun]
keyword[return] identifier[projection] | def from_optimize_result(cls, result, n, m, index=None):
"""Construct a Projection from the output of an optimization.
Args:
result (:py:class:`scipy.optimize.OptimizeResult`): Object
returned by :py:func:`scipy.optimize.minimize`.
n (`int`): Number of dimensions.
m (`int`): Number of samples.
index (`list-like`): Names of samples. (Optional).
Returns:
:py:class:`pymds.Projection`
"""
coords = pd.DataFrame(result.x.reshape((m, n)), index=index)
projection = cls(coords)
projection.stress = result.fun
return projection |
def fix_schema(prefix, schema):
"""
Create an Elasticsearch field name from a schema string
"""
schema_dict = extract_schema(schema)
snake_case_organization = schema_dict['vendor'].replace('.', '_').lower()
snake_case_name = re.sub('([^A-Z_])([A-Z])', '\g<1>_\g<2>', schema_dict['name']).lower()
model = schema_dict['version'].split('-')[0]
return "{}_{}_{}_{}".format(prefix, snake_case_organization, snake_case_name, model) | def function[fix_schema, parameter[prefix, schema]]:
constant[
Create an Elasticsearch field name from a schema string
]
variable[schema_dict] assign[=] call[name[extract_schema], parameter[name[schema]]]
variable[snake_case_organization] assign[=] call[call[call[name[schema_dict]][constant[vendor]].replace, parameter[constant[.], constant[_]]].lower, parameter[]]
variable[snake_case_name] assign[=] call[call[name[re].sub, parameter[constant[([^A-Z_])([A-Z])], constant[\g<1>_\g<2>], call[name[schema_dict]][constant[name]]]].lower, parameter[]]
variable[model] assign[=] call[call[call[name[schema_dict]][constant[version]].split, parameter[constant[-]]]][constant[0]]
return[call[constant[{}_{}_{}_{}].format, parameter[name[prefix], name[snake_case_organization], name[snake_case_name], name[model]]]] | keyword[def] identifier[fix_schema] ( identifier[prefix] , identifier[schema] ):
literal[string]
identifier[schema_dict] = identifier[extract_schema] ( identifier[schema] )
identifier[snake_case_organization] = identifier[schema_dict] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ). identifier[lower] ()
identifier[snake_case_name] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[schema_dict] [ literal[string] ]). identifier[lower] ()
identifier[model] = identifier[schema_dict] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ]
keyword[return] literal[string] . identifier[format] ( identifier[prefix] , identifier[snake_case_organization] , identifier[snake_case_name] , identifier[model] ) | def fix_schema(prefix, schema):
"""
Create an Elasticsearch field name from a schema string
"""
schema_dict = extract_schema(schema)
snake_case_organization = schema_dict['vendor'].replace('.', '_').lower()
snake_case_name = re.sub('([^A-Z_])([A-Z])', '\\g<1>_\\g<2>', schema_dict['name']).lower()
model = schema_dict['version'].split('-')[0]
return '{}_{}_{}_{}'.format(prefix, snake_case_organization, snake_case_name, model) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.