code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def _expected_condition_value_in_element_attribute(self, element_attribute_value):
    """Tries to find the element and checks that it contains the requested attribute with the expected value,
    but does not throw an exception if the element is not found

    :param element_attribute_value: Tuple with 3 items where:
        [0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
        [1] attribute: element's attribute where to check its value
        [2] value: expected value for the element's attribute
    :returns: the web element if it contains the expected value for the requested attribute or False
    :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
    """
    element, attribute, expected_value = element_attribute_value
    found_element = self._expected_condition_find_element(element)
    if not found_element:
        return False
    try:
        if found_element.get_attribute(attribute) == expected_value:
            return found_element
    except StaleElementReferenceException:
        # The element went stale between lookup and attribute read:
        # treat it the same as "condition not met yet".
        pass
    return False
|
def function[_expected_condition_value_in_element_attribute, parameter[self, element_attribute_value]]:
constant[Tries to find the element and checks that it contains the requested attribute with the expected value,
but does not thrown an exception if the element is not found
:param element_attribute_value: Tuple with 3 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] attribute: element's attribute where to check its value
[2] value: expected value for the element's attribute
:returns: the web element if it contains the expected value for the requested attribute or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
]
<ast.Tuple object at 0x7da1b2308b50> assign[=] name[element_attribute_value]
variable[web_element] assign[=] call[name[self]._expected_condition_find_element, parameter[name[element]]]
<ast.Try object at 0x7da1b2308fa0>
|
keyword[def] identifier[_expected_condition_value_in_element_attribute] ( identifier[self] , identifier[element_attribute_value] ):
literal[string]
identifier[element] , identifier[attribute] , identifier[value] = identifier[element_attribute_value]
identifier[web_element] = identifier[self] . identifier[_expected_condition_find_element] ( identifier[element] )
keyword[try] :
keyword[return] identifier[web_element] keyword[if] identifier[web_element] keyword[and] identifier[web_element] . identifier[get_attribute] ( identifier[attribute] )== identifier[value] keyword[else] keyword[False]
keyword[except] identifier[StaleElementReferenceException] :
keyword[return] keyword[False]
|
def _expected_condition_value_in_element_attribute(self, element_attribute_value):
"""Tries to find the element and checks that it contains the requested attribute with the expected value,
but does not thrown an exception if the element is not found
:param element_attribute_value: Tuple with 3 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] attribute: element's attribute where to check its value
[2] value: expected value for the element's attribute
:returns: the web element if it contains the expected value for the requested attribute or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
(element, attribute, value) = element_attribute_value
web_element = self._expected_condition_find_element(element)
try:
return web_element if web_element and web_element.get_attribute(attribute) == value else False # depends on [control=['try'], data=[]]
except StaleElementReferenceException:
return False # depends on [control=['except'], data=[]]
|
def refresh_persistent_maps(self):
    """
    Get information about persistent maps of the robots.
    :return:
    """
    for robot in self._robots:
        maps_url = urljoin(
            self.ENDPOINT,
            'users/me/robots/{}/persistent_maps'.format(robot.serial))
        response = requests.get(maps_url, headers=self._headers)
        # Fail loudly on HTTP errors rather than caching a bad payload.
        response.raise_for_status()
        self._persistent_maps.update({robot.serial: response.json()})
|
def function[refresh_persistent_maps, parameter[self]]:
constant[
Get information about persistent maps of the robots.
:return:
]
for taget[name[robot]] in starred[name[self]._robots] begin[:]
variable[resp2] assign[=] call[name[requests].get, parameter[call[name[urljoin], parameter[name[self].ENDPOINT, call[constant[users/me/robots/{}/persistent_maps].format, parameter[name[robot].serial]]]]]]
call[name[resp2].raise_for_status, parameter[]]
call[name[self]._persistent_maps.update, parameter[dictionary[[<ast.Attribute object at 0x7da1b0126440>], [<ast.Call object at 0x7da1b01259f0>]]]]
|
keyword[def] identifier[refresh_persistent_maps] ( identifier[self] ):
literal[string]
keyword[for] identifier[robot] keyword[in] identifier[self] . identifier[_robots] :
identifier[resp2] =( identifier[requests] . identifier[get] ( identifier[urljoin] (
identifier[self] . identifier[ENDPOINT] ,
literal[string] . identifier[format] ( identifier[robot] . identifier[serial] )),
identifier[headers] = identifier[self] . identifier[_headers] ))
identifier[resp2] . identifier[raise_for_status] ()
identifier[self] . identifier[_persistent_maps] . identifier[update] ({ identifier[robot] . identifier[serial] : identifier[resp2] . identifier[json] ()})
|
def refresh_persistent_maps(self):
"""
Get information about persistent maps of the robots.
:return:
"""
for robot in self._robots:
resp2 = requests.get(urljoin(self.ENDPOINT, 'users/me/robots/{}/persistent_maps'.format(robot.serial)), headers=self._headers)
resp2.raise_for_status()
self._persistent_maps.update({robot.serial: resp2.json()}) # depends on [control=['for'], data=['robot']]
|
def _validate_logical_id(cls, logical_id):
"""Validates that the provided logical id is an alphanumeric string.
:param str logical_id: the logical id to validate
:returns: True if the logical id is valid
:rtype: bool
:raises TypeError: if the logical id is invalid
"""
pattern = re.compile(r'^[A-Za-z0-9]+$')
if logical_id is not None and pattern.match(logical_id):
return True
raise InvalidResourceException(logical_id, "Logical ids must be alphanumeric.")
|
def function[_validate_logical_id, parameter[cls, logical_id]]:
constant[Validates that the provided logical id is an alphanumeric string.
:param str logical_id: the logical id to validate
:returns: True if the logical id is valid
:rtype: bool
:raises TypeError: if the logical id is invalid
]
variable[pattern] assign[=] call[name[re].compile, parameter[constant[^[A-Za-z0-9]+$]]]
if <ast.BoolOp object at 0x7da2044c3bb0> begin[:]
return[constant[True]]
<ast.Raise object at 0x7da20c990850>
|
keyword[def] identifier[_validate_logical_id] ( identifier[cls] , identifier[logical_id] ):
literal[string]
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] )
keyword[if] identifier[logical_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[pattern] . identifier[match] ( identifier[logical_id] ):
keyword[return] keyword[True]
keyword[raise] identifier[InvalidResourceException] ( identifier[logical_id] , literal[string] )
|
def _validate_logical_id(cls, logical_id):
"""Validates that the provided logical id is an alphanumeric string.
:param str logical_id: the logical id to validate
:returns: True if the logical id is valid
:rtype: bool
:raises TypeError: if the logical id is invalid
"""
pattern = re.compile('^[A-Za-z0-9]+$')
if logical_id is not None and pattern.match(logical_id):
return True # depends on [control=['if'], data=[]]
raise InvalidResourceException(logical_id, 'Logical ids must be alphanumeric.')
|
def restore_edge(self, edge):
    """
    Restores a previously hidden edge back into the graph.
    """
    try:
        head_id, tail_id, payload = self.hidden_edges[edge]
        # Re-register the edge on both endpoints: outgoing list of the
        # tail node and incoming list of the head node.
        self.nodes[tail_id][0].append(edge)
        self.nodes[head_id][1].append(edge)
        self.edges[edge] = (head_id, tail_id, payload)
        # Only drop the hidden entry once the edge is fully re-linked.
        del self.hidden_edges[edge]
    except KeyError:
        raise GraphError('Invalid edge %s' % edge)
|
def function[restore_edge, parameter[self, edge]]:
constant[
Restores a previously hidden edge back into the graph.
]
<ast.Try object at 0x7da1b0e271f0>
|
keyword[def] identifier[restore_edge] ( identifier[self] , identifier[edge] ):
literal[string]
keyword[try] :
identifier[head_id] , identifier[tail_id] , identifier[data] = identifier[self] . identifier[hidden_edges] [ identifier[edge] ]
identifier[self] . identifier[nodes] [ identifier[tail_id] ][ literal[int] ]. identifier[append] ( identifier[edge] )
identifier[self] . identifier[nodes] [ identifier[head_id] ][ literal[int] ]. identifier[append] ( identifier[edge] )
identifier[self] . identifier[edges] [ identifier[edge] ]= identifier[head_id] , identifier[tail_id] , identifier[data]
keyword[del] identifier[self] . identifier[hidden_edges] [ identifier[edge] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[GraphError] ( literal[string] % identifier[edge] )
|
def restore_edge(self, edge):
"""
Restores a previously hidden edge back into the graph.
"""
try:
(head_id, tail_id, data) = self.hidden_edges[edge]
self.nodes[tail_id][0].append(edge)
self.nodes[head_id][1].append(edge)
self.edges[edge] = (head_id, tail_id, data)
del self.hidden_edges[edge] # depends on [control=['try'], data=[]]
except KeyError:
raise GraphError('Invalid edge %s' % edge) # depends on [control=['except'], data=[]]
|
def copy(self):
    """Generate a Primer copy.

    :returns: A safely-editable copy of the current primer.
    :rtype: coral.DNA
    """
    # Instantiate through type(self) so subclasses copy as themselves.
    primer_cls = type(self)
    return primer_cls(self.anneal, self.tm, overhang=self.overhang,
                      name=self.name, note=self.note)
|
def function[copy, parameter[self]]:
constant[Generate a Primer copy.
:returns: A safely-editable copy of the current primer.
:rtype: coral.DNA
]
return[call[call[name[type], parameter[name[self]]], parameter[name[self].anneal, name[self].tm]]]
|
keyword[def] identifier[copy] ( identifier[self] ):
literal[string]
keyword[return] identifier[type] ( identifier[self] )( identifier[self] . identifier[anneal] , identifier[self] . identifier[tm] , identifier[overhang] = identifier[self] . identifier[overhang] ,
identifier[name] = identifier[self] . identifier[name] , identifier[note] = identifier[self] . identifier[note] )
|
def copy(self):
"""Generate a Primer copy.
:returns: A safely-editable copy of the current primer.
:rtype: coral.DNA
"""
return type(self)(self.anneal, self.tm, overhang=self.overhang, name=self.name, note=self.note)
|
def on_tblFunctions1_itemSelectionChanged(self):
    """Choose selected hazard x exposure combination.

    .. note:: This is an automatic Qt slot
        executed when the category selection changes.
    """
    # Clear the selection on the 2nd matrix
    self.parent.step_fc_functions2.tblFunctions2.clearContents()
    self.parent.step_fc_functions2.lblAvailableFunctions2.clear()
    self.parent.pbnNext.setEnabled(True)
    # Put a dot to the selected cell - note there is no way
    # to center an icon without using a custom ItemDelegate
    selection = self.tblFunctions1.selectedItems()
    # Conditional expression instead of the fragile `cond and a or b`
    # idiom, which silently yields the fallback when `a` is falsy.
    selItem = selection[0] if len(selection) == 1 else None
    for row in range(self.tblFunctions1.rowCount()):
        for column in range(self.tblFunctions1.columnCount()):
            item = self.tblFunctions1.item(row, column)
            item.setText('•' if item == selItem else '')
|
def function[on_tblFunctions1_itemSelectionChanged, parameter[self]]:
constant[Choose selected hazard x exposure combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
]
call[name[self].parent.step_fc_functions2.tblFunctions2.clearContents, parameter[]]
call[name[self].parent.step_fc_functions2.lblAvailableFunctions2.clear, parameter[]]
call[name[self].parent.pbnNext.setEnabled, parameter[constant[True]]]
variable[selection] assign[=] call[name[self].tblFunctions1.selectedItems, parameter[]]
variable[selItem] assign[=] <ast.BoolOp object at 0x7da2054a7220>
for taget[name[row]] in starred[call[name[range], parameter[call[name[self].tblFunctions1.rowCount, parameter[]]]]] begin[:]
for taget[name[column]] in starred[call[name[range], parameter[call[name[self].tblFunctions1.columnCount, parameter[]]]]] begin[:]
variable[item] assign[=] call[name[self].tblFunctions1.item, parameter[name[row], name[column]]]
call[name[item].setText, parameter[<ast.BoolOp object at 0x7da2054a5780>]]
|
keyword[def] identifier[on_tblFunctions1_itemSelectionChanged] ( identifier[self] ):
literal[string]
identifier[self] . identifier[parent] . identifier[step_fc_functions2] . identifier[tblFunctions2] . identifier[clearContents] ()
identifier[self] . identifier[parent] . identifier[step_fc_functions2] . identifier[lblAvailableFunctions2] . identifier[clear] ()
identifier[self] . identifier[parent] . identifier[pbnNext] . identifier[setEnabled] ( keyword[True] )
identifier[selection] = identifier[self] . identifier[tblFunctions1] . identifier[selectedItems] ()
identifier[selItem] =( identifier[len] ( identifier[selection] )== literal[int] ) keyword[and] identifier[selection] [ literal[int] ] keyword[or] keyword[None]
keyword[for] identifier[row] keyword[in] identifier[range] ( identifier[self] . identifier[tblFunctions1] . identifier[rowCount] ()):
keyword[for] identifier[column] keyword[in] identifier[range] ( identifier[self] . identifier[tblFunctions1] . identifier[columnCount] ()):
identifier[item] = identifier[self] . identifier[tblFunctions1] . identifier[item] ( identifier[row] , identifier[column] )
identifier[item] . identifier[setText] (( identifier[item] == identifier[selItem] ) keyword[and] literal[string] keyword[or] literal[string] )
|
def on_tblFunctions1_itemSelectionChanged(self):
"""Choose selected hazard x exposure combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
# Clear the selection on the 2nd matrix
self.parent.step_fc_functions2.tblFunctions2.clearContents()
self.parent.step_fc_functions2.lblAvailableFunctions2.clear()
self.parent.pbnNext.setEnabled(True)
# Put a dot to the selected cell - note there is no way
# to center an icon without using a custom ItemDelegate
selection = self.tblFunctions1.selectedItems()
selItem = len(selection) == 1 and selection[0] or None
for row in range(self.tblFunctions1.rowCount()):
for column in range(self.tblFunctions1.columnCount()):
item = self.tblFunctions1.item(row, column)
item.setText(item == selItem and '•' or '') # depends on [control=['for'], data=['column']] # depends on [control=['for'], data=['row']]
|
def get_polling_override(self):
    """Get the current polling override value in milliseconds.

    See :meth:`set_polling_override` for more information.

    Returns:
        None on error, otherwise the current override period in milliseconds
        (0 = disabled).
    """
    polling_override = self.get_characteristic_handle_from_uuid(UUID_POLLING_OVERRIDE)
    if polling_override is None:
        # logger.warn() is a deprecated alias; logger.warning() is the
        # documented spelling.
        logger.warning('Failed to find handle for polling override')
        return None
    override_ms = self.dongle._read_attribute(self.conn_handle, polling_override, True)
    # The attribute comes back as a single byte (or None on read failure);
    # ord() converts it to the integer period.
    return None if override_ms is None else ord(override_ms)
|
def function[get_polling_override, parameter[self]]:
constant[Get the current polling override value in milliseconds.
See :meth:`set_polling_override` for more information.
Returns:
None on error, otherwise the current override period in milliseconds
(0 = disabled).
]
variable[polling_override] assign[=] call[name[self].get_characteristic_handle_from_uuid, parameter[name[UUID_POLLING_OVERRIDE]]]
if compare[name[polling_override] is constant[None]] begin[:]
call[name[logger].warn, parameter[constant[Failed to find handle for polling override]]]
return[constant[None]]
variable[override_ms] assign[=] call[name[self].dongle._read_attribute, parameter[name[self].conn_handle, name[polling_override], constant[True]]]
return[<ast.IfExp object at 0x7da18eb545e0>]
|
keyword[def] identifier[get_polling_override] ( identifier[self] ):
literal[string]
identifier[polling_override] = identifier[self] . identifier[get_characteristic_handle_from_uuid] ( identifier[UUID_POLLING_OVERRIDE] )
keyword[if] identifier[polling_override] keyword[is] keyword[None] :
identifier[logger] . identifier[warn] ( literal[string] )
keyword[return] keyword[None]
identifier[override_ms] = identifier[self] . identifier[dongle] . identifier[_read_attribute] ( identifier[self] . identifier[conn_handle] , identifier[polling_override] , keyword[True] )
keyword[return] keyword[None] keyword[if] identifier[override_ms] keyword[is] keyword[None] keyword[else] identifier[ord] ( identifier[override_ms] )
|
def get_polling_override(self):
"""Get the current polling override value in milliseconds.
See :meth:`set_polling_override` for more information.
Returns:
None on error, otherwise the current override period in milliseconds
(0 = disabled).
"""
polling_override = self.get_characteristic_handle_from_uuid(UUID_POLLING_OVERRIDE)
if polling_override is None:
logger.warn('Failed to find handle for polling override')
return None # depends on [control=['if'], data=[]]
override_ms = self.dongle._read_attribute(self.conn_handle, polling_override, True)
return None if override_ms is None else ord(override_ms)
|
def check_hash_key(query_on, key):
    """Only allows == against query_on.hash_key"""
    if not isinstance(key, BaseCondition):
        return False
    # Must be an equality condition, and it must target exactly the
    # hash key column (identity check, not equality).
    return key.operation == "==" and key.column is query_on.hash_key
|
def function[check_hash_key, parameter[query_on, key]]:
constant[Only allows == against query_on.hash_key]
return[<ast.BoolOp object at 0x7da1b0f2e7d0>]
|
keyword[def] identifier[check_hash_key] ( identifier[query_on] , identifier[key] ):
literal[string]
keyword[return] (
identifier[isinstance] ( identifier[key] , identifier[BaseCondition] ) keyword[and]
( identifier[key] . identifier[operation] == literal[string] ) keyword[and]
( identifier[key] . identifier[column] keyword[is] identifier[query_on] . identifier[hash_key] )
)
|
def check_hash_key(query_on, key):
"""Only allows == against query_on.hash_key"""
return isinstance(key, BaseCondition) and key.operation == '==' and (key.column is query_on.hash_key)
|
def lookup_init_systems(self):
    """Return the relevant init system and its version.

    This will try to look at the mapping first. If the mapping
    doesn't exist, it will try to identify it automatically.
    Windows lookup is not supported and `nssm` is assumed.
    """
    # Platforms with a fixed, assumed init system.
    platform_defaults = (
        (utils.IS_WIN,
         'Lookup is not supported on Windows. Assuming nssm...', 'nssm'),
        (utils.IS_DARWIN,
         'Lookup is not supported on OS X, Assuming launchd...', 'launchd'),
    )
    for matches, message, init_system in platform_defaults:
        if matches:
            logger.debug(message)
            return [init_system]
    logger.debug('Looking up init method...')
    # Explicit mapping wins; otherwise fall back to auto-detection.
    return self._lookup_by_mapping() or self._init_sys_auto_lookup()
|
def function[lookup_init_systems, parameter[self]]:
constant[Return the relevant init system and its version.
This will try to look at the mapping first. If the mapping
doesn't exist, it will try to identify it automatically.
Windows lookup is not supported and `nssm` is assumed.
]
if name[utils].IS_WIN begin[:]
call[name[logger].debug, parameter[constant[Lookup is not supported on Windows. Assuming nssm...]]]
return[list[[<ast.Constant object at 0x7da18f721e70>]]]
if name[utils].IS_DARWIN begin[:]
call[name[logger].debug, parameter[constant[Lookup is not supported on OS X, Assuming launchd...]]]
return[list[[<ast.Constant object at 0x7da18f722890>]]]
call[name[logger].debug, parameter[constant[Looking up init method...]]]
return[<ast.BoolOp object at 0x7da18f7223e0>]
|
keyword[def] identifier[lookup_init_systems] ( identifier[self] ):
literal[string]
keyword[if] identifier[utils] . identifier[IS_WIN] :
identifier[logger] . identifier[debug] (
literal[string] )
keyword[return] [ literal[string] ]
keyword[if] identifier[utils] . identifier[IS_DARWIN] :
identifier[logger] . identifier[debug] (
literal[string] )
keyword[return] [ literal[string] ]
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[self] . identifier[_lookup_by_mapping] () keyword[or] identifier[self] . identifier[_init_sys_auto_lookup] ()
|
def lookup_init_systems(self):
"""Return the relevant init system and its version.
This will try to look at the mapping first. If the mapping
doesn't exist, it will try to identify it automatically.
Windows lookup is not supported and `nssm` is assumed.
"""
if utils.IS_WIN:
logger.debug('Lookup is not supported on Windows. Assuming nssm...')
return ['nssm'] # depends on [control=['if'], data=[]]
if utils.IS_DARWIN:
logger.debug('Lookup is not supported on OS X, Assuming launchd...')
return ['launchd'] # depends on [control=['if'], data=[]]
logger.debug('Looking up init method...')
return self._lookup_by_mapping() or self._init_sys_auto_lookup()
|
def to_glyphs(ufos_or_designspace, glyphs_module=classes, minimize_ufo_diffs=False):
    """
    Take a list of UFOs or a single DesignspaceDocument with attached UFOs
    and converts it into a GSFont object.

    The GSFont object is in-memory, it's up to the user to write it to the disk
    if needed.

    This should be the inverse function of `to_ufos` and `to_designspace`,
    so we should have to_glyphs(to_ufos(font)) == font
    and also to_glyphs(to_designspace(font)) == font
    """
    # A DesignspaceDocument exposes a `sources` attribute; a plain list of
    # UFOs does not — use that to pick the right builder keyword.
    if hasattr(ufos_or_designspace, "sources"):
        source_kwarg = "designspace"
    else:
        source_kwarg = "ufos"
    builder = GlyphsBuilder(
        glyphs_module=glyphs_module,
        minimize_ufo_diffs=minimize_ufo_diffs,
        **{source_kwarg: ufos_or_designspace}
    )
    return builder.font
|
def function[to_glyphs, parameter[ufos_or_designspace, glyphs_module, minimize_ufo_diffs]]:
constant[
Take a list of UFOs or a single DesignspaceDocument with attached UFOs
and converts it into a GSFont object.
The GSFont object is in-memory, it's up to the user to write it to the disk
if needed.
This should be the inverse function of `to_ufos` and `to_designspace`,
so we should have to_glyphs(to_ufos(font)) == font
and also to_glyphs(to_designspace(font)) == font
]
if call[name[hasattr], parameter[name[ufos_or_designspace], constant[sources]]] begin[:]
variable[builder] assign[=] call[name[GlyphsBuilder], parameter[]]
return[name[builder].font]
|
keyword[def] identifier[to_glyphs] ( identifier[ufos_or_designspace] , identifier[glyphs_module] = identifier[classes] , identifier[minimize_ufo_diffs] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[ufos_or_designspace] , literal[string] ):
identifier[builder] = identifier[GlyphsBuilder] (
identifier[designspace] = identifier[ufos_or_designspace] ,
identifier[glyphs_module] = identifier[glyphs_module] ,
identifier[minimize_ufo_diffs] = identifier[minimize_ufo_diffs] ,
)
keyword[else] :
identifier[builder] = identifier[GlyphsBuilder] (
identifier[ufos] = identifier[ufos_or_designspace] ,
identifier[glyphs_module] = identifier[glyphs_module] ,
identifier[minimize_ufo_diffs] = identifier[minimize_ufo_diffs] ,
)
keyword[return] identifier[builder] . identifier[font]
|
def to_glyphs(ufos_or_designspace, glyphs_module=classes, minimize_ufo_diffs=False):
"""
Take a list of UFOs or a single DesignspaceDocument with attached UFOs
and converts it into a GSFont object.
The GSFont object is in-memory, it's up to the user to write it to the disk
if needed.
This should be the inverse function of `to_ufos` and `to_designspace`,
so we should have to_glyphs(to_ufos(font)) == font
and also to_glyphs(to_designspace(font)) == font
"""
if hasattr(ufos_or_designspace, 'sources'):
builder = GlyphsBuilder(designspace=ufos_or_designspace, glyphs_module=glyphs_module, minimize_ufo_diffs=minimize_ufo_diffs) # depends on [control=['if'], data=[]]
else:
builder = GlyphsBuilder(ufos=ufos_or_designspace, glyphs_module=glyphs_module, minimize_ufo_diffs=minimize_ufo_diffs)
return builder.font
|
async def blob(self, elem=None, elem_type=None, params=None):
    """
    Loads/dumps blob
    :return:
    """
    # Default the element type to the runtime class of the element.
    if not elem_type:
        elem_type = elem.__class__
    # Types that know how to serialize themselves take precedence.
    if hasattr(elem_type, 'blob_serialize'):
        if elem is None:
            elem = elem_type()
        return await elem.blob_serialize(self, elem=elem, elem_type=elem_type,
                                         params=params)
    if self.writing:
        return await x.dump_blob(self.iobj, elem=elem, elem_type=elem_type,
                                 params=params)
    return await x.load_blob(self.iobj, elem_type=elem_type, params=params,
                             elem=elem)
|
<ast.AsyncFunctionDef object at 0x7da1b2457910>
|
keyword[async] keyword[def] identifier[blob] ( identifier[self] , identifier[elem] = keyword[None] , identifier[elem_type] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
identifier[elem_type] = identifier[elem_type] keyword[if] identifier[elem_type] keyword[else] identifier[elem] . identifier[__class__]
keyword[if] identifier[hasattr] ( identifier[elem_type] , literal[string] ):
identifier[elem] = identifier[elem_type] () keyword[if] identifier[elem] keyword[is] keyword[None] keyword[else] identifier[elem]
keyword[return] keyword[await] identifier[elem] . identifier[blob_serialize] ( identifier[self] , identifier[elem] = identifier[elem] , identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] )
keyword[if] identifier[self] . identifier[writing] :
keyword[return] keyword[await] identifier[x] . identifier[dump_blob] ( identifier[self] . identifier[iobj] , identifier[elem] = identifier[elem] , identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] )
keyword[else] :
keyword[return] keyword[await] identifier[x] . identifier[load_blob] ( identifier[self] . identifier[iobj] , identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] , identifier[elem] = identifier[elem] )
|
async def blob(self, elem=None, elem_type=None, params=None):
"""
Loads/dumps blob
:return:
"""
elem_type = elem_type if elem_type else elem.__class__
if hasattr(elem_type, 'blob_serialize'):
elem = elem_type() if elem is None else elem
return await elem.blob_serialize(self, elem=elem, elem_type=elem_type, params=params) # depends on [control=['if'], data=[]]
if self.writing:
return await x.dump_blob(self.iobj, elem=elem, elem_type=elem_type, params=params) # depends on [control=['if'], data=[]]
else:
return await x.load_blob(self.iobj, elem_type=elem_type, params=params, elem=elem)
|
def validate(self, model, checks=None):
    """Use a defined schema to validate the medium table format.

    :param model: model whose gene identifiers are the valid set
    :param checks: optional list of additional validation checks
    """
    # `checks=None` sentinel avoids the shared mutable default-argument
    # pitfall (original signature was `checks=[]`).
    if checks is None:
        checks = []
    custom = [
        check_partial(gene_id_check, frozenset(g.id for g in model.genes))
    ]
    super(EssentialityExperiment, self).validate(
        model=model, checks=checks + custom)
|
def function[validate, parameter[self, model, checks]]:
constant[Use a defined schema to validate the medium table format.]
variable[custom] assign[=] list[[<ast.Call object at 0x7da1b23448b0>]]
call[call[name[super], parameter[name[EssentialityExperiment], name[self]]].validate, parameter[]]
|
keyword[def] identifier[validate] ( identifier[self] , identifier[model] , identifier[checks] =[]):
literal[string]
identifier[custom] =[
identifier[check_partial] ( identifier[gene_id_check] , identifier[frozenset] ( identifier[g] . identifier[id] keyword[for] identifier[g] keyword[in] identifier[model] . identifier[genes] ))
]
identifier[super] ( identifier[EssentialityExperiment] , identifier[self] ). identifier[validate] (
identifier[model] = identifier[model] , identifier[checks] = identifier[checks] + identifier[custom] )
|
def validate(self, model, checks=[]):
"""Use a defined schema to validate the medium table format."""
custom = [check_partial(gene_id_check, frozenset((g.id for g in model.genes)))]
super(EssentialityExperiment, self).validate(model=model, checks=checks + custom)
|
def restore_from_archive(self, parent=None):
    """Function to restore a DP from archived copy
    Asks for confirmation along the way if parent is not None
    (in which case it will be the parent widget for confirmation dialogs)
    """
    from PyQt4.Qt import QMessageBox
    # Whether the destination already exists; restoring will overwrite it.
    exists = os.path.exists(self.sourcepath)
    if parent:
        msg = """<P>Do you really want to restore <tt>%s</tt> from
        this entry's copy of <tt>%s</tt>?</P>""" % (self.sourcepath, self.filename)
        exists = os.path.exists(self.sourcepath)
        if exists:
            msg += """<P>Current file exists, and will be overwritten.</P>"""
            # Overwrite case: use a warning dialog so the user pays attention.
            if QMessageBox.warning(parent, "Restoring from archive", msg,
                                   QMessageBox.Yes, QMessageBox.No) != QMessageBox.Yes:
                return False
        else:
            # No existing file: a plain question dialog is enough.
            if QMessageBox.question(parent, "Restoring from archive", msg,
                                    QMessageBox.Yes, QMessageBox.No) != QMessageBox.Yes:
                return False
    busy = Purr.BusyIndicator()
    # remove file if in the way
    # NOTE(review): paths are interpolated into shell commands throughout;
    # the single quotes guard against spaces but not embedded quotes —
    # confirm paths are trusted, or switch to subprocess with a list.
    if exists:
        if os.system("/bin/rm -fr '%s'" % self.sourcepath):
            # Non-zero exit status: removal failed, abort the restore.
            busy = None
            if parent:
                QMessageBox.warning(parent, "Error removing file", """<P>
                There was an error removing %s. Archived copy was not restored.
                The text console may have more information.</P>""" % self.sourcepath,
                                    QMessageBox.Ok, 0)
            return False
    # unpack archived file
    if self.fullpath.endswith('.tgz'):
        # Archived directories are stored as tarballs; unpack into the
        # directory that contains the original source path.
        parent_dir = os.path.dirname(self.sourcepath.rstrip('/'))
        os.system("/bin/rm -fr %s" % self.sourcepath)
        if os.system("tar zxf '%s' -C '%s'" % (self.fullpath, parent_dir)):
            busy = None
            if parent:
                QMessageBox.warning(parent, "Error unpacking file", """<P>
                There was an error unpacking the archived version to %s. The text console may have more information.</P>""" % self.sourcepath,
                                    QMessageBox.Ok, 0)
            return False
    # else simply copy over
    else:
        # Plain files: recursive, attribute-preserving copy of the archive.
        if os.system("/bin/cp -a '%s' '%s'" % (self.fullpath, self.sourcepath)):
            busy = None
            if parent:
                QMessageBox.warning(parent, "Error copying file", """<P>
                There was an error copying the archived version to %s. The text console may have more information.</P>""" % self.sourcepath,
                                    QMessageBox.Ok, 0)
            return False
    busy = None
    if parent:
        QMessageBox.information(parent, "Restored file", """<P>Restored %s from this entry's
        archived copy.</P>""" % self.sourcepath,
                                QMessageBox.Ok, 0)
    return True
|
def function[restore_from_archive, parameter[self, parent]]:
constant[Function to restore a DP from archived copy
Asks for confirmation along the way if parent is not None
(in which case it will be the parent widget for confirmation dialogs)
]
from relative_module[PyQt4.Qt] import module[QMessageBox]
variable[exists] assign[=] call[name[os].path.exists, parameter[name[self].sourcepath]]
if name[parent] begin[:]
variable[msg] assign[=] binary_operation[constant[<P>Do you really want to restore <tt>%s</tt> from
this entry's copy of <tt>%s</tt>?</P>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b09d3af0>, <ast.Attribute object at 0x7da1b09d3a90>]]]
variable[exists] assign[=] call[name[os].path.exists, parameter[name[self].sourcepath]]
if name[exists] begin[:]
<ast.AugAssign object at 0x7da1b09d3850>
if compare[call[name[QMessageBox].warning, parameter[name[parent], constant[Restoring from archive], name[msg], name[QMessageBox].Yes, name[QMessageBox].No]] not_equal[!=] name[QMessageBox].Yes] begin[:]
return[constant[False]]
variable[busy] assign[=] call[name[Purr].BusyIndicator, parameter[]]
if name[exists] begin[:]
if call[name[os].system, parameter[binary_operation[constant[/bin/rm -fr '%s'] <ast.Mod object at 0x7da2590d6920> name[self].sourcepath]]] begin[:]
variable[busy] assign[=] constant[None]
if name[parent] begin[:]
call[name[QMessageBox].warning, parameter[name[parent], constant[Error removing file], binary_operation[constant[<P>
There was an error removing %s. Archived copy was not restored.
The text console may have more information.</P>] <ast.Mod object at 0x7da2590d6920> name[self].sourcepath], name[QMessageBox].Ok, constant[0]]]
return[constant[False]]
if call[name[self].fullpath.endswith, parameter[constant[.tgz]]] begin[:]
variable[parent_dir] assign[=] call[name[os].path.dirname, parameter[call[name[self].sourcepath.rstrip, parameter[constant[/]]]]]
call[name[os].system, parameter[binary_operation[constant[/bin/rm -fr %s] <ast.Mod object at 0x7da2590d6920> name[self].sourcepath]]]
if call[name[os].system, parameter[binary_operation[constant[tar zxf '%s' -C '%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b09d21a0>, <ast.Name object at 0x7da1b09d2140>]]]]] begin[:]
variable[busy] assign[=] constant[None]
if name[parent] begin[:]
call[name[QMessageBox].warning, parameter[name[parent], constant[Error unpacking file], binary_operation[constant[<P>
There was an error unpacking the archived version to %s. The text console may have more information.</P>] <ast.Mod object at 0x7da2590d6920> name[self].sourcepath], name[QMessageBox].Ok, constant[0]]]
return[constant[False]]
variable[busy] assign[=] constant[None]
if name[parent] begin[:]
call[name[QMessageBox].information, parameter[name[parent], constant[Restored file], binary_operation[constant[<P>Restored %s from this entry's
archived copy.</P>] <ast.Mod object at 0x7da2590d6920> name[self].sourcepath], name[QMessageBox].Ok, constant[0]]]
return[constant[True]]
|
keyword[def] identifier[restore_from_archive] ( identifier[self] , identifier[parent] = keyword[None] ):
literal[string]
keyword[from] identifier[PyQt4] . identifier[Qt] keyword[import] identifier[QMessageBox]
identifier[exists] = identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[sourcepath] )
keyword[if] identifier[parent] :
identifier[msg] = literal[string] %( identifier[self] . identifier[sourcepath] , identifier[self] . identifier[filename] )
identifier[exists] = identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[sourcepath] )
keyword[if] identifier[exists] :
identifier[msg] += literal[string]
keyword[if] identifier[QMessageBox] . identifier[warning] ( identifier[parent] , literal[string] , identifier[msg] ,
identifier[QMessageBox] . identifier[Yes] , identifier[QMessageBox] . identifier[No] )!= identifier[QMessageBox] . identifier[Yes] :
keyword[return] keyword[False]
keyword[else] :
keyword[if] identifier[QMessageBox] . identifier[question] ( identifier[parent] , literal[string] , identifier[msg] ,
identifier[QMessageBox] . identifier[Yes] , identifier[QMessageBox] . identifier[No] )!= identifier[QMessageBox] . identifier[Yes] :
keyword[return] keyword[False]
identifier[busy] = identifier[Purr] . identifier[BusyIndicator] ()
keyword[if] identifier[exists] :
keyword[if] identifier[os] . identifier[system] ( literal[string] % identifier[self] . identifier[sourcepath] ):
identifier[busy] = keyword[None]
keyword[if] identifier[parent] :
identifier[QMessageBox] . identifier[warning] ( identifier[parent] , literal[string] , literal[string] % identifier[self] . identifier[sourcepath] ,
identifier[QMessageBox] . identifier[Ok] , literal[int] )
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[fullpath] . identifier[endswith] ( literal[string] ):
identifier[parent_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[self] . identifier[sourcepath] . identifier[rstrip] ( literal[string] ))
identifier[os] . identifier[system] ( literal[string] % identifier[self] . identifier[sourcepath] )
keyword[if] identifier[os] . identifier[system] ( literal[string] %( identifier[self] . identifier[fullpath] , identifier[parent_dir] )):
identifier[busy] = keyword[None]
keyword[if] identifier[parent] :
identifier[QMessageBox] . identifier[warning] ( identifier[parent] , literal[string] , literal[string] % identifier[self] . identifier[sourcepath] ,
identifier[QMessageBox] . identifier[Ok] , literal[int] )
keyword[return] keyword[False]
keyword[else] :
keyword[if] identifier[os] . identifier[system] ( literal[string] %( identifier[self] . identifier[fullpath] , identifier[self] . identifier[sourcepath] )):
identifier[busy] = keyword[None]
keyword[if] identifier[parent] :
identifier[QMessageBox] . identifier[warning] ( identifier[parent] , literal[string] , literal[string] % identifier[self] . identifier[sourcepath] ,
identifier[QMessageBox] . identifier[Ok] , literal[int] )
keyword[return] keyword[False]
identifier[busy] = keyword[None]
keyword[if] identifier[parent] :
identifier[QMessageBox] . identifier[information] ( identifier[parent] , literal[string] , literal[string] % identifier[self] . identifier[sourcepath] ,
identifier[QMessageBox] . identifier[Ok] , literal[int] )
keyword[return] keyword[True]
|
def restore_from_archive(self, parent=None):
"""Function to restore a DP from archived copy
Asks for confirmation along the way if parent is not None
(in which case it will be the parent widget for confirmation dialogs)
"""
from PyQt4.Qt import QMessageBox
exists = os.path.exists(self.sourcepath)
if parent:
msg = "<P>Do you really want to restore <tt>%s</tt> from\n this entry's copy of <tt>%s</tt>?</P>" % (self.sourcepath, self.filename)
exists = os.path.exists(self.sourcepath)
if exists:
msg += '<P>Current file exists, and will be overwritten.</P>'
if QMessageBox.warning(parent, 'Restoring from archive', msg, QMessageBox.Yes, QMessageBox.No) != QMessageBox.Yes:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif QMessageBox.question(parent, 'Restoring from archive', msg, QMessageBox.Yes, QMessageBox.No) != QMessageBox.Yes:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
busy = Purr.BusyIndicator()
# remove file if in the way
if exists:
if os.system("/bin/rm -fr '%s'" % self.sourcepath):
busy = None
if parent:
QMessageBox.warning(parent, 'Error removing file', '<P>\n There was an error removing %s. Archived copy was not restored.\n The text console may have more information.</P>' % self.sourcepath, QMessageBox.Ok, 0) # depends on [control=['if'], data=[]]
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# unpack archived file
if self.fullpath.endswith('.tgz'):
parent_dir = os.path.dirname(self.sourcepath.rstrip('/'))
os.system('/bin/rm -fr %s' % self.sourcepath)
if os.system("tar zxf '%s' -C '%s'" % (self.fullpath, parent_dir)):
busy = None
if parent:
QMessageBox.warning(parent, 'Error unpacking file', '<P>\n There was an error unpacking the archived version to %s. The text console may have more information.</P>' % self.sourcepath, QMessageBox.Ok, 0) # depends on [control=['if'], data=[]]
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# else simply copy over
elif os.system("/bin/cp -a '%s' '%s'" % (self.fullpath, self.sourcepath)):
busy = None
if parent:
QMessageBox.warning(parent, 'Error copying file', '<P>\n There was an error copying the archived version to %s. The text console may have more information.</P>' % self.sourcepath, QMessageBox.Ok, 0) # depends on [control=['if'], data=[]]
return False # depends on [control=['if'], data=[]]
busy = None
if parent:
QMessageBox.information(parent, 'Restored file', "<P>Restored %s from this entry's\n archived copy.</P>" % self.sourcepath, QMessageBox.Ok, 0) # depends on [control=['if'], data=[]]
return True
|
def get_profile(name=None, **kwargs):
    """Resolve *name* to a profile instance.

    If *name* is already a :class:`Profile` it is returned unchanged.
    Otherwise the matching profile class is looked up — falling back to
    ``'default'`` when *name* is falsy — and instantiated with ``**kwargs``.
    """
    if isinstance(name, Profile):
        return name
    profile_cls = get_profile_class(name if name else 'default')
    return profile_cls(**kwargs)
|
def function[get_profile, parameter[name]]:
constant[Get the profile by name; if no name is given, return the
default profile.
]
if call[name[isinstance], parameter[name[name], name[Profile]]] begin[:]
return[name[name]]
variable[clazz] assign[=] call[name[get_profile_class], parameter[<ast.BoolOp object at 0x7da1b1e7e4a0>]]
return[call[name[clazz], parameter[]]]
|
keyword[def] identifier[get_profile] ( identifier[name] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[name] , identifier[Profile] ):
keyword[return] identifier[name]
identifier[clazz] = identifier[get_profile_class] ( identifier[name] keyword[or] literal[string] )
keyword[return] identifier[clazz] (** identifier[kwargs] )
|
def get_profile(name=None, **kwargs):
"""Get the profile by name; if no name is given, return the
default profile.
"""
if isinstance(name, Profile):
return name # depends on [control=['if'], data=[]]
clazz = get_profile_class(name or 'default')
return clazz(**kwargs)
|
def get_fallback_resolution(self):
"""Returns the previous fallback resolution
set by :meth:`set_fallback_resolution`,
or default fallback resolution if never set.
:returns: ``(x_pixels_per_inch, y_pixels_per_inch)``
"""
ppi = ffi.new('double[2]')
cairo.cairo_surface_get_fallback_resolution(
self._pointer, ppi + 0, ppi + 1)
return tuple(ppi)
|
def function[get_fallback_resolution, parameter[self]]:
constant[Returns the previous fallback resolution
set by :meth:`set_fallback_resolution`,
or default fallback resolution if never set.
:returns: ``(x_pixels_per_inch, y_pixels_per_inch)``
]
variable[ppi] assign[=] call[name[ffi].new, parameter[constant[double[2]]]]
call[name[cairo].cairo_surface_get_fallback_resolution, parameter[name[self]._pointer, binary_operation[name[ppi] + constant[0]], binary_operation[name[ppi] + constant[1]]]]
return[call[name[tuple], parameter[name[ppi]]]]
|
keyword[def] identifier[get_fallback_resolution] ( identifier[self] ):
literal[string]
identifier[ppi] = identifier[ffi] . identifier[new] ( literal[string] )
identifier[cairo] . identifier[cairo_surface_get_fallback_resolution] (
identifier[self] . identifier[_pointer] , identifier[ppi] + literal[int] , identifier[ppi] + literal[int] )
keyword[return] identifier[tuple] ( identifier[ppi] )
|
def get_fallback_resolution(self):
"""Returns the previous fallback resolution
set by :meth:`set_fallback_resolution`,
or default fallback resolution if never set.
:returns: ``(x_pixels_per_inch, y_pixels_per_inch)``
"""
ppi = ffi.new('double[2]')
cairo.cairo_surface_get_fallback_resolution(self._pointer, ppi + 0, ppi + 1)
return tuple(ppi)
|
def name(self, value):
"""
Setter for **self.__name** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format("name", value)
self.__name = value
|
def function[name, parameter[self, value]]:
constant[
Setter for **self.__name** attribute.
:param value: Attribute value.
:type value: unicode
]
if compare[name[value] is_not constant[None]] begin[:]
assert[compare[call[name[type], parameter[name[value]]] is name[unicode]]]
name[self].__name assign[=] name[value]
|
keyword[def] identifier[name] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[type] ( identifier[value] ) keyword[is] identifier[unicode] , literal[string] . identifier[format] ( literal[string] , identifier[value] )
identifier[self] . identifier[__name] = identifier[value]
|
def name(self, value):
"""
Setter for **self.__name** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format('name', value) # depends on [control=['if'], data=['value']]
self.__name = value
|
def thub(data, n):
    """
    Tee or "T" hub auto-copier to help working with Stream instances as well as
    with numbers.
    Parameters
    ----------
    data :
        Input to be copied. Can be anything.
    n :
        Number of copies.
    Returns
    -------
    A StreamTeeHub instance, if input data is iterable.
    The data itself, otherwise.
    Examples
    --------
    >>> def sub_sum(x, y):
    ...     x = thub(x, 2) # Casts to StreamTeeHub, when needed
    ...     y = thub(y, 2)
    ...     return (x - y) / (x + y) # Return type might be number or Stream
    With numbers:
    >>> sub_sum(1, 1.)
    0.0
    Combining number with iterable:
    >>> sub_sum(3., [1, 2, 3])
    <audiolazy.lazy_stream.Stream object at 0x...>
    >>> list(sub_sum(3., [1, 2, 3]))
    [0.5, 0.2, 0.0]
    Both iterables (the Stream input behaves like an endless [6, 1, 6, 1, ...]):
    >>> list(sub_sum([4., 3., 2., 1.], [1, 2, 3]))
    [0.6, 0.2, -0.2]
    >>> list(sub_sum([4., 3., 2., 1.], Stream(6, 1)))
    [-0.2, 0.5, -0.5, 0.0]
    This function can also be used as a an alternative to the Stream
    constructor when your function has only one parameter, to avoid casting
    when that's not needed:
    >>> func = lambda x: 250 * thub(x, 1)
    >>> func(1)
    250
    >>> func([2] * 10)
    <audiolazy.lazy_stream.Stream object at 0x...>
    >>> func([2] * 10).take(5)
    [500, 500, 500, 500, 500]
    """
    # Only iterables need the tee-hub wrapper; scalars pass straight through.
    if isinstance(data, Iterable):
        return StreamTeeHub(data, n)
    return data
|
def function[thub, parameter[data, n]]:
constant[
Tee or "T" hub auto-copier to help working with Stream instances as well as
with numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Number of copies.
Returns
-------
A StreamTeeHub instance, if input data is iterable.
The data itself, otherwise.
Examples
--------
>>> def sub_sum(x, y):
... x = thub(x, 2) # Casts to StreamTeeHub, when needed
... y = thub(y, 2)
... return (x - y) / (x + y) # Return type might be number or Stream
With numbers:
>>> sub_sum(1, 1.)
0.0
Combining number with iterable:
>>> sub_sum(3., [1, 2, 3])
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(sub_sum(3., [1, 2, 3]))
[0.5, 0.2, 0.0]
Both iterables (the Stream input behaves like an endless [6, 1, 6, 1, ...]):
>>> list(sub_sum([4., 3., 2., 1.], [1, 2, 3]))
[0.6, 0.2, -0.2]
>>> list(sub_sum([4., 3., 2., 1.], Stream(6, 1)))
[-0.2, 0.5, -0.5, 0.0]
This function can also be used as a an alternative to the Stream
constructor when your function has only one parameter, to avoid casting
when that's not needed:
>>> func = lambda x: 250 * thub(x, 1)
>>> func(1)
250
>>> func([2] * 10)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> func([2] * 10).take(5)
[500, 500, 500, 500, 500]
]
return[<ast.IfExp object at 0x7da1b07fa620>]
|
keyword[def] identifier[thub] ( identifier[data] , identifier[n] ):
literal[string]
keyword[return] identifier[StreamTeeHub] ( identifier[data] , identifier[n] ) keyword[if] identifier[isinstance] ( identifier[data] , identifier[Iterable] ) keyword[else] identifier[data]
|
def thub(data, n):
"""
Tee or "T" hub auto-copier to help working with Stream instances as well as
with numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Number of copies.
Returns
-------
A StreamTeeHub instance, if input data is iterable.
The data itself, otherwise.
Examples
--------
>>> def sub_sum(x, y):
... x = thub(x, 2) # Casts to StreamTeeHub, when needed
... y = thub(y, 2)
... return (x - y) / (x + y) # Return type might be number or Stream
With numbers:
>>> sub_sum(1, 1.)
0.0
Combining number with iterable:
>>> sub_sum(3., [1, 2, 3])
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(sub_sum(3., [1, 2, 3]))
[0.5, 0.2, 0.0]
Both iterables (the Stream input behaves like an endless [6, 1, 6, 1, ...]):
>>> list(sub_sum([4., 3., 2., 1.], [1, 2, 3]))
[0.6, 0.2, -0.2]
>>> list(sub_sum([4., 3., 2., 1.], Stream(6, 1)))
[-0.2, 0.5, -0.5, 0.0]
This function can also be used as a an alternative to the Stream
constructor when your function has only one parameter, to avoid casting
when that's not needed:
>>> func = lambda x: 250 * thub(x, 1)
>>> func(1)
250
>>> func([2] * 10)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> func([2] * 10).take(5)
[500, 500, 500, 500, 500]
"""
return StreamTeeHub(data, n) if isinstance(data, Iterable) else data
|
def lock_access(repository_path, callback):
    """ Synchronise access to the user file between processes, this specifies
    which user is allowed write access at the current time

    :param repository_path: directory containing the 'lock_file' used for locking
    :param callback: zero-argument callable run while the exclusive lock is held
    :returns: the callback's return value, or ``fail(lock_fail_msg)`` if the
        lock could not be acquired (see NOTE below about callback IOErrors)
    """
    # Opening in 'w' mode (re)creates/truncates the lock file each call; the
    # open fd is what carries the advisory flock, so the 'with' block bounds
    # the lock's maximum lifetime (closing the fd releases the lock).
    with open(cpjoin(repository_path, 'lock_file'), 'w') as fd:
        try:
            # Non-blocking exclusive lock: raises IOError immediately if
            # another process already holds it, instead of waiting.
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            returned = callback()
            fcntl.flock(fd, fcntl.LOCK_UN)
            return returned
        except IOError:
            # NOTE(review): this also catches any IOError raised inside
            # callback(), reporting it as a lock failure -- confirm intended.
            # Non-IOError exceptions from callback() propagate; the lock is
            # still released because the 'with' closes the fd.
            return fail(lock_fail_msg)
|
def function[lock_access, parameter[repository_path, callback]]:
constant[ Synchronise access to the user file between processes, this specifies
which user is allowed write access at the current time ]
with call[name[open], parameter[call[name[cpjoin], parameter[name[repository_path], constant[lock_file]]], constant[w]]] begin[:]
<ast.Try object at 0x7da20c991780>
|
keyword[def] identifier[lock_access] ( identifier[repository_path] , identifier[callback] ):
literal[string]
keyword[with] identifier[open] ( identifier[cpjoin] ( identifier[repository_path] , literal[string] ), literal[string] ) keyword[as] identifier[fd] :
keyword[try] :
identifier[fcntl] . identifier[flock] ( identifier[fd] , identifier[fcntl] . identifier[LOCK_EX] | identifier[fcntl] . identifier[LOCK_NB] )
identifier[returned] = identifier[callback] ()
identifier[fcntl] . identifier[flock] ( identifier[fd] , identifier[fcntl] . identifier[LOCK_UN] )
keyword[return] identifier[returned]
keyword[except] identifier[IOError] :
keyword[return] identifier[fail] ( identifier[lock_fail_msg] )
|
def lock_access(repository_path, callback):
""" Synchronise access to the user file between processes, this specifies
which user is allowed write access at the current time """
with open(cpjoin(repository_path, 'lock_file'), 'w') as fd:
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
returned = callback()
fcntl.flock(fd, fcntl.LOCK_UN)
return returned # depends on [control=['try'], data=[]]
except IOError:
return fail(lock_fail_msg) # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['fd']]
|
def load_font(self, font_path, font_size):
"""Load the specified font from a file."""
self.__font_path = font_path
self.__font_size = font_size
if font_path != "":
self.__font = pygame.font.Font(font_path, font_size)
self.__set_text(self.__text)
|
def function[load_font, parameter[self, font_path, font_size]]:
constant[Load the specified font from a file.]
name[self].__font_path assign[=] name[font_path]
name[self].__font_size assign[=] name[font_size]
if compare[name[font_path] not_equal[!=] constant[]] begin[:]
name[self].__font assign[=] call[name[pygame].font.Font, parameter[name[font_path], name[font_size]]]
call[name[self].__set_text, parameter[name[self].__text]]
|
keyword[def] identifier[load_font] ( identifier[self] , identifier[font_path] , identifier[font_size] ):
literal[string]
identifier[self] . identifier[__font_path] = identifier[font_path]
identifier[self] . identifier[__font_size] = identifier[font_size]
keyword[if] identifier[font_path] != literal[string] :
identifier[self] . identifier[__font] = identifier[pygame] . identifier[font] . identifier[Font] ( identifier[font_path] , identifier[font_size] )
identifier[self] . identifier[__set_text] ( identifier[self] . identifier[__text] )
|
def load_font(self, font_path, font_size):
"""Load the specified font from a file."""
self.__font_path = font_path
self.__font_size = font_size
if font_path != '':
self.__font = pygame.font.Font(font_path, font_size)
self.__set_text(self.__text) # depends on [control=['if'], data=['font_path']]
|
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_interface(self, **kwargs):
    """Auto Generated Code

    Build the ``show_fabric_trunk_info`` request tree down to the
    ``trunk-list-src-interface`` leaf and hand it to the callback.

    Keyword Args:
        trunk_list_src_interface: text content for the leaf element (required).
        callback: callable invoked with the assembled element tree;
            defaults to ``self._callback``.

    Returns:
        Whatever the callback returns.
    """
    # The original code built a throwaway ET.Element("config") that was
    # immediately overwritten; the request root is show_fabric_trunk_info.
    config = ET.Element("show_fabric_trunk_info")
    output = ET.SubElement(config, "output")
    show_trunk_list = ET.SubElement(output, "show-trunk-list")
    trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
    trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member")
    trunk_list_src_interface = ET.SubElement(trunk_list_member, "trunk-list-src-interface")
    trunk_list_src_interface.text = kwargs.pop('trunk_list_src_interface')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def function[show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_interface, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[show_fabric_trunk_info] assign[=] call[name[ET].Element, parameter[constant[show_fabric_trunk_info]]]
variable[config] assign[=] name[show_fabric_trunk_info]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[show_fabric_trunk_info], constant[output]]]
variable[show_trunk_list] assign[=] call[name[ET].SubElement, parameter[name[output], constant[show-trunk-list]]]
variable[trunk_list_groups] assign[=] call[name[ET].SubElement, parameter[name[show_trunk_list], constant[trunk-list-groups]]]
variable[trunk_list_member] assign[=] call[name[ET].SubElement, parameter[name[trunk_list_groups], constant[trunk-list-member]]]
variable[trunk_list_src_interface] assign[=] call[name[ET].SubElement, parameter[name[trunk_list_member], constant[trunk-list-src-interface]]]
name[trunk_list_src_interface].text assign[=] call[name[kwargs].pop, parameter[constant[trunk_list_src_interface]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]]
|
keyword[def] identifier[show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_interface] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[show_fabric_trunk_info] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[show_fabric_trunk_info]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[show_fabric_trunk_info] , literal[string] )
identifier[show_trunk_list] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[trunk_list_groups] = identifier[ET] . identifier[SubElement] ( identifier[show_trunk_list] , literal[string] )
identifier[trunk_list_member] = identifier[ET] . identifier[SubElement] ( identifier[trunk_list_groups] , literal[string] )
identifier[trunk_list_src_interface] = identifier[ET] . identifier[SubElement] ( identifier[trunk_list_member] , literal[string] )
identifier[trunk_list_src_interface] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] )
|
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_interface(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
show_fabric_trunk_info = ET.Element('show_fabric_trunk_info')
config = show_fabric_trunk_info
output = ET.SubElement(show_fabric_trunk_info, 'output')
show_trunk_list = ET.SubElement(output, 'show-trunk-list')
trunk_list_groups = ET.SubElement(show_trunk_list, 'trunk-list-groups')
trunk_list_member = ET.SubElement(trunk_list_groups, 'trunk-list-member')
trunk_list_src_interface = ET.SubElement(trunk_list_member, 'trunk-list-src-interface')
trunk_list_src_interface.text = kwargs.pop('trunk_list_src_interface')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
def _loadDeclarations(self):
"""
load declaratoins from _declr method
This function is called first for parent and then for children
"""
if not hasattr(self, "_interfaces"):
self._interfaces = []
self._setAttrListener = self._declrCollector
self._declr()
self._setAttrListener = None
for i in self._interfaces:
i._isExtern = self._isExtern
i._loadDeclarations()
for p in self._params:
p.setReadOnly()
if self._isExtern:
# direction from inside of unit (reverset compared to outside direction)
if self._direction == INTF_DIRECTION.UNKNOWN:
self._direction = INTF_DIRECTION.MASTER
self._setDirectionsLikeIn(self._direction)
|
def function[_loadDeclarations, parameter[self]]:
constant[
load declaratoins from _declr method
This function is called first for parent and then for children
]
if <ast.UnaryOp object at 0x7da1b0381000> begin[:]
name[self]._interfaces assign[=] list[[]]
name[self]._setAttrListener assign[=] name[self]._declrCollector
call[name[self]._declr, parameter[]]
name[self]._setAttrListener assign[=] constant[None]
for taget[name[i]] in starred[name[self]._interfaces] begin[:]
name[i]._isExtern assign[=] name[self]._isExtern
call[name[i]._loadDeclarations, parameter[]]
for taget[name[p]] in starred[name[self]._params] begin[:]
call[name[p].setReadOnly, parameter[]]
if name[self]._isExtern begin[:]
if compare[name[self]._direction equal[==] name[INTF_DIRECTION].UNKNOWN] begin[:]
name[self]._direction assign[=] name[INTF_DIRECTION].MASTER
call[name[self]._setDirectionsLikeIn, parameter[name[self]._direction]]
|
keyword[def] identifier[_loadDeclarations] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_interfaces] =[]
identifier[self] . identifier[_setAttrListener] = identifier[self] . identifier[_declrCollector]
identifier[self] . identifier[_declr] ()
identifier[self] . identifier[_setAttrListener] = keyword[None]
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_interfaces] :
identifier[i] . identifier[_isExtern] = identifier[self] . identifier[_isExtern]
identifier[i] . identifier[_loadDeclarations] ()
keyword[for] identifier[p] keyword[in] identifier[self] . identifier[_params] :
identifier[p] . identifier[setReadOnly] ()
keyword[if] identifier[self] . identifier[_isExtern] :
keyword[if] identifier[self] . identifier[_direction] == identifier[INTF_DIRECTION] . identifier[UNKNOWN] :
identifier[self] . identifier[_direction] = identifier[INTF_DIRECTION] . identifier[MASTER]
identifier[self] . identifier[_setDirectionsLikeIn] ( identifier[self] . identifier[_direction] )
|
def _loadDeclarations(self):
"""
load declaratoins from _declr method
This function is called first for parent and then for children
"""
if not hasattr(self, '_interfaces'):
self._interfaces = [] # depends on [control=['if'], data=[]]
self._setAttrListener = self._declrCollector
self._declr()
self._setAttrListener = None
for i in self._interfaces:
i._isExtern = self._isExtern
i._loadDeclarations() # depends on [control=['for'], data=['i']]
for p in self._params:
p.setReadOnly() # depends on [control=['for'], data=['p']]
if self._isExtern:
# direction from inside of unit (reverset compared to outside direction)
if self._direction == INTF_DIRECTION.UNKNOWN:
self._direction = INTF_DIRECTION.MASTER # depends on [control=['if'], data=[]]
self._setDirectionsLikeIn(self._direction) # depends on [control=['if'], data=[]]
|
def make_blastcmd_builder(
    mode, outdir, format_exe=None, blast_exe=None, prefix="ANIBLAST"
):
    """Return a BLASTcmds object for constructing BLAST command lines.

    The command constructors and executables depend on *mode*: "ANIb"
    selects the BLAST+ toolchain (makeblastdb/blastn); anything else
    selects the legacy toolchain (formatdb/blastall). Explicit *format_exe*
    / *blast_exe* paths override the configured defaults.
    """
    if mode == "ANIb":  # BLAST/formatting executable depends on mode
        functions = BLASTfunctions(construct_makeblastdb_cmd,
                                   construct_blastn_cmdline)
        executables = BLASTexes(format_exe or pyani_config.MAKEBLASTDB_DEFAULT,
                                blast_exe or pyani_config.BLASTN_DEFAULT)
    else:
        functions = BLASTfunctions(construct_formatdb_cmd,
                                   construct_blastall_cmdline)
        executables = BLASTexes(format_exe or pyani_config.FORMATDB_DEFAULT,
                                blast_exe or pyani_config.BLASTALL_DEFAULT)
    return BLASTcmds(functions, executables, prefix, outdir)
|
def function[make_blastcmd_builder, parameter[mode, outdir, format_exe, blast_exe, prefix]]:
constant[Returns BLASTcmds object for construction of BLAST commands.]
if compare[name[mode] equal[==] constant[ANIb]] begin[:]
variable[blastcmds] assign[=] call[name[BLASTcmds], parameter[call[name[BLASTfunctions], parameter[name[construct_makeblastdb_cmd], name[construct_blastn_cmdline]]], call[name[BLASTexes], parameter[<ast.BoolOp object at 0x7da1b0d6bb80>, <ast.BoolOp object at 0x7da1b0d68ca0>]], name[prefix], name[outdir]]]
return[name[blastcmds]]
|
keyword[def] identifier[make_blastcmd_builder] (
identifier[mode] , identifier[outdir] , identifier[format_exe] = keyword[None] , identifier[blast_exe] = keyword[None] , identifier[prefix] = literal[string]
):
literal[string]
keyword[if] identifier[mode] == literal[string] :
identifier[blastcmds] = identifier[BLASTcmds] (
identifier[BLASTfunctions] ( identifier[construct_makeblastdb_cmd] , identifier[construct_blastn_cmdline] ),
identifier[BLASTexes] (
identifier[format_exe] keyword[or] identifier[pyani_config] . identifier[MAKEBLASTDB_DEFAULT] ,
identifier[blast_exe] keyword[or] identifier[pyani_config] . identifier[BLASTN_DEFAULT] ,
),
identifier[prefix] ,
identifier[outdir] ,
)
keyword[else] :
identifier[blastcmds] = identifier[BLASTcmds] (
identifier[BLASTfunctions] ( identifier[construct_formatdb_cmd] , identifier[construct_blastall_cmdline] ),
identifier[BLASTexes] (
identifier[format_exe] keyword[or] identifier[pyani_config] . identifier[FORMATDB_DEFAULT] ,
identifier[blast_exe] keyword[or] identifier[pyani_config] . identifier[BLASTALL_DEFAULT] ,
),
identifier[prefix] ,
identifier[outdir] ,
)
keyword[return] identifier[blastcmds]
|
def make_blastcmd_builder(mode, outdir, format_exe=None, blast_exe=None, prefix='ANIBLAST'):
"""Returns BLASTcmds object for construction of BLAST commands."""
if mode == 'ANIb': # BLAST/formatting executable depends on mode
blastcmds = BLASTcmds(BLASTfunctions(construct_makeblastdb_cmd, construct_blastn_cmdline), BLASTexes(format_exe or pyani_config.MAKEBLASTDB_DEFAULT, blast_exe or pyani_config.BLASTN_DEFAULT), prefix, outdir) # depends on [control=['if'], data=[]]
else:
blastcmds = BLASTcmds(BLASTfunctions(construct_formatdb_cmd, construct_blastall_cmdline), BLASTexes(format_exe or pyani_config.FORMATDB_DEFAULT, blast_exe or pyani_config.BLASTALL_DEFAULT), prefix, outdir)
return blastcmds
|
    def context(self, async_context):
        """\
        Opens the given asynchronous context manager on the event loop.
        For a context manager that would be called like this::
            async with ctx as value:
                body
        This method allows a call like this::
            with event_loop_thread.context(ctx) as value:
                body
        The asynchronous work of the context manager is run on the dedicated thread.
        """
        # NOTE(review): this is a generator; presumably wrapped with
        # contextlib.contextmanager at the decorator site (not visible here).
        # Resolve __aexit__ on the type, mirroring how 'async with' looks it
        # up, and before entering so a missing method fails early.
        exit = type(async_context).__aexit__
        # Run __aenter__ on the event-loop thread and block for its result.
        value = self.run_coroutine(type(async_context).__aenter__(async_context)).result()
        # 'exc' tracks whether the body completed without raising; this
        # mirrors the bookkeeping in contextlib's generator context managers.
        exc = True
        try:
            try:
                yield value
            except:
                exc = False
                # Pass the in-flight exception info to __aexit__; a falsy
                # return means "do not suppress", so re-raise it.
                if not self.run_coroutine(exit(async_context, *sys.exc_info())).result():
                    raise
        finally:
            if exc:
                # Normal exit path: __aexit__ is called with no exception.
                self.run_coroutine(exit(async_context, None, None, None)).result()
|
def function[context, parameter[self, async_context]]:
constant[ Opens the given asynchronous contest manager on the event loop.
For a context manager that would be called like this::
async with ctx as value:
body
This method allows a call like this::
with event_loop_thread.context(ctx) as value:
body
The asynchronous work of the context manager is run on the dedicated thread.
]
variable[exit] assign[=] call[name[type], parameter[name[async_context]]].__aexit__
variable[value] assign[=] call[call[name[self].run_coroutine, parameter[call[call[name[type], parameter[name[async_context]]].__aenter__, parameter[name[async_context]]]]].result, parameter[]]
variable[exc] assign[=] constant[True]
<ast.Try object at 0x7da1b0a499c0>
|
keyword[def] identifier[context] ( identifier[self] , identifier[async_context] ):
literal[string]
identifier[exit] = identifier[type] ( identifier[async_context] ). identifier[__aexit__]
identifier[value] = identifier[self] . identifier[run_coroutine] ( identifier[type] ( identifier[async_context] ). identifier[__aenter__] ( identifier[async_context] )). identifier[result] ()
identifier[exc] = keyword[True]
keyword[try] :
keyword[try] :
keyword[yield] identifier[value]
keyword[except] :
identifier[exc] = keyword[False]
keyword[if] keyword[not] identifier[self] . identifier[run_coroutine] ( identifier[exit] ( identifier[async_context] ,* identifier[sys] . identifier[exc_info] ())). identifier[result] ():
keyword[raise]
keyword[finally] :
keyword[if] identifier[exc] :
identifier[self] . identifier[run_coroutine] ( identifier[exit] ( identifier[async_context] , keyword[None] , keyword[None] , keyword[None] )). identifier[result] ()
|
def context(self, async_context):
""" Opens the given asynchronous contest manager on the event loop.
For a context manager that would be called like this::
async with ctx as value:
body
This method allows a call like this::
with event_loop_thread.context(ctx) as value:
body
The asynchronous work of the context manager is run on the dedicated thread.
"""
exit = type(async_context).__aexit__
value = self.run_coroutine(type(async_context).__aenter__(async_context)).result()
exc = True
try:
try:
yield value # depends on [control=['try'], data=[]]
except:
exc = False
if not self.run_coroutine(exit(async_context, *sys.exc_info())).result():
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
finally:
if exc:
self.run_coroutine(exit(async_context, None, None, None)).result() # depends on [control=['if'], data=[]]
|
def create(cls, package, inputs=None, settings=None, resume_from=None,
           name=None, secret_settings=None, api=None):
    """
    Create and start a new run.
    :param package: Automation package id
    :param inputs: Input dictionary
    :param settings: Settings override dictionary
    :param resume_from: Run to resume from
    :param name: Automation run name
    :param secret_settings: dict to override secret_settings from
    automation template
    :param api: sevenbridges Api instance
    :return: AutomationRun object
    """
    payload = {'package': Transform.to_automation_package(package)}
    # Optional request fields; only truthy values are sent to the service.
    optional_fields = {
        'inputs': inputs,
        'settings': settings,
        'resume_from': resume_from,
        'name': name,
        'secret_settings': secret_settings,
    }
    payload.update(
        (field, value) for field, value in optional_fields.items() if value
    )
    api = api or cls._API
    response = api.post(
        url=cls._URL['query'],
        data=payload,
    ).json()
    return AutomationRun(api=api, **response)
|
def function[create, parameter[cls, package, inputs, settings, resume_from, name, secret_settings, api]]:
constant[
Create and start a new run.
:param package: Automation package id
:param inputs: Input dictionary
:param settings: Settings override dictionary
:param resume_from: Run to resume from
:param name: Automation run name
:param secret_settings: dict to override secret_settings from
automation template
:param api: sevenbridges Api instance
:return: AutomationRun object
]
variable[package] assign[=] call[name[Transform].to_automation_package, parameter[name[package]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20c993f10>], [<ast.Name object at 0x7da20c992c20>]]
if name[inputs] begin[:]
call[name[data]][constant[inputs]] assign[=] name[inputs]
if name[settings] begin[:]
call[name[data]][constant[settings]] assign[=] name[settings]
if name[resume_from] begin[:]
call[name[data]][constant[resume_from]] assign[=] name[resume_from]
if name[name] begin[:]
call[name[data]][constant[name]] assign[=] name[name]
if name[secret_settings] begin[:]
call[name[data]][constant[secret_settings]] assign[=] name[secret_settings]
variable[api] assign[=] <ast.BoolOp object at 0x7da20c7961a0>
variable[automation_run] assign[=] call[call[name[api].post, parameter[]].json, parameter[]]
return[call[name[AutomationRun], parameter[]]]
|
keyword[def] identifier[create] ( identifier[cls] , identifier[package] , identifier[inputs] = keyword[None] , identifier[settings] = keyword[None] , identifier[resume_from] = keyword[None] ,
identifier[name] = keyword[None] , identifier[secret_settings] = keyword[None] , identifier[api] = keyword[None] ):
literal[string]
identifier[package] = identifier[Transform] . identifier[to_automation_package] ( identifier[package] )
identifier[data] ={ literal[string] : identifier[package] }
keyword[if] identifier[inputs] :
identifier[data] [ literal[string] ]= identifier[inputs]
keyword[if] identifier[settings] :
identifier[data] [ literal[string] ]= identifier[settings]
keyword[if] identifier[resume_from] :
identifier[data] [ literal[string] ]= identifier[resume_from]
keyword[if] identifier[name] :
identifier[data] [ literal[string] ]= identifier[name]
keyword[if] identifier[secret_settings] :
identifier[data] [ literal[string] ]= identifier[secret_settings]
identifier[api] = identifier[api] keyword[or] identifier[cls] . identifier[_API]
identifier[automation_run] = identifier[api] . identifier[post] (
identifier[url] = identifier[cls] . identifier[_URL] [ literal[string] ],
identifier[data] = identifier[data] ,
). identifier[json] ()
keyword[return] identifier[AutomationRun] ( identifier[api] = identifier[api] ,** identifier[automation_run] )
|
def create(cls, package, inputs=None, settings=None, resume_from=None, name=None, secret_settings=None, api=None):
"""
Create and start a new run.
:param package: Automation package id
:param inputs: Input dictionary
:param settings: Settings override dictionary
:param resume_from: Run to resume from
:param name: Automation run name
:param secret_settings: dict to override secret_settings from
automation template
:param api: sevenbridges Api instance
:return: AutomationRun object
"""
package = Transform.to_automation_package(package)
data = {'package': package}
if inputs:
data['inputs'] = inputs # depends on [control=['if'], data=[]]
if settings:
data['settings'] = settings # depends on [control=['if'], data=[]]
if resume_from:
data['resume_from'] = resume_from # depends on [control=['if'], data=[]]
if name:
data['name'] = name # depends on [control=['if'], data=[]]
if secret_settings:
data['secret_settings'] = secret_settings # depends on [control=['if'], data=[]]
api = api or cls._API
automation_run = api.post(url=cls._URL['query'], data=data).json()
return AutomationRun(api=api, **automation_run)
|
def assign_default_log_values(self, fpath, line, formatter):
    """Build the default log record for a single raw log line.
    >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
    >>> from pprint import pprint
    >>> formatter = 'logagg.formatters.mongodb'
    >>> fpath = '/var/log/mongodb/mongodb.log'
    >>> line = 'some log line here'
    >>> default_log = lc.assign_default_log_values(fpath, line, formatter)
    >>> pprint(default_log) #doctest: +ELLIPSIS
    {'data': {},
     'error': False,
     'error_tb': '',
     'event': 'event',
     'file': '/var/log/mongodb/mongodb.log',
     'formatter': 'logagg.formatters.mongodb',
     'host': '...',
     'id': None,
     'level': 'debug',
     'raw': 'some log line here',
     'timestamp': '...',
     'type': 'log'}
    """
    # Every field the rest of the pipeline expects is present up-front;
    # formatters may overwrite these defaults later.
    return {
        'id': None,
        'file': fpath,
        'host': self.HOST,
        'formatter': formatter,
        'event': 'event',
        'data': {},
        'raw': line,
        'timestamp': datetime.datetime.utcnow().isoformat(),
        'type': 'log',
        'level': 'debug',
        'error': False,
        'error_tb': '',
    }
|
def function[assign_default_log_values, parameter[self, fpath, line, formatter]]:
constant[
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'}
]
return[call[name[dict], parameter[]]]
|
keyword[def] identifier[assign_default_log_values] ( identifier[self] , identifier[fpath] , identifier[line] , identifier[formatter] ):
literal[string]
keyword[return] identifier[dict] (
identifier[id] = keyword[None] ,
identifier[file] = identifier[fpath] ,
identifier[host] = identifier[self] . identifier[HOST] ,
identifier[formatter] = identifier[formatter] ,
identifier[event] = literal[string] ,
identifier[data] ={},
identifier[raw] = identifier[line] ,
identifier[timestamp] = identifier[datetime] . identifier[datetime] . identifier[utcnow] (). identifier[isoformat] (),
identifier[type] = literal[string] ,
identifier[level] = literal[string] ,
identifier[error] = keyword[False] ,
identifier[error_tb] = literal[string] ,
)
|
def assign_default_log_values(self, fpath, line, formatter):
"""
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'}
"""
return dict(id=None, file=fpath, host=self.HOST, formatter=formatter, event='event', data={}, raw=line, timestamp=datetime.datetime.utcnow().isoformat(), type='log', level='debug', error=False, error_tb='')
|
def get(self, sid):
    """
    Constructs a ExecutionStepContext
    :param sid: Step Sid.
    :returns: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepContext
    :rtype: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepContext
    """
    # Carry over the parent resource identifiers from the resolved solution.
    context_kwargs = {
        'flow_sid': self._solution['flow_sid'],
        'execution_sid': self._solution['execution_sid'],
        'sid': sid,
    }
    return ExecutionStepContext(self._version, **context_kwargs)
|
def function[get, parameter[self, sid]]:
constant[
Constructs a ExecutionStepContext
:param sid: Step Sid.
:returns: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepContext
]
return[call[name[ExecutionStepContext], parameter[name[self]._version]]]
|
keyword[def] identifier[get] ( identifier[self] , identifier[sid] ):
literal[string]
keyword[return] identifier[ExecutionStepContext] (
identifier[self] . identifier[_version] ,
identifier[flow_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[execution_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[sid] = identifier[sid] ,
)
|
def get(self, sid):
"""
Constructs a ExecutionStepContext
:param sid: Step Sid.
:returns: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepContext
"""
return ExecutionStepContext(self._version, flow_sid=self._solution['flow_sid'], execution_sid=self._solution['execution_sid'], sid=sid)
|
def write_configs(self, project_root):
    """Wrapper method that writes all configuration files to the pipeline
    directory.
    :param project_root: path to the pipeline project directory where the
        config files are written.
    """
    # Unconditionally (re)written config files; the repeated copy-pasted
    # with-blocks (one of which carried a wrong "containers" comment over
    # params.config) are collapsed into a single data-driven loop.
    configs = (
        ("resources.config", self.resources),
        ("containers.config", self.containers),
        ("params.config", self.params),
        ("manifest.config", self.manifest),
    )
    for fname, content in configs:
        with open(join(project_root, fname), "w") as fh:
            fh.write(content)
    # Write user config only if not already present in the project directory,
    # so local user customizations are never clobbered.
    user_config_path = join(project_root, "user.config")
    if not exists(user_config_path):
        with open(user_config_path, "w") as fh:
            fh.write(self.user_config)
    # The groovy helper lives in a lib/ subdirectory, created on demand.
    lib_dir = join(project_root, "lib")
    if not exists(lib_dir):
        os.makedirs(lib_dir)
    with open(join(lib_dir, "Helper.groovy"), "w") as fh:
        fh.write(self.help)
    # Generate the pipeline DAG next to the nextflow file (<name>.html).
    pipeline_to_json = self.render_pipeline()
    with open(splitext(self.nf_file)[0] + ".html", "w") as fh:
        fh.write(pipeline_to_json)
|
def function[write_configs, parameter[self, project_root]]:
constant[Wrapper method that writes all configuration files to the pipeline
directory
]
with call[name[open], parameter[call[name[join], parameter[name[project_root], constant[resources.config]]], constant[w]]] begin[:]
call[name[fh].write, parameter[name[self].resources]]
with call[name[open], parameter[call[name[join], parameter[name[project_root], constant[containers.config]]], constant[w]]] begin[:]
call[name[fh].write, parameter[name[self].containers]]
with call[name[open], parameter[call[name[join], parameter[name[project_root], constant[params.config]]], constant[w]]] begin[:]
call[name[fh].write, parameter[name[self].params]]
with call[name[open], parameter[call[name[join], parameter[name[project_root], constant[manifest.config]]], constant[w]]] begin[:]
call[name[fh].write, parameter[name[self].manifest]]
if <ast.UnaryOp object at 0x7da1b02c55a0> begin[:]
with call[name[open], parameter[call[name[join], parameter[name[project_root], constant[user.config]]], constant[w]]] begin[:]
call[name[fh].write, parameter[name[self].user_config]]
variable[lib_dir] assign[=] call[name[join], parameter[name[project_root], constant[lib]]]
if <ast.UnaryOp object at 0x7da1b0216110> begin[:]
call[name[os].makedirs, parameter[name[lib_dir]]]
with call[name[open], parameter[call[name[join], parameter[name[lib_dir], constant[Helper.groovy]]], constant[w]]] begin[:]
call[name[fh].write, parameter[name[self].help]]
variable[pipeline_to_json] assign[=] call[name[self].render_pipeline, parameter[]]
with call[name[open], parameter[binary_operation[call[call[name[splitext], parameter[name[self].nf_file]]][constant[0]] + constant[.html]], constant[w]]] begin[:]
call[name[fh].write, parameter[name[pipeline_to_json]]]
|
keyword[def] identifier[write_configs] ( identifier[self] , identifier[project_root] ):
literal[string]
keyword[with] identifier[open] ( identifier[join] ( identifier[project_root] , literal[string] ), literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[self] . identifier[resources] )
keyword[with] identifier[open] ( identifier[join] ( identifier[project_root] , literal[string] ), literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[self] . identifier[containers] )
keyword[with] identifier[open] ( identifier[join] ( identifier[project_root] , literal[string] ), literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[self] . identifier[params] )
keyword[with] identifier[open] ( identifier[join] ( identifier[project_root] , literal[string] ), literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[self] . identifier[manifest] )
keyword[if] keyword[not] identifier[exists] ( identifier[join] ( identifier[project_root] , literal[string] )):
keyword[with] identifier[open] ( identifier[join] ( identifier[project_root] , literal[string] ), literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[self] . identifier[user_config] )
identifier[lib_dir] = identifier[join] ( identifier[project_root] , literal[string] )
keyword[if] keyword[not] identifier[exists] ( identifier[lib_dir] ):
identifier[os] . identifier[makedirs] ( identifier[lib_dir] )
keyword[with] identifier[open] ( identifier[join] ( identifier[lib_dir] , literal[string] ), literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[self] . identifier[help] )
identifier[pipeline_to_json] = identifier[self] . identifier[render_pipeline] ()
keyword[with] identifier[open] ( identifier[splitext] ( identifier[self] . identifier[nf_file] )[ literal[int] ]+ literal[string] , literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[pipeline_to_json] )
|
def write_configs(self, project_root):
"""Wrapper method that writes all configuration files to the pipeline
directory
"""
# Write resources config
with open(join(project_root, 'resources.config'), 'w') as fh:
fh.write(self.resources) # depends on [control=['with'], data=['fh']]
# Write containers config
with open(join(project_root, 'containers.config'), 'w') as fh:
fh.write(self.containers) # depends on [control=['with'], data=['fh']]
# Write containers config
with open(join(project_root, 'params.config'), 'w') as fh:
fh.write(self.params) # depends on [control=['with'], data=['fh']]
# Write manifest config
with open(join(project_root, 'manifest.config'), 'w') as fh:
fh.write(self.manifest) # depends on [control=['with'], data=['fh']]
# Write user config if not present in the project directory
if not exists(join(project_root, 'user.config')):
with open(join(project_root, 'user.config'), 'w') as fh:
fh.write(self.user_config) # depends on [control=['with'], data=['fh']] # depends on [control=['if'], data=[]]
lib_dir = join(project_root, 'lib')
if not exists(lib_dir):
os.makedirs(lib_dir) # depends on [control=['if'], data=[]]
with open(join(lib_dir, 'Helper.groovy'), 'w') as fh:
fh.write(self.help) # depends on [control=['with'], data=['fh']]
# Generate the pipeline DAG
pipeline_to_json = self.render_pipeline()
with open(splitext(self.nf_file)[0] + '.html', 'w') as fh:
fh.write(pipeline_to_json) # depends on [control=['with'], data=['fh']]
|
def count_dict_values(dict_of_counters: Mapping[X, Sized]) -> typing.Counter[X]:
    """Count the number of elements in each value (can be list, Counter, etc).
    :param dict_of_counters: A dictionary of things whose lengths can be measured (lists, Counters, dicts)
    :return: A Counter with the same keys as the input but the count of the length of the values list/tuple/set/Counter
    """
    counts: typing.Counter[X] = Counter()
    for key, sized_value in dict_of_counters.items():
        counts[key] = len(sized_value)
    return counts
|
def function[count_dict_values, parameter[dict_of_counters]]:
constant[Count the number of elements in each value (can be list, Counter, etc).
:param dict_of_counters: A dictionary of things whose lengths can be measured (lists, Counters, dicts)
:return: A Counter with the same keys as the input but the count of the length of the values list/tuple/set/Counter
]
return[call[name[Counter], parameter[<ast.DictComp object at 0x7da18f09e290>]]]
|
keyword[def] identifier[count_dict_values] ( identifier[dict_of_counters] : identifier[Mapping] [ identifier[X] , identifier[Sized] ])-> identifier[typing] . identifier[Counter] [ identifier[X] ]:
literal[string]
keyword[return] identifier[Counter] ({
identifier[k] : identifier[len] ( identifier[v] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dict_of_counters] . identifier[items] ()
})
|
def count_dict_values(dict_of_counters: Mapping[X, Sized]) -> typing.Counter[X]:
"""Count the number of elements in each value (can be list, Counter, etc).
:param dict_of_counters: A dictionary of things whose lengths can be measured (lists, Counters, dicts)
:return: A Counter with the same keys as the input but the count of the length of the values list/tuple/set/Counter
"""
return Counter({k: len(v) for (k, v) in dict_of_counters.items()})
|
def _create_centerline(self):
    """
    Calculate the centerline of a polygon.
    Densifies the border of a polygon which is then represented by a Numpy
    array of points necessary for creating the Voronoi diagram. Once the
    diagram is created, the ridges located within the polygon are
    joined and returned.
    Returns:
        a union of lines that are located within the polygon.
    Raises:
        RuntimeError: if fewer than two candidate ridges are produced,
            which usually means the interpolation distance is too large.
    """
    border = array(self.__densify_border())
    vor = Voronoi(border)
    vertex = vor.vertices
    lst_lines = []
    # (enumerate index was unused in the original loop and has been dropped)
    for ridge in vor.ridge_vertices:
        # A vertex index of -1 marks a ridge extending to infinity; only
        # finite ridges can be part of the centerline.
        if -1 in ridge:
            continue
        # Shift the Voronoi vertices back into the original coordinate
        # system (border points were translated by (-minx, -miny) earlier
        # -- presumably in __densify_border; verify against that helper).
        line = LineString([
            (vertex[ridge[0]][0] + self._minx,
             vertex[ridge[0]][1] + self._miny),
            (vertex[ridge[1]][0] + self._minx,
             vertex[ridge[1]][1] + self._miny)])
        # Keep only ridges fully inside the input polygon.
        if line.within(self._input_geom) and len(line.coords[0]) > 1:
            lst_lines.append(line)
    nr_lines = len(lst_lines)
    if nr_lines < 2:
        raise RuntimeError((
            "Number of produced ridges is too small: {}"
            ", this might be caused by too large interpolation distance."
        ).format(nr_lines))
    return unary_union(lst_lines)
|
def function[_create_centerline, parameter[self]]:
constant[
Calculate the centerline of a polygon.
Densifies the border of a polygon which is then represented by a Numpy
array of points necessary for creating the Voronoi diagram. Once the
diagram is created, the ridges located within the polygon are
joined and returned.
Returns:
a union of lines that are located within the polygon.
]
variable[border] assign[=] call[name[array], parameter[call[name[self].__densify_border, parameter[]]]]
variable[vor] assign[=] call[name[Voronoi], parameter[name[border]]]
variable[vertex] assign[=] name[vor].vertices
variable[lst_lines] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da2054a7310>, <ast.Name object at 0x7da2054a7280>]]] in starred[call[name[enumerate], parameter[name[vor].ridge_vertices]]] begin[:]
if compare[<ast.UnaryOp object at 0x7da2054a5f90> <ast.NotIn object at 0x7da2590d7190> name[ridge]] begin[:]
variable[line] assign[=] call[name[LineString], parameter[list[[<ast.Tuple object at 0x7da2054a4cd0>, <ast.Tuple object at 0x7da2054a5b70>]]]]
if <ast.BoolOp object at 0x7da2054a4520> begin[:]
call[name[lst_lines].append, parameter[name[line]]]
variable[nr_lines] assign[=] call[name[len], parameter[name[lst_lines]]]
if compare[name[nr_lines] less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da2054a5810>
return[call[name[unary_union], parameter[name[lst_lines]]]]
|
keyword[def] identifier[_create_centerline] ( identifier[self] ):
literal[string]
identifier[border] = identifier[array] ( identifier[self] . identifier[__densify_border] ())
identifier[vor] = identifier[Voronoi] ( identifier[border] )
identifier[vertex] = identifier[vor] . identifier[vertices]
identifier[lst_lines] =[]
keyword[for] identifier[j] , identifier[ridge] keyword[in] identifier[enumerate] ( identifier[vor] . identifier[ridge_vertices] ):
keyword[if] - literal[int] keyword[not] keyword[in] identifier[ridge] :
identifier[line] = identifier[LineString] ([
( identifier[vertex] [ identifier[ridge] [ literal[int] ]][ literal[int] ]+ identifier[self] . identifier[_minx] ,
identifier[vertex] [ identifier[ridge] [ literal[int] ]][ literal[int] ]+ identifier[self] . identifier[_miny] ),
( identifier[vertex] [ identifier[ridge] [ literal[int] ]][ literal[int] ]+ identifier[self] . identifier[_minx] ,
identifier[vertex] [ identifier[ridge] [ literal[int] ]][ literal[int] ]+ identifier[self] . identifier[_miny] )])
keyword[if] identifier[line] . identifier[within] ( identifier[self] . identifier[_input_geom] ) keyword[and] identifier[len] ( identifier[line] . identifier[coords] [ literal[int] ])> literal[int] :
identifier[lst_lines] . identifier[append] ( identifier[line] )
identifier[nr_lines] = identifier[len] ( identifier[lst_lines] )
keyword[if] identifier[nr_lines] < literal[int] :
keyword[raise] identifier[RuntimeError] ((
literal[string]
literal[string]
). identifier[format] ( identifier[nr_lines] ))
keyword[return] identifier[unary_union] ( identifier[lst_lines] )
|
def _create_centerline(self):
"""
Calculate the centerline of a polygon.
Densifies the border of a polygon which is then represented by a Numpy
array of points necessary for creating the Voronoi diagram. Once the
diagram is created, the ridges located within the polygon are
joined and returned.
Returns:
a union of lines that are located within the polygon.
"""
border = array(self.__densify_border())
vor = Voronoi(border)
vertex = vor.vertices
lst_lines = []
for (j, ridge) in enumerate(vor.ridge_vertices):
if -1 not in ridge:
line = LineString([(vertex[ridge[0]][0] + self._minx, vertex[ridge[0]][1] + self._miny), (vertex[ridge[1]][0] + self._minx, vertex[ridge[1]][1] + self._miny)])
if line.within(self._input_geom) and len(line.coords[0]) > 1:
lst_lines.append(line) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['ridge']] # depends on [control=['for'], data=[]]
nr_lines = len(lst_lines)
if nr_lines < 2:
raise RuntimeError('Number of produced ridges is too small: {}, this might be caused by too large interpolation distance.'.format(nr_lines)) # depends on [control=['if'], data=['nr_lines']]
return unary_union(lst_lines)
|
def np2str(value):
    """Convert a numpy bytes/object scalar (or 1-element array) to ``str``.
    Args:
        value (ndarray): scalar or 1-element numpy array to convert
    Returns:
        str: the decoded string value
    Raises:
        ValueError: if value is array larger than 1-element or it is not of
            type `numpy.string_` or it is not a numpy array
    """
    if hasattr(value, 'dtype') and \
            issubclass(value.dtype.type, (np.bytes_, np.object_)) and value.size == 1:
        # np.asscalar() was removed in NumPy >= 1.23; ndarray.item() is the
        # long-standing equivalent.  np.bytes_ is the same type np.string_
        # aliased (the alias itself was removed in NumPy 2.0).
        value = value.item()
        if not isinstance(value, str):
            # bytes scalar -> decode to str (default utf-8)
            value = value.decode()
        return value
    else:
        raise ValueError("Array is not a string type or is larger than 1")
|
def function[np2str, parameter[value]]:
constant[Convert an `numpy.string_` to str.
Args:
value (ndarray): scalar or 1-element numpy array to convert
Raises:
ValueError: if value is array larger than 1-element or it is not of
type `numpy.string_` or it is not a numpy array
]
if <ast.BoolOp object at 0x7da1b1d6e080> begin[:]
variable[value] assign[=] call[name[np].asscalar, parameter[name[value]]]
if <ast.UnaryOp object at 0x7da1b1d6e7a0> begin[:]
variable[value] assign[=] call[name[value].decode, parameter[]]
return[name[value]]
|
keyword[def] identifier[np2str] ( identifier[value] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[value] , literal[string] ) keyword[and] identifier[issubclass] ( identifier[value] . identifier[dtype] . identifier[type] ,( identifier[np] . identifier[string_] , identifier[np] . identifier[object_] )) keyword[and] identifier[value] . identifier[size] == literal[int] :
identifier[value] = identifier[np] . identifier[asscalar] ( identifier[value] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[str] ):
identifier[value] = identifier[value] . identifier[decode] ()
keyword[return] identifier[value]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
|
def np2str(value):
"""Convert an `numpy.string_` to str.
Args:
value (ndarray): scalar or 1-element numpy array to convert
Raises:
ValueError: if value is array larger than 1-element or it is not of
type `numpy.string_` or it is not a numpy array
"""
if hasattr(value, 'dtype') and issubclass(value.dtype.type, (np.string_, np.object_)) and (value.size == 1):
value = np.asscalar(value)
if not isinstance(value, str):
# python 3 - was scalar numpy array of bytes
# otherwise python 2 - scalar numpy array of 'str'
value = value.decode() # depends on [control=['if'], data=[]]
return value # depends on [control=['if'], data=[]]
else:
raise ValueError('Array is not a string type or is larger than 1')
|
def is_rpc_error_exempt(self, error_text):
    """
    Check whether an RPC error message is exempt, thus NOT causing an exception.
    On some devices the RPC operations may indicate an error response, even though
    the operation actually succeeded. This may be in cases where a warning would be
    more appropriate. In that case, the client may be better advised to simply
    ignore that error and not raise an exception.
    Note that there is also the "raise_mode", set on session and manager, which
    controls the exception-raising behaviour in case of returned errors. This error
    filter here is independent of that: No matter what the raise_mode says, if the
    error message matches one of the exempt errors returned here, an exception
    will not be raised.
    The exempt error messages are defined in the _EXEMPT_ERRORS field of the device
    handler object and can be overwritten by child classes. Wild cards are
    possible: Start and/or end with a '*' to indicate that the text can appear at
    the start, the end or the middle of the error message to still match. All
    comparisons are case insensitive.
    Return True/False depending on found match.
    """
    # Normalize once; a missing message still gets compared against the lists.
    text = error_text.lower().strip() if error_text is not None else 'no error given'
    # Each list corresponds to a wildcard placement in the exempt pattern:
    # exact, "*suffix" (endswith), "prefix*" (startswith), "*infix*" (contains).
    if any(text == pattern for pattern in self._exempt_errors_exact_match):
        return True
    if any(text.endswith(pattern)
           for pattern in self._exempt_errors_startwith_wildcard_match):
        return True
    if any(text.startswith(pattern)
           for pattern in self._exempt_errors_endwith_wildcard_match):
        return True
    return any(pattern in text
               for pattern in self._exempt_errors_full_wildcard_match)
|
def function[is_rpc_error_exempt, parameter[self, error_text]]:
constant[
Check whether an RPC error message is excempt, thus NOT causing an exception.
On some devices the RPC operations may indicate an error response, even though
the operation actually succeeded. This may be in cases where a warning would be
more appropriate. In that case, the client may be better advised to simply
ignore that error and not raise an exception.
Note that there is also the "raise_mode", set on session and manager, which
controls the exception-raising behaviour in case of returned errors. This error
filter here is independent of that: No matter what the raise_mode says, if the
error message matches one of the exempt errors returned here, an exception
will not be raised.
The exempt error messages are defined in the _EXEMPT_ERRORS field of the device
handler object and can be overwritten by child classes. Wild cards are
possible: Start and/or end with a '*' to indicate that the text can appear at
the start, the end or the middle of the error message to still match. All
comparisons are case insensitive.
Return True/False depending on found match.
]
if compare[name[error_text] is_not constant[None]] begin[:]
variable[error_text] assign[=] call[call[name[error_text].lower, parameter[]].strip, parameter[]]
for taget[name[ex]] in starred[name[self]._exempt_errors_exact_match] begin[:]
if compare[name[error_text] equal[==] name[ex]] begin[:]
return[constant[True]]
for taget[name[ex]] in starred[name[self]._exempt_errors_startwith_wildcard_match] begin[:]
if call[name[error_text].endswith, parameter[name[ex]]] begin[:]
return[constant[True]]
for taget[name[ex]] in starred[name[self]._exempt_errors_endwith_wildcard_match] begin[:]
if call[name[error_text].startswith, parameter[name[ex]]] begin[:]
return[constant[True]]
for taget[name[ex]] in starred[name[self]._exempt_errors_full_wildcard_match] begin[:]
if compare[name[ex] in name[error_text]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[is_rpc_error_exempt] ( identifier[self] , identifier[error_text] ):
literal[string]
keyword[if] identifier[error_text] keyword[is] keyword[not] keyword[None] :
identifier[error_text] = identifier[error_text] . identifier[lower] (). identifier[strip] ()
keyword[else] :
identifier[error_text] = literal[string]
keyword[for] identifier[ex] keyword[in] identifier[self] . identifier[_exempt_errors_exact_match] :
keyword[if] identifier[error_text] == identifier[ex] :
keyword[return] keyword[True]
keyword[for] identifier[ex] keyword[in] identifier[self] . identifier[_exempt_errors_startwith_wildcard_match] :
keyword[if] identifier[error_text] . identifier[endswith] ( identifier[ex] ):
keyword[return] keyword[True]
keyword[for] identifier[ex] keyword[in] identifier[self] . identifier[_exempt_errors_endwith_wildcard_match] :
keyword[if] identifier[error_text] . identifier[startswith] ( identifier[ex] ):
keyword[return] keyword[True]
keyword[for] identifier[ex] keyword[in] identifier[self] . identifier[_exempt_errors_full_wildcard_match] :
keyword[if] identifier[ex] keyword[in] identifier[error_text] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def is_rpc_error_exempt(self, error_text):
"""
Check whether an RPC error message is excempt, thus NOT causing an exception.
On some devices the RPC operations may indicate an error response, even though
the operation actually succeeded. This may be in cases where a warning would be
more appropriate. In that case, the client may be better advised to simply
ignore that error and not raise an exception.
Note that there is also the "raise_mode", set on session and manager, which
controls the exception-raising behaviour in case of returned errors. This error
filter here is independent of that: No matter what the raise_mode says, if the
error message matches one of the exempt errors returned here, an exception
will not be raised.
The exempt error messages are defined in the _EXEMPT_ERRORS field of the device
handler object and can be overwritten by child classes. Wild cards are
possible: Start and/or end with a '*' to indicate that the text can appear at
the start, the end or the middle of the error message to still match. All
comparisons are case insensitive.
Return True/False depending on found match.
"""
if error_text is not None:
error_text = error_text.lower().strip() # depends on [control=['if'], data=['error_text']]
else:
error_text = 'no error given'
# Compare the error text against all the exempt errors.
for ex in self._exempt_errors_exact_match:
if error_text == ex:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ex']]
for ex in self._exempt_errors_startwith_wildcard_match:
if error_text.endswith(ex):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ex']]
for ex in self._exempt_errors_endwith_wildcard_match:
if error_text.startswith(ex):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ex']]
for ex in self._exempt_errors_full_wildcard_match:
if ex in error_text:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ex']]
return False
|
def set_gene_name(self,name):
    """Set the gene name option.

    :param name: gene name to record
    :type name: string
    """
    # Options namedtuple is immutable, so build a replacement copy.
    self._options = self._options._replace(**{'gene_name': name})
|
def function[set_gene_name, parameter[self, name]]:
constant[assign a gene name
:param name: name
:type name: string
]
name[self]._options assign[=] call[name[self]._options._replace, parameter[]]
|
keyword[def] identifier[set_gene_name] ( identifier[self] , identifier[name] ):
literal[string]
identifier[self] . identifier[_options] = identifier[self] . identifier[_options] . identifier[_replace] ( identifier[gene_name] = identifier[name] )
|
def set_gene_name(self, name):
"""assign a gene name
:param name: name
:type name: string
"""
self._options = self._options._replace(gene_name=name)
|
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Raster Map File Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
self.filename = filename
self._load_raster_text(path)
if spatial:
# Get well known binary from the raster file using the MapKit RasterLoader
wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session,
grassRasterPath=path,
srid=str(spatialReferenceID),
noData='0')
self.raster = wkbRaster
|
def function[_read, parameter[self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile]]:
constant[
Raster Map File Read from File Method
]
name[self].fileExtension assign[=] name[extension]
name[self].filename assign[=] name[filename]
call[name[self]._load_raster_text, parameter[name[path]]]
if name[spatial] begin[:]
variable[wkbRaster] assign[=] call[name[RasterLoader].grassAsciiRasterToWKB, parameter[]]
name[self].raster assign[=] name[wkbRaster]
|
keyword[def] identifier[_read] ( identifier[self] , identifier[directory] , identifier[filename] , identifier[session] , identifier[path] , identifier[name] , identifier[extension] , identifier[spatial] , identifier[spatialReferenceID] , identifier[replaceParamFile] ):
literal[string]
identifier[self] . identifier[fileExtension] = identifier[extension]
identifier[self] . identifier[filename] = identifier[filename]
identifier[self] . identifier[_load_raster_text] ( identifier[path] )
keyword[if] identifier[spatial] :
identifier[wkbRaster] = identifier[RasterLoader] . identifier[grassAsciiRasterToWKB] ( identifier[session] = identifier[session] ,
identifier[grassRasterPath] = identifier[path] ,
identifier[srid] = identifier[str] ( identifier[spatialReferenceID] ),
identifier[noData] = literal[string] )
identifier[self] . identifier[raster] = identifier[wkbRaster]
|
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Raster Map File Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
self.filename = filename
self._load_raster_text(path)
if spatial:
# Get well known binary from the raster file using the MapKit RasterLoader
wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session, grassRasterPath=path, srid=str(spatialReferenceID), noData='0')
self.raster = wkbRaster # depends on [control=['if'], data=[]]
|
def parse_param(param, include_desc=False):
    """Parse a single typed parameter statement."""
    declaration, sep, description = param.partition(':')
    # Description is only kept on request; leading whitespace is dropped.
    description = description.lstrip() if include_desc else None
    if sep == "":
        raise ValidationError("Invalid parameter declaration in docstring, missing colon", declaration=param)
    # Split "name (type)" on the first space.
    pname, _, type_str = declaration.partition(' ')
    # The type must be wrapped in parentheses, e.g. "(int)".
    if not (len(type_str) >= 2 and type_str.startswith('(') and type_str.endswith(')')):
        raise ValidationError("Invalid parameter type string not enclosed in ( ) characters", param_string=declaration, type_string=type_str)
    return pname, ParameterInfo(type_str[1:-1], [], description)
|
def function[parse_param, parameter[param, include_desc]]:
constant[Parse a single typed parameter statement.]
<ast.Tuple object at 0x7da1b026e2f0> assign[=] call[name[param].partition, parameter[constant[:]]]
if <ast.UnaryOp object at 0x7da1b026c820> begin[:]
variable[desc] assign[=] constant[None]
if compare[name[_colon] equal[==] constant[]] begin[:]
<ast.Raise object at 0x7da1b026d180>
<ast.Tuple object at 0x7da1b026c700> assign[=] call[name[param_def].partition, parameter[constant[ ]]]
if <ast.BoolOp object at 0x7da1b026d030> begin[:]
<ast.Raise object at 0x7da1b026caf0>
variable[param_type] assign[=] call[name[param_type]][<ast.Slice object at 0x7da1b026e9e0>]
return[tuple[[<ast.Name object at 0x7da1b026f220>, <ast.Call object at 0x7da1b026f160>]]]
|
keyword[def] identifier[parse_param] ( identifier[param] , identifier[include_desc] = keyword[False] ):
literal[string]
identifier[param_def] , identifier[_colon] , identifier[desc] = identifier[param] . identifier[partition] ( literal[string] )
keyword[if] keyword[not] identifier[include_desc] :
identifier[desc] = keyword[None]
keyword[else] :
identifier[desc] = identifier[desc] . identifier[lstrip] ()
keyword[if] identifier[_colon] == literal[string] :
keyword[raise] identifier[ValidationError] ( literal[string] , identifier[declaration] = identifier[param] )
identifier[param_name] , identifier[_space] , identifier[param_type] = identifier[param_def] . identifier[partition] ( literal[string] )
keyword[if] identifier[len] ( identifier[param_type] )< literal[int] keyword[or] identifier[param_type] [ literal[int] ]!= literal[string] keyword[or] identifier[param_type] [- literal[int] ]!= literal[string] :
keyword[raise] identifier[ValidationError] ( literal[string] , identifier[param_string] = identifier[param_def] , identifier[type_string] = identifier[param_type] )
identifier[param_type] = identifier[param_type] [ literal[int] :- literal[int] ]
keyword[return] identifier[param_name] , identifier[ParameterInfo] ( identifier[param_type] ,[], identifier[desc] )
|
def parse_param(param, include_desc=False):
"""Parse a single typed parameter statement."""
(param_def, _colon, desc) = param.partition(':')
if not include_desc:
desc = None # depends on [control=['if'], data=[]]
else:
desc = desc.lstrip()
if _colon == '':
raise ValidationError('Invalid parameter declaration in docstring, missing colon', declaration=param) # depends on [control=['if'], data=[]]
(param_name, _space, param_type) = param_def.partition(' ')
if len(param_type) < 2 or param_type[0] != '(' or param_type[-1] != ')':
raise ValidationError('Invalid parameter type string not enclosed in ( ) characters', param_string=param_def, type_string=param_type) # depends on [control=['if'], data=[]]
param_type = param_type[1:-1]
return (param_name, ParameterInfo(param_type, [], desc))
|
def Sh(L: float, h: float, D: float) -> float:
    """
    Calculate the Sherwood number.
    :param L: [m] mass transfer surface characteristic length.
    :param h: [m/s] mass transfer coefficient.
    :param D: [m2/s] fluid mass diffusivity.
    :returns: float
    """
    # Sh = (convective mass transfer) / (diffusive mass transport).
    sherwood = (h * L) / D
    return sherwood
|
def function[Sh, parameter[L, h, D]]:
constant[
Calculate the Sherwood number.
:param L: [m] mass transfer surface characteristic length.
:param h: [m/s] mass transfer coefficient.
:param D: [m2/s] fluid mass diffusivity.
:returns: float
]
return[binary_operation[binary_operation[name[h] * name[L]] / name[D]]]
|
keyword[def] identifier[Sh] ( identifier[L] : identifier[float] , identifier[h] : identifier[float] , identifier[D] : identifier[float] )-> identifier[float] :
literal[string]
keyword[return] identifier[h] * identifier[L] / identifier[D]
|
def Sh(L: float, h: float, D: float) -> float:
"""
Calculate the Sherwood number.
:param L: [m] mass transfer surface characteristic length.
:param h: [m/s] mass transfer coefficient.
:param D: [m2/s] fluid mass diffusivity.
:returns: float
"""
return h * L / D
|
def user_get(self, domain, userid):
    """
    Fetch a single user definition from the server.
    :param AuthDomain domain: The authentication domain for the user.
    :param userid: The user ID.
    :raise: :exc:`couchbase.exceptions.HTTPError` if the user does not exist.
    :return: :class:`~.HttpResult`. The user can be obtained from the
        returned object's `value` property.
    """
    endpoint = self._get_management_path(domain, userid)
    return self.http_request(method='GET', path=endpoint)
|
def function[user_get, parameter[self, domain, userid]]:
constant[
Retrieve a user from the server
:param AuthDomain domain: The authentication domain for the user.
:param userid: The user ID.
:raise: :exc:`couchbase.exceptions.HTTPError` if the user does not exist.
:return: :class:`~.HttpResult`. The user can be obtained from the
returned object's `value` property.
]
variable[path] assign[=] call[name[self]._get_management_path, parameter[name[domain], name[userid]]]
return[call[name[self].http_request, parameter[]]]
|
keyword[def] identifier[user_get] ( identifier[self] , identifier[domain] , identifier[userid] ):
literal[string]
identifier[path] = identifier[self] . identifier[_get_management_path] ( identifier[domain] , identifier[userid] )
keyword[return] identifier[self] . identifier[http_request] ( identifier[path] = identifier[path] ,
identifier[method] = literal[string] )
|
def user_get(self, domain, userid):
"""
Retrieve a user from the server
:param AuthDomain domain: The authentication domain for the user.
:param userid: The user ID.
:raise: :exc:`couchbase.exceptions.HTTPError` if the user does not exist.
:return: :class:`~.HttpResult`. The user can be obtained from the
returned object's `value` property.
"""
path = self._get_management_path(domain, userid)
return self.http_request(path=path, method='GET')
|
def get_gradebooks(self):
    """Pass through to provider GradebookLookupSession.get_gradebooks"""
    # Implemented from kitosid template for -
    # osid.resource.BinLookupSession.get_bins_template
    provider_catalogs = self._get_provider_session('gradebook_lookup_session').get_gradebooks()
    # Wrap each provider catalog in the kitosid Gradebook facade.
    wrapped = [Gradebook(self._provider_manager, catalog, self._runtime, self._proxy)
               for catalog in provider_catalogs]
    return GradebookList(wrapped)
|
def function[get_gradebooks, parameter[self]]:
constant[Pass through to provider GradebookLookupSession.get_gradebooks]
variable[catalogs] assign[=] call[call[name[self]._get_provider_session, parameter[constant[gradebook_lookup_session]]].get_gradebooks, parameter[]]
variable[cat_list] assign[=] list[[]]
for taget[name[cat]] in starred[name[catalogs]] begin[:]
call[name[cat_list].append, parameter[call[name[Gradebook], parameter[name[self]._provider_manager, name[cat], name[self]._runtime, name[self]._proxy]]]]
return[call[name[GradebookList], parameter[name[cat_list]]]]
|
keyword[def] identifier[get_gradebooks] ( identifier[self] ):
literal[string]
identifier[catalogs] = identifier[self] . identifier[_get_provider_session] ( literal[string] ). identifier[get_gradebooks] ()
identifier[cat_list] =[]
keyword[for] identifier[cat] keyword[in] identifier[catalogs] :
identifier[cat_list] . identifier[append] ( identifier[Gradebook] ( identifier[self] . identifier[_provider_manager] , identifier[cat] , identifier[self] . identifier[_runtime] , identifier[self] . identifier[_proxy] ))
keyword[return] identifier[GradebookList] ( identifier[cat_list] )
|
def get_gradebooks(self):
"""Pass through to provider GradebookLookupSession.get_gradebooks"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_template
catalogs = self._get_provider_session('gradebook_lookup_session').get_gradebooks()
cat_list = []
for cat in catalogs:
cat_list.append(Gradebook(self._provider_manager, cat, self._runtime, self._proxy)) # depends on [control=['for'], data=['cat']]
return GradebookList(cat_list)
|
def assignIPAddresses(self, prefix=None):
    '''
    Assign IP addresses to all interfaces on hosts and routers in the
    network.
    NB: this method assumes that all interfaces are assigned
    addresses on the same subnet. If you don't want that behavior,
    the setInterfaceAddresses method must be used.
    '''
    # Default to the 10.0.0.0/8 block when no prefix is supplied.
    if prefix:
        subnet = ipaddress.IPv4Network(str(prefix),strict=False)
    else:
        subnet = ipaddress.IPv4Network('10.0.0.0/8')
    addr_iter = subnet.hosts()
    # Only interfaces attached to hosts and routers get numbered;
    # switch-side interfaces are left alone.
    numbered_nodes = self.hosts + self.routers
    for u,v in sorted(self.links):
        link_info = self.getLink(u,v)
        for endpoint in [u,v]:
            if endpoint not in numbered_nodes:
                continue
            iface_name = link_info[endpoint]
            iface = self.getNode(endpoint)['nodeobj'].getInterface(iface_name)
            iface.ipaddr = next(addr_iter)
            iface.netmask = subnet.netmask
|
def function[assignIPAddresses, parameter[self, prefix]]:
constant[
Assign IP addresses to all interfaces on hosts and routers in the
network.
NB: this method assumes that all interfaces are assigned
addresses on the same subnet. If you don't want that behavior,
the setInterfaceAddresses method must be used.
]
if <ast.UnaryOp object at 0x7da18eb57fa0> begin[:]
variable[subnet] assign[=] call[name[ipaddress].IPv4Network, parameter[constant[10.0.0.0/8]]]
variable[ipgenerator] assign[=] call[name[subnet].hosts, parameter[]]
variable[nodes_to_number] assign[=] binary_operation[name[self].hosts + name[self].routers]
for taget[tuple[[<ast.Name object at 0x7da18eb55c90>, <ast.Name object at 0x7da18eb55780>]]] in starred[call[name[sorted], parameter[name[self].links]]] begin[:]
variable[linkdata] assign[=] call[name[self].getLink, parameter[name[u], name[v]]]
for taget[name[node]] in starred[list[[<ast.Name object at 0x7da18eb543a0>, <ast.Name object at 0x7da18eb56920>]]] begin[:]
if compare[name[node] in name[nodes_to_number]] begin[:]
variable[ifname] assign[=] call[name[linkdata]][name[node]]
variable[intf] assign[=] call[call[call[name[self].getNode, parameter[name[node]]]][constant[nodeobj]].getInterface, parameter[name[ifname]]]
name[intf].ipaddr assign[=] call[name[next], parameter[name[ipgenerator]]]
name[intf].netmask assign[=] name[subnet].netmask
|
keyword[def] identifier[assignIPAddresses] ( identifier[self] , identifier[prefix] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[prefix] :
identifier[subnet] = identifier[ipaddress] . identifier[IPv4Network] ( literal[string] )
keyword[else] :
identifier[subnet] = identifier[ipaddress] . identifier[IPv4Network] ( identifier[str] ( identifier[prefix] ), identifier[strict] = keyword[False] )
identifier[ipgenerator] = identifier[subnet] . identifier[hosts] ()
identifier[nodes_to_number] = identifier[self] . identifier[hosts] + identifier[self] . identifier[routers]
keyword[for] identifier[u] , identifier[v] keyword[in] identifier[sorted] ( identifier[self] . identifier[links] ):
identifier[linkdata] = identifier[self] . identifier[getLink] ( identifier[u] , identifier[v] )
keyword[for] identifier[node] keyword[in] [ identifier[u] , identifier[v] ]:
keyword[if] identifier[node] keyword[in] identifier[nodes_to_number] :
identifier[ifname] = identifier[linkdata] [ identifier[node] ]
identifier[intf] = identifier[self] . identifier[getNode] ( identifier[node] )[ literal[string] ]. identifier[getInterface] ( identifier[ifname] )
identifier[intf] . identifier[ipaddr] = identifier[next] ( identifier[ipgenerator] )
identifier[intf] . identifier[netmask] = identifier[subnet] . identifier[netmask]
|
def assignIPAddresses(self, prefix=None):
"""
Assign IP addresses to all interfaces on hosts and routers in the
network.
NB: this method assumes that all interfaces are assigned
addresses on the same subnet. If you don't want that behavior,
the setInterfaceAddresses method must be used.
"""
if not prefix:
subnet = ipaddress.IPv4Network('10.0.0.0/8') # depends on [control=['if'], data=[]]
else:
subnet = ipaddress.IPv4Network(str(prefix), strict=False)
ipgenerator = subnet.hosts() # collect all links; figure out which ones need to be numbered (i.e.,
# only interfaces connected to hosts and routers)
nodes_to_number = self.hosts + self.routers
for (u, v) in sorted(self.links):
linkdata = self.getLink(u, v)
for node in [u, v]:
if node in nodes_to_number:
ifname = linkdata[node]
intf = self.getNode(node)['nodeobj'].getInterface(ifname)
intf.ipaddr = next(ipgenerator)
intf.netmask = subnet.netmask # depends on [control=['if'], data=['node']] # depends on [control=['for'], data=['node']] # depends on [control=['for'], data=[]]
|
def from_ini(cls, folder, ini_file='fpp.ini', ichrone='mist', recalc=False,
             refit_trap=False, **kwargs):
    """
    To enable simple usage, initializes a FPPCalculation from a .ini file
    By default, a file called ``fpp.ini`` will be looked for in the
    current folder. Also present must be a ``star.ini`` file that
    contains the observed properties of the target star.
    ``fpp.ini`` must be of the following form::
        name = k2oi
        ra = 11:30:14.510
        dec = +07:35:18.21
        period = 32.988 #days
        rprs = 0.0534 #Rp/Rstar
        photfile = lc_k2oi.csv
        [constraints]
        maxrad = 10 #exclusion radius [arcsec]
        secthresh = 0.001 #maximum allowed secondary signal depth
        #This variable defines contrast curves
        #ccfiles = Keck_J.cc, Lick_J.cc
    Photfile must be a text file with columns ``(days_from_midtransit,
    flux, flux_err)``.  Both whitespace- and comma-delimited
    will be tried, using ``np.loadtxt``.  Photfile need not be there
    if there is a pickled :class:`TransitSignal` saved in the same
    directory as ``ini_file``, named ``trsig.pkl`` (or another name
    as defined by ``trsig`` keyword in ``.ini`` file).
    ``star.ini`` should look something like the following::
        B = 15.005, 0.06
        V = 13.496, 0.05
        g = 14.223, 0.05
        r = 12.858, 0.04
        i = 11.661, 0.08
        J = 9.763, 0.03
        H = 9.135, 0.03
        K = 8.899, 0.02
        W1 = 8.769, 0.023
        W2 = 8.668, 0.02
        W3 = 8.552, 0.025
        Kepler = 12.473
        #Teff = 3503, 80
        #feh = 0.09, 0.09
        #logg = 4.89, 0.1
    Any star properties can be defined; if errors are included
    then they will be used in the :class:`isochrones.StarModel`
    MCMC fit.
    Spectroscopic parameters (``Teff, feh, logg``) are optional.
    If included, then they will also be included in
    :class:`isochrones.StarModel` fit.  A magnitude for the
    band in which the transit signal is observed (e.g., ``Kepler``)
    is required, though need not have associated uncertainty.
    :param folder:
        Folder to find configuration files.
    :param ini_file:
        Input configuration file.
    :param star_ini_file:
        Input config file for :class:`isochrones.StarModel` fits.
    :param recalc:
        Whether to re-calculate :class:`PopulationSet`, if a
        ``popset.h5`` file is already present
    :param **kwargs:
        Keyword arguments passed to :class:`PopulationSet`.
    Creates:
        * ``trsig.pkl``: the pickled :class:`vespa.TransitSignal` object.
        * ``starfield.h5``: the TRILEGAL field star simulation
        * ``starmodel.h5``: the :class:`isochrones.StarModel` fit
        * ``popset.h5``: the :class:`vespa.PopulationSet` object
          representing the model population simulations.
    Raises
    ------
    RuntimeError :
        If single, double, and triple starmodels are
        not computed, then raises with admonition to run
        `starfit --all`.
    AttributeError :
        If `trsig.pkl` not present in folder, and
        `photfile` is not defined in config file.
    """
    # Check if all starmodel fits are done.
    # If not, tell user to run 'starfit --all'
    config = ConfigObj(os.path.join(folder, ini_file))
    # Load required entries from ini_file
    try:
        name = config['name']
        ra, dec = config['ra'], config['dec']
        period = float(config['period'])
        rprs = float(config['rprs'])
    except KeyError as err:
        raise KeyError('Missing required element of ini file: {}'.format(err))
    # Cadence is optional; fall back to the Kepler long cadence with a warning.
    try:
        cadence = float(config['cadence'])
    except KeyError:
        logging.warning('Cadence not provided in fpp.ini; defaulting to Kepler cadence.')
        logging.warning('If this is not a Kepler target, please set cadence (in days).')
        cadence = 1626./86400 # Default to Kepler cadence
    # Helper: interpret relative paths in the config file as relative to `folder`.
    def fullpath(filename):
        if os.path.isabs(filename):
            return filename
        else:
            return os.path.join(folder, filename)
    # Non-required entries with default values
    popset_file = fullpath(config.get('popset', 'popset.h5'))
    starfield_file = fullpath(config.get('starfield', 'starfield.h5'))
    trsig_file = fullpath(config.get('trsig', 'trsig.pkl'))
    # Check for StarModel fits
    starmodel_basename = config.get('starmodel_basename',
                                    '{}_starmodel'.format(ichrone))
    single_starmodel_file = os.path.join(folder,'{}_single.h5'.format(starmodel_basename))
    binary_starmodel_file = os.path.join(folder,'{}_binary.h5'.format(starmodel_basename))
    triple_starmodel_file = os.path.join(folder,'{}_triple.h5'.format(starmodel_basename))
    # All three multiplicity scenarios must have pre-computed fits on disk.
    try:
        single_starmodel = StarModel.load_hdf(single_starmodel_file)
        binary_starmodel = StarModel.load_hdf(binary_starmodel_file)
        triple_starmodel = StarModel.load_hdf(triple_starmodel_file)
    except Exception as e:
        print(e)
        raise RuntimeError('Cannot load StarModels. ' +
                           'Please run `starfit --all {}`.'.format(folder))
    # Create (or load) TransitSignal
    if os.path.exists(trsig_file):
        logging.info('Loading transit signal from {}...'.format(trsig_file))
        with open(trsig_file, 'rb') as f:
            trsig = pickle.load(f)
    else:
        try:
            photfile = fullpath(config['photfile'])
        except KeyError:
            raise AttributeError('If transit pickle file (trsig.pkl) ' +
                                 'not present, "photfile" must be' +
                                 'defined.')
        trsig = TransitSignal.from_ascii(photfile, P=period, name=name)
        # Run (or re-run) the trapezoidal MCMC fit and cache the result.
        if not trsig.hasMCMC or refit_trap:
            logging.info('Fitting transitsignal with MCMC...')
            trsig.MCMC()
            trsig.save(trsig_file)
    # Create (or load) PopulationSet
    do_only = DEFAULT_MODELS
    if os.path.exists(popset_file):
        if recalc:
            os.remove(popset_file)
        else:
            # Only (re)generate the models not already stored in the HDF file.
            with pd.HDFStore(popset_file) as store:
                do_only = [m for m in DEFAULT_MODELS if m not in store]
            # Check that properties of saved population match requested
            try:
                popset = PopulationSet.load_hdf(popset_file)
                for pop in popset.poplist:
                    if pop.cadence != cadence:
                        raise ValueError('Requested cadence ({}) '.format(cadence) +
                                         'does not match stored {})! Set recalc=True.'.format(pop.cadence))
            # NOTE(review): bare except/raise is a no-op pass-through; kept as-is.
            except:
                raise
    if do_only:
        logging.info('Generating {} models for PopulationSet...'.format(do_only))
    else:
        logging.info('Populations ({}) already generated.'.format(DEFAULT_MODELS))
    popset = PopulationSet(period=period, cadence=cadence,
                           mags=single_starmodel.mags,
                           ra=ra, dec=dec,
                           trilegal_filename=starfield_file, # Maybe change parameter name?
                           starmodel=single_starmodel,
                           binary_starmodel=binary_starmodel,
                           triple_starmodel=triple_starmodel,
                           rprs=rprs, do_only=do_only,
                           savefile=popset_file, **kwargs)
    fpp = cls(trsig, popset, folder=folder)
    #############
    # Apply constraints
    # Exclusion radius
    maxrad = float(config['constraints']['maxrad'])
    fpp.set_maxrad(maxrad)
    if 'secthresh' in config['constraints']:
        secthresh = float(config['constraints']['secthresh'])
        if not np.isnan(secthresh):
            fpp.apply_secthresh(secthresh)
    # Odd-even constraint
    diff = 3 * np.max(trsig.depthfit[1])
    fpp.constrain_oddeven(diff)
    #apply contrast curve constraints if present
    if 'ccfiles' in config['constraints']:
        ccfiles = config['constraints']['ccfiles']
        # A single filename comes through as a bare string; normalize to a list.
        if isinstance(ccfiles, string_types):
            ccfiles = [ccfiles]
        for ccfile in ccfiles:
            if not os.path.isabs(ccfile):
                ccfile = os.path.join(folder, ccfile)
            # Filenames are expected to look like "<instrument>_<band>.cc".
            m = re.search('(\w+)_(\w+)\.cc',os.path.basename(ccfile))
            if not m:
                logging.warning('Invalid CC filename ({}); '.format(ccfile) +
                                'skipping.')
                continue
            else:
                band = m.group(2)
                inst = m.group(1)
                # NOTE(review): this reassigns `name`, shadowing the target
                # name loaded from the config file above.
                name = '{} {}-band'.format(inst, band)
                cc = ContrastCurveFromFile(ccfile, band, name=name)
                fpp.apply_cc(cc)
    #apply "velocity contrast curve" if present
    if 'vcc' in config['constraints']:
        dv = float(config['constraints']['vcc'][0])
        dmag = float(config['constraints']['vcc'][1])
        vcc = VelocityContrastCurve(dv, dmag)
        fpp.apply_vcc(vcc)
    return fpp
|
def function[from_ini, parameter[cls, folder, ini_file, ichrone, recalc, refit_trap]]:
constant[
To enable simple usage, initializes a FPPCalculation from a .ini file
By default, a file called ``fpp.ini`` will be looked for in the
current folder. Also present must be a ``star.ini`` file that
contains the observed properties of the target star.
``fpp.ini`` must be of the following form::
name = k2oi
ra = 11:30:14.510
dec = +07:35:18.21
period = 32.988 #days
rprs = 0.0534 #Rp/Rstar
photfile = lc_k2oi.csv
[constraints]
maxrad = 10 #exclusion radius [arcsec]
secthresh = 0.001 #maximum allowed secondary signal depth
#This variable defines contrast curves
#ccfiles = Keck_J.cc, Lick_J.cc
Photfile must be a text file with columns ``(days_from_midtransit,
flux, flux_err)``. Both whitespace- and comma-delimited
will be tried, using ``np.loadtxt``. Photfile need not be there
if there is a pickled :class:`TransitSignal` saved in the same
directory as ``ini_file``, named ``trsig.pkl`` (or another name
as defined by ``trsig`` keyword in ``.ini`` file).
``star.ini`` should look something like the following::
B = 15.005, 0.06
V = 13.496, 0.05
g = 14.223, 0.05
r = 12.858, 0.04
i = 11.661, 0.08
J = 9.763, 0.03
H = 9.135, 0.03
K = 8.899, 0.02
W1 = 8.769, 0.023
W2 = 8.668, 0.02
W3 = 8.552, 0.025
Kepler = 12.473
#Teff = 3503, 80
#feh = 0.09, 0.09
#logg = 4.89, 0.1
Any star properties can be defined; if errors are included
then they will be used in the :class:`isochrones.StarModel`
MCMC fit.
Spectroscopic parameters (``Teff, feh, logg``) are optional.
If included, then they will also be included in
:class:`isochrones.StarModel` fit. A magnitude for the
band in which the transit signal is observed (e.g., ``Kepler``)
is required, though need not have associated uncertainty.
:param folder:
Folder to find configuration files.
:param ini_file:
Input configuration file.
:param star_ini_file:
Input config file for :class:`isochrones.StarModel` fits.
:param recalc:
Whether to re-calculate :class:`PopulationSet`, if a
``popset.h5`` file is already present
:param **kwargs:
Keyword arguments passed to :class:`PopulationSet`.
Creates:
* ``trsig.pkl``: the pickled :class:`vespa.TransitSignal` object.
* ``starfield.h5``: the TRILEGAL field star simulation
* ``starmodel.h5``: the :class:`isochrones.StarModel` fit
* ``popset.h5``: the :class:`vespa.PopulationSet` object
representing the model population simulations.
Raises
------
RuntimeError :
If single, double, and triple starmodels are
not computed, then raises with admonition to run
`starfit --all`.
AttributeError :
If `trsig.pkl` not present in folder, and
`photfile` is not defined in config file.
]
variable[config] assign[=] call[name[ConfigObj], parameter[call[name[os].path.join, parameter[name[folder], name[ini_file]]]]]
<ast.Try object at 0x7da20c6c6e60>
<ast.Try object at 0x7da20c6c7820>
def function[fullpath, parameter[filename]]:
if call[name[os].path.isabs, parameter[name[filename]]] begin[:]
return[name[filename]]
variable[popset_file] assign[=] call[name[fullpath], parameter[call[name[config].get, parameter[constant[popset], constant[popset.h5]]]]]
variable[starfield_file] assign[=] call[name[fullpath], parameter[call[name[config].get, parameter[constant[starfield], constant[starfield.h5]]]]]
variable[trsig_file] assign[=] call[name[fullpath], parameter[call[name[config].get, parameter[constant[trsig], constant[trsig.pkl]]]]]
variable[starmodel_basename] assign[=] call[name[config].get, parameter[constant[starmodel_basename], call[constant[{}_starmodel].format, parameter[name[ichrone]]]]]
variable[single_starmodel_file] assign[=] call[name[os].path.join, parameter[name[folder], call[constant[{}_single.h5].format, parameter[name[starmodel_basename]]]]]
variable[binary_starmodel_file] assign[=] call[name[os].path.join, parameter[name[folder], call[constant[{}_binary.h5].format, parameter[name[starmodel_basename]]]]]
variable[triple_starmodel_file] assign[=] call[name[os].path.join, parameter[name[folder], call[constant[{}_triple.h5].format, parameter[name[starmodel_basename]]]]]
<ast.Try object at 0x7da1b285b2b0>
if call[name[os].path.exists, parameter[name[trsig_file]]] begin[:]
call[name[logging].info, parameter[call[constant[Loading transit signal from {}...].format, parameter[name[trsig_file]]]]]
with call[name[open], parameter[name[trsig_file], constant[rb]]] begin[:]
variable[trsig] assign[=] call[name[pickle].load, parameter[name[f]]]
variable[do_only] assign[=] name[DEFAULT_MODELS]
if call[name[os].path.exists, parameter[name[popset_file]]] begin[:]
if name[recalc] begin[:]
call[name[os].remove, parameter[name[popset_file]]]
<ast.Try object at 0x7da1b2858dc0>
if name[do_only] begin[:]
call[name[logging].info, parameter[call[constant[Generating {} models for PopulationSet...].format, parameter[name[do_only]]]]]
variable[popset] assign[=] call[name[PopulationSet], parameter[]]
variable[fpp] assign[=] call[name[cls], parameter[name[trsig], name[popset]]]
variable[maxrad] assign[=] call[name[float], parameter[call[call[name[config]][constant[constraints]]][constant[maxrad]]]]
call[name[fpp].set_maxrad, parameter[name[maxrad]]]
if compare[constant[secthresh] in call[name[config]][constant[constraints]]] begin[:]
variable[secthresh] assign[=] call[name[float], parameter[call[call[name[config]][constant[constraints]]][constant[secthresh]]]]
if <ast.UnaryOp object at 0x7da1b28d5c90> begin[:]
call[name[fpp].apply_secthresh, parameter[name[secthresh]]]
variable[diff] assign[=] binary_operation[constant[3] * call[name[np].max, parameter[call[name[trsig].depthfit][constant[1]]]]]
call[name[fpp].constrain_oddeven, parameter[name[diff]]]
if compare[constant[ccfiles] in call[name[config]][constant[constraints]]] begin[:]
variable[ccfiles] assign[=] call[call[name[config]][constant[constraints]]][constant[ccfiles]]
if call[name[isinstance], parameter[name[ccfiles], name[string_types]]] begin[:]
variable[ccfiles] assign[=] list[[<ast.Name object at 0x7da1b28d4100>]]
for taget[name[ccfile]] in starred[name[ccfiles]] begin[:]
if <ast.UnaryOp object at 0x7da1b2891ff0> begin[:]
variable[ccfile] assign[=] call[name[os].path.join, parameter[name[folder], name[ccfile]]]
variable[m] assign[=] call[name[re].search, parameter[constant[(\w+)_(\w+)\.cc], call[name[os].path.basename, parameter[name[ccfile]]]]]
if <ast.UnaryOp object at 0x7da1b2890fd0> begin[:]
call[name[logging].warning, parameter[binary_operation[call[constant[Invalid CC filename ({}); ].format, parameter[name[ccfile]]] + constant[skipping.]]]]
continue
if compare[constant[vcc] in call[name[config]][constant[constraints]]] begin[:]
variable[dv] assign[=] call[name[float], parameter[call[call[call[name[config]][constant[constraints]]][constant[vcc]]][constant[0]]]]
variable[dmag] assign[=] call[name[float], parameter[call[call[call[name[config]][constant[constraints]]][constant[vcc]]][constant[1]]]]
variable[vcc] assign[=] call[name[VelocityContrastCurve], parameter[name[dv], name[dmag]]]
call[name[fpp].apply_vcc, parameter[name[vcc]]]
return[name[fpp]]
|
keyword[def] identifier[from_ini] ( identifier[cls] , identifier[folder] , identifier[ini_file] = literal[string] , identifier[ichrone] = literal[string] , identifier[recalc] = keyword[False] ,
identifier[refit_trap] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ConfigObj] ( identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , identifier[ini_file] ))
keyword[try] :
identifier[name] = identifier[config] [ literal[string] ]
identifier[ra] , identifier[dec] = identifier[config] [ literal[string] ], identifier[config] [ literal[string] ]
identifier[period] = identifier[float] ( identifier[config] [ literal[string] ])
identifier[rprs] = identifier[float] ( identifier[config] [ literal[string] ])
keyword[except] identifier[KeyError] keyword[as] identifier[err] :
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[err] ))
keyword[try] :
identifier[cadence] = identifier[float] ( identifier[config] [ literal[string] ])
keyword[except] identifier[KeyError] :
identifier[logging] . identifier[warning] ( literal[string] )
identifier[logging] . identifier[warning] ( literal[string] )
identifier[cadence] = literal[int] / literal[int]
keyword[def] identifier[fullpath] ( identifier[filename] ):
keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[filename] ):
keyword[return] identifier[filename]
keyword[else] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , identifier[filename] )
identifier[popset_file] = identifier[fullpath] ( identifier[config] . identifier[get] ( literal[string] , literal[string] ))
identifier[starfield_file] = identifier[fullpath] ( identifier[config] . identifier[get] ( literal[string] , literal[string] ))
identifier[trsig_file] = identifier[fullpath] ( identifier[config] . identifier[get] ( literal[string] , literal[string] ))
identifier[starmodel_basename] = identifier[config] . identifier[get] ( literal[string] ,
literal[string] . identifier[format] ( identifier[ichrone] ))
identifier[single_starmodel_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , literal[string] . identifier[format] ( identifier[starmodel_basename] ))
identifier[binary_starmodel_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , literal[string] . identifier[format] ( identifier[starmodel_basename] ))
identifier[triple_starmodel_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , literal[string] . identifier[format] ( identifier[starmodel_basename] ))
keyword[try] :
identifier[single_starmodel] = identifier[StarModel] . identifier[load_hdf] ( identifier[single_starmodel_file] )
identifier[binary_starmodel] = identifier[StarModel] . identifier[load_hdf] ( identifier[binary_starmodel_file] )
identifier[triple_starmodel] = identifier[StarModel] . identifier[load_hdf] ( identifier[triple_starmodel_file] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( identifier[e] )
keyword[raise] identifier[RuntimeError] ( literal[string] +
literal[string] . identifier[format] ( identifier[folder] ))
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[trsig_file] ):
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[trsig_file] ))
keyword[with] identifier[open] ( identifier[trsig_file] , literal[string] ) keyword[as] identifier[f] :
identifier[trsig] = identifier[pickle] . identifier[load] ( identifier[f] )
keyword[else] :
keyword[try] :
identifier[photfile] = identifier[fullpath] ( identifier[config] [ literal[string] ])
keyword[except] identifier[KeyError] :
keyword[raise] identifier[AttributeError] ( literal[string] +
literal[string] +
literal[string] )
identifier[trsig] = identifier[TransitSignal] . identifier[from_ascii] ( identifier[photfile] , identifier[P] = identifier[period] , identifier[name] = identifier[name] )
keyword[if] keyword[not] identifier[trsig] . identifier[hasMCMC] keyword[or] identifier[refit_trap] :
identifier[logging] . identifier[info] ( literal[string] )
identifier[trsig] . identifier[MCMC] ()
identifier[trsig] . identifier[save] ( identifier[trsig_file] )
identifier[do_only] = identifier[DEFAULT_MODELS]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[popset_file] ):
keyword[if] identifier[recalc] :
identifier[os] . identifier[remove] ( identifier[popset_file] )
keyword[else] :
keyword[with] identifier[pd] . identifier[HDFStore] ( identifier[popset_file] ) keyword[as] identifier[store] :
identifier[do_only] =[ identifier[m] keyword[for] identifier[m] keyword[in] identifier[DEFAULT_MODELS] keyword[if] identifier[m] keyword[not] keyword[in] identifier[store] ]
keyword[try] :
identifier[popset] = identifier[PopulationSet] . identifier[load_hdf] ( identifier[popset_file] )
keyword[for] identifier[pop] keyword[in] identifier[popset] . identifier[poplist] :
keyword[if] identifier[pop] . identifier[cadence] != identifier[cadence] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[cadence] )+
literal[string] . identifier[format] ( identifier[pop] . identifier[cadence] ))
keyword[except] :
keyword[raise]
keyword[if] identifier[do_only] :
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[do_only] ))
keyword[else] :
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[DEFAULT_MODELS] ))
identifier[popset] = identifier[PopulationSet] ( identifier[period] = identifier[period] , identifier[cadence] = identifier[cadence] ,
identifier[mags] = identifier[single_starmodel] . identifier[mags] ,
identifier[ra] = identifier[ra] , identifier[dec] = identifier[dec] ,
identifier[trilegal_filename] = identifier[starfield_file] ,
identifier[starmodel] = identifier[single_starmodel] ,
identifier[binary_starmodel] = identifier[binary_starmodel] ,
identifier[triple_starmodel] = identifier[triple_starmodel] ,
identifier[rprs] = identifier[rprs] , identifier[do_only] = identifier[do_only] ,
identifier[savefile] = identifier[popset_file] ,** identifier[kwargs] )
identifier[fpp] = identifier[cls] ( identifier[trsig] , identifier[popset] , identifier[folder] = identifier[folder] )
identifier[maxrad] = identifier[float] ( identifier[config] [ literal[string] ][ literal[string] ])
identifier[fpp] . identifier[set_maxrad] ( identifier[maxrad] )
keyword[if] literal[string] keyword[in] identifier[config] [ literal[string] ]:
identifier[secthresh] = identifier[float] ( identifier[config] [ literal[string] ][ literal[string] ])
keyword[if] keyword[not] identifier[np] . identifier[isnan] ( identifier[secthresh] ):
identifier[fpp] . identifier[apply_secthresh] ( identifier[secthresh] )
identifier[diff] = literal[int] * identifier[np] . identifier[max] ( identifier[trsig] . identifier[depthfit] [ literal[int] ])
identifier[fpp] . identifier[constrain_oddeven] ( identifier[diff] )
keyword[if] literal[string] keyword[in] identifier[config] [ literal[string] ]:
identifier[ccfiles] = identifier[config] [ literal[string] ][ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[ccfiles] , identifier[string_types] ):
identifier[ccfiles] =[ identifier[ccfiles] ]
keyword[for] identifier[ccfile] keyword[in] identifier[ccfiles] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isabs] ( identifier[ccfile] ):
identifier[ccfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , identifier[ccfile] )
identifier[m] = identifier[re] . identifier[search] ( literal[string] , identifier[os] . identifier[path] . identifier[basename] ( identifier[ccfile] ))
keyword[if] keyword[not] identifier[m] :
identifier[logging] . identifier[warning] ( literal[string] . identifier[format] ( identifier[ccfile] )+
literal[string] )
keyword[continue]
keyword[else] :
identifier[band] = identifier[m] . identifier[group] ( literal[int] )
identifier[inst] = identifier[m] . identifier[group] ( literal[int] )
identifier[name] = literal[string] . identifier[format] ( identifier[inst] , identifier[band] )
identifier[cc] = identifier[ContrastCurveFromFile] ( identifier[ccfile] , identifier[band] , identifier[name] = identifier[name] )
identifier[fpp] . identifier[apply_cc] ( identifier[cc] )
keyword[if] literal[string] keyword[in] identifier[config] [ literal[string] ]:
identifier[dv] = identifier[float] ( identifier[config] [ literal[string] ][ literal[string] ][ literal[int] ])
identifier[dmag] = identifier[float] ( identifier[config] [ literal[string] ][ literal[string] ][ literal[int] ])
identifier[vcc] = identifier[VelocityContrastCurve] ( identifier[dv] , identifier[dmag] )
identifier[fpp] . identifier[apply_vcc] ( identifier[vcc] )
keyword[return] identifier[fpp]
|
def from_ini(cls, folder, ini_file='fpp.ini', ichrone='mist', recalc=False, refit_trap=False, **kwargs):
"""
To enable simple usage, initializes a FPPCalculation from a .ini file
By default, a file called ``fpp.ini`` will be looked for in the
current folder. Also present must be a ``star.ini`` file that
contains the observed properties of the target star.
``fpp.ini`` must be of the following form::
name = k2oi
ra = 11:30:14.510
dec = +07:35:18.21
period = 32.988 #days
rprs = 0.0534 #Rp/Rstar
photfile = lc_k2oi.csv
[constraints]
maxrad = 10 #exclusion radius [arcsec]
secthresh = 0.001 #maximum allowed secondary signal depth
#This variable defines contrast curves
#ccfiles = Keck_J.cc, Lick_J.cc
Photfile must be a text file with columns ``(days_from_midtransit,
flux, flux_err)``. Both whitespace- and comma-delimited
will be tried, using ``np.loadtxt``. Photfile need not be there
if there is a pickled :class:`TransitSignal` saved in the same
directory as ``ini_file``, named ``trsig.pkl`` (or another name
as defined by ``trsig`` keyword in ``.ini`` file).
``star.ini`` should look something like the following::
B = 15.005, 0.06
V = 13.496, 0.05
g = 14.223, 0.05
r = 12.858, 0.04
i = 11.661, 0.08
J = 9.763, 0.03
H = 9.135, 0.03
K = 8.899, 0.02
W1 = 8.769, 0.023
W2 = 8.668, 0.02
W3 = 8.552, 0.025
Kepler = 12.473
#Teff = 3503, 80
#feh = 0.09, 0.09
#logg = 4.89, 0.1
Any star properties can be defined; if errors are included
then they will be used in the :class:`isochrones.StarModel`
MCMC fit.
Spectroscopic parameters (``Teff, feh, logg``) are optional.
If included, then they will also be included in
:class:`isochrones.StarModel` fit. A magnitude for the
band in which the transit signal is observed (e.g., ``Kepler``)
is required, though need not have associated uncertainty.
:param folder:
Folder to find configuration files.
:param ini_file:
Input configuration file.
:param star_ini_file:
Input config file for :class:`isochrones.StarModel` fits.
:param recalc:
Whether to re-calculate :class:`PopulationSet`, if a
``popset.h5`` file is already present
:param **kwargs:
Keyword arguments passed to :class:`PopulationSet`.
Creates:
* ``trsig.pkl``: the pickled :class:`vespa.TransitSignal` object.
* ``starfield.h5``: the TRILEGAL field star simulation
* ``starmodel.h5``: the :class:`isochrones.StarModel` fit
* ``popset.h5``: the :class:`vespa.PopulationSet` object
representing the model population simulations.
Raises
------
RuntimeError :
If single, double, and triple starmodels are
not computed, then raises with admonition to run
`starfit --all`.
AttributeError :
If `trsig.pkl` not present in folder, and
`photfile` is not defined in config file.
"""
# Check if all starmodel fits are done.
# If not, tell user to run 'starfit --all'
config = ConfigObj(os.path.join(folder, ini_file))
# Load required entries from ini_file
try:
name = config['name']
(ra, dec) = (config['ra'], config['dec'])
period = float(config['period'])
rprs = float(config['rprs']) # depends on [control=['try'], data=[]]
except KeyError as err:
raise KeyError('Missing required element of ini file: {}'.format(err)) # depends on [control=['except'], data=['err']]
try:
cadence = float(config['cadence']) # depends on [control=['try'], data=[]]
except KeyError:
logging.warning('Cadence not provided in fpp.ini; defaulting to Kepler cadence.')
logging.warning('If this is not a Kepler target, please set cadence (in days).')
cadence = 1626.0 / 86400 # Default to Kepler cadence # depends on [control=['except'], data=[]]
def fullpath(filename):
if os.path.isabs(filename):
return filename # depends on [control=['if'], data=[]]
else:
return os.path.join(folder, filename)
# Non-required entries with default values
popset_file = fullpath(config.get('popset', 'popset.h5'))
starfield_file = fullpath(config.get('starfield', 'starfield.h5'))
trsig_file = fullpath(config.get('trsig', 'trsig.pkl'))
# Check for StarModel fits
starmodel_basename = config.get('starmodel_basename', '{}_starmodel'.format(ichrone))
single_starmodel_file = os.path.join(folder, '{}_single.h5'.format(starmodel_basename))
binary_starmodel_file = os.path.join(folder, '{}_binary.h5'.format(starmodel_basename))
triple_starmodel_file = os.path.join(folder, '{}_triple.h5'.format(starmodel_basename))
try:
single_starmodel = StarModel.load_hdf(single_starmodel_file)
binary_starmodel = StarModel.load_hdf(binary_starmodel_file)
triple_starmodel = StarModel.load_hdf(triple_starmodel_file) # depends on [control=['try'], data=[]]
except Exception as e:
print(e)
raise RuntimeError('Cannot load StarModels. ' + 'Please run `starfit --all {}`.'.format(folder)) # depends on [control=['except'], data=['e']]
# Create (or load) TransitSignal
if os.path.exists(trsig_file):
logging.info('Loading transit signal from {}...'.format(trsig_file))
with open(trsig_file, 'rb') as f:
trsig = pickle.load(f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
try:
photfile = fullpath(config['photfile']) # depends on [control=['try'], data=[]]
except KeyError:
raise AttributeError('If transit pickle file (trsig.pkl) ' + 'not present, "photfile" must be' + 'defined.') # depends on [control=['except'], data=[]]
trsig = TransitSignal.from_ascii(photfile, P=period, name=name)
if not trsig.hasMCMC or refit_trap:
logging.info('Fitting transitsignal with MCMC...')
trsig.MCMC()
trsig.save(trsig_file) # depends on [control=['if'], data=[]]
# Create (or load) PopulationSet
do_only = DEFAULT_MODELS
if os.path.exists(popset_file):
if recalc:
os.remove(popset_file) # depends on [control=['if'], data=[]]
else:
with pd.HDFStore(popset_file) as store:
do_only = [m for m in DEFAULT_MODELS if m not in store] # depends on [control=['with'], data=['store']] # depends on [control=['if'], data=[]]
# Check that properties of saved population match requested
try:
popset = PopulationSet.load_hdf(popset_file)
for pop in popset.poplist:
if pop.cadence != cadence:
raise ValueError('Requested cadence ({}) '.format(cadence) + 'does not match stored {})! Set recalc=True.'.format(pop.cadence)) # depends on [control=['if'], data=['cadence']] # depends on [control=['for'], data=['pop']] # depends on [control=['try'], data=[]]
except:
raise # depends on [control=['except'], data=[]]
if do_only:
logging.info('Generating {} models for PopulationSet...'.format(do_only)) # depends on [control=['if'], data=[]]
else:
logging.info('Populations ({}) already generated.'.format(DEFAULT_MODELS)) # Maybe change parameter name?
popset = PopulationSet(period=period, cadence=cadence, mags=single_starmodel.mags, ra=ra, dec=dec, trilegal_filename=starfield_file, starmodel=single_starmodel, binary_starmodel=binary_starmodel, triple_starmodel=triple_starmodel, rprs=rprs, do_only=do_only, savefile=popset_file, **kwargs)
fpp = cls(trsig, popset, folder=folder)
#############
# Apply constraints
# Exclusion radius
maxrad = float(config['constraints']['maxrad'])
fpp.set_maxrad(maxrad)
if 'secthresh' in config['constraints']:
secthresh = float(config['constraints']['secthresh'])
if not np.isnan(secthresh):
fpp.apply_secthresh(secthresh) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Odd-even constraint
diff = 3 * np.max(trsig.depthfit[1])
fpp.constrain_oddeven(diff)
#apply contrast curve constraints if present
if 'ccfiles' in config['constraints']:
ccfiles = config['constraints']['ccfiles']
if isinstance(ccfiles, string_types):
ccfiles = [ccfiles] # depends on [control=['if'], data=[]]
for ccfile in ccfiles:
if not os.path.isabs(ccfile):
ccfile = os.path.join(folder, ccfile) # depends on [control=['if'], data=[]]
m = re.search('(\\w+)_(\\w+)\\.cc', os.path.basename(ccfile))
if not m:
logging.warning('Invalid CC filename ({}); '.format(ccfile) + 'skipping.')
continue # depends on [control=['if'], data=[]]
else:
band = m.group(2)
inst = m.group(1)
name = '{} {}-band'.format(inst, band)
cc = ContrastCurveFromFile(ccfile, band, name=name)
fpp.apply_cc(cc) # depends on [control=['for'], data=['ccfile']] # depends on [control=['if'], data=[]]
#apply "velocity contrast curve" if present
if 'vcc' in config['constraints']:
dv = float(config['constraints']['vcc'][0])
dmag = float(config['constraints']['vcc'][1])
vcc = VelocityContrastCurve(dv, dmag)
fpp.apply_vcc(vcc) # depends on [control=['if'], data=[]]
return fpp
|
def worker_activated(name, workers=None, profile='default'):
    '''
    Activate all the workers in the modjk load balancer.

    Example:

    .. code-block:: yaml

        loadbalancer:
          modjk.worker_activated:
            - workers:
              - app1
              - app2
    '''
    # Avoid a mutable default argument: substitute a fresh list when
    # the caller did not supply any workers.
    worker_list = [] if workers is None else workers
    return _bulk_state('modjk.bulk_activate', name, worker_list, profile)
|
def function[worker_activated, parameter[name, workers, profile]]:
constant[
Activate all the workers in the modjk load balancer
Example:
.. code-block:: yaml
loadbalancer:
modjk.worker_activated:
- workers:
- app1
- app2
]
if compare[name[workers] is constant[None]] begin[:]
variable[workers] assign[=] list[[]]
return[call[name[_bulk_state], parameter[constant[modjk.bulk_activate], name[name], name[workers], name[profile]]]]
|
keyword[def] identifier[worker_activated] ( identifier[name] , identifier[workers] = keyword[None] , identifier[profile] = literal[string] ):
literal[string]
keyword[if] identifier[workers] keyword[is] keyword[None] :
identifier[workers] =[]
keyword[return] identifier[_bulk_state] (
literal[string] , identifier[name] , identifier[workers] , identifier[profile]
)
|
def worker_activated(name, workers=None, profile='default'):
"""
Activate all the workers in the modjk load balancer
Example:
.. code-block:: yaml
loadbalancer:
modjk.worker_activated:
- workers:
- app1
- app2
"""
if workers is None:
workers = [] # depends on [control=['if'], data=['workers']]
return _bulk_state('modjk.bulk_activate', name, workers, profile)
|
def transform_non_affine(self, s):
    """
    Apply transformation to a Nx1 numpy array.

    Parameters
    ----------
    s : array
        Data to be transformed in display scale units.

    Return
    ------
    array or masked array
        Transformed data, in data value units.
    """
    top = self._T
    decades = self._M
    width = self._W
    lin = self._p
    # Work relative to the linear-region width W; the two 10** terms are
    # the positive and (quasi-linear) negative branches of the transform.
    shifted = s - width
    pos_part = 10 ** shifted
    neg_part = (lin ** 2) * 10 ** (-shifted / lin)
    scale = top * 10 ** (-(decades - width))
    return scale * (pos_part - neg_part + lin ** 2 - 1)
|
def function[transform_non_affine, parameter[self, s]]:
constant[
Apply transformation to a Nx1 numpy array.
Parameters
----------
s : array
Data to be transformed in display scale units.
Return
------
array or masked array
Transformed data, in data value units.
]
variable[T] assign[=] name[self]._T
variable[M] assign[=] name[self]._M
variable[W] assign[=] name[self]._W
variable[p] assign[=] name[self]._p
return[binary_operation[binary_operation[name[T] * binary_operation[constant[10] ** <ast.UnaryOp object at 0x7da1b1bac5b0>]] * binary_operation[binary_operation[binary_operation[binary_operation[constant[10] ** binary_operation[name[s] - name[W]]] - binary_operation[binary_operation[name[p] ** constant[2]] * binary_operation[constant[10] ** binary_operation[<ast.UnaryOp object at 0x7da1b1bad5d0> / name[p]]]]] + binary_operation[name[p] ** constant[2]]] - constant[1]]]]
|
keyword[def] identifier[transform_non_affine] ( identifier[self] , identifier[s] ):
literal[string]
identifier[T] = identifier[self] . identifier[_T]
identifier[M] = identifier[self] . identifier[_M]
identifier[W] = identifier[self] . identifier[_W]
identifier[p] = identifier[self] . identifier[_p]
keyword[return] identifier[T] * literal[int] **(-( identifier[M] - identifier[W] ))*( literal[int] **( identifier[s] - identifier[W] )-( identifier[p] ** literal[int] )* literal[int] **(-( identifier[s] - identifier[W] )/ identifier[p] )+ identifier[p] ** literal[int] - literal[int] )
|
def transform_non_affine(self, s):
"""
Apply transformation to a Nx1 numpy array.
Parameters
----------
s : array
Data to be transformed in display scale units.
Return
------
array or masked array
Transformed data, in data value units.
"""
T = self._T
M = self._M
W = self._W
p = self._p
# Calculate x
return T * 10 ** (-(M - W)) * (10 ** (s - W) - p ** 2 * 10 ** (-(s - W) / p) + p ** 2 - 1)
|
def check_input(self, token):
    """
    Performs checks on the input token. Raises an exception if unsupported.

    :param token: the token to check
    :type token: Token
    """
    if token is None:
        raise Exception(self.full_name + ": No token provided!")
    # Only string payloads are supported; anything else is rejected.
    if not isinstance(token.payload, str):
        raise Exception(self.full_name + ": Unhandled class: " + classes.get_classname(token.payload))
|
def function[check_input, parameter[self, token]]:
constant[
Performs checks on the input token. Raises an exception if unsupported.
:param token: the token to check
:type token: Token
]
if compare[name[token] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0619270>
if call[name[isinstance], parameter[name[token].payload, name[str]]] begin[:]
return[None]
<ast.Raise object at 0x7da1b0618b50>
|
keyword[def] identifier[check_input] ( identifier[self] , identifier[token] ):
literal[string]
keyword[if] identifier[token] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( identifier[self] . identifier[full_name] + literal[string] )
keyword[if] identifier[isinstance] ( identifier[token] . identifier[payload] , identifier[str] ):
keyword[return]
keyword[raise] identifier[Exception] ( identifier[self] . identifier[full_name] + literal[string] + identifier[classes] . identifier[get_classname] ( identifier[token] . identifier[payload] ))
|
def check_input(self, token):
"""
Performs checks on the input token. Raises an exception if unsupported.
:param token: the token to check
:type token: Token
"""
if token is None:
raise Exception(self.full_name + ': No token provided!') # depends on [control=['if'], data=[]]
if isinstance(token.payload, str):
return # depends on [control=['if'], data=[]]
raise Exception(self.full_name + ': Unhandled class: ' + classes.get_classname(token.payload))
|
def filter_human_only(stmts_in, **kwargs):
    """Filter out statements that are grounded, but not to a human gene.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.
    remove_bound: Optional[bool]
        If true, removes all bound conditions that are grounded but not to
        human genes. If false (default), filters out statements with boundary
        conditions that are grounded to non-human genes.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    from indra.databases import uniprot_client
    # Only strip offending bound conditions (instead of dropping whole
    # statements) when explicitly requested via the keyword argument.
    remove_bound = bool(kwargs.get('remove_bound'))
    dump_pkl = kwargs.get('save')
    logger.info('Filtering %d statements for human genes only...' %
                len(stmts_in))
    stmts_out = []

    def criterion(agent):
        # An agent passes unless it is grounded to a non-human UniProt entry.
        up_id = agent.db_refs.get('UP')
        if up_id and not uniprot_client.is_human(up_id):
            return False
        return True

    for st in stmts_in:
        keep = True
        for agent in st.agent_list():
            if agent is None:
                continue
            if not criterion(agent):
                keep = False
                break
            if remove_bound:
                _remove_bound_conditions(agent, criterion)
            elif _any_bound_condition_fails_criterion(agent, criterion):
                keep = False
                break
        if keep:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
|
def function[filter_human_only, parameter[stmts_in]]:
constant[Filter out statements that are grounded, but not to a human gene.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes all bound conditions that are grounded but not to human
genes. If false (default), filters out statements with boundary
conditions that are grounded to non-human genes.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
]
from relative_module[indra.databases] import module[uniprot_client]
if <ast.BoolOp object at 0x7da18fe90df0> begin[:]
variable[remove_bound] assign[=] constant[True]
variable[dump_pkl] assign[=] call[name[kwargs].get, parameter[constant[save]]]
call[name[logger].info, parameter[binary_operation[constant[Filtering %d statements for human genes only...] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[stmts_in]]]]]]
variable[stmts_out] assign[=] list[[]]
def function[criterion, parameter[agent]]:
variable[upid] assign[=] call[name[agent].db_refs.get, parameter[constant[UP]]]
if <ast.BoolOp object at 0x7da20c9913c0> begin[:]
return[constant[False]]
for taget[name[st]] in starred[name[stmts_in]] begin[:]
variable[human_genes] assign[=] constant[True]
for taget[name[agent]] in starred[call[name[st].agent_list, parameter[]]] begin[:]
if compare[name[agent] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da20c9907c0> begin[:]
variable[human_genes] assign[=] constant[False]
break
if name[remove_bound] begin[:]
call[name[_remove_bound_conditions], parameter[name[agent], name[criterion]]]
if name[human_genes] begin[:]
call[name[stmts_out].append, parameter[name[st]]]
call[name[logger].info, parameter[binary_operation[constant[%d statements after filter...] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[stmts_out]]]]]]
if name[dump_pkl] begin[:]
call[name[dump_statements], parameter[name[stmts_out], name[dump_pkl]]]
return[name[stmts_out]]
|
keyword[def] identifier[filter_human_only] ( identifier[stmts_in] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[indra] . identifier[databases] keyword[import] identifier[uniprot_client]
keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[and] identifier[kwargs] [ literal[string] ]:
identifier[remove_bound] = keyword[True]
keyword[else] :
identifier[remove_bound] = keyword[False]
identifier[dump_pkl] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] %
identifier[len] ( identifier[stmts_in] ))
identifier[stmts_out] =[]
keyword[def] identifier[criterion] ( identifier[agent] ):
identifier[upid] = identifier[agent] . identifier[db_refs] . identifier[get] ( literal[string] )
keyword[if] identifier[upid] keyword[and] keyword[not] identifier[uniprot_client] . identifier[is_human] ( identifier[upid] ):
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True]
keyword[for] identifier[st] keyword[in] identifier[stmts_in] :
identifier[human_genes] = keyword[True]
keyword[for] identifier[agent] keyword[in] identifier[st] . identifier[agent_list] ():
keyword[if] identifier[agent] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[criterion] ( identifier[agent] ):
identifier[human_genes] = keyword[False]
keyword[break]
keyword[if] identifier[remove_bound] :
identifier[_remove_bound_conditions] ( identifier[agent] , identifier[criterion] )
keyword[elif] identifier[_any_bound_condition_fails_criterion] ( identifier[agent] , identifier[criterion] ):
identifier[human_genes] = keyword[False]
keyword[break]
keyword[if] identifier[human_genes] :
identifier[stmts_out] . identifier[append] ( identifier[st] )
identifier[logger] . identifier[info] ( literal[string] % identifier[len] ( identifier[stmts_out] ))
keyword[if] identifier[dump_pkl] :
identifier[dump_statements] ( identifier[stmts_out] , identifier[dump_pkl] )
keyword[return] identifier[stmts_out]
|
def filter_human_only(stmts_in, **kwargs):
"""Filter out statements that are grounded, but not to a human gene.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes all bound conditions that are grounded but not to human
genes. If false (default), filters out statements with boundary
conditions that are grounded to non-human genes.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
from indra.databases import uniprot_client
if 'remove_bound' in kwargs and kwargs['remove_bound']:
remove_bound = True # depends on [control=['if'], data=[]]
else:
remove_bound = False
dump_pkl = kwargs.get('save')
logger.info('Filtering %d statements for human genes only...' % len(stmts_in))
stmts_out = []
def criterion(agent):
upid = agent.db_refs.get('UP')
if upid and (not uniprot_client.is_human(upid)):
return False # depends on [control=['if'], data=[]]
else:
return True
for st in stmts_in:
human_genes = True
for agent in st.agent_list():
if agent is not None:
if not criterion(agent):
human_genes = False
break # depends on [control=['if'], data=[]]
if remove_bound:
_remove_bound_conditions(agent, criterion) # depends on [control=['if'], data=[]]
elif _any_bound_condition_fails_criterion(agent, criterion):
human_genes = False
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['agent']] # depends on [control=['for'], data=['agent']]
if human_genes:
stmts_out.append(st) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['st']]
logger.info('%d statements after filter...' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl) # depends on [control=['if'], data=[]]
return stmts_out
|
def _drop_membership_multicast_socket(self):
"""
Drop membership to multicast
:rtype: None
"""
# Leave group
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_DROP_MEMBERSHIP,
self._membership_request
)
self._membership_request = None
|
def function[_drop_membership_multicast_socket, parameter[self]]:
constant[
Drop membership to multicast
:rtype: None
]
call[name[self]._multicast_socket.setsockopt, parameter[name[socket].IPPROTO_IP, name[socket].IP_DROP_MEMBERSHIP, name[self]._membership_request]]
name[self]._membership_request assign[=] constant[None]
|
keyword[def] identifier[_drop_membership_multicast_socket] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_multicast_socket] . identifier[setsockopt] (
identifier[socket] . identifier[IPPROTO_IP] ,
identifier[socket] . identifier[IP_DROP_MEMBERSHIP] ,
identifier[self] . identifier[_membership_request]
)
identifier[self] . identifier[_membership_request] = keyword[None]
|
def _drop_membership_multicast_socket(self):
"""
Drop membership to multicast
:rtype: None
"""
# Leave group
self._multicast_socket.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, self._membership_request)
self._membership_request = None
|
def _sync_to_disk(self):
"""Write any changes made on Refpkg to disk.
Other methods of Refpkg that alter the contents of the package
will call this method themselves. Generally you should never
have to call it by hand. The only exception would be if
another program has changed the Refpkg on disk while your
program is running and you want to force your version over it.
Otherwise it should only be called by other methods of refpkg.
"""
with self.open_manifest('w') as h:
json.dump(self.contents, h, indent=4)
h.write('\n')
|
def function[_sync_to_disk, parameter[self]]:
constant[Write any changes made on Refpkg to disk.
Other methods of Refpkg that alter the contents of the package
will call this method themselves. Generally you should never
have to call it by hand. The only exception would be if
another program has changed the Refpkg on disk while your
program is running and you want to force your version over it.
Otherwise it should only be called by other methods of refpkg.
]
with call[name[self].open_manifest, parameter[constant[w]]] begin[:]
call[name[json].dump, parameter[name[self].contents, name[h]]]
call[name[h].write, parameter[constant[
]]]
|
keyword[def] identifier[_sync_to_disk] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[open_manifest] ( literal[string] ) keyword[as] identifier[h] :
identifier[json] . identifier[dump] ( identifier[self] . identifier[contents] , identifier[h] , identifier[indent] = literal[int] )
identifier[h] . identifier[write] ( literal[string] )
|
def _sync_to_disk(self):
"""Write any changes made on Refpkg to disk.
Other methods of Refpkg that alter the contents of the package
will call this method themselves. Generally you should never
have to call it by hand. The only exception would be if
another program has changed the Refpkg on disk while your
program is running and you want to force your version over it.
Otherwise it should only be called by other methods of refpkg.
"""
with self.open_manifest('w') as h:
json.dump(self.contents, h, indent=4)
h.write('\n') # depends on [control=['with'], data=['h']]
|
def getKwConfig(self, kw):
    """ return the configuration of kw, dict
    USAGE: rdict = getKwConfig(kw)
    """
    # getKwAsDict() returns a nested mapping; unwrap the two outer
    # single-entry layers to reach the parameter dict.
    # ``dict.values()[0]`` only worked on Python 2 — on Python 3 dict
    # views are not subscriptable and raise TypeError, so use
    # next(iter(...)), which works on both.
    outer = self.getKwAsDict(kw)
    confd = next(iter(next(iter(outer.values())).values()))
    # Normalise parameter names to lower case for case-insensitive lookup.
    return {k.lower(): v for k, v in confd.items()}
|
def function[getKwConfig, parameter[self, kw]]:
constant[ return the configuration of kw, dict
USAGE: rdict = getKwConfig(kw)
]
variable[confd] assign[=] call[call[call[call[call[name[self].getKwAsDict, parameter[name[kw]]].values, parameter[]]][constant[0]].values, parameter[]]][constant[0]]
return[<ast.DictComp object at 0x7da1b0807df0>]
|
keyword[def] identifier[getKwConfig] ( identifier[self] , identifier[kw] ):
literal[string]
identifier[confd] = identifier[self] . identifier[getKwAsDict] ( identifier[kw] ). identifier[values] ()[ literal[int] ]. identifier[values] ()[ literal[int] ]
keyword[return] { identifier[k] . identifier[lower] (): identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[confd] . identifier[items] ()}
|
def getKwConfig(self, kw):
""" return the configuration of kw, dict
USAGE: rdict = getKwConfig(kw)
"""
confd = self.getKwAsDict(kw).values()[0].values()[0]
return {k.lower(): v for (k, v) in confd.items()}
|
def namedb_get_record_states_at(cur, history_id, block_number):
    """
    Get the state(s) that the given history record was in at a given block height.

    Normally, this is one state (i.e. if a name was registered at block 8, then it is in a NAME_REGISTRATION state in block 10)
    However, if the record changed at this block, then this method returns all states the record passed through.

    Returns an array of record states
    """
    # First, collect every state transition recorded exactly at this block.
    rows = namedb_query_execute(
        cur,
        'SELECT block_id,history_data FROM history WHERE history_id = ? AND block_id == ? ORDER BY block_id DESC,vtxindex DESC',
        (history_id, block_number))

    states = [simplejson.loads(row['history_data']) for row in rows]
    if states:
        # The record changed within this block; return all of its states.
        return states

    # No change at this block: fall back to the most recent prior state.
    rows = namedb_query_execute(
        cur,
        'SELECT block_id,history_data FROM history WHERE history_id = ? AND block_id < ? ORDER BY block_id DESC,vtxindex DESC LIMIT 1',
        (history_id, block_number))

    states.extend(simplejson.loads(row['history_data']) for row in rows)
    return states
|
def function[namedb_get_record_states_at, parameter[cur, history_id, block_number]]:
constant[
Get the state(s) that the given history record was in at a given block height.
Normally, this is one state (i.e. if a name was registered at block 8, then it is in a NAME_REGISTRATION state in block 10)
However, if the record changed at this block, then this method returns all states the record passed through.
Returns an array of record states
]
variable[query] assign[=] constant[SELECT block_id,history_data FROM history WHERE history_id = ? AND block_id == ? ORDER BY block_id DESC,vtxindex DESC]
variable[args] assign[=] tuple[[<ast.Name object at 0x7da20c6aa110>, <ast.Name object at 0x7da20c6aa3b0>]]
variable[history_rows] assign[=] call[name[namedb_query_execute], parameter[name[cur], name[query], name[args]]]
variable[ret] assign[=] list[[]]
for taget[name[row]] in starred[name[history_rows]] begin[:]
variable[history_data] assign[=] call[name[simplejson].loads, parameter[call[name[row]][constant[history_data]]]]
call[name[ret].append, parameter[name[history_data]]]
if compare[call[name[len], parameter[name[ret]]] greater[>] constant[0]] begin[:]
return[name[ret]]
variable[query] assign[=] constant[SELECT block_id,history_data FROM history WHERE history_id = ? AND block_id < ? ORDER BY block_id DESC,vtxindex DESC LIMIT 1]
variable[args] assign[=] tuple[[<ast.Name object at 0x7da1b16326e0>, <ast.Name object at 0x7da1b1632380>]]
variable[history_rows] assign[=] call[name[namedb_query_execute], parameter[name[cur], name[query], name[args]]]
for taget[name[row]] in starred[name[history_rows]] begin[:]
variable[history_data] assign[=] call[name[simplejson].loads, parameter[call[name[row]][constant[history_data]]]]
call[name[ret].append, parameter[name[history_data]]]
return[name[ret]]
|
keyword[def] identifier[namedb_get_record_states_at] ( identifier[cur] , identifier[history_id] , identifier[block_number] ):
literal[string]
identifier[query] = literal[string]
identifier[args] =( identifier[history_id] , identifier[block_number] )
identifier[history_rows] = identifier[namedb_query_execute] ( identifier[cur] , identifier[query] , identifier[args] )
identifier[ret] =[]
keyword[for] identifier[row] keyword[in] identifier[history_rows] :
identifier[history_data] = identifier[simplejson] . identifier[loads] ( identifier[row] [ literal[string] ])
identifier[ret] . identifier[append] ( identifier[history_data] )
keyword[if] identifier[len] ( identifier[ret] )> literal[int] :
keyword[return] identifier[ret]
identifier[query] = literal[string]
identifier[args] =( identifier[history_id] , identifier[block_number] )
identifier[history_rows] = identifier[namedb_query_execute] ( identifier[cur] , identifier[query] , identifier[args] )
keyword[for] identifier[row] keyword[in] identifier[history_rows] :
identifier[history_data] = identifier[simplejson] . identifier[loads] ( identifier[row] [ literal[string] ])
identifier[ret] . identifier[append] ( identifier[history_data] )
keyword[return] identifier[ret]
|
def namedb_get_record_states_at(cur, history_id, block_number):
"""
Get the state(s) that the given history record was in at a given block height.
Normally, this is one state (i.e. if a name was registered at block 8, then it is in a NAME_REGISTRATION state in block 10)
However, if the record changed at this block, then this method returns all states the record passed through.
Returns an array of record states
"""
query = 'SELECT block_id,history_data FROM history WHERE history_id = ? AND block_id == ? ORDER BY block_id DESC,vtxindex DESC'
args = (history_id, block_number)
history_rows = namedb_query_execute(cur, query, args)
ret = []
for row in history_rows:
history_data = simplejson.loads(row['history_data'])
ret.append(history_data) # depends on [control=['for'], data=['row']]
if len(ret) > 0:
# record changed in this block
return ret # depends on [control=['if'], data=[]]
# if the record did not change in this block, then find the last version of the record
query = 'SELECT block_id,history_data FROM history WHERE history_id = ? AND block_id < ? ORDER BY block_id DESC,vtxindex DESC LIMIT 1'
args = (history_id, block_number)
history_rows = namedb_query_execute(cur, query, args)
for row in history_rows:
history_data = simplejson.loads(row['history_data'])
ret.append(history_data) # depends on [control=['for'], data=['row']]
return ret
|
def setup_data_stream(
        self,
        connection_factory: Callable[[tuple], Connection],
        data_stream_factory: Callable[[Connection], DataStream]=DataStream) -> \
        DataStream:
    '''Create and setup a data stream.

    This function will set up passive and binary mode and handle
    connecting to the data connection.

    Coroutine.

    Args:
        connection_factory: A coroutine callback that takes an
            ``(host, port)`` tuple and returns a connection.
        data_stream_factory: A callback that wraps a connection in a
            data stream (defaults to :class:`DataStream`).

    Returns:
        DataStream
    '''
    # Switch the session to binary (image) mode before transferring data.
    yield from self._control_stream.write_command(Command('TYPE', 'I'))
    reply = yield from self._control_stream.read_reply()
    self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)
    # Ask the server for a passive-mode address for the data connection.
    address = yield from self.passive_mode()
    connection = yield from connection_factory(address)
    # TODO: unit test for following line for connections that have
    # the same port over time but within pool cleaning intervals
    connection.reset()
    yield from connection.connect()
    data_stream = data_stream_factory(connection)
    return data_stream
|
def function[setup_data_stream, parameter[self, connection_factory, data_stream_factory]]:
constant[Create and setup a data stream.
This function will set up passive and binary mode and handle
connecting to the data connection.
Args:
connection_factory: A coroutine callback that returns a connection
data_stream_factory: A callback that returns a data stream
Coroutine.
Returns:
DataStream
]
<ast.YieldFrom object at 0x7da1b2345f60>
variable[reply] assign[=] <ast.YieldFrom object at 0x7da1b2347520>
call[name[self].raise_if_not_match, parameter[constant[Binary mode], name[ReplyCodes].command_okay, name[reply]]]
variable[address] assign[=] <ast.YieldFrom object at 0x7da1b2346c80>
variable[connection] assign[=] <ast.YieldFrom object at 0x7da20e961c00>
call[name[connection].reset, parameter[]]
<ast.YieldFrom object at 0x7da18f7220e0>
variable[data_stream] assign[=] call[name[data_stream_factory], parameter[name[connection]]]
return[name[data_stream]]
|
keyword[def] identifier[setup_data_stream] (
identifier[self] ,
identifier[connection_factory] : identifier[Callable] [[ identifier[tuple] ], identifier[Connection] ],
identifier[data_stream_factory] : identifier[Callable] [[ identifier[Connection] ], identifier[DataStream] ]= identifier[DataStream] )-> identifier[DataStream] :
literal[string]
keyword[yield] keyword[from] identifier[self] . identifier[_control_stream] . identifier[write_command] ( identifier[Command] ( literal[string] , literal[string] ))
identifier[reply] = keyword[yield] keyword[from] identifier[self] . identifier[_control_stream] . identifier[read_reply] ()
identifier[self] . identifier[raise_if_not_match] ( literal[string] , identifier[ReplyCodes] . identifier[command_okay] , identifier[reply] )
identifier[address] = keyword[yield] keyword[from] identifier[self] . identifier[passive_mode] ()
identifier[connection] = keyword[yield] keyword[from] identifier[connection_factory] ( identifier[address] )
identifier[connection] . identifier[reset] ()
keyword[yield] keyword[from] identifier[connection] . identifier[connect] ()
identifier[data_stream] = identifier[data_stream_factory] ( identifier[connection] )
keyword[return] identifier[data_stream]
|
def setup_data_stream(self, connection_factory: Callable[[tuple], Connection], data_stream_factory: Callable[[Connection], DataStream]=DataStream) -> DataStream:
"""Create and setup a data stream.
This function will set up passive and binary mode and handle
connecting to the data connection.
Args:
connection_factory: A coroutine callback that returns a connection
data_stream_factory: A callback that returns a data stream
Coroutine.
Returns:
DataStream
"""
yield from self._control_stream.write_command(Command('TYPE', 'I'))
reply = (yield from self._control_stream.read_reply())
self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)
address = (yield from self.passive_mode())
connection = (yield from connection_factory(address))
# TODO: unit test for following line for connections that have
# the same port over time but within pool cleaning intervals
connection.reset()
yield from connection.connect()
data_stream = data_stream_factory(connection)
return data_stream
|
def open_tablebase_native(directory: PathLike, *, libgtb: Any = None, LibraryLoader: Any = ctypes.cdll) -> NativeTablebase:
    """
    Opens a collection of tables for probing using libgtb.

    In most cases :func:`~chess.gaviota.open_tablebase()` should be used.
    Use this function only if you do not want to downgrade to pure Python
    tablebase probing.

    :raises: :exc:`RuntimeError` or :exc:`OSError` when libgtb can not be used.
    """
    # Resolve the shared library: explicit argument first, then a system
    # lookup, then a conventional soname as a last resort.
    if not libgtb:
        libgtb = ctypes.util.find_library("gtb") or "libgtb.so.1.0.1"
    handle = LibraryLoader.LoadLibrary(libgtb)
    tables = NativeTablebase(handle)
    tables.add_directory(directory)
    return tables
|
def function[open_tablebase_native, parameter[directory]]:
constant[
Opens a collection of tables for probing using libgtb.
In most cases :func:`~chess.gaviota.open_tablebase()` should be used.
Use this function only if you do not want to downgrade to pure Python
tablebase probing.
:raises: :exc:`RuntimeError` or :exc:`OSError` when libgtb can not be used.
]
variable[libgtb] assign[=] <ast.BoolOp object at 0x7da1b17e5930>
variable[tables] assign[=] call[name[NativeTablebase], parameter[call[name[LibraryLoader].LoadLibrary, parameter[name[libgtb]]]]]
call[name[tables].add_directory, parameter[name[directory]]]
return[name[tables]]
|
keyword[def] identifier[open_tablebase_native] ( identifier[directory] : identifier[PathLike] ,*, identifier[libgtb] : identifier[Any] = keyword[None] , identifier[LibraryLoader] : identifier[Any] = identifier[ctypes] . identifier[cdll] )-> identifier[NativeTablebase] :
literal[string]
identifier[libgtb] = identifier[libgtb] keyword[or] identifier[ctypes] . identifier[util] . identifier[find_library] ( literal[string] ) keyword[or] literal[string]
identifier[tables] = identifier[NativeTablebase] ( identifier[LibraryLoader] . identifier[LoadLibrary] ( identifier[libgtb] ))
identifier[tables] . identifier[add_directory] ( identifier[directory] )
keyword[return] identifier[tables]
|
def open_tablebase_native(directory: PathLike, *, libgtb: Any=None, LibraryLoader: Any=ctypes.cdll) -> NativeTablebase:
"""
Opens a collection of tables for probing using libgtb.
In most cases :func:`~chess.gaviota.open_tablebase()` should be used.
Use this function only if you do not want to downgrade to pure Python
tablebase probing.
:raises: :exc:`RuntimeError` or :exc:`OSError` when libgtb can not be used.
"""
libgtb = libgtb or ctypes.util.find_library('gtb') or 'libgtb.so.1.0.1'
tables = NativeTablebase(LibraryLoader.LoadLibrary(libgtb))
tables.add_directory(directory)
return tables
|
def _get_order_by(order, orderby, order_by_fields):
    """
    Return the order by syntax for a model.
    Checks whether use ascending or descending order, and maps the fieldnames.
    """
    # Map the public keyword to the actual database field names.
    if orderby not in order_by_fields:
        raise ValueError("Invalid value for 'orderby': '{0}', supported values are: {1}".format(orderby, ', '.join(sorted(order_by_fields.keys()))))
    db_fieldnames = order_by_fields[orderby]

    # Descending when explicitly requested, or by default for some fields.
    normalized = (order or 'asc').lower()
    is_desc = (not order and orderby in ORDER_BY_DESC) or normalized in ('desc', 'descending')

    if not is_desc:
        return db_fieldnames
    return map(lambda name: '-' + name, db_fieldnames)
|
def function[_get_order_by, parameter[order, orderby, order_by_fields]]:
constant[
Return the order by syntax for a model.
Checks whether use ascending or descending order, and maps the fieldnames.
]
<ast.Try object at 0x7da18c4cdae0>
variable[is_desc] assign[=] <ast.BoolOp object at 0x7da18c4cc1f0>
if name[is_desc] begin[:]
return[call[name[map], parameter[<ast.Lambda object at 0x7da204345f00>, name[db_fieldnames]]]]
|
keyword[def] identifier[_get_order_by] ( identifier[order] , identifier[orderby] , identifier[order_by_fields] ):
literal[string]
keyword[try] :
identifier[db_fieldnames] = identifier[order_by_fields] [ identifier[orderby] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[orderby] , literal[string] . identifier[join] ( identifier[sorted] ( identifier[order_by_fields] . identifier[keys] ()))))
identifier[is_desc] =( keyword[not] identifier[order] keyword[and] identifier[orderby] keyword[in] identifier[ORDER_BY_DESC] ) keyword[or] ( identifier[order] keyword[or] literal[string] ). identifier[lower] () keyword[in] ( literal[string] , literal[string] )
keyword[if] identifier[is_desc] :
keyword[return] identifier[map] ( keyword[lambda] identifier[name] : literal[string] + identifier[name] , identifier[db_fieldnames] )
keyword[else] :
keyword[return] identifier[db_fieldnames]
|
def _get_order_by(order, orderby, order_by_fields):
"""
Return the order by syntax for a model.
Checks whether use ascending or descending order, and maps the fieldnames.
"""
try:
# Find the actual database fieldnames for the keyword.
db_fieldnames = order_by_fields[orderby] # depends on [control=['try'], data=[]]
except KeyError:
raise ValueError("Invalid value for 'orderby': '{0}', supported values are: {1}".format(orderby, ', '.join(sorted(order_by_fields.keys())))) # depends on [control=['except'], data=[]]
# Default to descending for some fields, otherwise be ascending
is_desc = not order and orderby in ORDER_BY_DESC or (order or 'asc').lower() in ('desc', 'descending')
if is_desc:
return map(lambda name: '-' + name, db_fieldnames) # depends on [control=['if'], data=[]]
else:
return db_fieldnames
|
def parse_roadmap_gwas(fn, pvalue_cutoff=1e-5):
    """
    Read Roadmap GWAS file and filter for unique, significant (p < 1e-5
    by default) SNPs.

    Parameters
    ----------
    fn : str
        Path to (subset of) GRASP database.
    pvalue_cutoff : float, optional
        Keep only SNPs whose pvalue is strictly below this threshold
        (default 1e-5, matching the original behavior).

    Returns
    -------
    df : pandas.DataFrame
        Pandas dataframe with de-duplicated, significant SNPs. The index is of
        the form chrom:pos where pos is the one-based position of the SNP. The
        columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
        not actually an RSID. chrom, start, end make a zero-based bed file with
        the SNP coordinates.
    """
    df = pd.read_table(fn, low_memory=False,
                       names=['chrom', 'start', 'end', 'rsid', 'pvalue'])
    df = df[df.pvalue < pvalue_cutoff]
    # Sort so the most significant entry for each position comes first,
    # then keep only that entry per (chrom, start).
    # NOTE: DataFrame.sort(columns=...) was removed in pandas 0.20;
    # sort_values is the supported replacement.
    df = df.sort_values(by=['chrom', 'start', 'pvalue'])
    df = df.drop_duplicates(subset=['chrom', 'start'])
    # Exclude chrY SNPs.
    df = df[df['chrom'] != 'chrY']
    # Index as chrom:pos using the one-based (end) SNP position.
    df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
    return df
|
def function[parse_roadmap_gwas, parameter[fn]]:
constant[
Read Roadmap GWAS file and filter for unique, significant (p < 1e-5)
SNPs.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
]
variable[df] assign[=] call[name[pd].read_table, parameter[name[fn]]]
variable[df] assign[=] call[name[df]][compare[name[df].pvalue less[<] constant[1e-05]]]
variable[df] assign[=] call[name[df].sort, parameter[]]
variable[df] assign[=] call[name[df].drop_duplicates, parameter[]]
variable[df] assign[=] call[name[df]][compare[call[name[df]][constant[chrom]] not_equal[!=] constant[chrY]]]
name[df].index assign[=] binary_operation[binary_operation[call[call[name[df]][constant[chrom]].astype, parameter[name[str]]] + constant[:]] + call[call[name[df]][constant[end]].astype, parameter[name[str]]]]
return[name[df]]
|
keyword[def] identifier[parse_roadmap_gwas] ( identifier[fn] ):
literal[string]
identifier[df] = identifier[pd] . identifier[read_table] ( identifier[fn] , identifier[low_memory] = keyword[False] ,
identifier[names] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ])
identifier[df] = identifier[df] [ identifier[df] . identifier[pvalue] < literal[int] ]
identifier[df] = identifier[df] . identifier[sort] ( identifier[columns] =[ literal[string] , literal[string] , literal[string] ])
identifier[df] = identifier[df] . identifier[drop_duplicates] ( identifier[subset] =[ literal[string] , literal[string] ])
identifier[df] = identifier[df] [ identifier[df] [ literal[string] ]!= literal[string] ]
identifier[df] . identifier[index] = identifier[df] [ literal[string] ]. identifier[astype] ( identifier[str] )+ literal[string] + identifier[df] [ literal[string] ]. identifier[astype] ( identifier[str] )
keyword[return] identifier[df]
|
def parse_roadmap_gwas(fn):
"""
Read Roadmap GWAS file and filter for unique, significant (p < 1e-5)
SNPs.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
"""
df = pd.read_table(fn, low_memory=False, names=['chrom', 'start', 'end', 'rsid', 'pvalue'])
df = df[df.pvalue < 1e-05]
df = df.sort(columns=['chrom', 'start', 'pvalue'])
df = df.drop_duplicates(subset=['chrom', 'start'])
df = df[df['chrom'] != 'chrY']
df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
return df
|
def get_tokens_list(self, registry_address: PaymentNetworkID):
    """Returns a list of tokens the node knows about"""
    # Snapshot the current chain state, then query the token identifiers
    # registered under the given payment network.
    chain_state = views.state_from_raiden(self.raiden)
    return views.get_token_identifiers(
        chain_state=chain_state,
        payment_network_id=registry_address,
    )
|
def function[get_tokens_list, parameter[self, registry_address]]:
constant[Returns a list of tokens the node knows about]
variable[tokens_list] assign[=] call[name[views].get_token_identifiers, parameter[]]
return[name[tokens_list]]
|
keyword[def] identifier[get_tokens_list] ( identifier[self] , identifier[registry_address] : identifier[PaymentNetworkID] ):
literal[string]
identifier[tokens_list] = identifier[views] . identifier[get_token_identifiers] (
identifier[chain_state] = identifier[views] . identifier[state_from_raiden] ( identifier[self] . identifier[raiden] ),
identifier[payment_network_id] = identifier[registry_address] ,
)
keyword[return] identifier[tokens_list]
|
def get_tokens_list(self, registry_address: PaymentNetworkID):
"""Returns a list of tokens the node knows about"""
tokens_list = views.get_token_identifiers(chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address)
return tokens_list
|
def get_dir(self, scope, class_):
    """
    Return the callable function from appdirs, but with the
    result wrapped in self.path_class
    """
    # e.g. scope='user', class_='config' -> wrapper attribute 'user_config_dir'
    attr_name = '{scope}_{class_}_dir'.format(scope=scope, class_=class_)
    raw_value = getattr(self.wrapper, attr_name)
    multi_cls = Multi.for_class(self.path_class)
    return multi_cls.detect(raw_value)
|
def function[get_dir, parameter[self, scope, class_]]:
constant[
Return the callable function from appdirs, but with the
result wrapped in self.path_class
]
variable[prop_name] assign[=] call[constant[{scope}_{class_}_dir].format, parameter[]]
variable[value] assign[=] call[name[getattr], parameter[name[self].wrapper, name[prop_name]]]
variable[MultiPath] assign[=] call[name[Multi].for_class, parameter[name[self].path_class]]
return[call[name[MultiPath].detect, parameter[name[value]]]]
|
keyword[def] identifier[get_dir] ( identifier[self] , identifier[scope] , identifier[class_] ):
literal[string]
identifier[prop_name] = literal[string] . identifier[format] (** identifier[locals] ())
identifier[value] = identifier[getattr] ( identifier[self] . identifier[wrapper] , identifier[prop_name] )
identifier[MultiPath] = identifier[Multi] . identifier[for_class] ( identifier[self] . identifier[path_class] )
keyword[return] identifier[MultiPath] . identifier[detect] ( identifier[value] )
|
def get_dir(self, scope, class_):
"""
Return the callable function from appdirs, but with the
result wrapped in self.path_class
"""
prop_name = '{scope}_{class_}_dir'.format(**locals())
value = getattr(self.wrapper, prop_name)
MultiPath = Multi.for_class(self.path_class)
return MultiPath.detect(value)
|
def runDia(diagram):
    """Generate the diagrams using Dia.

    Parameters
    ----------
    diagram : str
        Basename of the ``.dia`` input file (without extension); the
        rendered PNG is written next to it as ``<diagram>.png``.

    Returns
    -------
    bool
        True if ``dia`` exited successfully, False otherwise.
    """
    ifname = '{}.dia'.format(diagram)
    ofname = '{}.png'.format(diagram)
    # Pass arguments as a list with shell=False so diagram names containing
    # spaces or shell metacharacters are handled safely (no shell quoting).
    cmd = ['dia', '-t', 'png-libart', '-e', ofname, ifname]
    print('  {}'.format(' '.join(cmd)))
    # Report the actual exit status instead of unconditionally returning True.
    return subprocess.call(cmd) == 0
|
def function[runDia, parameter[diagram]]:
constant[Generate the diagrams using Dia.]
variable[ifname] assign[=] call[constant[{}.dia].format, parameter[name[diagram]]]
variable[ofname] assign[=] call[constant[{}.png].format, parameter[name[diagram]]]
variable[cmd] assign[=] call[constant[dia -t png-libart -e {} {}].format, parameter[name[ofname], name[ifname]]]
call[name[print], parameter[call[constant[ {}].format, parameter[name[cmd]]]]]
call[name[subprocess].call, parameter[name[cmd]]]
return[constant[True]]
|
keyword[def] identifier[runDia] ( identifier[diagram] ):
literal[string]
identifier[ifname] = literal[string] . identifier[format] ( identifier[diagram] )
identifier[ofname] = literal[string] . identifier[format] ( identifier[diagram] )
identifier[cmd] = literal[string] . identifier[format] ( identifier[ofname] , identifier[ifname] )
identifier[print] ( literal[string] . identifier[format] ( identifier[cmd] ))
identifier[subprocess] . identifier[call] ( identifier[cmd] , identifier[shell] = keyword[True] )
keyword[return] keyword[True]
|
def runDia(diagram):
"""Generate the diagrams using Dia."""
ifname = '{}.dia'.format(diagram)
ofname = '{}.png'.format(diagram)
cmd = 'dia -t png-libart -e {} {}'.format(ofname, ifname)
print(' {}'.format(cmd))
subprocess.call(cmd, shell=True)
return True
|
def asdict(self, name, _type=None, _set=False):
    """
    Turn this 'a:2,b:blabla,c:True,a:'d' to
    {a:[2, 'd'], b:'blabla', c:True}
    """
    # Identity conversion when no coercion callable was supplied.
    convert = _type if _type is not None else (lambda t: t)
    raw = self.pop(name, None)
    if not raw:
        return {}
    result = {}
    for token in split_strip(raw):
        key, _, raw_val = token.partition(':')
        value = convert(raw_val)
        if key not in result:
            result[key] = value
        elif isinstance(result[key], list):
            # Third or later occurrence of the key: extend the list.
            result[key].append(value)
        else:
            # Second occurrence: promote the scalar to a list.
            result[key] = [result[key], value]
    if _set:
        self[name] = result
    return result
|
def function[asdict, parameter[self, name, _type, _set]]:
constant[
Turn this 'a:2,b:blabla,c:True,a:'d' to
{a:[2, 'd'], b:'blabla', c:True}
]
if compare[name[_type] is constant[None]] begin[:]
variable[_type] assign[=] <ast.Lambda object at 0x7da18f58f430>
variable[dict_str] assign[=] call[name[self].pop, parameter[name[name], constant[None]]]
if <ast.UnaryOp object at 0x7da18f58f340> begin[:]
return[dictionary[[], []]]
variable[_dict] assign[=] dictionary[[], []]
for taget[name[item]] in starred[call[name[split_strip], parameter[name[dict_str]]]] begin[:]
<ast.Tuple object at 0x7da18f58d660> assign[=] call[name[item].partition, parameter[constant[:]]]
variable[val] assign[=] call[name[_type], parameter[name[val]]]
if compare[name[key] in name[_dict]] begin[:]
if call[name[isinstance], parameter[call[name[_dict]][name[key]], name[list]]] begin[:]
call[call[name[_dict]][name[key]].append, parameter[name[val]]]
if name[_set] begin[:]
call[name[self]][name[name]] assign[=] name[_dict]
return[name[_dict]]
|
keyword[def] identifier[asdict] ( identifier[self] , identifier[name] , identifier[_type] = keyword[None] , identifier[_set] = keyword[False] ):
literal[string]
keyword[if] identifier[_type] keyword[is] keyword[None] :
identifier[_type] = keyword[lambda] identifier[t] : identifier[t]
identifier[dict_str] = identifier[self] . identifier[pop] ( identifier[name] , keyword[None] )
keyword[if] keyword[not] identifier[dict_str] :
keyword[return] {}
identifier[_dict] ={}
keyword[for] identifier[item] keyword[in] identifier[split_strip] ( identifier[dict_str] ):
identifier[key] , identifier[_] , identifier[val] = identifier[item] . identifier[partition] ( literal[string] )
identifier[val] = identifier[_type] ( identifier[val] )
keyword[if] identifier[key] keyword[in] identifier[_dict] :
keyword[if] identifier[isinstance] ( identifier[_dict] [ identifier[key] ], identifier[list] ):
identifier[_dict] [ identifier[key] ]. identifier[append] ( identifier[val] )
keyword[else] :
identifier[_dict] [ identifier[key] ]=[ identifier[_dict] [ identifier[key] ], identifier[val] ]
keyword[else] :
identifier[_dict] [ identifier[key] ]= identifier[val]
keyword[if] identifier[_set] :
identifier[self] [ identifier[name] ]= identifier[_dict]
keyword[return] identifier[_dict]
|
def asdict(self, name, _type=None, _set=False):
"""
Turn this 'a:2,b:blabla,c:True,a:'d' to
{a:[2, 'd'], b:'blabla', c:True}
"""
if _type is None:
_type = lambda t: t # depends on [control=['if'], data=['_type']]
dict_str = self.pop(name, None)
if not dict_str:
return {} # depends on [control=['if'], data=[]]
_dict = {}
for item in split_strip(dict_str):
(key, _, val) = item.partition(':')
val = _type(val)
if key in _dict:
if isinstance(_dict[key], list):
_dict[key].append(val) # depends on [control=['if'], data=[]]
else:
_dict[key] = [_dict[key], val] # depends on [control=['if'], data=['key', '_dict']]
else:
_dict[key] = val # depends on [control=['for'], data=['item']]
if _set:
self[name] = _dict # depends on [control=['if'], data=[]]
return _dict
|
def nr_genes(self):
    """Return the number of genes, falling back to gene symbols.

    Counts the entries under ``'genes'`` when that value is non-empty;
    otherwise counts the entries under ``'gene_symbols'``.
    """
    genes = self['genes']
    if genes:
        return len(genes)
    return len(self['gene_symbols'])
|
def function[nr_genes, parameter[self]]:
constant[Return the number of genes]
if call[name[self]][constant[genes]] begin[:]
variable[nr_genes] assign[=] call[name[len], parameter[call[name[self]][constant[genes]]]]
return[name[nr_genes]]
|
keyword[def] identifier[nr_genes] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] [ literal[string] ]:
identifier[nr_genes] = identifier[len] ( identifier[self] [ literal[string] ])
keyword[else] :
identifier[nr_genes] = identifier[len] ( identifier[self] [ literal[string] ])
keyword[return] identifier[nr_genes]
|
def nr_genes(self):
"""Return the number of genes"""
if self['genes']:
nr_genes = len(self['genes']) # depends on [control=['if'], data=[]]
else:
nr_genes = len(self['gene_symbols'])
return nr_genes
|
def assert_valid_name(name: str) -> str:
    """Uphold the spec rules about naming.

    Returns the name unchanged when it is valid; otherwise raises the
    error produced by :func:`is_valid_name_error`.
    """
    maybe_error = is_valid_name_error(name)
    if maybe_error:
        raise maybe_error
    return name
|
def function[assert_valid_name, parameter[name]]:
constant[Uphold the spec rules about naming.]
variable[error] assign[=] call[name[is_valid_name_error], parameter[name[name]]]
if name[error] begin[:]
<ast.Raise object at 0x7da1b1de0250>
return[name[name]]
|
keyword[def] identifier[assert_valid_name] ( identifier[name] : identifier[str] )-> identifier[str] :
literal[string]
identifier[error] = identifier[is_valid_name_error] ( identifier[name] )
keyword[if] identifier[error] :
keyword[raise] identifier[error]
keyword[return] identifier[name]
|
def assert_valid_name(name: str) -> str:
"""Uphold the spec rules about naming."""
error = is_valid_name_error(name)
if error:
raise error # depends on [control=['if'], data=[]]
return name
|
def ScanForStorageMediaImage(self, source_path_spec):
  """Scans the path specification for a supported storage media image format.

  Args:
    source_path_spec (PathSpec): source path specification.

  Returns:
    PathSpec: storage media image path specification or None if no supported
        storage media image type was found.

  Raises:
    BackEndError: if the source cannot be scanned or more than one storage
        media image type is found.
  """
  try:
    indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(
        source_path_spec, resolver_context=self._resolver_context)
  except RuntimeError as exception:
    raise errors.BackEndError((
        'Unable to process source path specification with error: '
        '{0!s}').format(exception))

  # The three outcomes below are mutually exclusive: ambiguous, a single
  # signature match, or no signature match at all.
  if len(indicators) > 1:
    raise errors.BackEndError(
        'Unsupported source found more than one storage media image types.')

  if indicators:
    return path_spec_factory.Factory.NewPathSpec(
        indicators[0], parent=source_path_spec)

  # No signature-based match: the RAW storage media image type cannot be
  # detected based on a signature, so try common file naming schemas instead.
  file_system = resolver.Resolver.OpenFileSystem(
      source_path_spec, resolver_context=self._resolver_context)
  raw_path_spec = path_spec_factory.Factory.NewPathSpec(
      definitions.TYPE_INDICATOR_RAW, parent=source_path_spec)
  try:
    # The RAW glob function raises PathSpecError if the path specification
    # is unsuitable for globbing.
    glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec)
  except errors.PathSpecError:
    glob_results = None
  file_system.Close()

  return raw_path_spec if glob_results else None
|
def function[ScanForStorageMediaImage, parameter[self, source_path_spec]]:
constant[Scans the path specification for a supported storage media image format.
Args:
source_path_spec (PathSpec): source path specification.
Returns:
PathSpec: storage media image path specification or None if no supported
storage media image type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one storage
media image type is found.
]
<ast.Try object at 0x7da1b067b220>
if <ast.UnaryOp object at 0x7da1b0720f70> begin[:]
variable[file_system] assign[=] call[name[resolver].Resolver.OpenFileSystem, parameter[name[source_path_spec]]]
variable[raw_path_spec] assign[=] call[name[path_spec_factory].Factory.NewPathSpec, parameter[name[definitions].TYPE_INDICATOR_RAW]]
<ast.Try object at 0x7da1b0722200>
call[name[file_system].Close, parameter[]]
if <ast.UnaryOp object at 0x7da1b0847490> begin[:]
return[constant[None]]
return[name[raw_path_spec]]
if compare[call[name[len], parameter[name[type_indicators]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b07adc00>
return[call[name[path_spec_factory].Factory.NewPathSpec, parameter[call[name[type_indicators]][constant[0]]]]]
|
keyword[def] identifier[ScanForStorageMediaImage] ( identifier[self] , identifier[source_path_spec] ):
literal[string]
keyword[try] :
identifier[type_indicators] = identifier[analyzer] . identifier[Analyzer] . identifier[GetStorageMediaImageTypeIndicators] (
identifier[source_path_spec] , identifier[resolver_context] = identifier[self] . identifier[_resolver_context] )
keyword[except] identifier[RuntimeError] keyword[as] identifier[exception] :
keyword[raise] identifier[errors] . identifier[BackEndError] ((
literal[string]
literal[string] ). identifier[format] ( identifier[exception] ))
keyword[if] keyword[not] identifier[type_indicators] :
identifier[file_system] = identifier[resolver] . identifier[Resolver] . identifier[OpenFileSystem] (
identifier[source_path_spec] , identifier[resolver_context] = identifier[self] . identifier[_resolver_context] )
identifier[raw_path_spec] = identifier[path_spec_factory] . identifier[Factory] . identifier[NewPathSpec] (
identifier[definitions] . identifier[TYPE_INDICATOR_RAW] , identifier[parent] = identifier[source_path_spec] )
keyword[try] :
identifier[glob_results] = identifier[raw] . identifier[RawGlobPathSpec] ( identifier[file_system] , identifier[raw_path_spec] )
keyword[except] identifier[errors] . identifier[PathSpecError] :
identifier[glob_results] = keyword[None]
identifier[file_system] . identifier[Close] ()
keyword[if] keyword[not] identifier[glob_results] :
keyword[return] keyword[None]
keyword[return] identifier[raw_path_spec]
keyword[if] identifier[len] ( identifier[type_indicators] )> literal[int] :
keyword[raise] identifier[errors] . identifier[BackEndError] (
literal[string] )
keyword[return] identifier[path_spec_factory] . identifier[Factory] . identifier[NewPathSpec] (
identifier[type_indicators] [ literal[int] ], identifier[parent] = identifier[source_path_spec] )
|
def ScanForStorageMediaImage(self, source_path_spec):
"""Scans the path specification for a supported storage media image format.
Args:
source_path_spec (PathSpec): source path specification.
Returns:
PathSpec: storage media image path specification or None if no supported
storage media image type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one storage
media image type is found.
"""
try:
type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(source_path_spec, resolver_context=self._resolver_context) # depends on [control=['try'], data=[]]
except RuntimeError as exception:
raise errors.BackEndError('Unable to process source path specification with error: {0!s}'.format(exception)) # depends on [control=['except'], data=['exception']]
if not type_indicators:
# The RAW storage media image type cannot be detected based on
# a signature so we try to detect it based on common file naming schemas.
file_system = resolver.Resolver.OpenFileSystem(source_path_spec, resolver_context=self._resolver_context)
raw_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_RAW, parent=source_path_spec)
try:
# The RAW glob function will raise a PathSpecError if the path
# specification is unsuitable for globbing.
glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec) # depends on [control=['try'], data=[]]
except errors.PathSpecError:
glob_results = None # depends on [control=['except'], data=[]]
file_system.Close()
if not glob_results:
return None # depends on [control=['if'], data=[]]
return raw_path_spec # depends on [control=['if'], data=[]]
if len(type_indicators) > 1:
raise errors.BackEndError('Unsupported source found more than one storage media image types.') # depends on [control=['if'], data=[]]
return path_spec_factory.Factory.NewPathSpec(type_indicators[0], parent=source_path_spec)
|
def sdb_get_or_set_hash(uri,
                        opts,
                        length=8,
                        chars='abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)',
                        utils=None):
    '''
    Check if value exists in sdb. If it does, return it, otherwise generate a
    random string and store it. This can be used for storing secrets in a
    centralized place.

    :param uri: sdb:// URI identifying where the value is stored.
    :param opts: salt options dictionary passed through to the sdb backend.
    :param length: number of random characters to generate when unset.
    :param chars: alphabet the random string is drawn from.
    :param utils: preloaded salt utils; loaded from ``opts`` when None.
    :returns: the stored (or freshly generated) value, or False for a
        non-sdb URI.
    '''
    # Reject anything that is not an sdb:// URI outright.
    if not isinstance(uri, string_types) or not uri.startswith('sdb://'):
        return False
    if utils is None:
        utils = salt.loader.utils(opts)
    ret = sdb_get(uri, opts, utils=utils)
    if ret is None:
        # No stored value yet: generate a cryptographically secure random
        # string and persist it so later calls return the same secret.
        ret = ''.join(random.SystemRandom().choice(chars)
                      for _ in range(length))
        sdb_set(uri, ret, opts, utils)
    # BUG FIX: this previously ended with ``return ret or val``, which raised
    # a NameError whenever the stored value was falsy but not None (e.g. ''),
    # because ``val`` was only bound inside the ``ret is None`` branch.
    return ret
|
def function[sdb_get_or_set_hash, parameter[uri, opts, length, chars, utils]]:
constant[
Check if value exists in sdb. If it does, return, otherwise generate a
random string and store it. This can be used for storing secrets in a
centralized place.
]
if <ast.BoolOp object at 0x7da20e9b3700> begin[:]
return[constant[False]]
if compare[name[utils] is constant[None]] begin[:]
variable[utils] assign[=] call[name[salt].loader.utils, parameter[name[opts]]]
variable[ret] assign[=] call[name[sdb_get], parameter[name[uri], name[opts]]]
if compare[name[ret] is constant[None]] begin[:]
variable[val] assign[=] call[constant[].join, parameter[<ast.ListComp object at 0x7da18eb55570>]]
call[name[sdb_set], parameter[name[uri], name[val], name[opts], name[utils]]]
return[<ast.BoolOp object at 0x7da18eb57550>]
|
keyword[def] identifier[sdb_get_or_set_hash] ( identifier[uri] ,
identifier[opts] ,
identifier[length] = literal[int] ,
identifier[chars] = literal[string] ,
identifier[utils] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[uri] , identifier[string_types] ) keyword[or] keyword[not] identifier[uri] . identifier[startswith] ( literal[string] ):
keyword[return] keyword[False]
keyword[if] identifier[utils] keyword[is] keyword[None] :
identifier[utils] = identifier[salt] . identifier[loader] . identifier[utils] ( identifier[opts] )
identifier[ret] = identifier[sdb_get] ( identifier[uri] , identifier[opts] , identifier[utils] = identifier[utils] )
keyword[if] identifier[ret] keyword[is] keyword[None] :
identifier[val] = literal[string] . identifier[join] ([ identifier[random] . identifier[SystemRandom] (). identifier[choice] ( identifier[chars] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[length] )])
identifier[sdb_set] ( identifier[uri] , identifier[val] , identifier[opts] , identifier[utils] )
keyword[return] identifier[ret] keyword[or] identifier[val]
|
def sdb_get_or_set_hash(uri, opts, length=8, chars='abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)', utils=None):
"""
Check if value exists in sdb. If it does, return, otherwise generate a
random string and store it. This can be used for storing secrets in a
centralized place.
"""
if not isinstance(uri, string_types) or not uri.startswith('sdb://'):
return False # depends on [control=['if'], data=[]]
if utils is None:
utils = salt.loader.utils(opts) # depends on [control=['if'], data=['utils']]
ret = sdb_get(uri, opts, utils=utils)
if ret is None:
val = ''.join([random.SystemRandom().choice(chars) for _ in range(length)])
sdb_set(uri, val, opts, utils) # depends on [control=['if'], data=[]]
return ret or val
|
def groupby_transform(iterable, keyfunc=None, valuefunc=None):
    """An extension of :func:`itertools.groupby` that transforms the values
    of *iterable* after grouping them.

    *keyfunc* computes a grouping key for each item and *valuefunc*
    transforms the items after grouping; both default to identity
    functions when omitted.

        >>> grouper = groupby_transform('AaaABbBCcA',
        ...                             lambda x: x.upper(),
        ...                             lambda x: x.lower())
        >>> [(k, ''.join(g)) for k, g in grouper]
        [('A', 'aaaa'), ('B', 'bbb'), ('C', 'cc'), ('A', 'a')]

    This is useful when grouping elements of an iterable using a separate
    iterable as the key: :func:`zip` the iterables and pass a *keyfunc*
    that extracts the first element and a *valuefunc* that extracts the
    second element::

        >>> from operator import itemgetter
        >>> iterable = zip([0, 0, 1, 1, 1, 2, 2, 2, 3], 'abcdefghi')
        >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
        >>> [(k, ''.join(g)) for k, g in grouper]
        [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]

    Note that only adjacent items are grouped together; sort the iterable
    by the key function first if duplicate groups are unwanted.
    """
    if valuefunc is None:
        def valuefunc(item):
            return item
    # Build the groupby eagerly so invalid iterables fail at call time, and
    # hand back a generator expression over it (same laziness as before).
    grouped = groupby(iterable, keyfunc)
    return ((key, map(valuefunc, group)) for key, group in grouped)
|
def function[groupby_transform, parameter[iterable, keyfunc, valuefunc]]:
constant[An extension of :func:`itertools.groupby` that transforms the values of
*iterable* after grouping them.
*keyfunc* is a function used to compute a grouping key for each item.
*valuefunc* is a function for transforming the items after grouping.
>>> iterable = 'AaaABbBCcA'
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: x.lower()
>>> grouper = groupby_transform(iterable, keyfunc, valuefunc)
>>> [(k, ''.join(g)) for k, g in grouper]
[('A', 'aaaa'), ('B', 'bbb'), ('C', 'cc'), ('A', 'a')]
*keyfunc* and *valuefunc* default to identity functions if they are not
specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function.
]
variable[valuefunc] assign[=] <ast.IfExp object at 0x7da1b1da1cc0>
return[<ast.GeneratorExp object at 0x7da1b1da31f0>]
|
keyword[def] identifier[groupby_transform] ( identifier[iterable] , identifier[keyfunc] = keyword[None] , identifier[valuefunc] = keyword[None] ):
literal[string]
identifier[valuefunc] =( keyword[lambda] identifier[x] : identifier[x] ) keyword[if] identifier[valuefunc] keyword[is] keyword[None] keyword[else] identifier[valuefunc]
keyword[return] (( identifier[k] , identifier[map] ( identifier[valuefunc] , identifier[g] )) keyword[for] identifier[k] , identifier[g] keyword[in] identifier[groupby] ( identifier[iterable] , identifier[keyfunc] ))
|
def groupby_transform(iterable, keyfunc=None, valuefunc=None):
"""An extension of :func:`itertools.groupby` that transforms the values of
*iterable* after grouping them.
*keyfunc* is a function used to compute a grouping key for each item.
*valuefunc* is a function for transforming the items after grouping.
>>> iterable = 'AaaABbBCcA'
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: x.lower()
>>> grouper = groupby_transform(iterable, keyfunc, valuefunc)
>>> [(k, ''.join(g)) for k, g in grouper]
[('A', 'aaaa'), ('B', 'bbb'), ('C', 'cc'), ('A', 'a')]
*keyfunc* and *valuefunc* default to identity functions if they are not
specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function.
"""
valuefunc = (lambda x: x) if valuefunc is None else valuefunc
return ((k, map(valuefunc, g)) for (k, g) in groupby(iterable, keyfunc))
|
def new_tmp(self):
    """Allocate and return a path for a fresh temp file.

    Increments the running counter and joins ``tmp_<idx>`` onto the
    configured temp directory.
    """
    self.tmp_idx += 1
    return p.join(self.tmp_dir, 'tmp_{0}'.format(self.tmp_idx))
|
def function[new_tmp, parameter[self]]:
constant[ Create a new temp file allocation ]
<ast.AugAssign object at 0x7da18f58f5b0>
return[call[name[p].join, parameter[name[self].tmp_dir, binary_operation[constant[tmp_] + call[name[str], parameter[name[self].tmp_idx]]]]]]
|
keyword[def] identifier[new_tmp] ( identifier[self] ):
literal[string]
identifier[self] . identifier[tmp_idx] += literal[int]
keyword[return] identifier[p] . identifier[join] ( identifier[self] . identifier[tmp_dir] , literal[string] + identifier[str] ( identifier[self] . identifier[tmp_idx] ))
|
def new_tmp(self):
""" Create a new temp file allocation """
self.tmp_idx += 1
return p.join(self.tmp_dir, 'tmp_' + str(self.tmp_idx))
|
def open(self, value, nt=None, wrap=None, unwrap=None):
    """Mark the PV as opened and provide its initial value.

    This initial value is later updated with post().

    :param value: A Value, or appropriate object (see nt= and wrap= of the
        constructor).

    Any clients which began connecting while this PV was in the close'd
    state will complete connecting.  Only those fields of the value which
    are marked as changed will be stored.
    """
    # Prefer an explicit wrap/unwrap, then the NT helper's, and otherwise
    # keep the ones configured at construction time (truthiness-based, to
    # match the original fallback chain).
    chosen_wrap = wrap or (nt and nt.wrap)
    if chosen_wrap:
        self._wrap = chosen_wrap
    chosen_unwrap = unwrap or (nt and nt.unwrap)
    if chosen_unwrap:
        self._unwrap = chosen_unwrap
    _SharedPV.open(self, self._wrap(value))
|
def function[open, parameter[self, value, nt, wrap, unwrap]]:
constant[Mark the PV as opened an provide its initial value.
This initial value is later updated with post().
:param value: A Value, or appropriate object (see nt= and wrap= of the constructor).
Any clients which have begun connecting which began connecting while
this PV was in the close'd state will complete connecting.
Only those fields of the value which are marked as changed will be stored.
]
name[self]._wrap assign[=] <ast.BoolOp object at 0x7da20c76cbb0>
name[self]._unwrap assign[=] <ast.BoolOp object at 0x7da20c76dd50>
call[name[_SharedPV].open, parameter[name[self], call[name[self]._wrap, parameter[name[value]]]]]
|
keyword[def] identifier[open] ( identifier[self] , identifier[value] , identifier[nt] = keyword[None] , identifier[wrap] = keyword[None] , identifier[unwrap] = keyword[None] ):
literal[string]
identifier[self] . identifier[_wrap] = identifier[wrap] keyword[or] ( identifier[nt] keyword[and] identifier[nt] . identifier[wrap] ) keyword[or] identifier[self] . identifier[_wrap]
identifier[self] . identifier[_unwrap] = identifier[unwrap] keyword[or] ( identifier[nt] keyword[and] identifier[nt] . identifier[unwrap] ) keyword[or] identifier[self] . identifier[_unwrap]
identifier[_SharedPV] . identifier[open] ( identifier[self] , identifier[self] . identifier[_wrap] ( identifier[value] ))
|
def open(self, value, nt=None, wrap=None, unwrap=None):
"""Mark the PV as opened an provide its initial value.
This initial value is later updated with post().
:param value: A Value, or appropriate object (see nt= and wrap= of the constructor).
Any clients which have begun connecting which began connecting while
this PV was in the close'd state will complete connecting.
Only those fields of the value which are marked as changed will be stored.
"""
self._wrap = wrap or (nt and nt.wrap) or self._wrap
self._unwrap = unwrap or (nt and nt.unwrap) or self._unwrap
_SharedPV.open(self, self._wrap(value))
|
def _set_axis(self, axis, labels, fastpath=False):
    """
    Override generic, we want to set the _typ here.

    Parameters
    ----------
    axis : int
        Axis number to set the labels on.
    labels : Index or array-like
        New labels; coerced to an Index unless ``fastpath`` is True.
    fastpath : bool, default False
        If True, ``labels`` is assumed to already be a valid Index and the
        caller is assumed to have set the underlying data's axis -- except
        when the labels are upgraded to a DatetimeIndex below, in which
        case the data's axis must be re-set here.
    """
    if not fastpath:
        labels = ensure_index(labels)
    is_all_dates = labels.is_all_dates
    if is_all_dates:
        # Try upgrading a generic all-dates index to a DatetimeIndex; the
        # datetime-like index types are left as-is.
        if not isinstance(labels,
                          (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
            try:
                labels = DatetimeIndex(labels)
                # need to set here because we changed the index
                if fastpath:
                    self._data.set_axis(axis, labels)
            except (tslibs.OutOfBoundsDatetime, ValueError):
                # labels may exceeds datetime bounds,
                # or not be a DatetimeIndex
                pass
    self._set_subtyp(is_all_dates)
    # Bypass __setattr__ to avoid triggering any attribute-setting hooks
    # while caching the index on the instance.
    object.__setattr__(self, '_index', labels)
    if not fastpath:
        self._data.set_axis(axis, labels)
|
def function[_set_axis, parameter[self, axis, labels, fastpath]]:
constant[
Override generic, we want to set the _typ here.
]
if <ast.UnaryOp object at 0x7da18ede6b00> begin[:]
variable[labels] assign[=] call[name[ensure_index], parameter[name[labels]]]
variable[is_all_dates] assign[=] name[labels].is_all_dates
if name[is_all_dates] begin[:]
if <ast.UnaryOp object at 0x7da1b1d6e320> begin[:]
<ast.Try object at 0x7da1b1d6e260>
call[name[self]._set_subtyp, parameter[name[is_all_dates]]]
call[name[object].__setattr__, parameter[name[self], constant[_index], name[labels]]]
if <ast.UnaryOp object at 0x7da1b1d6c4f0> begin[:]
call[name[self]._data.set_axis, parameter[name[axis], name[labels]]]
|
keyword[def] identifier[_set_axis] ( identifier[self] , identifier[axis] , identifier[labels] , identifier[fastpath] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[fastpath] :
identifier[labels] = identifier[ensure_index] ( identifier[labels] )
identifier[is_all_dates] = identifier[labels] . identifier[is_all_dates]
keyword[if] identifier[is_all_dates] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[labels] ,
( identifier[DatetimeIndex] , identifier[PeriodIndex] , identifier[TimedeltaIndex] )):
keyword[try] :
identifier[labels] = identifier[DatetimeIndex] ( identifier[labels] )
keyword[if] identifier[fastpath] :
identifier[self] . identifier[_data] . identifier[set_axis] ( identifier[axis] , identifier[labels] )
keyword[except] ( identifier[tslibs] . identifier[OutOfBoundsDatetime] , identifier[ValueError] ):
keyword[pass]
identifier[self] . identifier[_set_subtyp] ( identifier[is_all_dates] )
identifier[object] . identifier[__setattr__] ( identifier[self] , literal[string] , identifier[labels] )
keyword[if] keyword[not] identifier[fastpath] :
identifier[self] . identifier[_data] . identifier[set_axis] ( identifier[axis] , identifier[labels] )
|
def _set_axis(self, axis, labels, fastpath=False):
"""
Override generic, we want to set the _typ here.
"""
if not fastpath:
labels = ensure_index(labels) # depends on [control=['if'], data=[]]
is_all_dates = labels.is_all_dates
if is_all_dates:
if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
try:
labels = DatetimeIndex(labels)
# need to set here because we changed the index
if fastpath:
self._data.set_axis(axis, labels) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (tslibs.OutOfBoundsDatetime, ValueError):
# labels may exceeds datetime bounds,
# or not be a DatetimeIndex
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self._set_subtyp(is_all_dates)
object.__setattr__(self, '_index', labels)
if not fastpath:
self._data.set_axis(axis, labels) # depends on [control=['if'], data=[]]
|
def _matplotlib_circuit_drawer(circuit,
                               scale=0.7,
                               filename=None,
                               style=None,
                               plot_barriers=True,
                               reverse_bits=False,
                               justify=None):
    """Draw a quantum circuit based on matplotlib.

    If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes
    a circuit inline. We recommend
    `%config InlineBackend.figure_format = 'svg'` for the inline
    visualization.

    Args:
        circuit (QuantumCircuit): a quantum circuit
        scale (float): scaling factor
        filename (str): file path to save image to
        style (dict or str): dictionary of style or file name of style file
        plot_barriers (bool): Enable/disable drawing barriers in the output
            circuit. Defaults to True.
        reverse_bits (bool): When set to True reverse the bit order inside
            registers for the output visualization.
        justify (str): `left`, `right` or `none`. Defaults to `left`. Says
            how the circuit should be justified.

    Returns:
        matplotlib.figure: a matplotlib figure object for the circuit diagram
    """
    quantum_regs, classical_regs, instructions = \
        utils._get_layered_instructions(circuit,
                                        reverse_bits=reverse_bits,
                                        justify=justify)
    drawer = _matplotlib.MatplotlibDrawer(quantum_regs, classical_regs,
                                          instructions, scale=scale,
                                          style=style,
                                          plot_barriers=plot_barriers,
                                          reverse_bits=reverse_bits)
    return drawer.draw(filename)
|
def function[_matplotlib_circuit_drawer, parameter[circuit, scale, filename, style, plot_barriers, reverse_bits, justify]]:
constant[Draw a quantum circuit based on matplotlib.
If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes a circuit inline.
We recommend `%config InlineBackend.figure_format = 'svg'` for the inline visualization.
Args:
circuit (QuantumCircuit): a quantum circuit
scale (float): scaling factor
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
the circuit should be justified.
Returns:
matplotlib.figure: a matplotlib figure object for the circuit diagram
]
<ast.Tuple object at 0x7da1b05f82e0> assign[=] call[name[utils]._get_layered_instructions, parameter[name[circuit]]]
variable[qcd] assign[=] call[name[_matplotlib].MatplotlibDrawer, parameter[name[qregs], name[cregs], name[ops]]]
return[call[name[qcd].draw, parameter[name[filename]]]]
|
keyword[def] identifier[_matplotlib_circuit_drawer] ( identifier[circuit] ,
identifier[scale] = literal[int] ,
identifier[filename] = keyword[None] ,
identifier[style] = keyword[None] ,
identifier[plot_barriers] = keyword[True] ,
identifier[reverse_bits] = keyword[False] ,
identifier[justify] = keyword[None] ):
literal[string]
identifier[qregs] , identifier[cregs] , identifier[ops] = identifier[utils] . identifier[_get_layered_instructions] ( identifier[circuit] ,
identifier[reverse_bits] = identifier[reverse_bits] ,
identifier[justify] = identifier[justify] )
identifier[qcd] = identifier[_matplotlib] . identifier[MatplotlibDrawer] ( identifier[qregs] , identifier[cregs] , identifier[ops] , identifier[scale] = identifier[scale] , identifier[style] = identifier[style] ,
identifier[plot_barriers] = identifier[plot_barriers] ,
identifier[reverse_bits] = identifier[reverse_bits] )
keyword[return] identifier[qcd] . identifier[draw] ( identifier[filename] )
|
def _matplotlib_circuit_drawer(circuit, scale=0.7, filename=None, style=None, plot_barriers=True, reverse_bits=False, justify=None):
"""Draw a quantum circuit based on matplotlib.
If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes a circuit inline.
We recommend `%config InlineBackend.figure_format = 'svg'` for the inline visualization.
Args:
circuit (QuantumCircuit): a quantum circuit
scale (float): scaling factor
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
the circuit should be justified.
Returns:
matplotlib.figure: a matplotlib figure object for the circuit diagram
"""
(qregs, cregs, ops) = utils._get_layered_instructions(circuit, reverse_bits=reverse_bits, justify=justify)
qcd = _matplotlib.MatplotlibDrawer(qregs, cregs, ops, scale=scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits)
return qcd.draw(filename)
|
def get_var(self, name):
    """
    Retrieve a variable assigned to this user
    :param name: The name of the variable to retrieve
    :type name: str
    :rtype: str
    :raises VarNotDefinedError: The requested variable has not been defined
    """
    # Guard first so a missing variable raises the domain error rather
    # than a bare KeyError.
    if name in self._vars:
        return self._vars[name]
    raise VarNotDefinedError
|
def function[get_var, parameter[self, name]]:
constant[
Retrieve a variable assigned to this user
:param name: The name of the variable to retrieve
:type name: str
:rtype: str
:raises VarNotDefinedError: The requested variable has not been defined
]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._vars] begin[:]
<ast.Raise object at 0x7da1b1472b60>
return[call[name[self]._vars][name[name]]]
|
keyword[def] identifier[get_var] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_vars] :
keyword[raise] identifier[VarNotDefinedError]
keyword[return] identifier[self] . identifier[_vars] [ identifier[name] ]
|
def get_var(self, name):
"""
Retrieve a variable assigned to this user
:param name: The name of the variable to retrieve
:type name: str
:rtype: str
:raises VarNotDefinedError: The requested variable has not been defined
"""
if name not in self._vars:
raise VarNotDefinedError # depends on [control=['if'], data=[]]
return self._vars[name]
|
def run_mnist_DistilledSGLD(num_training=50000, gpu_id=None):
    """Run DistilledSGLD on mnist dataset.

    Trains a teacher network with SGLD noise and distills it into a student
    network on MNIST.

    :param num_training: number of training examples to load; also selects
        the hyper-parameter regime (the >= 10000 branch is the full-size run).
    :param gpu_id: device id passed to ``dev``; presumably ``None`` selects
        the CPU context -- TODO confirm against ``dev``'s definition.
    """
    X, Y, X_test, Y_test = load_mnist(num_training)
    minibatch_size = 100
    # Hyper-parameters: a larger network and far more iterations for the
    # full-size dataset; a smaller, faster configuration otherwise.
    if num_training >= 10000:
        num_hidden = 800
        total_iter_num = 1000000
        teacher_learning_rate = 1E-6
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.1
    else:
        num_hidden = 400
        total_iter_num = 20000
        teacher_learning_rate = 4E-5
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.001
    # Teacher emits a plain softmax; the student trains against log-softmax
    # targets via the custom LogSoftmax output op.
    teacher_net = get_mnist_sym(num_hidden=num_hidden)
    logsoftmax = LogSoftmax()
    student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
    # Input placeholders: the student's label slot is (batch, 10) because it
    # receives the teacher's per-class distribution rather than hard labels.
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev(gpu_id))}
    teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_initializer = BiasXavier(factor_type="in", magnitude=1)
    # Run the distillation; the executor/params are bound but the third
    # return value is discarded and nothing is returned to the caller.
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
                      student_initializer=student_initializer,
                      teacher_initializer=teacher_initializer,
                      student_optimizing_algorithm="adam",
                      teacher_learning_rate=teacher_learning_rate,
                      student_learning_rate=student_learning_rate,
                      teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
                      perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev(gpu_id))
|
def function[run_mnist_DistilledSGLD, parameter[num_training, gpu_id]]:
constant[Run DistilledSGLD on mnist dataset]
<ast.Tuple object at 0x7da1b200bdc0> assign[=] call[name[load_mnist], parameter[name[num_training]]]
variable[minibatch_size] assign[=] constant[100]
if compare[name[num_training] greater_or_equal[>=] constant[10000]] begin[:]
variable[num_hidden] assign[=] constant[800]
variable[total_iter_num] assign[=] constant[1000000]
variable[teacher_learning_rate] assign[=] constant[1e-06]
variable[student_learning_rate] assign[=] constant[0.0001]
variable[teacher_prior] assign[=] constant[1]
variable[student_prior] assign[=] constant[0.1]
variable[perturb_deviation] assign[=] constant[0.1]
variable[teacher_net] assign[=] call[name[get_mnist_sym], parameter[]]
variable[logsoftmax] assign[=] call[name[LogSoftmax], parameter[]]
variable[student_net] assign[=] call[name[get_mnist_sym], parameter[]]
variable[data_shape] assign[=] binary_operation[tuple[[<ast.Name object at 0x7da1b200aef0>]] + call[name[X].shape][<ast.Slice object at 0x7da1b200ae30>]]
variable[teacher_data_inputs] assign[=] dictionary[[<ast.Constant object at 0x7da1b200ad40>, <ast.Constant object at 0x7da1b200ad10>], [<ast.Call object at 0x7da1b200ace0>, <ast.Call object at 0x7da1b200ab60>]]
variable[student_data_inputs] assign[=] dictionary[[<ast.Constant object at 0x7da1b200a920>, <ast.Constant object at 0x7da1b200a8f0>], [<ast.Call object at 0x7da1b200a8c0>, <ast.Call object at 0x7da1b200a740>]]
variable[teacher_initializer] assign[=] call[name[BiasXavier], parameter[]]
variable[student_initializer] assign[=] call[name[BiasXavier], parameter[]]
<ast.Tuple object at 0x7da1b200a230> assign[=] call[name[DistilledSGLD], parameter[]]
|
keyword[def] identifier[run_mnist_DistilledSGLD] ( identifier[num_training] = literal[int] , identifier[gpu_id] = keyword[None] ):
literal[string]
identifier[X] , identifier[Y] , identifier[X_test] , identifier[Y_test] = identifier[load_mnist] ( identifier[num_training] )
identifier[minibatch_size] = literal[int]
keyword[if] identifier[num_training] >= literal[int] :
identifier[num_hidden] = literal[int]
identifier[total_iter_num] = literal[int]
identifier[teacher_learning_rate] = literal[int]
identifier[student_learning_rate] = literal[int]
identifier[teacher_prior] = literal[int]
identifier[student_prior] = literal[int]
identifier[perturb_deviation] = literal[int]
keyword[else] :
identifier[num_hidden] = literal[int]
identifier[total_iter_num] = literal[int]
identifier[teacher_learning_rate] = literal[int]
identifier[student_learning_rate] = literal[int]
identifier[teacher_prior] = literal[int]
identifier[student_prior] = literal[int]
identifier[perturb_deviation] = literal[int]
identifier[teacher_net] = identifier[get_mnist_sym] ( identifier[num_hidden] = identifier[num_hidden] )
identifier[logsoftmax] = identifier[LogSoftmax] ()
identifier[student_net] = identifier[get_mnist_sym] ( identifier[output_op] = identifier[logsoftmax] , identifier[num_hidden] = identifier[num_hidden] )
identifier[data_shape] =( identifier[minibatch_size] ,)+ identifier[X] . identifier[shape] [ literal[int] ::]
identifier[teacher_data_inputs] ={ literal[string] : identifier[nd] . identifier[zeros] ( identifier[data_shape] , identifier[ctx] = identifier[dev] ( identifier[gpu_id] )),
literal[string] : identifier[nd] . identifier[zeros] (( identifier[minibatch_size] ,), identifier[ctx] = identifier[dev] ( identifier[gpu_id] ))}
identifier[student_data_inputs] ={ literal[string] : identifier[nd] . identifier[zeros] ( identifier[data_shape] , identifier[ctx] = identifier[dev] ( identifier[gpu_id] )),
literal[string] : identifier[nd] . identifier[zeros] (( identifier[minibatch_size] , literal[int] ), identifier[ctx] = identifier[dev] ( identifier[gpu_id] ))}
identifier[teacher_initializer] = identifier[BiasXavier] ( identifier[factor_type] = literal[string] , identifier[magnitude] = literal[int] )
identifier[student_initializer] = identifier[BiasXavier] ( identifier[factor_type] = literal[string] , identifier[magnitude] = literal[int] )
identifier[student_exe] , identifier[student_params] , identifier[_] = identifier[DistilledSGLD] ( identifier[teacher_sym] = identifier[teacher_net] , identifier[student_sym] = identifier[student_net] ,
identifier[teacher_data_inputs] = identifier[teacher_data_inputs] ,
identifier[student_data_inputs] = identifier[student_data_inputs] ,
identifier[X] = identifier[X] , identifier[Y] = identifier[Y] , identifier[X_test] = identifier[X_test] , identifier[Y_test] = identifier[Y_test] , identifier[total_iter_num] = identifier[total_iter_num] ,
identifier[student_initializer] = identifier[student_initializer] ,
identifier[teacher_initializer] = identifier[teacher_initializer] ,
identifier[student_optimizing_algorithm] = literal[string] ,
identifier[teacher_learning_rate] = identifier[teacher_learning_rate] ,
identifier[student_learning_rate] = identifier[student_learning_rate] ,
identifier[teacher_prior_precision] = identifier[teacher_prior] , identifier[student_prior_precision] = identifier[student_prior] ,
identifier[perturb_deviation] = identifier[perturb_deviation] , identifier[minibatch_size] = literal[int] , identifier[dev] = identifier[dev] ( identifier[gpu_id] ))
|
def run_mnist_DistilledSGLD(num_training=50000, gpu_id=None):
"""Run DistilledSGLD on mnist dataset"""
(X, Y, X_test, Y_test) = load_mnist(num_training)
minibatch_size = 100
if num_training >= 10000:
num_hidden = 800
total_iter_num = 1000000
teacher_learning_rate = 1e-06
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.1 # depends on [control=['if'], data=[]]
else:
num_hidden = 400
total_iter_num = 20000
teacher_learning_rate = 4e-05
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.001
teacher_net = get_mnist_sym(num_hidden=num_hidden)
logsoftmax = LogSoftmax()
student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
data_shape = (minibatch_size,) + X.shape[1:]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)), 'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)), 'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev(gpu_id))}
teacher_initializer = BiasXavier(factor_type='in', magnitude=1)
student_initializer = BiasXavier(factor_type='in', magnitude=1)
(student_exe, student_params, _) = DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net, teacher_data_inputs=teacher_data_inputs, student_data_inputs=student_data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num, student_initializer=student_initializer, teacher_initializer=teacher_initializer, student_optimizing_algorithm='adam', teacher_learning_rate=teacher_learning_rate, student_learning_rate=student_learning_rate, teacher_prior_precision=teacher_prior, student_prior_precision=student_prior, perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev(gpu_id))
|
async def govt(self, root):
    """Nation's government expenditure, as percentages.

    Returns
    -------
    an :class:`ApiQuery` of :class:`collections.OrderedDict` with keys of str and values of float
        Keys being, in order: ``Administration``, ``Defense``,
        ``Education``, ``Environment``, ``Healthcare``, ``Industry``,
        ``International Aid``, ``Law & Order``, ``Public Transport``,
        ``Social Policy``, ``Spirituality``, and ``Welfare``.
    """
    elem = root.find('GOVT')
    # Display label -> XML tag; note DEFENCE vs "Defense" (matching the web
    # UI), COMMERCE reported as "Industry", and SOCIALEQUALITY as
    # "Social Policy".
    fields = (
        ('Administration', 'ADMINISTRATION'),
        ('Defense', 'DEFENCE'),
        ('Education', 'EDUCATION'),
        ('Environment', 'ENVIRONMENT'),
        ('Healthcare', 'HEALTHCARE'),
        ('Industry', 'COMMERCE'),
        ('International Aid', 'INTERNATIONALAID'),
        ('Law & Order', 'LAWANDORDER'),
        ('Public Transport', 'PUBLICTRANSPORT'),
        ('Social Policy', 'SOCIALEQUALITY'),
        ('Spirituality', 'SPIRITUALITY'),
        ('Welfare', 'WELFARE'),
    )
    result = OrderedDict()
    for label, tag in fields:
        result[label] = float(elem.find(tag).text)
    return result
|
<ast.AsyncFunctionDef object at 0x7da1b2776a10>
|
keyword[async] keyword[def] identifier[govt] ( identifier[self] , identifier[root] ):
literal[string]
identifier[elem] = identifier[root] . identifier[find] ( literal[string] )
identifier[result] = identifier[OrderedDict] ()
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
identifier[result] [ literal[string] ]= identifier[float] ( identifier[elem] . identifier[find] ( literal[string] ). identifier[text] )
keyword[return] identifier[result]
|
async def govt(self, root):
"""Nation's government expenditure, as percentages.
Returns
-------
an :class:`ApiQuery` of :class:`collections.OrderedDict` with keys of str and values of float
Keys being, in order: ``Administration``, ``Defense``,
``Education``, ``Environment``, ``Healthcare``, ``Industry``,
``International Aid``, ``Law & Order``, ``Public Transport``,
``Social Policy``, ``Spirituality``, and ``Welfare``.
"""
elem = root.find('GOVT')
result = OrderedDict()
result['Administration'] = float(elem.find('ADMINISTRATION').text)
result['Defense'] = float(elem.find('DEFENCE').text) # match the web UI
result['Education'] = float(elem.find('EDUCATION').text)
result['Environment'] = float(elem.find('ENVIRONMENT').text)
result['Healthcare'] = float(elem.find('HEALTHCARE').text)
result['Industry'] = float(elem.find('COMMERCE').text) # Don't ask
result['International Aid'] = float(elem.find('INTERNATIONALAID').text)
result['Law & Order'] = float(elem.find('LAWANDORDER').text)
result['Public Transport'] = float(elem.find('PUBLICTRANSPORT').text)
result['Social Policy'] = float(elem.find('SOCIALEQUALITY').text) # Shh
result['Spirituality'] = float(elem.find('SPIRITUALITY').text)
result['Welfare'] = float(elem.find('WELFARE').text)
return result
|
def email(self, subject, text_body, html_body=None, sender=None, **kwargs):
    # type: (str, str, Optional[str], Optional[str], Any) -> None
    """Emails a user.

    Args:
        subject (str): Email subject
        text_body (str): Plain text email body
        html_body (str): HTML email body
        sender (Optional[str]): Email sender. Defaults to SMTP username.
        **kwargs: See below
        mail_options (List): Mail options (see smtplib documentation)
        rcpt_options (List): Recipient options (see smtplib documentation)

    Returns:
        None
    """
    # The sole recipient is this user's own address.
    recipients = [self.data['email']]
    emailer = self.configuration.emailer()
    emailer.send(recipients, subject, text_body,
                 html_body=html_body, sender=sender, **kwargs)
|
def function[email, parameter[self, subject, text_body, html_body, sender]]:
constant[Emails a user.
Args:
subject (str): Email subject
text_body (str): Plain text email body
html_body (str): HTML email body
sender (Optional[str]): Email sender. Defaults to SMTP username.
**kwargs: See below
mail_options (List): Mail options (see smtplib documentation)
rcpt_options (List): Recipient options (see smtplib documentation)
Returns:
None
]
call[call[name[self].configuration.emailer, parameter[]].send, parameter[list[[<ast.Subscript object at 0x7da1b0e306a0>]], name[subject], name[text_body]]]
|
keyword[def] identifier[email] ( identifier[self] , identifier[subject] , identifier[text_body] , identifier[html_body] = keyword[None] , identifier[sender] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[configuration] . identifier[emailer] (). identifier[send] ([ identifier[self] . identifier[data] [ literal[string] ]], identifier[subject] , identifier[text_body] , identifier[html_body] = identifier[html_body] , identifier[sender] = identifier[sender] ,
** identifier[kwargs] )
|
def email(self, subject, text_body, html_body=None, sender=None, **kwargs):
# type: (str, str, Optional[str], Optional[str], Any) -> None
'Emails a user.\n\n Args:\n subject (str): Email subject\n text_body (str): Plain text email body\n html_body (str): HTML email body\n sender (Optional[str]): Email sender. Defaults to SMTP username.\n **kwargs: See below\n mail_options (List): Mail options (see smtplib documentation)\n rcpt_options (List): Recipient options (see smtplib documentation)\n\n Returns:\n None\n '
self.configuration.emailer().send([self.data['email']], subject, text_body, html_body=html_body, sender=sender, **kwargs)
|
def unbroadcast(a, b):
    '''
    unbroadcast(a, b) yields a tuple (aa, bb) that is equivalent to (a, b) except that aa and bb
    have been reshaped such that arithmetic numpy operations such as aa * bb will result in
    row-wise operation instead of column-wise broadcasting.
    '''
    # they could be sparse:
    spa = sps.issparse(a)
    spb = sps.issparse(b)
    if spa and spb: return (a,b)
    elif spa or spb:
        def fix(sp,nm):
            # Reshape the dense operand so it aligns row-wise with the
            # (necessarily 2D) sparse matrix.
            nm = np.asarray(nm)
            dnm = len(nm.shape)
            nnm = np.prod(nm.shape)
            # if we have (sparse matrix) * (high-dim array), unbroadcast the dense array
            if dnm == 0: return (sp, np.reshape(nm, (1, 1)))
            elif dnm == 1: return (sp, np.reshape(nm, (nnm, 1)))
            elif dnm == 2: return (sp, nm)
            else: return unbroadcast(sp.toarray(), nm)
        # Preserve (a, b) argument order when b was the sparse operand.
        return fix(a, b) if spa else tuple(reversed(fix(b, a)))
    # okay, no sparse matrices found:
    a = np.asarray(a)
    b = np.asarray(b)
    da = len(a.shape)
    db = len(b.shape)
    # Pad the lower-rank operand with trailing singleton axes. A plain
    # (1,) * k tuple replaces tuple(np.ones(k, dtype=np.int)): np.int was
    # deprecated in NumPy 1.20 and removed in 1.24, so the old form raises
    # AttributeError on modern NumPy.
    if da > db: return (a, np.reshape(b, b.shape + (1,) * (da - db)))
    elif da < db: return (np.reshape(a, a.shape + (1,) * (db - da)), b)
    else: return (a, b)
|
def function[unbroadcast, parameter[a, b]]:
constant[
unbroadcast(a, b) yields a tuple (aa, bb) that is equivalent to (a, b) except that aa and bb
have been reshaped such that arithmetic numpy operations such as aa * bb will result in
row-wise operation instead of column-wise broadcasting.
]
variable[spa] assign[=] call[name[sps].issparse, parameter[name[a]]]
variable[spb] assign[=] call[name[sps].issparse, parameter[name[b]]]
if <ast.BoolOp object at 0x7da18eb54d00> begin[:]
return[tuple[[<ast.Name object at 0x7da18eb56530>, <ast.Name object at 0x7da18eb55240>]]]
variable[a] assign[=] call[name[np].asarray, parameter[name[a]]]
variable[b] assign[=] call[name[np].asarray, parameter[name[b]]]
variable[da] assign[=] call[name[len], parameter[name[a].shape]]
variable[db] assign[=] call[name[len], parameter[name[b].shape]]
if compare[name[da] greater[>] name[db]] begin[:]
return[tuple[[<ast.Name object at 0x7da18eb554e0>, <ast.Call object at 0x7da18eb57670>]]]
|
keyword[def] identifier[unbroadcast] ( identifier[a] , identifier[b] ):
literal[string]
identifier[spa] = identifier[sps] . identifier[issparse] ( identifier[a] )
identifier[spb] = identifier[sps] . identifier[issparse] ( identifier[b] )
keyword[if] identifier[spa] keyword[and] identifier[spb] : keyword[return] ( identifier[a] , identifier[b] )
keyword[elif] identifier[spa] keyword[or] identifier[spb] :
keyword[def] identifier[fix] ( identifier[sp] , identifier[nm] ):
identifier[nm] = identifier[np] . identifier[asarray] ( identifier[nm] )
identifier[dnm] = identifier[len] ( identifier[nm] . identifier[shape] )
identifier[nnm] = identifier[np] . identifier[prod] ( identifier[nm] . identifier[shape] )
keyword[if] identifier[dnm] == literal[int] : keyword[return] ( identifier[sp] , identifier[np] . identifier[reshape] ( identifier[nm] ,( literal[int] , literal[int] )))
keyword[elif] identifier[dnm] == literal[int] : keyword[return] ( identifier[sp] , identifier[np] . identifier[reshape] ( identifier[nm] ,( identifier[nnm] , literal[int] )))
keyword[elif] identifier[dnm] == literal[int] : keyword[return] ( identifier[sp] , identifier[nm] )
keyword[else] : keyword[return] identifier[unbroadcast] ( identifier[sp] . identifier[toarray] (), identifier[nm] )
keyword[return] identifier[fix] ( identifier[a] , identifier[b] ) keyword[if] identifier[spa] keyword[else] identifier[tuple] ( identifier[reversed] ( identifier[fix] ( identifier[b] , identifier[a] )))
identifier[a] = identifier[np] . identifier[asarray] ( identifier[a] )
identifier[b] = identifier[np] . identifier[asarray] ( identifier[b] )
identifier[da] = identifier[len] ( identifier[a] . identifier[shape] )
identifier[db] = identifier[len] ( identifier[b] . identifier[shape] )
keyword[if] identifier[da] > identifier[db] : keyword[return] ( identifier[a] , identifier[np] . identifier[reshape] ( identifier[b] , identifier[b] . identifier[shape] + identifier[tuple] ( identifier[np] . identifier[ones] ( identifier[da] - identifier[db] , identifier[dtype] = identifier[np] . identifier[int] ))))
keyword[elif] identifier[da] < identifier[db] : keyword[return] ( identifier[np] . identifier[reshape] ( identifier[a] , identifier[a] . identifier[shape] + identifier[tuple] ( identifier[np] . identifier[ones] ( identifier[db] - identifier[da] , identifier[dtype] = identifier[np] . identifier[int] ))), identifier[b] )
keyword[else] : keyword[return] ( identifier[a] , identifier[b] )
|
def unbroadcast(a, b):
"""
unbroadcast(a, b) yields a tuple (aa, bb) that is equivalent to (a, b) except that aa and bb
have been reshaped such that arithmetic numpy operations such as aa * bb will result in
row-wise operation instead of column-wise broadcasting.
"""
# they could be sparse:
spa = sps.issparse(a)
spb = sps.issparse(b)
if spa and spb:
return (a, b) # depends on [control=['if'], data=[]]
elif spa or spb:
def fix(sp, nm):
nm = np.asarray(nm)
dnm = len(nm.shape)
nnm = np.prod(nm.shape)
# if we have (sparse matrix) * (high-dim array), unbroadcast the dense array
if dnm == 0:
return (sp, np.reshape(nm, (1, 1))) # depends on [control=['if'], data=[]]
elif dnm == 1:
return (sp, np.reshape(nm, (nnm, 1))) # depends on [control=['if'], data=[]]
elif dnm == 2:
return (sp, nm) # depends on [control=['if'], data=[]]
else:
return unbroadcast(sp.toarray(), nm)
return fix(a, b) if spa else tuple(reversed(fix(b, a))) # depends on [control=['if'], data=[]]
# okay, no sparse matrices found:
a = np.asarray(a)
b = np.asarray(b)
da = len(a.shape)
db = len(b.shape)
if da > db:
return (a, np.reshape(b, b.shape + tuple(np.ones(da - db, dtype=np.int)))) # depends on [control=['if'], data=['da', 'db']]
elif da < db:
return (np.reshape(a, a.shape + tuple(np.ones(db - da, dtype=np.int))), b) # depends on [control=['if'], data=['da', 'db']]
else:
return (a, b)
|
def cached_node_creator(self, target_to_vts):
    """Strategy restores dependency graph node from the build cache.
    """
    def creator(target):
        vt = target_to_vts[target]
        cache_path = self.nodes_json(vt.results_dir)
        if vt.valid and os.path.exists(cache_path):
            try:
                with open(cache_path, 'r') as fp:
                    raw = json.load(fp)
                # Resolve target specs back into live targets via the context.
                return Node.from_cacheable_dict(
                    raw, lambda spec: next(self.context.resolve(spec).__iter__()))
            except Exception:
                self.context.log.warn("Can't deserialize json for target {}".format(target))
        else:
            self.context.log.warn("No cache entry for {}".format(target))
        # Fallback for both a missing cache entry and a corrupt one.
        return Node(target.concrete_derived_from)
    return creator
|
def function[cached_node_creator, parameter[self, target_to_vts]]:
constant[Strategy restores dependency graph node from the build cache.
]
def function[creator, parameter[target]]:
variable[vt] assign[=] call[name[target_to_vts]][name[target]]
if <ast.BoolOp object at 0x7da1b1e5e0e0> begin[:]
<ast.Try object at 0x7da1b1e5ef80>
return[name[creator]]
|
keyword[def] identifier[cached_node_creator] ( identifier[self] , identifier[target_to_vts] ):
literal[string]
keyword[def] identifier[creator] ( identifier[target] ):
identifier[vt] = identifier[target_to_vts] [ identifier[target] ]
keyword[if] identifier[vt] . identifier[valid] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[nodes_json] ( identifier[vt] . identifier[results_dir] )):
keyword[try] :
keyword[with] identifier[open] ( identifier[self] . identifier[nodes_json] ( identifier[vt] . identifier[results_dir] ), literal[string] ) keyword[as] identifier[fp] :
keyword[return] identifier[Node] . identifier[from_cacheable_dict] ( identifier[json] . identifier[load] ( identifier[fp] ),
keyword[lambda] identifier[spec] : identifier[next] ( identifier[self] . identifier[context] . identifier[resolve] ( identifier[spec] ). identifier[__iter__] ()))
keyword[except] identifier[Exception] :
identifier[self] . identifier[context] . identifier[log] . identifier[warn] ( literal[string] . identifier[format] ( identifier[target] ))
keyword[return] identifier[Node] ( identifier[target] . identifier[concrete_derived_from] )
keyword[else] :
identifier[self] . identifier[context] . identifier[log] . identifier[warn] ( literal[string] . identifier[format] ( identifier[target] ))
keyword[return] identifier[Node] ( identifier[target] . identifier[concrete_derived_from] )
keyword[return] identifier[creator]
|
def cached_node_creator(self, target_to_vts):
"""Strategy restores dependency graph node from the build cache.
"""
def creator(target):
vt = target_to_vts[target]
if vt.valid and os.path.exists(self.nodes_json(vt.results_dir)):
try:
with open(self.nodes_json(vt.results_dir), 'r') as fp:
return Node.from_cacheable_dict(json.load(fp), lambda spec: next(self.context.resolve(spec).__iter__())) # depends on [control=['with'], data=['fp']] # depends on [control=['try'], data=[]]
except Exception:
self.context.log.warn("Can't deserialize json for target {}".format(target))
return Node(target.concrete_derived_from) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
self.context.log.warn('No cache entry for {}'.format(target))
return Node(target.concrete_derived_from)
return creator
|
def download_pisa_multimers_xml(pdb_ids, save_single_xml_files=True, outdir=None, force_rerun=False):
    """Download the PISA XML file for multimers.

    See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info

    XML description of macromolecular assemblies:
        http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist
        where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML
        output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB
        entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many
        PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous
        one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will
        silently die in the server queue.

    Args:
        pdb_ids (str, list): PDB ID or list of IDs
        save_single_xml_files (bool): If single XML files should be saved per PDB ID. If False, if multiple PDB IDs are
            provided, then a single, combined XML output file is downloaded
        outdir (str): Directory to output PISA XML files
        force_rerun (bool): Redownload files if they already exist

    Returns:
        dict: of PDB ID mapped to the path of its downloaded XML file

    """
    if not outdir:
        outdir = os.getcwd()

    files = {}
    pdb_ids = ssbio.utils.force_lower_list(sorted(pdb_ids))

    # If we want to save single PISA XML files per PDB ID...
    if save_single_xml_files:
        # Check for existing PISA XML files so already-downloaded IDs are skipped
        if not force_rerun:
            existing_files = [op.basename(x) for x in glob.glob(op.join(outdir, '*_multimers.pisa.xml'))]
            # Store the paths to these files to return
            files = {v.split('_')[0]: op.join(outdir, v) for v in existing_files}
            log.debug('Already downloaded PISA files for {}'.format(list(files.keys())))
        else:
            existing_files = []

        # Filter PDB IDs based on existing files
        pdb_ids = [x for x in pdb_ids if '{}_multimers.pisa.xml'.format(x) not in existing_files]

        # Chunk requests to 40 IDs each (PISA recommends 20-50 per URL)
        split_list = ssbio.utils.split_list_by_n(pdb_ids, 40)

        # Download PISA files
        for l in split_list:
            pdbs = ','.join(l)
            all_pisa_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(pdbs)
            r = requests.get(all_pisa_link)

            # Parse the combined PISA response and save one XML file per PDB entry
            parser = etree.XMLParser(ns_clean=True)
            tree = etree.fromstring(r.text, parser)

            for pdb in tree.findall('pdb_entry'):
                # Look up the PDB code once and reuse it for the filename,
                # the return dict key, and the log message.
                pdb_code = pdb.find('pdb_code').text
                filename = op.join(outdir, '{}_multimers.pisa.xml'.format(pdb_code))

                # Re-wrap the single entry in a root element so each file is
                # valid standalone XML.
                add_root = etree.Element('pisa_multimers')
                add_root.append(pdb)

                with open(filename, 'wb') as f:
                    f.write(etree.tostring(add_root))

                files[pdb_code] = filename
                log.debug('{}: downloaded PISA results'.format(pdb_code))
    else:
        split_list = ssbio.utils.split_list_by_n(pdb_ids, 40)

        for l in split_list:
            pdbs = ','.join(l)
            all_pisa_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(pdbs)

            filename = op.join(outdir, '{}_multimers.pisa.xml'.format(pdbs))
            if ssbio.utils.force_rerun(flag=force_rerun, outfile=filename):
                r = requests.get(all_pisa_link)
                with open(filename, 'w') as f:
                    f.write(r.text)
                log.debug('Downloaded PISA results')
            else:
                log.debug('PISA results already downloaded')

            # All IDs in this chunk share the one combined output file
            for x in l:
                files[x] = filename

    return files
|
def function[download_pisa_multimers_xml, parameter[pdb_ids, save_single_xml_files, outdir, force_rerun]]:
constant[Download the PISA XML file for multimers.
See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info
XML description of macromolecular assemblies:
http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist
where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML
output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB
entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many
PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous
one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will
silently die in the server queue.
Args:
pdb_ids (str, list): PDB ID or list of IDs
save_single_xml_files (bool): If single XML files should be saved per PDB ID. If False, if multiple PDB IDs are
provided, then a single, combined XML output file is downloaded
outdir (str): Directory to output PISA XML files
force_rerun (bool): Redownload files if they already exist
Returns:
list: of files downloaded
]
if <ast.UnaryOp object at 0x7da1b0ed0850> begin[:]
variable[outdir] assign[=] call[name[os].getcwd, parameter[]]
variable[files] assign[=] dictionary[[], []]
variable[pdb_ids] assign[=] call[name[ssbio].utils.force_lower_list, parameter[call[name[sorted], parameter[name[pdb_ids]]]]]
if name[save_single_xml_files] begin[:]
if <ast.UnaryOp object at 0x7da1b0ed0c40> begin[:]
variable[existing_files] assign[=] <ast.ListComp object at 0x7da1b0ed0af0>
variable[files] assign[=] <ast.DictComp object at 0x7da1b0ed0eb0>
call[name[log].debug, parameter[call[constant[Already downloaded PISA files for {}].format, parameter[call[name[list], parameter[call[name[files].keys, parameter[]]]]]]]]
variable[pdb_ids] assign[=] <ast.ListComp object at 0x7da1b0ed3190>
variable[split_list] assign[=] call[name[ssbio].utils.split_list_by_n, parameter[name[pdb_ids], constant[40]]]
for taget[name[l]] in starred[name[split_list]] begin[:]
variable[pdbs] assign[=] call[constant[,].join, parameter[name[l]]]
variable[all_pisa_link] assign[=] call[constant[http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}].format, parameter[name[pdbs]]]
variable[r] assign[=] call[name[requests].get, parameter[name[all_pisa_link]]]
variable[parser] assign[=] call[name[etree].XMLParser, parameter[]]
variable[tree] assign[=] call[name[etree].fromstring, parameter[name[r].text, name[parser]]]
for taget[name[pdb]] in starred[call[name[tree].findall, parameter[constant[pdb_entry]]]] begin[:]
variable[filename] assign[=] call[name[op].join, parameter[name[outdir], call[constant[{}_multimers.pisa.xml].format, parameter[call[name[pdb].find, parameter[constant[pdb_code]]].text]]]]
variable[add_root] assign[=] call[name[etree].Element, parameter[constant[pisa_multimers]]]
call[name[add_root].append, parameter[name[pdb]]]
with call[name[open], parameter[name[filename], constant[wb]]] begin[:]
call[name[f].write, parameter[call[name[etree].tostring, parameter[name[add_root]]]]]
call[name[files]][call[name[pdb].find, parameter[constant[pdb_code]]].text] assign[=] name[filename]
call[name[log].debug, parameter[call[constant[{}: downloaded PISA results].format, parameter[name[pdb]]]]]
return[name[files]]
|
keyword[def] identifier[download_pisa_multimers_xml] ( identifier[pdb_ids] , identifier[save_single_xml_files] = keyword[True] , identifier[outdir] = keyword[None] , identifier[force_rerun] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[outdir] :
identifier[outdir] = identifier[os] . identifier[getcwd] ()
identifier[files] ={}
identifier[pdb_ids] = identifier[ssbio] . identifier[utils] . identifier[force_lower_list] ( identifier[sorted] ( identifier[pdb_ids] ))
keyword[if] identifier[save_single_xml_files] :
keyword[if] keyword[not] identifier[force_rerun] :
identifier[existing_files] =[ identifier[op] . identifier[basename] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[glob] . identifier[glob] ( identifier[op] . identifier[join] ( identifier[outdir] , literal[string] ))]
identifier[files] ={ identifier[v] . identifier[split] ( literal[string] )[ literal[int] ]: identifier[op] . identifier[join] ( identifier[outdir] , identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[existing_files] }
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[list] ( identifier[files] . identifier[keys] ())))
keyword[else] :
identifier[existing_files] =[]
identifier[pdb_ids] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[pdb_ids] keyword[if] literal[string] . identifier[format] ( identifier[x] ) keyword[not] keyword[in] identifier[existing_files] ]
identifier[split_list] = identifier[ssbio] . identifier[utils] . identifier[split_list_by_n] ( identifier[pdb_ids] , literal[int] )
keyword[for] identifier[l] keyword[in] identifier[split_list] :
identifier[pdbs] = literal[string] . identifier[join] ( identifier[l] )
identifier[all_pisa_link] = literal[string] . identifier[format] ( identifier[pdbs] )
identifier[r] = identifier[requests] . identifier[get] ( identifier[all_pisa_link] )
identifier[parser] = identifier[etree] . identifier[XMLParser] ( identifier[ns_clean] = keyword[True] )
identifier[tree] = identifier[etree] . identifier[fromstring] ( identifier[r] . identifier[text] , identifier[parser] )
keyword[for] identifier[pdb] keyword[in] identifier[tree] . identifier[findall] ( literal[string] ):
identifier[filename] = identifier[op] . identifier[join] ( identifier[outdir] , literal[string] . identifier[format] ( identifier[pdb] . identifier[find] ( literal[string] ). identifier[text] ))
identifier[add_root] = identifier[etree] . identifier[Element] ( literal[string] )
identifier[add_root] . identifier[append] ( identifier[pdb] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[etree] . identifier[tostring] ( identifier[add_root] ))
identifier[files] [ identifier[pdb] . identifier[find] ( literal[string] ). identifier[text] ]= identifier[filename]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[pdb] ))
keyword[else] :
identifier[split_list] = identifier[ssbio] . identifier[utils] . identifier[split_list_by_n] ( identifier[pdb_ids] , literal[int] )
keyword[for] identifier[l] keyword[in] identifier[split_list] :
identifier[pdbs] = literal[string] . identifier[join] ( identifier[l] )
identifier[all_pisa_link] = literal[string] . identifier[format] ( identifier[pdbs] )
identifier[filename] = identifier[op] . identifier[join] ( identifier[outdir] , literal[string] . identifier[format] ( identifier[pdbs] ))
keyword[if] identifier[ssbio] . identifier[utils] . identifier[force_rerun] ( identifier[flag] = identifier[force_rerun] , identifier[outfile] = identifier[filename] ):
identifier[r] = identifier[requests] . identifier[get] ( identifier[all_pisa_link] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[r] . identifier[text] )
identifier[log] . identifier[debug] ( literal[string] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[for] identifier[x] keyword[in] identifier[l] :
identifier[files] [ identifier[x] ]= identifier[filename]
keyword[return] identifier[files]
|
def download_pisa_multimers_xml(pdb_ids, save_single_xml_files=True, outdir=None, force_rerun=False):
    """Fetch PISA multimer-assembly XML from PDBe for one or more PDB entries.

    See http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for details. The
    service takes a comma-separated (strictly no spaces) list of PDB codes and
    returns one combined XML document with the assembly data for every entry;
    PDBe asks that mass downloads batch many codes per request and wait for
    each request to finish before issuing the next.

    Args:
        pdb_ids (str, list): PDB ID or list of IDs
        save_single_xml_files (bool): if True, split each combined response
            into one XML file per PDB ID; if False, keep one combined XML
            file per batch of requested IDs
        outdir (str): directory for the downloaded XML files (defaults to the
            current working directory)
        force_rerun (bool): redownload files even if they already exist

    Returns:
        dict: PDB ID -> path of the XML file containing its assembly data
    """
    if not outdir:
        outdir = os.getcwd()
    files = {}
    pdb_ids = ssbio.utils.force_lower_list(sorted(pdb_ids))

    if save_single_xml_files:
        # Reuse any per-PDB XML files already on disk unless forced to redo.
        if force_rerun:
            existing_files = []
        else:
            existing_files = [op.basename(x) for x in glob.glob(op.join(outdir, '*_multimers.pisa.xml'))]
            # Remember the paths of the cached files so they are returned too.
            files = {v.split('_')[0]: op.join(outdir, v) for v in existing_files}
            log.debug('Already downloaded PISA files for {}'.format(list(files.keys())))
        # Only request the IDs that are not cached yet.
        pdb_ids = [x for x in pdb_ids if '{}_multimers.pisa.xml'.format(x) not in existing_files]
        # Batch the IDs to stay within PDBe's recommended request size.
        for chunk in ssbio.utils.split_list_by_n(pdb_ids, 40):
            joined_ids = ','.join(chunk)
            r = requests.get('http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(joined_ids))
            # Split the combined response into one <pisa_multimers> document
            # per PDB entry and write each to its own file.
            parser = etree.XMLParser(ns_clean=True)
            tree = etree.fromstring(r.text, parser)
            for pdb in tree.findall('pdb_entry'):
                pdb_code = pdb.find('pdb_code').text
                filename = op.join(outdir, '{}_multimers.pisa.xml'.format(pdb_code))
                add_root = etree.Element('pisa_multimers')
                add_root.append(pdb)
                with open(filename, 'wb') as f:
                    f.write(etree.tostring(add_root))
                files[pdb_code] = filename
                log.debug('{}: downloaded PISA results'.format(pdb))
    else:
        # One combined XML file per batch; every ID in the batch maps to it.
        for chunk in ssbio.utils.split_list_by_n(pdb_ids, 40):
            joined_ids = ','.join(chunk)
            all_pisa_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(joined_ids)
            filename = op.join(outdir, '{}_multimers.pisa.xml'.format(joined_ids))
            if ssbio.utils.force_rerun(flag=force_rerun, outfile=filename):
                r = requests.get(all_pisa_link)
                with open(filename, 'w') as f:
                    f.write(r.text)
                log.debug('Downloaded PISA results')
            else:
                log.debug('PISA results already downloaded')
            for x in chunk:
                files[x] = filename
    return files
|
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
|
def function[_parse_seq_header, parameter[line]]:
constant[Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
]
variable[_parts] assign[=] call[call[name[line]][<ast.Slice object at 0x7da1b23476d0>].split, parameter[constant[None], constant[1]]]
variable[rec_id] assign[=] call[name[_parts]][constant[0]]
variable[descr] assign[=] <ast.IfExp object at 0x7da1b2344070>
variable[dbxrefs] assign[=] dictionary[[], []]
if compare[constant[|] in name[rec_id]] begin[:]
variable[id_gen] assign[=] call[name[iter], parameter[call[call[name[rec_id].rstrip, parameter[constant[|]]].split, parameter[constant[|]]]]]
for taget[name[key]] in starred[name[id_gen]] begin[:]
<ast.Try object at 0x7da1b2344c40>
variable[headlen] assign[=] constant[None]
variable[phylum] assign[=] constant[]
if call[name[descr].startswith, parameter[constant[{]]] begin[:]
<ast.Tuple object at 0x7da1b2345360> assign[=] call[call[name[descr]][<ast.Slice object at 0x7da18f00d780>].split, parameter[constant[}], constant[1]]]
variable[match] assign[=] call[name[re].search, parameter[constant[
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
], name[_deets], name[re].VERBOSE]]
if name[match] begin[:]
<ast.Tuple object at 0x7da18f00dde0> assign[=] call[name[match].groups, parameter[]]
if compare[name[headlen] is_not constant[None]] begin[:]
variable[headlen] assign[=] call[name[int], parameter[name[headlen]]]
if compare[name[taillen] is_not constant[None]] begin[:]
variable[taillen] assign[=] call[name[int], parameter[name[taillen]]]
if compare[name[phylum] is constant[None]] begin[:]
variable[phylum] assign[=] constant[]
if compare[name[taxchar] is constant[None]] begin[:]
variable[taxchar] assign[=] constant[]
return[tuple[[<ast.Name object at 0x7da18f00d8d0>, <ast.Name object at 0x7da18f00d6c0>, <ast.Name object at 0x7da18f00f2e0>, <ast.Name object at 0x7da18f00e6e0>, <ast.Name object at 0x7da18f00e6b0>, <ast.Name object at 0x7da18f00d2d0>, <ast.Name object at 0x7da18f00c2e0>]]]
|
keyword[def] identifier[_parse_seq_header] ( identifier[line] ):
literal[string]
identifier[_parts] = identifier[line] [ literal[int] :]. identifier[split] ( keyword[None] , literal[int] )
identifier[rec_id] = identifier[_parts] [ literal[int] ]
identifier[descr] = identifier[_parts] [ literal[int] ] keyword[if] identifier[_parts] [ literal[int] :] keyword[else] literal[string]
identifier[dbxrefs] ={}
keyword[if] literal[string] keyword[in] identifier[rec_id] :
identifier[id_gen] = identifier[iter] ( identifier[rec_id] . identifier[rstrip] ( literal[string] ). identifier[split] ( literal[string] ))
keyword[for] identifier[key] keyword[in] identifier[id_gen] :
keyword[try] :
identifier[dbxrefs] [ identifier[key] ]= identifier[next] ( identifier[id_gen] )
keyword[except] identifier[StopIteration] :
keyword[break]
identifier[headlen] = identifier[taillen] = keyword[None]
identifier[phylum] = identifier[taxchar] = literal[string]
keyword[if] identifier[descr] . identifier[startswith] ( literal[string] ):
identifier[_deets] , identifier[description] = identifier[descr] [ literal[int] :]. identifier[split] ( literal[string] , literal[int] )
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[_deets] , identifier[re] . identifier[VERBOSE] )
keyword[if] identifier[match] :
identifier[headlen] , identifier[taillen] , identifier[phylum] , identifier[taxchar] = identifier[match] . identifier[groups] ()
keyword[if] identifier[headlen] keyword[is] keyword[not] keyword[None] :
identifier[headlen] = identifier[int] ( identifier[headlen] )
keyword[if] identifier[taillen] keyword[is] keyword[not] keyword[None] :
identifier[taillen] = identifier[int] ( identifier[taillen] )
keyword[if] identifier[phylum] keyword[is] keyword[None] :
identifier[phylum] = literal[string]
keyword[if] identifier[taxchar] keyword[is] keyword[None] :
identifier[taxchar] = literal[string]
keyword[else] :
identifier[logging] . identifier[warn] ( literal[string] , identifier[_deets] )
keyword[else] :
identifier[description] = identifier[descr]
keyword[return] identifier[rec_id] , identifier[dbxrefs] , identifier[headlen] , identifier[taillen] , identifier[phylum] , identifier[taxchar] , identifier[description]
|
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen) # depends on [control=['try'], data=[]]
except StopIteration:
break # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=['rec_id']]
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
(_deets, description) = descr[1:].split('}', 1)
match = re.search('\n (?:\n \\| (?P<headlen> \\d+)\n \\( (?P<taillen> \\d+)\n \\)\n \\|\n )?\n (?:\n < (?P<phylum> .+?)\n \\( (?P<taxchar> \\w)\n \\)\n >\n )?\n ', _deets, re.VERBOSE)
if match:
(headlen, taillen, phylum, taxchar) = match.groups()
if headlen is not None:
headlen = int(headlen) # depends on [control=['if'], data=['headlen']]
if taillen is not None:
taillen = int(taillen) # depends on [control=['if'], data=['taillen']]
if phylum is None:
phylum = '' # depends on [control=['if'], data=['phylum']]
if taxchar is None:
taxchar = '' # depends on [control=['if'], data=['taxchar']] # depends on [control=['if'], data=[]]
else:
logging.warn("Couldn't match head/tail: %s", _deets) # depends on [control=['if'], data=[]]
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description)
|
def jr6_jr6(mag_file, dir_path=".", input_dir_path="",
            meas_file="measurements.txt", spec_file="specimens.txt",
            samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt",
            specnum=1, samp_con='1', location='unknown', lat='', lon='',
            noave=False, meth_code="LP-NO", volume=12, JR=False, user=""):
    """
    Convert JR6 .jr6 files to MagIC file(s)
    Parameters
    ----------
    mag_file : str
        input file name
    dir_path : str
        working directory, default "."
    input_dir_path : str
        input file directory IF different from dir_path, default ""
    meas_file : str
        output measurement file name, default "measurements.txt"
    spec_file : str
        output specimen file name, default "specimens.txt"
    samp_file: str
        output sample file name, default "samples.txt"
    site_file : str
        output site file name, default "sites.txt"
    loc_file : str
        output location file name, default "locations.txt"
    specnum : int
        number of characters to designate a specimen, default 0
    samp_con : str
        sample/site naming convention, default '1', see info below
    location : str
        location name, default "unknown"
    lat : float
        latitude, default ""
    lon : float
        longitude, default ""
    noave : bool
        do not average duplicate measurements, default False (so by default, DO average)
    meth_code : str
        colon-delimited method codes, default "LP-NO"
    volume : float
        volume in ccs, default 12
    JR : bool
        IODP samples were measured on the JOIDES RESOLUTION, default False
    user : str
        user name, default ""
    Returns
    ---------
    Tuple : (True or False indicating if conversion was sucessful, meas_file name written)
    Info
    --------
    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY:  XXXX is site designation with Z characters with sample name XXXXYYYY
    """
    version_num = pmag.get_version()
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
    specnum = - int(specnum)
    samp_con = str(samp_con)
    volume = float(volume) * 1e-6  # convert cc to m^3
    # resolve full paths for all input/output files
    meas_file = pmag.resolve_file_name(meas_file, output_dir_path)
    spec_file = pmag.resolve_file_name(spec_file, output_dir_path)
    samp_file = pmag.resolve_file_name(samp_file, output_dir_path)
    site_file = pmag.resolve_file_name(site_file, output_dir_path)
    loc_file = pmag.resolve_file_name(loc_file, output_dir_path)
    mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
    if JR:
        # JOIDES RESOLUTION samples get IODP-specific method codes and
        # "site name same as sample" naming
        if meth_code == "LP-NO":
            meth_code = ""
        meth_code = meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V"
        meth_code = meth_code.strip(":")
        samp_con = '5'

    # format variables
    tmp_file = mag_file.split(os.extsep)[0]+os.extsep+'tmp'
    mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
    # parse naming-convention options of the form "4-Z" / "7-Z"
    if samp_con.startswith("4"):
        if "-" not in samp_con:
            print("option [4] must be in form 4-Z where Z is an integer")
            return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
        else:
            Z = samp_con.split("-")[1]
            samp_con = "4"
    elif samp_con.startswith("7"):
        if "-" not in samp_con:
            print("option [7] must be in form 7-Z where Z is an integer")
            return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
        else:
            Z = samp_con.split("-")[1]
            samp_con = "7"
    else:
        Z = 1

    # parse data
    # fix .jr6 file so that there are spaces between all the columns.
    pre_data = open(mag_file, 'r')
    tmp_data = open(tmp_file, 'w')
    if samp_con != '2':
        fixed_data = pre_data.read().replace('-', ' -')
    else:
        # convention [2] uses '-' inside sample names, so only fix the
        # numeric fields after the first (name) column
        fixed_data = ""
        for line in pre_data.readlines():
            entries = line.split()
            if len(entries) < 2:
                continue
            fixed_line = entries[0] + ' ' + reduce(
                lambda x, y: x+' '+y, [x.replace('-', ' -') for x in entries[1:]])
            fixed_data += fixed_line+os.linesep
    tmp_data.write(fixed_data)
    tmp_data.close()
    pre_data.close()

    if not JR:
        column_names = ['specimen', 'step', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction',
                        'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd']
    else:  # measured on the Joides Resolution JR6
        column_names = ['specimen', 'step', 'negz', 'y', 'x', 'expon', 'azimuth', 'dip', 'bed_dip_direction',
                        'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd']
    data = pd.read_csv(tmp_file, delim_whitespace=True,
                       names=column_names, index_col=False)
    if isinstance(data['x'][0], str):
        # file has an extra step-unit column; re-read with the wider schema
        column_names = ['specimen', 'step', 'step_unit', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction',
                        'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd']
        data = pd.read_csv(tmp_file, delim_whitespace=True,
                           names=column_names, index_col=False)
    if JR:
        data['z'] = -data['negz']
    # convert cartesian moments to declination/inclination/intensity
    cart = np.array([data['x'], data['y'], data['z']]).transpose()
    dir_dat = pmag.cart2dir(cart).transpose()
    data['dir_dec'] = dir_dat[0]
    data['dir_inc'] = dir_dat[1]
    # the data are in A/m - this converts to Am^2
    data['magn_moment'] = dir_dat[2]*(10.0**data['expon'])*volume
    data['magn_volume'] = dir_dat[2] * \
        (10.0**data['expon'])  # A/m  - data in A/m
    data['dip'] = -data['dip']

    # put data into magic tables
    MagRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], []
    for rowNum, row in data.iterrows():
        MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}
        specimen = row['specimen']
        if specnum != 0:
            sample = specimen[:specnum]
        else:
            sample = specimen
        site = pmag.parse_site(sample, samp_con, Z)
        # only add a specimen/sample/site/location record the first time each
        # name is seen
        if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
            SpecRec['specimen'] = specimen
            SpecRec['sample'] = sample
            SpecRec["citations"] = "This study"
            SpecRec["analysts"] = user
            SpecRec['volume'] = volume
            SpecRecs.append(SpecRec)
        if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
            SampRec['sample'] = sample
            SampRec['site'] = site
            SampRec["citations"] = "This study"
            SampRec["analysts"] = user
            SampRec['azimuth'] = row['azimuth']
            SampRec['dip'] = row['dip']
            SampRec['bed_dip_direction'] = row['bed_dip_direction']
            SampRec['bed_dip'] = row['bed_dip']
            SampRec['method_codes'] = meth_code
            SampRecs.append(SampRec)
        if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
            SiteRec['site'] = site
            SiteRec['location'] = location
            SiteRec["citations"] = "This study"
            SiteRec["analysts"] = user
            SiteRec['lat'] = lat
            SiteRec['lon'] = lon
            SiteRecs.append(SiteRec)
        if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
            LocRec['location'] = location
            LocRec["citations"] = "This study"
            LocRec["analysts"] = user
            LocRec['lat_n'] = lat
            LocRec['lon_e'] = lon
            LocRec['lat_s'] = lat
            LocRec['lon_w'] = lon
            LocRecs.append(LocRec)
        MeasRec["citations"] = "This study"
        MeasRec["analysts"] = user
        MeasRec["specimen"] = specimen
        MeasRec['software_packages'] = version_num
        MeasRec["treat_temp"] = '%8.3e' % (273)  # room temp in kelvin
        MeasRec["meas_temp"] = '%8.3e' % (273)  # room temp in kelvin
        MeasRec["quality"] = 'g'
        MeasRec["standard"] = 'u'
        MeasRec["treat_step_num"] = 0
        MeasRec["treat_ac_field"] = '0'
        # decode the treatment step: NRM, AF demag (AD/A) or thermal (TD/T)
        if row['step'] == 'NRM':
            meas_type = "LT-NO"
        elif 'step_unit' in row and row['step_unit'] == 'C':
            meas_type = "LT-T-Z"
            treat = float(row['step'])
            MeasRec["treat_temp"] = '%8.3e' % (treat+273.)  # temp in kelvin
        elif row['step'][0:2] == 'AD':
            meas_type = "LT-AF-Z"
            treat = float(row['step'][2:])
            MeasRec["treat_ac_field"] = '%8.3e' % (
                treat*1e-3)  # convert from mT to tesla
        elif row['step'][0] == 'A':
            meas_type = "LT-AF-Z"
            treat = float(row['step'][1:])
            MeasRec["treat_ac_field"] = '%8.3e' % (
                treat*1e-3)  # convert from mT to tesla
        # BUG FIX: the original compared row['step'][0] (a single character)
        # to 'TD', which can never be true, so 'TD...' steps fell through to
        # the 'T' branch and crashed on float('D...').  Compare the 2-char
        # prefix instead (must stay before the 'T' branch).
        elif row['step'][0:2] == 'TD':
            meas_type = "LT-T-Z"
            treat = float(row['step'][2:])
            MeasRec["treat_temp"] = '%8.3e' % (treat+273.)  # temp in kelvin
        elif row['step'][0] == 'T':
            meas_type = "LT-T-Z"
            treat = float(row['step'][1:])
            MeasRec["treat_temp"] = '%8.3e' % (treat+273.)  # temp in kelvin
        else:  # need to add IRM, and ARM options
            print("measurement type unknown", row['step'])
            return False, "measurement type unknown"
        MeasRec["magn_moment"] = str(row['magn_moment'])
        MeasRec["magn_volume"] = str(row['magn_volume'])
        MeasRec["dir_dec"] = str(row['dir_dec'])
        MeasRec["dir_inc"] = str(row['dir_inc'])
        MeasRec['method_codes'] = meas_type
        MagRecs.append(MeasRec)

    # assemble the MagIC contribution and write all tables
    con = cb.Contribution(output_dir_path, read_tables=[])
    con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
    con.add_magic_table_from_data(dtype='samples', data=SampRecs)
    con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
    con.add_magic_table_from_data(dtype='locations', data=LocRecs)
    MeasOuts = pmag.measurements_methods3(MagRecs, noave)
    con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)

    con.tables['specimens'].write_magic_file(custom_name=spec_file)
    con.tables['samples'].write_magic_file(custom_name=samp_file)
    con.tables['sites'].write_magic_file(custom_name=site_file)
    con.tables['locations'].write_magic_file(custom_name=loc_file)
    con.tables['measurements'].write_magic_file(custom_name=meas_file)

    # best-effort cleanup of the temporary reformatted file
    try:
        os.remove(tmp_file)
    except (OSError, IOError):
        print("couldn't remove temperary fixed JR6 file %s" % tmp_file)
    return True, meas_file
|
def function[jr6_jr6, parameter[mag_file, dir_path, input_dir_path, meas_file, spec_file, samp_file, site_file, loc_file, specnum, samp_con, location, lat, lon, noave, meth_code, volume, JR, user]]:
constant[
Convert JR6 .jr6 files to MagIC file(s)
Parameters
----------
mag_file : str
input file name
dir_path : str
working directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
meas_file : str
output measurement file name, default "measurements.txt"
spec_file : str
output specimen file name, default "specimens.txt"
samp_file: str
output sample file name, default "samples.txt"
site_file : str
output site file name, default "sites.txt"
loc_file : str
output location file name, default "locations.txt"
specnum : int
number of characters to designate a specimen, default 0
samp_con : str
sample/site naming convention, default '1', see info below
location : str
location name, default "unknown"
lat : float
latitude, default ""
lon : float
longitude, default ""
noave : bool
do not average duplicate measurements, default False (so by default, DO average)
meth_code : str
colon-delimited method codes, default "LP-NO"
volume : float
volume in ccs, default 12
JR : bool
IODP samples were measured on the JOIDES RESOLUTION, default False
user : str
user name, default ""
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, meas_file name written)
Info
--------
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
]
variable[version_num] assign[=] call[name[pmag].get_version, parameter[]]
<ast.Tuple object at 0x7da1b020cdc0> assign[=] call[name[pmag].fix_directories, parameter[name[input_dir_path], name[dir_path]]]
variable[specnum] assign[=] <ast.UnaryOp object at 0x7da1b020dc60>
variable[samp_con] assign[=] call[name[str], parameter[name[samp_con]]]
variable[volume] assign[=] binary_operation[call[name[float], parameter[name[volume]]] * constant[1e-06]]
variable[meas_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[meas_file], name[output_dir_path]]]
variable[spec_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[spec_file], name[output_dir_path]]]
variable[samp_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[samp_file], name[output_dir_path]]]
variable[site_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[site_file], name[output_dir_path]]]
variable[loc_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[loc_file], name[output_dir_path]]]
variable[mag_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[mag_file], name[input_dir_path]]]
if name[JR] begin[:]
if compare[name[meth_code] equal[==] constant[LP-NO]] begin[:]
variable[meth_code] assign[=] constant[]
variable[meth_code] assign[=] binary_operation[name[meth_code] + constant[:FS-C-DRILL-IODP:SP-SS-C:SO-V]]
variable[meth_code] assign[=] call[name[meth_code].strip, parameter[constant[:]]]
variable[samp_con] assign[=] constant[5]
variable[tmp_file] assign[=] binary_operation[binary_operation[call[call[name[mag_file].split, parameter[name[os].extsep]]][constant[0]] + name[os].extsep] + constant[tmp]]
variable[mag_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[mag_file], name[input_dir_path]]]
if call[name[samp_con].startswith, parameter[constant[4]]] begin[:]
if compare[constant[-] <ast.NotIn object at 0x7da2590d7190> name[samp_con]] begin[:]
call[name[print], parameter[constant[option [4] must be in form 4-Z where Z is an integer]]]
return[tuple[[<ast.Constant object at 0x7da1b020c6a0>, <ast.Constant object at 0x7da1b020c700>]]]
variable[pre_data] assign[=] call[name[open], parameter[name[mag_file], constant[r]]]
variable[tmp_data] assign[=] call[name[open], parameter[name[tmp_file], constant[w]]]
if compare[name[samp_con] not_equal[!=] constant[2]] begin[:]
variable[fixed_data] assign[=] call[call[name[pre_data].read, parameter[]].replace, parameter[constant[-], constant[ -]]]
call[name[tmp_data].write, parameter[name[fixed_data]]]
call[name[tmp_data].close, parameter[]]
call[name[pre_data].close, parameter[]]
if <ast.UnaryOp object at 0x7da18f8120b0> begin[:]
variable[column_names] assign[=] list[[<ast.Constant object at 0x7da18f810370>, <ast.Constant object at 0x7da18f813430>, <ast.Constant object at 0x7da18f811fc0>, <ast.Constant object at 0x7da18f813340>, <ast.Constant object at 0x7da18f812980>, <ast.Constant object at 0x7da18f812440>, <ast.Constant object at 0x7da18f811570>, <ast.Constant object at 0x7da18f812860>, <ast.Constant object at 0x7da18f810190>, <ast.Constant object at 0x7da18f810520>, <ast.Constant object at 0x7da18f8133a0>, <ast.Constant object at 0x7da18f813ee0>, <ast.Constant object at 0x7da18f812d10>, <ast.Constant object at 0x7da18f8123e0>, <ast.Constant object at 0x7da18f811e70>, <ast.Constant object at 0x7da18f813040>, <ast.Constant object at 0x7da18f813d60>]]
variable[data] assign[=] call[name[pd].read_csv, parameter[name[tmp_file]]]
if call[name[isinstance], parameter[call[call[name[data]][constant[x]]][constant[0]], name[str]]] begin[:]
variable[column_names] assign[=] list[[<ast.Constant object at 0x7da18f8115d0>, <ast.Constant object at 0x7da18f813580>, <ast.Constant object at 0x7da18f813ac0>, <ast.Constant object at 0x7da18f8116f0>, <ast.Constant object at 0x7da18f810580>, <ast.Constant object at 0x7da18f8114b0>, <ast.Constant object at 0x7da18f811a80>, <ast.Constant object at 0x7da18f810400>, <ast.Constant object at 0x7da18f810a30>, <ast.Constant object at 0x7da18f8109a0>, <ast.Constant object at 0x7da18f8100d0>, <ast.Constant object at 0x7da18f8104f0>, <ast.Constant object at 0x7da18f8111e0>, <ast.Constant object at 0x7da18f811000>, <ast.Constant object at 0x7da18f813b20>, <ast.Constant object at 0x7da18f811630>, <ast.Constant object at 0x7da18f811180>, <ast.Constant object at 0x7da18f8132b0>]]
variable[data] assign[=] call[name[pd].read_csv, parameter[name[tmp_file]]]
if name[JR] begin[:]
call[name[data]][constant[z]] assign[=] <ast.UnaryOp object at 0x7da18f810e20>
variable[cart] assign[=] call[call[name[np].array, parameter[list[[<ast.Subscript object at 0x7da18f812380>, <ast.Subscript object at 0x7da18f813910>, <ast.Subscript object at 0x7da18f810c10>]]]].transpose, parameter[]]
variable[dir_dat] assign[=] call[call[name[pmag].cart2dir, parameter[name[cart]]].transpose, parameter[]]
call[name[data]][constant[dir_dec]] assign[=] call[name[dir_dat]][constant[0]]
call[name[data]][constant[dir_inc]] assign[=] call[name[dir_dat]][constant[1]]
call[name[data]][constant[magn_moment]] assign[=] binary_operation[binary_operation[call[name[dir_dat]][constant[2]] * binary_operation[constant[10.0] ** call[name[data]][constant[expon]]]] * name[volume]]
call[name[data]][constant[magn_volume]] assign[=] binary_operation[call[name[dir_dat]][constant[2]] * binary_operation[constant[10.0] ** call[name[data]][constant[expon]]]]
call[name[data]][constant[dip]] assign[=] <ast.UnaryOp object at 0x7da18f811780>
call[name[data]][constant[specimen]]
<ast.Tuple object at 0x7da18f8112a0> assign[=] tuple[[<ast.List object at 0x7da18f8121d0>, <ast.List object at 0x7da18f812b90>, <ast.List object at 0x7da18f8125f0>, <ast.List object at 0x7da18f812290>, <ast.List object at 0x7da18f812bc0>]]
for taget[tuple[[<ast.Name object at 0x7da18f812770>, <ast.Name object at 0x7da18f8106a0>]]] in starred[call[name[data].iterrows, parameter[]]] begin[:]
<ast.Tuple object at 0x7da18f813700> assign[=] tuple[[<ast.Dict object at 0x7da18f811de0>, <ast.Dict object at 0x7da18f812230>, <ast.Dict object at 0x7da18f8102e0>, <ast.Dict object at 0x7da18f8112d0>, <ast.Dict object at 0x7da18f813cd0>]]
variable[specimen] assign[=] call[name[row]][constant[specimen]]
if compare[name[specnum] not_equal[!=] constant[0]] begin[:]
variable[sample] assign[=] call[name[specimen]][<ast.Slice object at 0x7da18f812590>]
variable[site] assign[=] call[name[pmag].parse_site, parameter[name[sample], name[samp_con], name[Z]]]
if <ast.BoolOp object at 0x7da18f813a30> begin[:]
call[name[SpecRec]][constant[specimen]] assign[=] name[specimen]
call[name[SpecRec]][constant[sample]] assign[=] name[sample]
call[name[SpecRec]][constant[citations]] assign[=] constant[This study]
call[name[SpecRec]][constant[analysts]] assign[=] name[user]
call[name[SpecRec]][constant[volume]] assign[=] name[volume]
call[name[SpecRecs].append, parameter[name[SpecRec]]]
if <ast.BoolOp object at 0x7da18dc98400> begin[:]
call[name[SampRec]][constant[sample]] assign[=] name[sample]
call[name[SampRec]][constant[site]] assign[=] name[site]
call[name[SampRec]][constant[citations]] assign[=] constant[This study]
call[name[SampRec]][constant[analysts]] assign[=] name[user]
call[name[SampRec]][constant[azimuth]] assign[=] call[name[row]][constant[azimuth]]
call[name[SampRec]][constant[dip]] assign[=] call[name[row]][constant[dip]]
call[name[SampRec]][constant[bed_dip_direction]] assign[=] call[name[row]][constant[bed_dip_direction]]
call[name[SampRec]][constant[bed_dip]] assign[=] call[name[row]][constant[bed_dip]]
call[name[SampRec]][constant[method_codes]] assign[=] name[meth_code]
call[name[SampRecs].append, parameter[name[SampRec]]]
if <ast.BoolOp object at 0x7da18dc99210> begin[:]
call[name[SiteRec]][constant[site]] assign[=] name[site]
call[name[SiteRec]][constant[location]] assign[=] name[location]
call[name[SiteRec]][constant[citations]] assign[=] constant[This study]
call[name[SiteRec]][constant[analysts]] assign[=] name[user]
call[name[SiteRec]][constant[lat]] assign[=] name[lat]
call[name[SiteRec]][constant[lon]] assign[=] name[lon]
call[name[SiteRecs].append, parameter[name[SiteRec]]]
if <ast.BoolOp object at 0x7da18dc99c60> begin[:]
call[name[LocRec]][constant[location]] assign[=] name[location]
call[name[LocRec]][constant[citations]] assign[=] constant[This study]
call[name[LocRec]][constant[analysts]] assign[=] name[user]
call[name[LocRec]][constant[lat_n]] assign[=] name[lat]
call[name[LocRec]][constant[lon_e]] assign[=] name[lon]
call[name[LocRec]][constant[lat_s]] assign[=] name[lat]
call[name[LocRec]][constant[lon_w]] assign[=] name[lon]
call[name[LocRecs].append, parameter[name[LocRec]]]
call[name[MeasRec]][constant[citations]] assign[=] constant[This study]
call[name[MeasRec]][constant[analysts]] assign[=] name[user]
call[name[MeasRec]][constant[specimen]] assign[=] name[specimen]
call[name[MeasRec]][constant[software_packages]] assign[=] name[version_num]
call[name[MeasRec]][constant[treat_temp]] assign[=] binary_operation[constant[%8.3e] <ast.Mod object at 0x7da2590d6920> constant[273]]
call[name[MeasRec]][constant[meas_temp]] assign[=] binary_operation[constant[%8.3e] <ast.Mod object at 0x7da2590d6920> constant[273]]
call[name[MeasRec]][constant[quality]] assign[=] constant[g]
call[name[MeasRec]][constant[standard]] assign[=] constant[u]
call[name[MeasRec]][constant[treat_step_num]] assign[=] constant[0]
call[name[MeasRec]][constant[treat_ac_field]] assign[=] constant[0]
if compare[call[name[row]][constant[step]] equal[==] constant[NRM]] begin[:]
variable[meas_type] assign[=] constant[LT-NO]
call[name[MeasRec]][constant[magn_moment]] assign[=] call[name[str], parameter[call[name[row]][constant[magn_moment]]]]
call[name[MeasRec]][constant[magn_volume]] assign[=] call[name[str], parameter[call[name[row]][constant[magn_volume]]]]
call[name[MeasRec]][constant[dir_dec]] assign[=] call[name[str], parameter[call[name[row]][constant[dir_dec]]]]
call[name[MeasRec]][constant[dir_inc]] assign[=] call[name[str], parameter[call[name[row]][constant[dir_inc]]]]
call[name[MeasRec]][constant[method_codes]] assign[=] name[meas_type]
call[name[MagRecs].append, parameter[name[MeasRec]]]
variable[con] assign[=] call[name[cb].Contribution, parameter[name[output_dir_path]]]
call[name[con].add_magic_table_from_data, parameter[]]
call[name[con].add_magic_table_from_data, parameter[]]
call[name[con].add_magic_table_from_data, parameter[]]
call[name[con].add_magic_table_from_data, parameter[]]
variable[MeasOuts] assign[=] call[name[pmag].measurements_methods3, parameter[name[MagRecs], name[noave]]]
call[name[con].add_magic_table_from_data, parameter[]]
call[call[name[con].tables][constant[specimens]].write_magic_file, parameter[]]
call[call[name[con].tables][constant[samples]].write_magic_file, parameter[]]
call[call[name[con].tables][constant[sites]].write_magic_file, parameter[]]
call[call[name[con].tables][constant[locations]].write_magic_file, parameter[]]
call[call[name[con].tables][constant[measurements]].write_magic_file, parameter[]]
<ast.Try object at 0x7da2044c2bc0>
return[tuple[[<ast.Constant object at 0x7da2044c3010>, <ast.Name object at 0x7da2044c3be0>]]]
|
keyword[def] identifier[jr6_jr6] ( identifier[mag_file] , identifier[dir_path] = literal[string] , identifier[input_dir_path] = literal[string] ,
identifier[meas_file] = literal[string] , identifier[spec_file] = literal[string] ,
identifier[samp_file] = literal[string] , identifier[site_file] = literal[string] , identifier[loc_file] = literal[string] ,
identifier[specnum] = literal[int] , identifier[samp_con] = literal[string] , identifier[location] = literal[string] , identifier[lat] = literal[string] , identifier[lon] = literal[string] ,
identifier[noave] = keyword[False] , identifier[meth_code] = literal[string] , identifier[volume] = literal[int] , identifier[JR] = keyword[False] , identifier[user] = literal[string] ):
literal[string]
identifier[version_num] = identifier[pmag] . identifier[get_version] ()
identifier[input_dir_path] , identifier[output_dir_path] = identifier[pmag] . identifier[fix_directories] ( identifier[input_dir_path] , identifier[dir_path] )
identifier[specnum] =- identifier[int] ( identifier[specnum] )
identifier[samp_con] = identifier[str] ( identifier[samp_con] )
identifier[volume] = identifier[float] ( identifier[volume] )* literal[int]
identifier[meas_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[meas_file] , identifier[output_dir_path] )
identifier[spec_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[spec_file] , identifier[output_dir_path] )
identifier[samp_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[samp_file] , identifier[output_dir_path] )
identifier[site_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[site_file] , identifier[output_dir_path] )
identifier[loc_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[loc_file] , identifier[output_dir_path] )
identifier[mag_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[mag_file] , identifier[input_dir_path] )
keyword[if] identifier[JR] :
keyword[if] identifier[meth_code] == literal[string] :
identifier[meth_code] = literal[string]
identifier[meth_code] = identifier[meth_code] + literal[string]
identifier[meth_code] = identifier[meth_code] . identifier[strip] ( literal[string] )
identifier[samp_con] = literal[string]
identifier[tmp_file] = identifier[mag_file] . identifier[split] ( identifier[os] . identifier[extsep] )[ literal[int] ]+ identifier[os] . identifier[extsep] + literal[string]
identifier[mag_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[mag_file] , identifier[input_dir_path] )
keyword[if] identifier[samp_con] . identifier[startswith] ( literal[string] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[samp_con] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
keyword[else] :
identifier[Z] = identifier[samp_con] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[samp_con] = literal[string]
keyword[elif] identifier[samp_con] . identifier[startswith] ( literal[string] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[samp_con] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
keyword[else] :
identifier[Z] = identifier[samp_con] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[samp_con] = literal[string]
keyword[else] :
identifier[Z] = literal[int]
identifier[pre_data] = identifier[open] ( identifier[mag_file] , literal[string] )
identifier[tmp_data] = identifier[open] ( identifier[tmp_file] , literal[string] )
keyword[if] identifier[samp_con] != literal[string] :
identifier[fixed_data] = identifier[pre_data] . identifier[read] (). identifier[replace] ( literal[string] , literal[string] )
keyword[else] :
identifier[fixed_data] = literal[string]
keyword[for] identifier[line] keyword[in] identifier[pre_data] . identifier[readlines] ():
identifier[entries] = identifier[line] . identifier[split] ()
keyword[if] identifier[len] ( identifier[entries] )< literal[int] :
keyword[continue]
identifier[fixed_line] = identifier[entries] [ literal[int] ]+ literal[string] + identifier[reduce] (
keyword[lambda] identifier[x] , identifier[y] : identifier[x] + literal[string] + identifier[y] ,[ identifier[x] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[x] keyword[in] identifier[entries] [ literal[int] :]])
identifier[fixed_data] += identifier[fixed_line] + identifier[os] . identifier[linesep]
identifier[tmp_data] . identifier[write] ( identifier[fixed_data] )
identifier[tmp_data] . identifier[close] ()
identifier[pre_data] . identifier[close] ()
keyword[if] keyword[not] identifier[JR] :
identifier[column_names] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[else] :
identifier[column_names] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[data] = identifier[pd] . identifier[read_csv] ( identifier[tmp_file] , identifier[delim_whitespace] = keyword[True] ,
identifier[names] = identifier[column_names] , identifier[index_col] = keyword[False] )
keyword[if] identifier[isinstance] ( identifier[data] [ literal[string] ][ literal[int] ], identifier[str] ):
identifier[column_names] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[data] = identifier[pd] . identifier[read_csv] ( identifier[tmp_file] , identifier[delim_whitespace] = keyword[True] ,
identifier[names] = identifier[column_names] , identifier[index_col] = keyword[False] )
keyword[if] identifier[JR] :
identifier[data] [ literal[string] ]=- identifier[data] [ literal[string] ]
identifier[cart] = identifier[np] . identifier[array] ([ identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[data] [ literal[string] ]]). identifier[transpose] ()
identifier[dir_dat] = identifier[pmag] . identifier[cart2dir] ( identifier[cart] ). identifier[transpose] ()
identifier[data] [ literal[string] ]= identifier[dir_dat] [ literal[int] ]
identifier[data] [ literal[string] ]= identifier[dir_dat] [ literal[int] ]
identifier[data] [ literal[string] ]= identifier[dir_dat] [ literal[int] ]*( literal[int] ** identifier[data] [ literal[string] ])* identifier[volume]
identifier[data] [ literal[string] ]= identifier[dir_dat] [ literal[int] ]*( literal[int] ** identifier[data] [ literal[string] ])
identifier[data] [ literal[string] ]=- identifier[data] [ literal[string] ]
identifier[data] [ literal[string] ]
identifier[MagRecs] , identifier[SpecRecs] , identifier[SampRecs] , identifier[SiteRecs] , identifier[LocRecs] =[],[],[],[],[]
keyword[for] identifier[rowNum] , identifier[row] keyword[in] identifier[data] . identifier[iterrows] ():
identifier[MeasRec] , identifier[SpecRec] , identifier[SampRec] , identifier[SiteRec] , identifier[LocRec] ={},{},{},{},{}
identifier[specimen] = identifier[row] [ literal[string] ]
keyword[if] identifier[specnum] != literal[int] :
identifier[sample] = identifier[specimen] [: identifier[specnum] ]
keyword[else] :
identifier[sample] = identifier[specimen]
identifier[site] = identifier[pmag] . identifier[parse_site] ( identifier[sample] , identifier[samp_con] , identifier[Z] )
keyword[if] identifier[specimen] != literal[string] keyword[and] identifier[specimen] keyword[not] keyword[in] [ identifier[x] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[list] ( identifier[x] . identifier[keys] ()) keyword[else] literal[string] keyword[for] identifier[x] keyword[in] identifier[SpecRecs] ]:
identifier[SpecRec] [ literal[string] ]= identifier[specimen]
identifier[SpecRec] [ literal[string] ]= identifier[sample]
identifier[SpecRec] [ literal[string] ]= literal[string]
identifier[SpecRec] [ literal[string] ]= identifier[user]
identifier[SpecRec] [ literal[string] ]= identifier[volume]
identifier[SpecRecs] . identifier[append] ( identifier[SpecRec] )
keyword[if] identifier[sample] != literal[string] keyword[and] identifier[sample] keyword[not] keyword[in] [ identifier[x] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[list] ( identifier[x] . identifier[keys] ()) keyword[else] literal[string] keyword[for] identifier[x] keyword[in] identifier[SampRecs] ]:
identifier[SampRec] [ literal[string] ]= identifier[sample]
identifier[SampRec] [ literal[string] ]= identifier[site]
identifier[SampRec] [ literal[string] ]= literal[string]
identifier[SampRec] [ literal[string] ]= identifier[user]
identifier[SampRec] [ literal[string] ]= identifier[row] [ literal[string] ]
identifier[SampRec] [ literal[string] ]= identifier[row] [ literal[string] ]
identifier[SampRec] [ literal[string] ]= identifier[row] [ literal[string] ]
identifier[SampRec] [ literal[string] ]= identifier[row] [ literal[string] ]
identifier[SampRec] [ literal[string] ]= identifier[meth_code]
identifier[SampRecs] . identifier[append] ( identifier[SampRec] )
keyword[if] identifier[site] != literal[string] keyword[and] identifier[site] keyword[not] keyword[in] [ identifier[x] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[list] ( identifier[x] . identifier[keys] ()) keyword[else] literal[string] keyword[for] identifier[x] keyword[in] identifier[SiteRecs] ]:
identifier[SiteRec] [ literal[string] ]= identifier[site]
identifier[SiteRec] [ literal[string] ]= identifier[location]
identifier[SiteRec] [ literal[string] ]= literal[string]
identifier[SiteRec] [ literal[string] ]= identifier[user]
identifier[SiteRec] [ literal[string] ]= identifier[lat]
identifier[SiteRec] [ literal[string] ]= identifier[lon]
identifier[SiteRecs] . identifier[append] ( identifier[SiteRec] )
keyword[if] identifier[location] != literal[string] keyword[and] identifier[location] keyword[not] keyword[in] [ identifier[x] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[list] ( identifier[x] . identifier[keys] ()) keyword[else] literal[string] keyword[for] identifier[x] keyword[in] identifier[LocRecs] ]:
identifier[LocRec] [ literal[string] ]= identifier[location]
identifier[LocRec] [ literal[string] ]= literal[string]
identifier[LocRec] [ literal[string] ]= identifier[user]
identifier[LocRec] [ literal[string] ]= identifier[lat]
identifier[LocRec] [ literal[string] ]= identifier[lon]
identifier[LocRec] [ literal[string] ]= identifier[lat]
identifier[LocRec] [ literal[string] ]= identifier[lon]
identifier[LocRecs] . identifier[append] ( identifier[LocRec] )
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[MeasRec] [ literal[string] ]= identifier[user]
identifier[MeasRec] [ literal[string] ]= identifier[specimen]
identifier[MeasRec] [ literal[string] ]= identifier[version_num]
identifier[MeasRec] [ literal[string] ]= literal[string] %( literal[int] )
identifier[MeasRec] [ literal[string] ]= literal[string] %( literal[int] )
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[MeasRec] [ literal[string] ]= literal[string]
identifier[MeasRec] [ literal[string] ]= literal[int]
identifier[MeasRec] [ literal[string] ]= literal[string]
keyword[if] identifier[row] [ literal[string] ]== literal[string] :
identifier[meas_type] = literal[string]
keyword[elif] literal[string] keyword[in] identifier[row] keyword[and] identifier[row] [ literal[string] ]== literal[string] :
identifier[meas_type] = literal[string]
identifier[treat] = identifier[float] ( identifier[row] [ literal[string] ])
identifier[MeasRec] [ literal[string] ]= literal[string] %( identifier[treat] + literal[int] )
keyword[elif] identifier[row] [ literal[string] ][ literal[int] : literal[int] ]== literal[string] :
identifier[meas_type] = literal[string]
identifier[treat] = identifier[float] ( identifier[row] [ literal[string] ][ literal[int] :])
identifier[MeasRec] [ literal[string] ]= literal[string] %(
identifier[treat] * literal[int] )
keyword[elif] identifier[row] [ literal[string] ][ literal[int] ]== literal[string] :
identifier[meas_type] = literal[string]
identifier[treat] = identifier[float] ( identifier[row] [ literal[string] ][ literal[int] :])
identifier[MeasRec] [ literal[string] ]= literal[string] %(
identifier[treat] * literal[int] )
keyword[elif] identifier[row] [ literal[string] ][ literal[int] ]== literal[string] :
identifier[meas_type] = literal[string]
identifier[treat] = identifier[float] ( identifier[row] [ literal[string] ][ literal[int] :])
identifier[MeasRec] [ literal[string] ]= literal[string] %( identifier[treat] + literal[int] )
keyword[elif] identifier[row] [ literal[string] ][ literal[int] ]== literal[string] :
identifier[meas_type] = literal[string]
identifier[treat] = identifier[float] ( identifier[row] [ literal[string] ][ literal[int] :])
identifier[MeasRec] [ literal[string] ]= literal[string] %( identifier[treat] + literal[int] )
keyword[else] :
identifier[print] ( literal[string] , identifier[row] [ literal[string] ])
keyword[return] keyword[False] , literal[string]
identifier[MeasRec] [ literal[string] ]= identifier[str] ( identifier[row] [ literal[string] ])
identifier[MeasRec] [ literal[string] ]= identifier[str] ( identifier[row] [ literal[string] ])
identifier[MeasRec] [ literal[string] ]= identifier[str] ( identifier[row] [ literal[string] ])
identifier[MeasRec] [ literal[string] ]= identifier[str] ( identifier[row] [ literal[string] ])
identifier[MeasRec] [ literal[string] ]= identifier[meas_type]
identifier[MagRecs] . identifier[append] ( identifier[MeasRec] )
identifier[con] = identifier[cb] . identifier[Contribution] ( identifier[output_dir_path] , identifier[read_tables] =[])
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[SpecRecs] )
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[SampRecs] )
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[SiteRecs] )
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[LocRecs] )
identifier[MeasOuts] = identifier[pmag] . identifier[measurements_methods3] ( identifier[MagRecs] , identifier[noave] )
identifier[con] . identifier[add_magic_table_from_data] ( identifier[dtype] = literal[string] , identifier[data] = identifier[MeasOuts] )
identifier[con] . identifier[tables] [ literal[string] ]. identifier[write_magic_file] ( identifier[custom_name] = identifier[spec_file] )
identifier[con] . identifier[tables] [ literal[string] ]. identifier[write_magic_file] ( identifier[custom_name] = identifier[samp_file] )
identifier[con] . identifier[tables] [ literal[string] ]. identifier[write_magic_file] ( identifier[custom_name] = identifier[site_file] )
identifier[con] . identifier[tables] [ literal[string] ]. identifier[write_magic_file] ( identifier[custom_name] = identifier[loc_file] )
identifier[con] . identifier[tables] [ literal[string] ]. identifier[write_magic_file] ( identifier[custom_name] = identifier[meas_file] )
keyword[try] :
identifier[os] . identifier[remove] ( identifier[tmp_file] )
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[e] :
identifier[print] ( literal[string] % identifier[tmp_file] )
keyword[return] keyword[True] , identifier[meas_file]
|
def jr6_jr6(mag_file, dir_path='.', input_dir_path='', meas_file='measurements.txt', spec_file='specimens.txt', samp_file='samples.txt', site_file='sites.txt', loc_file='locations.txt', specnum=1, samp_con='1', location='unknown', lat='', lon='', noave=False, meth_code='LP-NO', volume=12, JR=False, user=''):
    """
    Convert JR6 .jr6 files to MagIC file(s)
    Parameters
    ----------
    mag_file : str
        input file name
    dir_path : str
        working directory, default "."
    input_dir_path : str
        input file directory IF different from dir_path, default ""
    meas_file : str
        output measurement file name, default "measurements.txt"
    spec_file : str
        output specimen file name, default "specimens.txt"
    samp_file: str
        output sample file name, default "samples.txt"
    site_file : str
        output site file name, default "sites.txt"
    loc_file : str
        output location file name, default "locations.txt"
    specnum : int
        number of characters to designate a specimen, default 1
    samp_con : str
        sample/site naming convention, default '1', see info below
    location : str
        location name, default "unknown"
    lat : float
        latitude, default ""
    lon : float
        longitude, default ""
    noave : bool
        do not average duplicate measurements, default False (so by default, DO average)
    meth_code : str
        colon-delimited method codes, default "LP-NO"
    volume : float
        volume in ccs, default 12
    JR : bool
        IODP samples were measured on the JOIDES RESOLUTION, default False
    user : str
        user name, default ""
    Returns
    ---------
    Tuple : (True or False indicating if conversion was sucessful, meas_file name written)
    Info
    --------
    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY:  XXXX is site designation with Z characters with sample name XXXXYYYY
    """
    version_num = pmag.get_version()
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
    specnum = -int(specnum)
    samp_con = str(samp_con)
    volume = float(volume) * 1e-06  # convert cc to m^3
    # resolve all output/input file names against their directories
    meas_file = pmag.resolve_file_name(meas_file, output_dir_path)
    spec_file = pmag.resolve_file_name(spec_file, output_dir_path)
    samp_file = pmag.resolve_file_name(samp_file, output_dir_path)
    site_file = pmag.resolve_file_name(site_file, output_dir_path)
    loc_file = pmag.resolve_file_name(loc_file, output_dir_path)
    mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
    if JR:
        # JOIDES RESOLUTION samples get IODP drill-core method codes and
        # use the "site name same as sample" convention
        if meth_code == 'LP-NO':
            meth_code = ''
        meth_code = meth_code + ':FS-C-DRILL-IODP:SP-SS-C:SO-V'
        meth_code = meth_code.strip(':')
        samp_con = '5'
    # format variables
    tmp_file = mag_file.split(os.extsep)[0] + os.extsep + 'tmp'
    mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
    # conventions '4' and '7' carry a "-Z" suffix giving the number of
    # characters used by the sample/site designation
    if samp_con.startswith('4'):
        if '-' not in samp_con:
            print('option [4] must be in form 4-Z where Z is an integer')
            return (False, 'naming convention option [4] must be in form 4-Z where Z is an integer')
        else:
            Z = samp_con.split('-')[1]
            samp_con = '4'
    elif samp_con.startswith('7'):
        if '-' not in samp_con:
            print('option [7] must be in form 7-Z where Z is an integer')
            return (False, 'naming convention option [7] must be in form 7-Z where Z is an integer')
        else:
            Z = samp_con.split('-')[1]
            samp_con = '7'
    else:
        Z = 1
    # parse data
    # fix .jr6 file so that there are spaces between all the columns.
    pre_data = open(mag_file, 'r')
    tmp_data = open(tmp_file, 'w')
    if samp_con != '2':
        fixed_data = pre_data.read().replace('-', ' -')
    else:
        # convention '2' uses '-' inside sample names, so only split numeric
        # fields (everything after the first column) on the minus sign
        fixed_data = ''
        for line in pre_data.readlines():
            entries = line.split()
            if len(entries) < 2:
                continue
            fixed_line = entries[0] + ' ' + reduce(lambda x, y: x + ' ' + y, [x.replace('-', ' -') for x in entries[1:]])
            fixed_data += fixed_line + os.linesep
    tmp_data.write(fixed_data)
    tmp_data.close()
    pre_data.close()
    if not JR:
        column_names = ['specimen', 'step', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd']
    else:  # measured on the Joides Resolution JR6
        column_names = ['specimen', 'step', 'negz', 'y', 'x', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd']
    data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False)
    if isinstance(data['x'][0], str):
        # some files carry an extra step-unit column; re-read with it included
        column_names = ['specimen', 'step', 'step_unit', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd']
        data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False)
    if JR:
        data['z'] = -data['negz']
    cart = np.array([data['x'], data['y'], data['z']]).transpose()
    dir_dat = pmag.cart2dir(cart).transpose()
    data['dir_dec'] = dir_dat[0]
    data['dir_inc'] = dir_dat[1]
    # the data are in A/m - this converts to Am^2
    data['magn_moment'] = dir_dat[2] * 10.0 ** data['expon'] * volume
    data['magn_volume'] = dir_dat[2] * 10.0 ** data['expon']  # A/m - data in A/m
    data['dip'] = -data['dip']
    # put data into magic tables
    MagRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], []
    for rowNum, row in data.iterrows():
        MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}
        specimen = row['specimen']
        if specnum != 0:
            sample = specimen[:specnum]
        else:
            sample = specimen
        site = pmag.parse_site(sample, samp_con, Z)
        # only create each specimen/sample/site/location record once
        if specimen != '' and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else '' for x in SpecRecs]:
            SpecRec['specimen'] = specimen
            SpecRec['sample'] = sample
            SpecRec['citations'] = 'This study'
            SpecRec['analysts'] = user
            SpecRec['volume'] = volume
            SpecRecs.append(SpecRec)
        if sample != '' and sample not in [x['sample'] if 'sample' in list(x.keys()) else '' for x in SampRecs]:
            SampRec['sample'] = sample
            SampRec['site'] = site
            SampRec['citations'] = 'This study'
            SampRec['analysts'] = user
            SampRec['azimuth'] = row['azimuth']
            SampRec['dip'] = row['dip']
            SampRec['bed_dip_direction'] = row['bed_dip_direction']
            SampRec['bed_dip'] = row['bed_dip']
            SampRec['method_codes'] = meth_code
            SampRecs.append(SampRec)
        if site != '' and site not in [x['site'] if 'site' in list(x.keys()) else '' for x in SiteRecs]:
            SiteRec['site'] = site
            SiteRec['location'] = location
            SiteRec['citations'] = 'This study'
            SiteRec['analysts'] = user
            SiteRec['lat'] = lat
            SiteRec['lon'] = lon
            SiteRecs.append(SiteRec)
        if location != '' and location not in [x['location'] if 'location' in list(x.keys()) else '' for x in LocRecs]:
            LocRec['location'] = location
            LocRec['citations'] = 'This study'
            LocRec['analysts'] = user
            LocRec['lat_n'] = lat
            LocRec['lon_e'] = lon
            LocRec['lat_s'] = lat
            LocRec['lon_w'] = lon
            LocRecs.append(LocRec)
        MeasRec['citations'] = 'This study'
        MeasRec['analysts'] = user
        MeasRec['specimen'] = specimen
        MeasRec['software_packages'] = version_num
        MeasRec['treat_temp'] = '%8.3e' % 273  # room temp in kelvin
        MeasRec['meas_temp'] = '%8.3e' % 273  # room temp in kelvin
        MeasRec['quality'] = 'g'
        MeasRec['standard'] = 'u'
        MeasRec['treat_step_num'] = 0
        MeasRec['treat_ac_field'] = '0'
        # decode the treatment step label into a MagIC method code + level
        if row['step'] == 'NRM':
            meas_type = 'LT-NO'
        elif 'step_unit' in row and row['step_unit'] == 'C':
            meas_type = 'LT-T-Z'
            treat = float(row['step'])
            MeasRec['treat_temp'] = '%8.3e' % (treat + 273.0)  # temp in kelvin
        elif row['step'][0:2] == 'AD':
            meas_type = 'LT-AF-Z'
            treat = float(row['step'][2:])
            MeasRec['treat_ac_field'] = '%8.3e' % (treat * 0.001)  # convert from mT to tesla
        elif row['step'][0] == 'A':
            meas_type = 'LT-AF-Z'
            treat = float(row['step'][1:])
            MeasRec['treat_ac_field'] = '%8.3e' % (treat * 0.001)  # convert from mT to tesla
        # BUG FIX: this was row['step'][0] == 'TD', which compares a single
        # character against a two-character string and is always False, so
        # 'TD###' steps fell through to the 'T' branch below and crashed on
        # float('D###').  Compare the first two characters, like 'AD' above.
        elif row['step'][0:2] == 'TD':
            meas_type = 'LT-T-Z'
            treat = float(row['step'][2:])
            MeasRec['treat_temp'] = '%8.3e' % (treat + 273.0)  # temp in kelvin
        elif row['step'][0] == 'T':
            meas_type = 'LT-T-Z'
            treat = float(row['step'][1:])
            MeasRec['treat_temp'] = '%8.3e' % (treat + 273.0)  # temp in kelvin
        else:  # need to add IRM, and ARM options
            print('measurement type unknown', row['step'])
            return (False, 'measurement type unknown')
        MeasRec['magn_moment'] = str(row['magn_moment'])
        MeasRec['magn_volume'] = str(row['magn_volume'])
        MeasRec['dir_dec'] = str(row['dir_dec'])
        MeasRec['dir_inc'] = str(row['dir_inc'])
        MeasRec['method_codes'] = meas_type
        MagRecs.append(MeasRec)
    # assemble the MagIC contribution and write all tables out
    con = cb.Contribution(output_dir_path, read_tables=[])
    con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
    con.add_magic_table_from_data(dtype='samples', data=SampRecs)
    con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
    con.add_magic_table_from_data(dtype='locations', data=LocRecs)
    MeasOuts = pmag.measurements_methods3(MagRecs, noave)
    con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
    con.tables['specimens'].write_magic_file(custom_name=spec_file)
    con.tables['samples'].write_magic_file(custom_name=samp_file)
    con.tables['sites'].write_magic_file(custom_name=site_file)
    con.tables['locations'].write_magic_file(custom_name=loc_file)
    con.tables['measurements'].write_magic_file(custom_name=meas_file)
    try:
        os.remove(tmp_file)
    except (OSError, IOError):
        print("couldn't remove temporary fixed JR6 file %s" % tmp_file)
    return (True, meas_file)
|
def get_user_events(self, id, **data):
    """
    GET /users/:id/events/
    Returns a :ref:`paginated <pagination>` response of :format:`events <event>`, under the key ``events``, of all events the user has access to
    """
    endpoint = "/users/{0}/events/".format(id)
    return self.get(endpoint, data=data)
|
def function[get_user_events, parameter[self, id]]:
constant[
GET /users/:id/events/
Returns a :ref:`paginated <pagination>` response of :format:`events <event>`, under the key ``events``, of all events the user has access to
]
return[call[name[self].get, parameter[call[constant[/users/{0}/events/].format, parameter[name[id]]]]]]
|
keyword[def] identifier[get_user_events] ( identifier[self] , identifier[id] ,** identifier[data] ):
literal[string]
keyword[return] identifier[self] . identifier[get] ( literal[string] . identifier[format] ( identifier[id] ), identifier[data] = identifier[data] )
|
def get_user_events(self, id, **data):
"""
GET /users/:id/events/
Returns a :ref:`paginated <pagination>` response of :format:`events <event>`, under the key ``events``, of all events the user has access to
"""
return self.get('/users/{0}/events/'.format(id), data=data)
|
def reindex(self, new_index=None, index_conf=None):
    '''Rebuilt the current index
    This function could be useful in the case you want to change some index settings/mappings
    and you don't want to loose all the entries belonging to that index.
    This function is built in such a way that you can continue to use the old index name,
    this is achieved using index aliases.
    The old index will be cloned into a new one with the given `index_conf`.
    If we are working on an alias, it is redirected to the new index.
    Otherwise a brand new alias with the old index name is created in such a way that
    points to the newly create index.
    Keep in mind that even if you can continue to use the same index name,
    the old index will be deleted.
    :param new_index: Name for the new index. If None, a "_v<N>" version
        suffix is appended (or incremented) automatically.
    :param index_conf: Configuration to be used in the new index creation.
        This param will be passed directly to :py:func:`DB.create_index`
    '''
    # If index_name is actually an alias, resolve the real index behind it.
    alias = self.index_name if self.es.indices.exists_alias(name=self.index_name) else None
    if alias:
        original_index = self.es.indices.get_alias(self.index_name).popitem()[0]
    else:
        original_index = self.index_name
    if new_index is None:
        # Bump the trailing version suffix: "name_v<N>" -> "name_v<N+1>".
        # BUG FIX: the pattern was r"^.*_v(\d)*$"; with a greedy ".*" the
        # repeated single-digit group captured only the LAST digit, so
        # "idx_v19" became "idx_v110" instead of "idx_v20", and a bare
        # "name_v" matched with group(1) == None, crashing int(None).
        # "(\d+)" captures the whole version number.
        mtc = re.match(r"^.*_v(\d+)$", original_index)
        if mtc:
            new_index = original_index[:mtc.start(1)] + str(int(mtc.group(1)) + 1)
        else:
            new_index = original_index + '_v1'
    log.debug("Reindexing {{ alias: '{}', original_index: '{}', new_index: '{}'}}".format(alias, original_index, new_index))
    self.clone_index(new_index, index_conf=index_conf)
    if alias:
        # Atomically repoint the alias so readers never see a gap.
        log.debug("Moving alias from ['{0}' -> '{1}'] to ['{0}' -> '{2}']".format(alias, original_index, new_index))
        self.es.indices.update_aliases(body={
            "actions" : [
                { "remove" : { "alias": alias, "index" : original_index} },
                { "add" : { "alias": alias, "index" : new_index } }
            ]})
    log.debug("Deleting old index: '{}'".format(original_index))
    self.es.indices.delete(original_index)
    if not alias:
        # The old index name stays usable: it becomes an alias to the clone.
        # (The alias can only be created after the old index is deleted,
        # since an alias cannot share a name with an existing index.)
        log.debug("Creating new alias: ['{0}' -> '{1}']".format(original_index, new_index))
        self.es.indices.update_aliases(body={
            "actions" : [
                { "add" : { "alias": original_index, "index" : new_index } }
            ]})
|
def function[reindex, parameter[self, new_index, index_conf]]:
constant[Rebuilt the current index
This function could be useful in the case you want to change some index settings/mappings
and you don't want to loose all the entries belonging to that index.
This function is built in such a way that you can continue to use the old index name,
this is achieved using index aliases.
The old index will be cloned into a new one with the given `index_conf`.
If we are working on an alias, it is redirected to the new index.
Otherwise a brand new alias with the old index name is created in such a way that
points to the newly create index.
Keep in mind that even if you can continue to use the same index name,
the old index will be deleted.
:param index_conf: Configuration to be used in the new index creation.
This param will be passed directly to :py:func:`DB.create_index`
]
variable[alias] assign[=] <ast.IfExp object at 0x7da1b26657e0>
if name[alias] begin[:]
variable[original_index] assign[=] call[call[call[name[self].es.indices.get_alias, parameter[name[self].index_name]].popitem, parameter[]]][constant[0]]
if compare[name[new_index] is constant[None]] begin[:]
variable[mtc] assign[=] call[name[re].match, parameter[constant[^.*_v(\d)*$], name[original_index]]]
if name[mtc] begin[:]
variable[new_index] assign[=] binary_operation[call[name[original_index]][<ast.Slice object at 0x7da1b2667bb0>] + call[name[str], parameter[binary_operation[call[name[int], parameter[call[name[mtc].group, parameter[constant[1]]]]] + constant[1]]]]]
call[name[log].debug, parameter[call[constant[Reindexing {{ alias: '{}', original_index: '{}', new_index: '{}'}}].format, parameter[name[alias], name[original_index], name[new_index]]]]]
call[name[self].clone_index, parameter[name[new_index]]]
if name[alias] begin[:]
call[name[log].debug, parameter[call[constant[Moving alias from ['{0}' -> '{1}'] to ['{0}' -> '{2}']].format, parameter[name[alias], name[original_index], name[new_index]]]]]
call[name[self].es.indices.update_aliases, parameter[]]
call[name[log].debug, parameter[call[constant[Deleting old index: '{}'].format, parameter[name[original_index]]]]]
call[name[self].es.indices.delete, parameter[name[original_index]]]
if <ast.UnaryOp object at 0x7da1b2631090> begin[:]
call[name[log].debug, parameter[call[constant[Crating new alias: ['{0}' -> '{1}']].format, parameter[name[original_index], name[new_index]]]]]
call[name[self].es.indices.update_aliases, parameter[]]
|
keyword[def] identifier[reindex] ( identifier[self] , identifier[new_index] = keyword[None] , identifier[index_conf] = keyword[None] ):
literal[string]
identifier[alias] = identifier[self] . identifier[index_name] keyword[if] identifier[self] . identifier[es] . identifier[indices] . identifier[exists_alias] ( identifier[name] = identifier[self] . identifier[index_name] ) keyword[else] keyword[None]
keyword[if] identifier[alias] :
identifier[original_index] = identifier[self] . identifier[es] . identifier[indices] . identifier[get_alias] ( identifier[self] . identifier[index_name] ). identifier[popitem] ()[ literal[int] ]
keyword[else] :
identifier[original_index] = identifier[self] . identifier[index_name]
keyword[if] identifier[new_index] keyword[is] keyword[None] :
identifier[mtc] = identifier[re] . identifier[match] ( literal[string] , identifier[original_index] )
keyword[if] identifier[mtc] :
identifier[new_index] = identifier[original_index] [: identifier[mtc] . identifier[start] ( literal[int] )]+ identifier[str] ( identifier[int] ( identifier[mtc] . identifier[group] ( literal[int] ))+ literal[int] )
keyword[else] :
identifier[new_index] = identifier[original_index] + literal[string]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[alias] , identifier[original_index] , identifier[new_index] ))
identifier[self] . identifier[clone_index] ( identifier[new_index] , identifier[index_conf] = identifier[index_conf] )
keyword[if] identifier[alias] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[alias] , identifier[original_index] , identifier[new_index] ))
identifier[self] . identifier[es] . identifier[indices] . identifier[update_aliases] ( identifier[body] ={
literal[string] :[
{ literal[string] :{ literal[string] : identifier[alias] , literal[string] : identifier[original_index] }},
{ literal[string] :{ literal[string] : identifier[alias] , literal[string] : identifier[new_index] }}
]})
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[original_index] ))
identifier[self] . identifier[es] . identifier[indices] . identifier[delete] ( identifier[original_index] )
keyword[if] keyword[not] identifier[alias] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[original_index] , identifier[new_index] ))
identifier[self] . identifier[es] . identifier[indices] . identifier[update_aliases] ( identifier[body] ={
literal[string] :[
{ literal[string] :{ literal[string] : identifier[original_index] , literal[string] : identifier[new_index] }}
]})
|
def reindex(self, new_index=None, index_conf=None):
"""Rebuilt the current index
This function could be useful in the case you want to change some index settings/mappings
and you don't want to loose all the entries belonging to that index.
This function is built in such a way that you can continue to use the old index name,
this is achieved using index aliases.
The old index will be cloned into a new one with the given `index_conf`.
If we are working on an alias, it is redirected to the new index.
Otherwise a brand new alias with the old index name is created in such a way that
points to the newly create index.
Keep in mind that even if you can continue to use the same index name,
the old index will be deleted.
:param index_conf: Configuration to be used in the new index creation.
This param will be passed directly to :py:func:`DB.create_index`
"""
alias = self.index_name if self.es.indices.exists_alias(name=self.index_name) else None
if alias:
original_index = self.es.indices.get_alias(self.index_name).popitem()[0] # depends on [control=['if'], data=[]]
else:
original_index = self.index_name
if new_index is None:
mtc = re.match('^.*_v(\\d)*$', original_index)
if mtc:
new_index = original_index[:mtc.start(1)] + str(int(mtc.group(1)) + 1) # depends on [control=['if'], data=[]]
else:
new_index = original_index + '_v1' # depends on [control=['if'], data=['new_index']]
log.debug("Reindexing {{ alias: '{}', original_index: '{}', new_index: '{}'}}".format(alias, original_index, new_index))
self.clone_index(new_index, index_conf=index_conf)
if alias:
log.debug("Moving alias from ['{0}' -> '{1}'] to ['{0}' -> '{2}']".format(alias, original_index, new_index))
self.es.indices.update_aliases(body={'actions': [{'remove': {'alias': alias, 'index': original_index}}, {'add': {'alias': alias, 'index': new_index}}]}) # depends on [control=['if'], data=[]]
log.debug("Deleting old index: '{}'".format(original_index))
self.es.indices.delete(original_index)
if not alias:
log.debug("Crating new alias: ['{0}' -> '{1}']".format(original_index, new_index))
self.es.indices.update_aliases(body={'actions': [{'add': {'alias': original_index, 'index': new_index}}]}) # depends on [control=['if'], data=[]]
|
def floor_nearest(x, dx=1):
    """
    floor a number to within a given rounding accuracy
    """
    # Round the result so floating-point noise from the division/multiplication
    # is trimmed to the significant digits of the step size.
    digits = get_sig_digits(dx)
    multiple = math.floor(float(x) / dx) * dx
    return round(multiple, digits)
|
def function[floor_nearest, parameter[x, dx]]:
constant[
floor a number to within a given rounding accuracy
]
variable[precision] assign[=] call[name[get_sig_digits], parameter[name[dx]]]
return[call[name[round], parameter[binary_operation[call[name[math].floor, parameter[binary_operation[call[name[float], parameter[name[x]]] / name[dx]]]] * name[dx]], name[precision]]]]
|
keyword[def] identifier[floor_nearest] ( identifier[x] , identifier[dx] = literal[int] ):
literal[string]
identifier[precision] = identifier[get_sig_digits] ( identifier[dx] )
keyword[return] identifier[round] ( identifier[math] . identifier[floor] ( identifier[float] ( identifier[x] )/ identifier[dx] )* identifier[dx] , identifier[precision] )
|
def floor_nearest(x, dx=1):
"""
floor a number to within a given rounding accuracy
"""
precision = get_sig_digits(dx)
return round(math.floor(float(x) / dx) * dx, precision)
|
def request_write(self, request: TBWriteRequest)->None:
    "Queues up an asynchronous write request to Tensorboard."
    # Once a stop has been requested, silently drop further writes.
    if not self.stop_request.isSet():
        self.queue.put(request)
|
def function[request_write, parameter[self, request]]:
constant[Queues up an asynchronous write request to Tensorboard.]
if call[name[self].stop_request.isSet, parameter[]] begin[:]
return[None]
call[name[self].queue.put, parameter[name[request]]]
|
keyword[def] identifier[request_write] ( identifier[self] , identifier[request] : identifier[TBWriteRequest] )-> keyword[None] :
literal[string]
keyword[if] identifier[self] . identifier[stop_request] . identifier[isSet] (): keyword[return]
identifier[self] . identifier[queue] . identifier[put] ( identifier[request] )
|
def request_write(self, request: TBWriteRequest) -> None:
"""Queues up an asynchronous write request to Tensorboard."""
if self.stop_request.isSet():
return # depends on [control=['if'], data=[]]
self.queue.put(request)
|
def __base_state(self, containers):
    '''
    Convert blockade ID and container information into
    a state dictionary object.
    '''
    # Dict literal keeps the persisted state schema visible in one place.
    return {
        'blockade_id': self._blockade_id,
        'containers': containers,
        'version': self._state_version,
    }
|
def function[__base_state, parameter[self, containers]]:
constant[
Convert blockade ID and container information into
a state dictionary object.
]
return[call[name[dict], parameter[]]]
|
keyword[def] identifier[__base_state] ( identifier[self] , identifier[containers] ):
literal[string]
keyword[return] identifier[dict] ( identifier[blockade_id] = identifier[self] . identifier[_blockade_id] ,
identifier[containers] = identifier[containers] ,
identifier[version] = identifier[self] . identifier[_state_version] )
|
def __base_state(self, containers):
"""
Convert blockade ID and container information into
a state dictionary object.
"""
return dict(blockade_id=self._blockade_id, containers=containers, version=self._state_version)
|
def step(self, actions, step_mul=None):
    """Apply actions, step the world forward, and return observations.
    Args:
      actions: A list of actions meeting the action spec, one per agent.
      step_mul: If specified, use this rather than the environment's default.
    Returns:
      A tuple of TimeStep namedtuples, one per agent.
    """
    # A finished episode must be restarted before it can be stepped again.
    if self._state == environment.StepType.LAST:
        return self.reset()
    check_available = self._ensure_available_actions
    # One (act-call, transformed-action) pair per agent.  Kept as a lazy
    # generator so each transform_action runs as the parallel runner
    # consumes it, exactly as before.
    requests = (
        (ctrl.act,
         feat.transform_action(obs.observation, act,
                               skip_available=not check_available))
        for ctrl, feat, obs, act in zip(
            self._controllers, self._features, self._obs, actions))
    self._parallel.run(requests)
    self._state = environment.StepType.MID
    return self._step(step_mul)
|
def function[step, parameter[self, actions, step_mul]]:
constant[Apply actions, step the world forward, and return observations.
Args:
actions: A list of actions meeting the action spec, one per agent.
step_mul: If specified, use this rather than the environment's default.
Returns:
A tuple of TimeStep namedtuples, one per agent.
]
if compare[name[self]._state equal[==] name[environment].StepType.LAST] begin[:]
return[call[name[self].reset, parameter[]]]
variable[skip] assign[=] <ast.UnaryOp object at 0x7da2046223e0>
call[name[self]._parallel.run, parameter[<ast.GeneratorExp object at 0x7da204620880>]]
name[self]._state assign[=] name[environment].StepType.MID
return[call[name[self]._step, parameter[name[step_mul]]]]
|
keyword[def] identifier[step] ( identifier[self] , identifier[actions] , identifier[step_mul] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_state] == identifier[environment] . identifier[StepType] . identifier[LAST] :
keyword[return] identifier[self] . identifier[reset] ()
identifier[skip] = keyword[not] identifier[self] . identifier[_ensure_available_actions]
identifier[self] . identifier[_parallel] . identifier[run] (
( identifier[c] . identifier[act] , identifier[f] . identifier[transform_action] ( identifier[o] . identifier[observation] , identifier[a] , identifier[skip_available] = identifier[skip] ))
keyword[for] identifier[c] , identifier[f] , identifier[o] , identifier[a] keyword[in] identifier[zip] (
identifier[self] . identifier[_controllers] , identifier[self] . identifier[_features] , identifier[self] . identifier[_obs] , identifier[actions] ))
identifier[self] . identifier[_state] = identifier[environment] . identifier[StepType] . identifier[MID]
keyword[return] identifier[self] . identifier[_step] ( identifier[step_mul] )
|
def step(self, actions, step_mul=None):
"""Apply actions, step the world forward, and return observations.
Args:
actions: A list of actions meeting the action spec, one per agent.
step_mul: If specified, use this rather than the environment's default.
Returns:
A tuple of TimeStep namedtuples, one per agent.
"""
if self._state == environment.StepType.LAST:
return self.reset() # depends on [control=['if'], data=[]]
skip = not self._ensure_available_actions
self._parallel.run(((c.act, f.transform_action(o.observation, a, skip_available=skip)) for (c, f, o, a) in zip(self._controllers, self._features, self._obs, actions)))
self._state = environment.StepType.MID
return self._step(step_mul)
|
def _create_plugin(self, config):
    """
    Creates a plugin from its config.
    Params:
        config: plugin configuration as read by ait.config
    Returns:
        plugin: a Plugin
    Raises:
        ValueError: if any of the required config values are missing
    """
    if config is None:
        raise ValueError('No plugin config to create plugin from.')
    name = config.pop('name', None)
    if name is None:
        raise cfg.AitConfigMissing('plugin name')
    # TODO I don't think we actually care about this being unique? Left over from
    # previous conversations about stuff?
    dotted = name.rsplit('.', 1)
    module_name = dotted[0]
    class_name = dotted[-1]
    already_loaded = (self.outbound_streams + self.inbound_streams +
                      self.servers + self.plugins)
    if class_name in [x.name for x in already_loaded]:
        raise ValueError(
            'Plugin "{}" already loaded. Only one plugin of a given name is allowed'.
            format(class_name)
        )
    plugin_inputs = config.pop('inputs', None)
    if plugin_inputs is None:
        log.warn('No plugin inputs specified for {}'.format(name))
        plugin_inputs = []
    subscribers = config.pop('outputs', None)
    if subscribers is None:
        log.warn('No plugin outputs specified for {}'.format(name))
        subscribers = []
    # Resolve the plugin class and instantiate it, wiring it up to the
    # broker's ZMQ proxy endpoints.  Remaining config keys are forwarded
    # as keyword arguments.
    plugin_class = getattr(import_module(module_name), class_name)
    zmq_args = {'zmq_context': self.broker.context,
                'zmq_proxy_xsub_url': self.broker.XSUB_URL,
                'zmq_proxy_xpub_url': self.broker.XPUB_URL}
    return plugin_class(plugin_inputs, subscribers, zmq_args=zmq_args, **config)
|
def function[_create_plugin, parameter[self, config]]:
constant[
Creates a plugin from its config.
Params:
config: plugin configuration as read by ait.config
Returns:
plugin: a Plugin
Raises:
ValueError: if any of the required config values are missing
]
if compare[name[config] is constant[None]] begin[:]
<ast.Raise object at 0x7da2054a64a0>
variable[name] assign[=] call[name[config].pop, parameter[constant[name], constant[None]]]
if compare[name[name] is constant[None]] begin[:]
<ast.Raise object at 0x7da2054a4250>
variable[module_name] assign[=] call[call[name[name].rsplit, parameter[constant[.], constant[1]]]][constant[0]]
variable[class_name] assign[=] call[call[name[name].rsplit, parameter[constant[.], constant[1]]]][<ast.UnaryOp object at 0x7da2054a4610>]
if compare[name[class_name] in <ast.ListComp object at 0x7da2054a59c0>] begin[:]
<ast.Raise object at 0x7da2054a7df0>
variable[plugin_inputs] assign[=] call[name[config].pop, parameter[constant[inputs], constant[None]]]
if compare[name[plugin_inputs] is constant[None]] begin[:]
call[name[log].warn, parameter[call[constant[No plugin inputs specified for {}].format, parameter[name[name]]]]]
variable[plugin_inputs] assign[=] list[[]]
variable[subscribers] assign[=] call[name[config].pop, parameter[constant[outputs], constant[None]]]
if compare[name[subscribers] is constant[None]] begin[:]
call[name[log].warn, parameter[call[constant[No plugin outputs specified for {}].format, parameter[name[name]]]]]
variable[subscribers] assign[=] list[[]]
variable[module] assign[=] call[name[import_module], parameter[name[module_name]]]
variable[plugin_class] assign[=] call[name[getattr], parameter[name[module], name[class_name]]]
variable[instance] assign[=] call[name[plugin_class], parameter[name[plugin_inputs], name[subscribers]]]
return[name[instance]]
|
keyword[def] identifier[_create_plugin] ( identifier[self] , identifier[config] ):
literal[string]
keyword[if] identifier[config] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[name] = identifier[config] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[name] keyword[is] keyword[None] :
keyword[raise] ( identifier[cfg] . identifier[AitConfigMissing] ( literal[string] ))
identifier[module_name] = identifier[name] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[class_name] = identifier[name] . identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ]
keyword[if] identifier[class_name] keyword[in] [ identifier[x] . identifier[name] keyword[for] identifier[x] keyword[in] ( identifier[self] . identifier[outbound_streams] +
identifier[self] . identifier[inbound_streams] +
identifier[self] . identifier[servers] +
identifier[self] . identifier[plugins] )]:
keyword[raise] identifier[ValueError] (
literal[string] .
identifier[format] ( identifier[class_name] )
)
identifier[plugin_inputs] = identifier[config] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[plugin_inputs] keyword[is] keyword[None] :
identifier[log] . identifier[warn] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[plugin_inputs] =[]
identifier[subscribers] = identifier[config] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[subscribers] keyword[is] keyword[None] :
identifier[log] . identifier[warn] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[subscribers] =[]
identifier[module] = identifier[import_module] ( identifier[module_name] )
identifier[plugin_class] = identifier[getattr] ( identifier[module] , identifier[class_name] )
identifier[instance] = identifier[plugin_class] ( identifier[plugin_inputs] ,
identifier[subscribers] ,
identifier[zmq_args] ={ literal[string] : identifier[self] . identifier[broker] . identifier[context] ,
literal[string] : identifier[self] . identifier[broker] . identifier[XSUB_URL] ,
literal[string] : identifier[self] . identifier[broker] . identifier[XPUB_URL] },
** identifier[config]
)
keyword[return] identifier[instance]
|
def _create_plugin(self, config):
    """
    Creates a plugin from its config.

    The ``name``, ``inputs`` and ``outputs`` entries are popped from
    *config*; every remaining key is forwarded verbatim as a keyword
    argument to the plugin class constructor, together with the broker's
    ZMQ context and proxy URLs.

    Params:
        config: plugin configuration as read by ait.config
    Returns:
        plugin: a Plugin instance
    Raises:
        ValueError: if *config* is None, or if a stream/server/plugin with
            the same class name is already loaded
        cfg.AitConfigMissing: if the ``name`` entry is absent
    """
    if config is None:
        raise ValueError('No plugin config to create plugin from.') # depends on [control=['if'], data=[]]
    name = config.pop('name', None)
    if name is None:
        raise cfg.AitConfigMissing('plugin name') # depends on [control=['if'], data=[]]
    # TODO I don't think we actually care about this being unique? Left over from
    # previous conversations about stuff?
    # ``name`` is a dotted path: everything before the last dot is the
    # module, the last component is the class to instantiate.
    module_name = name.rsplit('.', 1)[0]
    class_name = name.rsplit('.', 1)[-1]
    if class_name in [x.name for x in self.outbound_streams + self.inbound_streams + self.servers + self.plugins]:
        raise ValueError('Plugin "{}" already loaded. Only one plugin of a given name is allowed'.format(class_name)) # depends on [control=['if'], data=['class_name']]
    plugin_inputs = config.pop('inputs', None)
    if plugin_inputs is None:
        log.warn('No plugin inputs specified for {}'.format(name))
        plugin_inputs = [] # depends on [control=['if'], data=['plugin_inputs']]
    subscribers = config.pop('outputs', None)
    if subscribers is None:
        log.warn('No plugin outputs specified for {}'.format(name))
        subscribers = [] # depends on [control=['if'], data=['subscribers']]
    # try to create plugin
    module = import_module(module_name)
    plugin_class = getattr(module, class_name)
    instance = plugin_class(plugin_inputs, subscribers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL}, **config)
    return instance
|
def set_tunnel(self, host, port):
    """Configure the host and port used for HTTP CONNECT tunnelling.

    Builds a ``host[:port]`` proxy address and hands it to the WinHTTP
    request object as the proxy setting.
    """
    address = host
    if port:
        # Port is appended textually; callers pass it as a string.
        address = address + u':' + port
    proxy_variant = VARIANT.create_bstr_from_str(address)
    bypass_variant = VARIANT.create_empty()
    _WinHttpRequest._SetProxy(
        self, HTTPREQUEST_PROXYSETTING_PROXY, proxy_variant, bypass_variant)
|
def function[set_tunnel, parameter[self, host, port]]:
constant[ Sets up the host and the port for the HTTP CONNECT Tunnelling.]
variable[url] assign[=] name[host]
if name[port] begin[:]
variable[url] assign[=] binary_operation[binary_operation[name[url] + constant[:]] + name[port]]
variable[var_host] assign[=] call[name[VARIANT].create_bstr_from_str, parameter[name[url]]]
variable[var_empty] assign[=] call[name[VARIANT].create_empty, parameter[]]
call[name[_WinHttpRequest]._SetProxy, parameter[name[self], name[HTTPREQUEST_PROXYSETTING_PROXY], name[var_host], name[var_empty]]]
|
keyword[def] identifier[set_tunnel] ( identifier[self] , identifier[host] , identifier[port] ):
literal[string]
identifier[url] = identifier[host]
keyword[if] identifier[port] :
identifier[url] = identifier[url] + literal[string] + identifier[port]
identifier[var_host] = identifier[VARIANT] . identifier[create_bstr_from_str] ( identifier[url] )
identifier[var_empty] = identifier[VARIANT] . identifier[create_empty] ()
identifier[_WinHttpRequest] . identifier[_SetProxy] (
identifier[self] , identifier[HTTPREQUEST_PROXYSETTING_PROXY] , identifier[var_host] , identifier[var_empty] )
|
def set_tunnel(self, host, port):
""" Sets up the host and the port for the HTTP CONNECT Tunnelling."""
url = host
if port:
url = url + u':' + port # depends on [control=['if'], data=[]]
var_host = VARIANT.create_bstr_from_str(url)
var_empty = VARIANT.create_empty()
_WinHttpRequest._SetProxy(self, HTTPREQUEST_PROXYSETTING_PROXY, var_host, var_empty)
|
def get_src_address_from_data(self, decoded=True):
    """
    Return the SRC address of a message if a SRC_ADDRESS label is present
    in the message type of the message; return None otherwise.

    :param decoded: read from the decoded hex string when True, else from
        the plain hex string
    :return: hex substring covering the SRC address label, or None
    """
    src_label = None
    for lbl in self.message_type:
        if lbl.field_type and lbl.field_type.function == FieldType.Function.SRC_ADDRESS:
            src_label = lbl
            break
    if src_label is None:
        return None
    start, end = self.get_label_range(src_label, view=1, decode=decoded)
    source = self.decoded_hex_str if decoded else self.plain_hex_str
    return source[start:end]
|
def function[get_src_address_from_data, parameter[self, decoded]]:
constant[
Return the SRC address of a message if SRC_ADDRESS label is present in message type of the message
Return None otherwise
:param decoded:
:return:
]
variable[src_address_label] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da1b1ff47f0>, constant[None]]]
if name[src_address_label] begin[:]
<ast.Tuple object at 0x7da1b1f97100> assign[=] call[name[self].get_label_range, parameter[name[src_address_label]]]
if name[decoded] begin[:]
variable[src_address] assign[=] call[name[self].decoded_hex_str][<ast.Slice object at 0x7da1b1f96e90>]
return[name[src_address]]
|
keyword[def] identifier[get_src_address_from_data] ( identifier[self] , identifier[decoded] = keyword[True] ):
literal[string]
identifier[src_address_label] = identifier[next] (( identifier[lbl] keyword[for] identifier[lbl] keyword[in] identifier[self] . identifier[message_type] keyword[if] identifier[lbl] . identifier[field_type]
keyword[and] identifier[lbl] . identifier[field_type] . identifier[function] == identifier[FieldType] . identifier[Function] . identifier[SRC_ADDRESS] ), keyword[None] )
keyword[if] identifier[src_address_label] :
identifier[start] , identifier[end] = identifier[self] . identifier[get_label_range] ( identifier[src_address_label] , identifier[view] = literal[int] , identifier[decode] = identifier[decoded] )
keyword[if] identifier[decoded] :
identifier[src_address] = identifier[self] . identifier[decoded_hex_str] [ identifier[start] : identifier[end] ]
keyword[else] :
identifier[src_address] = identifier[self] . identifier[plain_hex_str] [ identifier[start] : identifier[end] ]
keyword[else] :
identifier[src_address] = keyword[None]
keyword[return] identifier[src_address]
|
def get_src_address_from_data(self, decoded=True):
"""
Return the SRC address of a message if SRC_ADDRESS label is present in message type of the message
Return None otherwise
:param decoded:
:return:
"""
src_address_label = next((lbl for lbl in self.message_type if lbl.field_type and lbl.field_type.function == FieldType.Function.SRC_ADDRESS), None)
if src_address_label:
(start, end) = self.get_label_range(src_address_label, view=1, decode=decoded)
if decoded:
src_address = self.decoded_hex_str[start:end] # depends on [control=['if'], data=[]]
else:
src_address = self.plain_hex_str[start:end] # depends on [control=['if'], data=[]]
else:
src_address = None
return src_address
|
def get_gene_substitution_language() -> ParserElement:
    """Build a gene substitution parser."""
    # reference nucleotide, integer position, variant nucleotide
    inner = nest(
        dna_nucleotide(GSUB_REFERENCE),
        ppc.integer(GSUB_POSITION),
        dna_nucleotide(GSUB_VARIANT),
    )
    parser = gsub_tag + inner
    parser.setParseAction(_handle_gsub)
    return parser
|
def function[get_gene_substitution_language, parameter[]]:
constant[Build a gene substitution parser.]
variable[parser_element] assign[=] binary_operation[name[gsub_tag] + call[name[nest], parameter[call[name[dna_nucleotide], parameter[name[GSUB_REFERENCE]]], call[name[ppc].integer, parameter[name[GSUB_POSITION]]], call[name[dna_nucleotide], parameter[name[GSUB_VARIANT]]]]]]
call[name[parser_element].setParseAction, parameter[name[_handle_gsub]]]
return[name[parser_element]]
|
keyword[def] identifier[get_gene_substitution_language] ()-> identifier[ParserElement] :
literal[string]
identifier[parser_element] = identifier[gsub_tag] + identifier[nest] (
identifier[dna_nucleotide] ( identifier[GSUB_REFERENCE] ),
identifier[ppc] . identifier[integer] ( identifier[GSUB_POSITION] ),
identifier[dna_nucleotide] ( identifier[GSUB_VARIANT] ),
)
identifier[parser_element] . identifier[setParseAction] ( identifier[_handle_gsub] )
keyword[return] identifier[parser_element]
|
def get_gene_substitution_language() -> ParserElement:
"""Build a gene substitution parser."""
parser_element = gsub_tag + nest(dna_nucleotide(GSUB_REFERENCE), ppc.integer(GSUB_POSITION), dna_nucleotide(GSUB_VARIANT))
parser_element.setParseAction(_handle_gsub)
return parser_element
|
def std(self) -> Optional[float]:
    """Standard deviation of all values entered into histogram.

    This number is precise, because we keep the necessary data
    separate from bin contents.

    Returns
    -------
    float
    """
    # TODO: Add DOF
    if not self._stats:
        return None
    return np.sqrt(self.variance())
|
def function[std, parameter[self]]:
constant[Standard deviation of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float
]
if name[self]._stats begin[:]
return[call[name[np].sqrt, parameter[call[name[self].variance, parameter[]]]]]
|
keyword[def] identifier[std] ( identifier[self] )-> identifier[Optional] [ identifier[float] ]:
literal[string]
keyword[if] identifier[self] . identifier[_stats] :
keyword[return] identifier[np] . identifier[sqrt] ( identifier[self] . identifier[variance] ())
keyword[else] :
keyword[return] keyword[None]
|
def std(self) -> Optional[float]: #, ddof=0):
'Standard deviation of all values entered into histogram.\n\n This number is precise, because we keep the necessary data\n separate from bin contents.\n\n Returns\n -------\n float\n '
# TODO: Add DOF
if self._stats:
return np.sqrt(self.variance()) # depends on [control=['if'], data=[]]
else:
return None
|
def load_exif(album):
    """Loads the exif data of all images in an album from cache"""
    gallery = album.gallery
    # Lazily restore the cache the first time any album of this gallery asks.
    if not hasattr(gallery, "exifCache"):
        _restore_cache(gallery)
    cache = gallery.exifCache
    for media in album.medias:
        if media.type != "image":
            continue
        cache_key = os.path.join(media.path, media.filename)
        if cache_key in cache:
            media.exif = cache[cache_key]
|
def function[load_exif, parameter[album]]:
constant[Loads the exif data of all images in an album from cache]
if <ast.UnaryOp object at 0x7da18f09e500> begin[:]
call[name[_restore_cache], parameter[name[album].gallery]]
variable[cache] assign[=] name[album].gallery.exifCache
for taget[name[media]] in starred[name[album].medias] begin[:]
if compare[name[media].type equal[==] constant[image]] begin[:]
variable[key] assign[=] call[name[os].path.join, parameter[name[media].path, name[media].filename]]
if compare[name[key] in name[cache]] begin[:]
name[media].exif assign[=] call[name[cache]][name[key]]
|
keyword[def] identifier[load_exif] ( identifier[album] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[album] . identifier[gallery] , literal[string] ):
identifier[_restore_cache] ( identifier[album] . identifier[gallery] )
identifier[cache] = identifier[album] . identifier[gallery] . identifier[exifCache]
keyword[for] identifier[media] keyword[in] identifier[album] . identifier[medias] :
keyword[if] identifier[media] . identifier[type] == literal[string] :
identifier[key] = identifier[os] . identifier[path] . identifier[join] ( identifier[media] . identifier[path] , identifier[media] . identifier[filename] )
keyword[if] identifier[key] keyword[in] identifier[cache] :
identifier[media] . identifier[exif] = identifier[cache] [ identifier[key] ]
|
def load_exif(album):
"""Loads the exif data of all images in an album from cache"""
if not hasattr(album.gallery, 'exifCache'):
_restore_cache(album.gallery) # depends on [control=['if'], data=[]]
cache = album.gallery.exifCache
for media in album.medias:
if media.type == 'image':
key = os.path.join(media.path, media.filename)
if key in cache:
media.exif = cache[key] # depends on [control=['if'], data=['key', 'cache']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['media']]
|
def load_entry_point(self, group, name):
    """Return the `name` entry point of `group` or raise ImportError"""
    entry_point = self.get_entry_info(group, name)
    if entry_point is None:
        raise ImportError("Entry point %r not found" % ((group, name),))
    return entry_point.load()
|
def function[load_entry_point, parameter[self, group, name]]:
constant[Return the `name` entry point of `group` or raise ImportError]
variable[ep] assign[=] call[name[self].get_entry_info, parameter[name[group], name[name]]]
if compare[name[ep] is constant[None]] begin[:]
<ast.Raise object at 0x7da2041d9390>
return[call[name[ep].load, parameter[]]]
|
keyword[def] identifier[load_entry_point] ( identifier[self] , identifier[group] , identifier[name] ):
literal[string]
identifier[ep] = identifier[self] . identifier[get_entry_info] ( identifier[group] , identifier[name] )
keyword[if] identifier[ep] keyword[is] keyword[None] :
keyword[raise] identifier[ImportError] ( literal[string] %(( identifier[group] , identifier[name] ),))
keyword[return] identifier[ep] . identifier[load] ()
|
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError('Entry point %r not found' % ((group, name),)) # depends on [control=['if'], data=[]]
return ep.load()
|
def write(self, data):
    """
    write data on the OUT endpoint associated to the HID interface
    """
    # Pad the report (in place) up to the endpoint's packet size.
    report_size = self.ep_out.wMaxPacketSize if self.ep_out else self.packet_size
    while len(data) < report_size:
        data.append(0)
    self.read_sem.release()
    if self.ep_out:
        self.ep_out.write(data)
        return
    # No interrupt OUT endpoint: fall back to a SET_REPORT control
    # transfer on EP0.
    bmRequestType = 0x21  # Host to device request of type Class of Recipient Interface
    bmRequest = 0x09      # Set_REPORT (HID class-specific request for transferring data over EP0)
    wValue = 0x200        # Issuing an OUT report
    wIndex = self.intf_number  # mBed Board interface number for HID
    self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
|
def function[write, parameter[self, data]]:
constant[
write data on the OUT endpoint associated to the HID interface
]
variable[report_size] assign[=] name[self].packet_size
if name[self].ep_out begin[:]
variable[report_size] assign[=] name[self].ep_out.wMaxPacketSize
for taget[name[_]] in starred[call[name[range], parameter[binary_operation[name[report_size] - call[name[len], parameter[name[data]]]]]]] begin[:]
call[name[data].append, parameter[constant[0]]]
call[name[self].read_sem.release, parameter[]]
if <ast.UnaryOp object at 0x7da1b18ac2b0> begin[:]
variable[bmRequestType] assign[=] constant[33]
variable[bmRequest] assign[=] constant[9]
variable[wValue] assign[=] constant[512]
variable[wIndex] assign[=] name[self].intf_number
call[name[self].dev.ctrl_transfer, parameter[name[bmRequestType], name[bmRequest], name[wValue], name[wIndex], name[data]]]
return[None]
call[name[self].ep_out.write, parameter[name[data]]]
return[None]
|
keyword[def] identifier[write] ( identifier[self] , identifier[data] ):
literal[string]
identifier[report_size] = identifier[self] . identifier[packet_size]
keyword[if] identifier[self] . identifier[ep_out] :
identifier[report_size] = identifier[self] . identifier[ep_out] . identifier[wMaxPacketSize]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[report_size] - identifier[len] ( identifier[data] )):
identifier[data] . identifier[append] ( literal[int] )
identifier[self] . identifier[read_sem] . identifier[release] ()
keyword[if] keyword[not] identifier[self] . identifier[ep_out] :
identifier[bmRequestType] = literal[int]
identifier[bmRequest] = literal[int]
identifier[wValue] = literal[int]
identifier[wIndex] = identifier[self] . identifier[intf_number]
identifier[self] . identifier[dev] . identifier[ctrl_transfer] ( identifier[bmRequestType] , identifier[bmRequest] , identifier[wValue] , identifier[wIndex] , identifier[data] )
keyword[return]
identifier[self] . identifier[ep_out] . identifier[write] ( identifier[data] )
keyword[return]
|
def write(self, data):
"""
write data on the OUT endpoint associated to the HID interface
"""
report_size = self.packet_size
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize # depends on [control=['if'], data=[]]
for _ in range(report_size - len(data)):
data.append(0) # depends on [control=['for'], data=[]]
self.read_sem.release()
if not self.ep_out:
bmRequestType = 33 #Host to device request of type Class of Recipient Interface
bmRequest = 9 #Set_REPORT (HID class-specific request for transferring data over EP0)
wValue = 512 #Issuing an OUT report
wIndex = self.intf_number #mBed Board interface number for HID
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return # depends on [control=['if'], data=[]]
#raise ValueError('EP_OUT endpoint is NULL')
self.ep_out.write(data)
#logging.debug('sent: %s', data)
return
|
def printImportedNames(self):
    """Produce a report of imported names."""
    for module in self.listModules():
        imported = "\n  ".join(imp.name for imp in module.imported_names)
        print("%s:" % module.modname)
        print("  %s" % imported)
|
def function[printImportedNames, parameter[self]]:
constant[Produce a report of imported names.]
for taget[name[module]] in starred[call[name[self].listModules, parameter[]]] begin[:]
call[name[print], parameter[binary_operation[constant[%s:] <ast.Mod object at 0x7da2590d6920> name[module].modname]]]
call[name[print], parameter[binary_operation[constant[ %s] <ast.Mod object at 0x7da2590d6920> call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da1b042fe20>]]]]]
|
keyword[def] identifier[printImportedNames] ( identifier[self] ):
literal[string]
keyword[for] identifier[module] keyword[in] identifier[self] . identifier[listModules] ():
identifier[print] ( literal[string] % identifier[module] . identifier[modname] )
identifier[print] ( literal[string] % literal[string] . identifier[join] ( identifier[imp] . identifier[name] keyword[for] identifier[imp] keyword[in] identifier[module] . identifier[imported_names] ))
|
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print('%s:' % module.modname)
print(' %s' % '\n '.join((imp.name for imp in module.imported_names))) # depends on [control=['for'], data=['module']]
|
def stats(self, symbol):
    """
    Return volume statistics for *symbol*, converting the string fields
    of the API response to native numeric types in place.

    curl https://api.bitfinex.com/v1/stats/btcusd
    [
        {"period":1,"volume":"7410.27250155"},
        {"period":7,"volume":"52251.37118006"},
        {"period":30,"volume":"464505.07753251"}
    ]

    :param symbol: ticker symbol, e.g. ``'btcusd'``
    :returns: list of period dicts with ``period`` as int and ``volume``
        as float; any other keys are left untouched
    """
    data = self._get(self.url_for(PATH_STATS, (symbol)))
    for period in data:
        for key, value in period.items():
            # Convert only the known numeric keys. The previous code
            # assigned `new_value` unconditionally, so an unrecognized
            # key raised NameError on the first iteration or silently
            # received a stale value from a prior key.
            if key == 'period':
                period[key] = int(value)
            elif key == 'volume':
                period[key] = float(value)
    return data
|
def function[stats, parameter[self, symbol]]:
constant[
curl https://api.bitfinex.com/v1/stats/btcusd
[
{"period":1,"volume":"7410.27250155"},
{"period":7,"volume":"52251.37118006"},
{"period":30,"volume":"464505.07753251"}
]
]
variable[data] assign[=] call[name[self]._get, parameter[call[name[self].url_for, parameter[name[PATH_STATS], name[symbol]]]]]
for taget[name[period]] in starred[name[data]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0dbcd00>, <ast.Name object at 0x7da1b0dbfeb0>]]] in starred[call[name[period].items, parameter[]]] begin[:]
if compare[name[key] equal[==] constant[period]] begin[:]
variable[new_value] assign[=] call[name[int], parameter[name[value]]]
call[name[period]][name[key]] assign[=] name[new_value]
return[name[data]]
|
keyword[def] identifier[stats] ( identifier[self] , identifier[symbol] ):
literal[string]
identifier[data] = identifier[self] . identifier[_get] ( identifier[self] . identifier[url_for] ( identifier[PATH_STATS] ,( identifier[symbol] )))
keyword[for] identifier[period] keyword[in] identifier[data] :
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[period] . identifier[items] ():
keyword[if] identifier[key] == literal[string] :
identifier[new_value] = identifier[int] ( identifier[value] )
keyword[elif] identifier[key] == literal[string] :
identifier[new_value] = identifier[float] ( identifier[value] )
identifier[period] [ identifier[key] ]= identifier[new_value]
keyword[return] identifier[data]
|
def stats(self, symbol):
"""
curl https://api.bitfinex.com/v1/stats/btcusd
[
{"period":1,"volume":"7410.27250155"},
{"period":7,"volume":"52251.37118006"},
{"period":30,"volume":"464505.07753251"}
]
"""
data = self._get(self.url_for(PATH_STATS, symbol))
for period in data:
for (key, value) in period.items():
if key == 'period':
new_value = int(value) # depends on [control=['if'], data=[]]
elif key == 'volume':
new_value = float(value) # depends on [control=['if'], data=[]]
period[key] = new_value # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['period']]
return data
|
def pull_request(ctx, base_branch, open_pr, stop_timer):
    """Create a new pull request for this issue.

    Workflow: verify a working branch exists and the issue is assigned to
    the current user, push the branch to the configured remote, create
    (or reuse) a pull request against ``base_branch``, then move the
    tracker issue to the configured review status.

    Args:
        ctx: click context; ``ctx.obj`` holds the lancet application object.
        base_branch: target branch for the PR; falls back to the
            ``repository.base_branch`` config value when falsy.
        open_pr: when truthy, open the PR page in a browser at the end.
        stop_timer: when truthy, pause the harvest timer afterwards.
    """
    lancet = ctx.obj
    review_status = lancet.config.get("tracker", "review_status")
    remote_name = lancet.config.get("repository", "remote_name")
    if not base_branch:
        base_branch = lancet.config.get("repository", "base_branch")
    # Get the issue
    issue = get_issue(lancet)
    transition = get_transition(ctx, lancet, issue, review_status)
    # Get the working branch (never create one here)
    branch = get_branch(lancet, issue, create=False)
    with taskstatus("Checking pre-requisites") as ts:
        if not branch:
            ts.abort("No working branch found")
        if lancet.tracker.whoami() not in issue.assignees:
            ts.abort("Issue currently not assigned to you")
        # TODO: Check mergeability
        # TODO: Check remote status (PR does not already exist)
    # Push to remote
    with taskstatus('Pushing to "{}"', remote_name) as ts:
        remote = lancet.repo.lookup_remote(remote_name)
        if not remote:
            ts.abort('Remote "{}" not found', remote_name)
        # Imported here (not at module level) — presumably to avoid an
        # import cycle; confirm before hoisting.
        from ..git import CredentialsCallbacks
        remote.push([branch.name], callbacks=CredentialsCallbacks())
        ts.ok('Pushed latest changes to "{}"', remote_name)
    # Create pull request
    with taskstatus("Creating pull request") as ts:
        template_path = lancet.config.get("repository", "pr_template")
        message = edit_template(template_path, issue=issue)
        if not message:
            ts.abort("You didn't provide a title for the pull request")
        # First line of the edited message is the PR title, the rest the body.
        title, body = message.split("\n", 1)
        title = title.strip()
        if not title:
            ts.abort("You didn't provide a title for the pull request")
        try:
            pr = lancet.scm_manager.create_pull_request(
                branch.branch_name, base_branch, title, body.strip("\n")
            )
        except PullRequestAlreadyExists as e:
            # Reuse the existing PR rather than failing.
            pr = e.pull_request
            ts.ok("Pull request does already exist at {}", pr.link)
        else:
            ts.ok("Pull request created at {}", pr.link)
    # Update issue
    set_issue_status(lancet, issue, review_status, transition)
    # TODO: Post to activity stream on JIRA?
    # TODO: Post to Slack?
    # Stop harvest timer
    if stop_timer:
        with taskstatus("Pausing harvest timer") as ts:
            lancet.timer.pause()
            ts.ok("Harvest timer paused")
    # Open the pull request page in the browser if requested
    # NOTE(review): `pr` is only bound inside the taskstatus block above;
    # ts.abort presumably raises, otherwise this could be unbound — confirm.
    if open_pr:
        click.launch(pr.link)
|
def function[pull_request, parameter[ctx, base_branch, open_pr, stop_timer]]:
constant[Create a new pull request for this issue.]
variable[lancet] assign[=] name[ctx].obj
variable[review_status] assign[=] call[name[lancet].config.get, parameter[constant[tracker], constant[review_status]]]
variable[remote_name] assign[=] call[name[lancet].config.get, parameter[constant[repository], constant[remote_name]]]
if <ast.UnaryOp object at 0x7da1b1024310> begin[:]
variable[base_branch] assign[=] call[name[lancet].config.get, parameter[constant[repository], constant[base_branch]]]
variable[issue] assign[=] call[name[get_issue], parameter[name[lancet]]]
variable[transition] assign[=] call[name[get_transition], parameter[name[ctx], name[lancet], name[issue], name[review_status]]]
variable[branch] assign[=] call[name[get_branch], parameter[name[lancet], name[issue]]]
with call[name[taskstatus], parameter[constant[Checking pre-requisites]]] begin[:]
if <ast.UnaryOp object at 0x7da1b10b2470> begin[:]
call[name[ts].abort, parameter[constant[No working branch found]]]
if compare[call[name[lancet].tracker.whoami, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[issue].assignees] begin[:]
call[name[ts].abort, parameter[constant[Issue currently not assigned to you]]]
with call[name[taskstatus], parameter[constant[Pushing to "{}"], name[remote_name]]] begin[:]
variable[remote] assign[=] call[name[lancet].repo.lookup_remote, parameter[name[remote_name]]]
if <ast.UnaryOp object at 0x7da1b10244f0> begin[:]
call[name[ts].abort, parameter[constant[Remote "{}" not found], name[remote_name]]]
from relative_module[git] import module[CredentialsCallbacks]
call[name[remote].push, parameter[list[[<ast.Attribute object at 0x7da1b1024b20>]]]]
call[name[ts].ok, parameter[constant[Pushed latest changes to "{}"], name[remote_name]]]
with call[name[taskstatus], parameter[constant[Creating pull request]]] begin[:]
variable[template_path] assign[=] call[name[lancet].config.get, parameter[constant[repository], constant[pr_template]]]
variable[message] assign[=] call[name[edit_template], parameter[name[template_path]]]
if <ast.UnaryOp object at 0x7da1b1024a00> begin[:]
call[name[ts].abort, parameter[constant[You didn't provide a title for the pull request]]]
<ast.Tuple object at 0x7da1b1026290> assign[=] call[name[message].split, parameter[constant[
], constant[1]]]
variable[title] assign[=] call[name[title].strip, parameter[]]
if <ast.UnaryOp object at 0x7da1b1025d80> begin[:]
call[name[ts].abort, parameter[constant[You didn't provide a title for the pull request]]]
<ast.Try object at 0x7da1b10250f0>
call[name[set_issue_status], parameter[name[lancet], name[issue], name[review_status], name[transition]]]
if name[stop_timer] begin[:]
with call[name[taskstatus], parameter[constant[Pausing harvest timer]]] begin[:]
call[name[lancet].timer.pause, parameter[]]
call[name[ts].ok, parameter[constant[Harvest timer paused]]]
if name[open_pr] begin[:]
call[name[click].launch, parameter[name[pr].link]]
|
keyword[def] identifier[pull_request] ( identifier[ctx] , identifier[base_branch] , identifier[open_pr] , identifier[stop_timer] ):
literal[string]
identifier[lancet] = identifier[ctx] . identifier[obj]
identifier[review_status] = identifier[lancet] . identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[remote_name] = identifier[lancet] . identifier[config] . identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[base_branch] :
identifier[base_branch] = identifier[lancet] . identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[issue] = identifier[get_issue] ( identifier[lancet] )
identifier[transition] = identifier[get_transition] ( identifier[ctx] , identifier[lancet] , identifier[issue] , identifier[review_status] )
identifier[branch] = identifier[get_branch] ( identifier[lancet] , identifier[issue] , identifier[create] = keyword[False] )
keyword[with] identifier[taskstatus] ( literal[string] ) keyword[as] identifier[ts] :
keyword[if] keyword[not] identifier[branch] :
identifier[ts] . identifier[abort] ( literal[string] )
keyword[if] identifier[lancet] . identifier[tracker] . identifier[whoami] () keyword[not] keyword[in] identifier[issue] . identifier[assignees] :
identifier[ts] . identifier[abort] ( literal[string] )
keyword[with] identifier[taskstatus] ( literal[string] , identifier[remote_name] ) keyword[as] identifier[ts] :
identifier[remote] = identifier[lancet] . identifier[repo] . identifier[lookup_remote] ( identifier[remote_name] )
keyword[if] keyword[not] identifier[remote] :
identifier[ts] . identifier[abort] ( literal[string] , identifier[remote_name] )
keyword[from] .. identifier[git] keyword[import] identifier[CredentialsCallbacks]
identifier[remote] . identifier[push] ([ identifier[branch] . identifier[name] ], identifier[callbacks] = identifier[CredentialsCallbacks] ())
identifier[ts] . identifier[ok] ( literal[string] , identifier[remote_name] )
keyword[with] identifier[taskstatus] ( literal[string] ) keyword[as] identifier[ts] :
identifier[template_path] = identifier[lancet] . identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[message] = identifier[edit_template] ( identifier[template_path] , identifier[issue] = identifier[issue] )
keyword[if] keyword[not] identifier[message] :
identifier[ts] . identifier[abort] ( literal[string] )
identifier[title] , identifier[body] = identifier[message] . identifier[split] ( literal[string] , literal[int] )
identifier[title] = identifier[title] . identifier[strip] ()
keyword[if] keyword[not] identifier[title] :
identifier[ts] . identifier[abort] ( literal[string] )
keyword[try] :
identifier[pr] = identifier[lancet] . identifier[scm_manager] . identifier[create_pull_request] (
identifier[branch] . identifier[branch_name] , identifier[base_branch] , identifier[title] , identifier[body] . identifier[strip] ( literal[string] )
)
keyword[except] identifier[PullRequestAlreadyExists] keyword[as] identifier[e] :
identifier[pr] = identifier[e] . identifier[pull_request]
identifier[ts] . identifier[ok] ( literal[string] , identifier[pr] . identifier[link] )
keyword[else] :
identifier[ts] . identifier[ok] ( literal[string] , identifier[pr] . identifier[link] )
identifier[set_issue_status] ( identifier[lancet] , identifier[issue] , identifier[review_status] , identifier[transition] )
keyword[if] identifier[stop_timer] :
keyword[with] identifier[taskstatus] ( literal[string] ) keyword[as] identifier[ts] :
identifier[lancet] . identifier[timer] . identifier[pause] ()
identifier[ts] . identifier[ok] ( literal[string] )
keyword[if] identifier[open_pr] :
identifier[click] . identifier[launch] ( identifier[pr] . identifier[link] )
|
def pull_request(ctx, base_branch, open_pr, stop_timer):
"""Create a new pull request for this issue."""
lancet = ctx.obj
review_status = lancet.config.get('tracker', 'review_status')
remote_name = lancet.config.get('repository', 'remote_name')
if not base_branch:
base_branch = lancet.config.get('repository', 'base_branch') # depends on [control=['if'], data=[]]
# Get the issue
issue = get_issue(lancet)
transition = get_transition(ctx, lancet, issue, review_status)
# Get the working branch
branch = get_branch(lancet, issue, create=False)
with taskstatus('Checking pre-requisites') as ts:
if not branch:
ts.abort('No working branch found') # depends on [control=['if'], data=[]]
if lancet.tracker.whoami() not in issue.assignees:
ts.abort('Issue currently not assigned to you') # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['ts']]
# TODO: Check mergeability
# TODO: Check remote status (PR does not already exist)
# Push to remote
with taskstatus('Pushing to "{}"', remote_name) as ts:
remote = lancet.repo.lookup_remote(remote_name)
if not remote:
ts.abort('Remote "{}" not found', remote_name) # depends on [control=['if'], data=[]]
from ..git import CredentialsCallbacks
remote.push([branch.name], callbacks=CredentialsCallbacks())
ts.ok('Pushed latest changes to "{}"', remote_name) # depends on [control=['with'], data=['ts']]
# Create pull request
with taskstatus('Creating pull request') as ts:
template_path = lancet.config.get('repository', 'pr_template')
message = edit_template(template_path, issue=issue)
if not message:
ts.abort("You didn't provide a title for the pull request") # depends on [control=['if'], data=[]]
(title, body) = message.split('\n', 1)
title = title.strip()
if not title:
ts.abort("You didn't provide a title for the pull request") # depends on [control=['if'], data=[]]
try:
pr = lancet.scm_manager.create_pull_request(branch.branch_name, base_branch, title, body.strip('\n')) # depends on [control=['try'], data=[]]
except PullRequestAlreadyExists as e:
pr = e.pull_request
ts.ok('Pull request does already exist at {}', pr.link) # depends on [control=['except'], data=['e']]
else:
ts.ok('Pull request created at {}', pr.link) # depends on [control=['with'], data=['ts']]
# Update issue
set_issue_status(lancet, issue, review_status, transition)
# TODO: Post to activity stream on JIRA?
# TODO: Post to Slack?
# Stop harvest timer
if stop_timer:
with taskstatus('Pausing harvest timer') as ts:
lancet.timer.pause()
ts.ok('Harvest timer paused') # depends on [control=['with'], data=['ts']] # depends on [control=['if'], data=[]]
# Open the pull request page in the browser if requested
if open_pr:
click.launch(pr.link) # depends on [control=['if'], data=[]]
|
def to_dict(self):
        """Represent the accumulated statistics as a plain ``dict``.

        Returns a mapping with the keys ``mean``, ``var``, ``min``,
        ``max`` and ``num``, each taken from the instance attribute of
        the same name.
        """
        # Pull each statistic off the instance under its canonical key.
        stat_names = ('mean', 'var', 'min', 'max', 'num')
        return {name: getattr(self, name) for name in stat_names}
|
def function[to_dict, parameter[self]]:
constant[
Return the stats as a dictionary.
]
return[dictionary[[<ast.Constant object at 0x7da1b0ba72b0>, <ast.Constant object at 0x7da1b0ba6e30>, <ast.Constant object at 0x7da1b0c53520>, <ast.Constant object at 0x7da1b0c50520>, <ast.Constant object at 0x7da1b0c53760>], [<ast.Attribute object at 0x7da1b0b1dc90>, <ast.Attribute object at 0x7da1b0b1fe20>, <ast.Attribute object at 0x7da1b0b1e920>, <ast.Attribute object at 0x7da1b0b1f010>, <ast.Attribute object at 0x7da1b0b1cd00>]]]
|
keyword[def] identifier[to_dict] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[mean] ,
literal[string] : identifier[self] . identifier[var] ,
literal[string] : identifier[self] . identifier[min] ,
literal[string] : identifier[self] . identifier[max] ,
literal[string] : identifier[self] . identifier[num]
}
|
def to_dict(self):
"""
Return the stats as a dictionary.
"""
return {'mean': self.mean, 'var': self.var, 'min': self.min, 'max': self.max, 'num': self.num}
|
def include(self, *fields, **kwargs):
        """
        Return a new QuerySet instance that will include related objects.
        If fields are specified, they must be non-hidden relationships.
        If select_related(None) is called, clear the list.

        Accepts one keyword argument, ``limit_includes``; any other
        keyword raises an AssertionError.
        """
        clone = self._clone()
        # Preserve the stickiness of related querysets
        # NOTE: by default _clone will clear this attribute
        # .include does not modify the actual query, so we
        # should not clear `filter_is_sticky`
        if self.query.filter_is_sticky:
            clone.query.filter_is_sticky = True
        # NOTE(review): presumably bounds how many related rows each
        # include fetches — confirm against _include's implementation.
        clone._include_limit = kwargs.pop('limit_includes', None)
        assert not kwargs, '"limit_includes" is the only accepted kwargs. Eat your heart out 2.7'
        # Copy the behavior of .select_related(None): drop every pending
        # include along with the hidden '__<name>' annotations backing it.
        if fields == (None, ):
            for field in clone._includes.keys():
                clone.query._annotations.pop('__{}'.format(field.name), None)
            clone._includes.clear()
            return clone
        # Parse everything the way django handles joins/select related
        # Including multiple child fields ie .include(field1__field2, field1__field3)
        # turns into {field1: {field2: {}, field3: {}}
        for name in fields:
            # Walk the '__'-separated relation path, descending one model
            # per segment; `ctx` tracks the current node of the nested
            # include tree stored on the clone.
            ctx, model = clone._includes, clone.model
            for spl in name.split('__'):
                field = model._meta.get_field(spl)
                if isinstance(field, ForeignObjectRel) and field.is_hidden():
                    raise ValueError('Hidden field "{!r}" has no descriptor and therefore cannot be included'.format(field))
                model = field.related_model
                ctx = ctx.setdefault(field, OrderedDict())
        # Re-register every top-level include (old and newly added) on the
        # clone so its query reflects the full include tree.
        for field in clone._includes.keys():
            clone._include(field)
        return clone
|
def function[include, parameter[self]]:
constant[
Return a new QuerySet instance that will include related objects.
If fields are specified, they must be non-hidden relationships.
If select_related(None) is called, clear the list.
]
variable[clone] assign[=] call[name[self]._clone, parameter[]]
if name[self].query.filter_is_sticky begin[:]
name[clone].query.filter_is_sticky assign[=] constant[True]
name[clone]._include_limit assign[=] call[name[kwargs].pop, parameter[constant[limit_includes], constant[None]]]
assert[<ast.UnaryOp object at 0x7da18ede5090>]
if compare[name[fields] equal[==] tuple[[<ast.Constant object at 0x7da18ede5690>]]] begin[:]
for taget[name[field]] in starred[call[name[clone]._includes.keys, parameter[]]] begin[:]
call[name[clone].query._annotations.pop, parameter[call[constant[__{}].format, parameter[name[field].name]], constant[None]]]
call[name[clone]._includes.clear, parameter[]]
return[name[clone]]
for taget[name[name]] in starred[name[fields]] begin[:]
<ast.Tuple object at 0x7da18ede4fa0> assign[=] tuple[[<ast.Attribute object at 0x7da18ede5ed0>, <ast.Attribute object at 0x7da18ede6e30>]]
for taget[name[spl]] in starred[call[name[name].split, parameter[constant[__]]]] begin[:]
variable[field] assign[=] call[name[model]._meta.get_field, parameter[name[spl]]]
if <ast.BoolOp object at 0x7da18ede4eb0> begin[:]
<ast.Raise object at 0x7da18ede4610>
variable[model] assign[=] name[field].related_model
variable[ctx] assign[=] call[name[ctx].setdefault, parameter[name[field], call[name[OrderedDict], parameter[]]]]
for taget[name[field]] in starred[call[name[clone]._includes.keys, parameter[]]] begin[:]
call[name[clone]._include, parameter[name[field]]]
return[name[clone]]
|
keyword[def] identifier[include] ( identifier[self] ,* identifier[fields] ,** identifier[kwargs] ):
literal[string]
identifier[clone] = identifier[self] . identifier[_clone] ()
keyword[if] identifier[self] . identifier[query] . identifier[filter_is_sticky] :
identifier[clone] . identifier[query] . identifier[filter_is_sticky] = keyword[True]
identifier[clone] . identifier[_include_limit] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[assert] keyword[not] identifier[kwargs] , literal[string]
keyword[if] identifier[fields] ==( keyword[None] ,):
keyword[for] identifier[field] keyword[in] identifier[clone] . identifier[_includes] . identifier[keys] ():
identifier[clone] . identifier[query] . identifier[_annotations] . identifier[pop] ( literal[string] . identifier[format] ( identifier[field] . identifier[name] ), keyword[None] )
identifier[clone] . identifier[_includes] . identifier[clear] ()
keyword[return] identifier[clone]
keyword[for] identifier[name] keyword[in] identifier[fields] :
identifier[ctx] , identifier[model] = identifier[clone] . identifier[_includes] , identifier[clone] . identifier[model]
keyword[for] identifier[spl] keyword[in] identifier[name] . identifier[split] ( literal[string] ):
identifier[field] = identifier[model] . identifier[_meta] . identifier[get_field] ( identifier[spl] )
keyword[if] identifier[isinstance] ( identifier[field] , identifier[ForeignObjectRel] ) keyword[and] identifier[field] . identifier[is_hidden] ():
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[field] ))
identifier[model] = identifier[field] . identifier[related_model]
identifier[ctx] = identifier[ctx] . identifier[setdefault] ( identifier[field] , identifier[OrderedDict] ())
keyword[for] identifier[field] keyword[in] identifier[clone] . identifier[_includes] . identifier[keys] ():
identifier[clone] . identifier[_include] ( identifier[field] )
keyword[return] identifier[clone]
|
def include(self, *fields, **kwargs):
"""
Return a new QuerySet instance that will include related objects.
If fields are specified, they must be non-hidden relationships.
If select_related(None) is called, clear the list.
"""
clone = self._clone()
# Preserve the stickiness of related querysets
# NOTE: by default _clone will clear this attribute
# .include does not modify the actual query, so we
# should not clear `filter_is_sticky`
if self.query.filter_is_sticky:
clone.query.filter_is_sticky = True # depends on [control=['if'], data=[]]
clone._include_limit = kwargs.pop('limit_includes', None)
assert not kwargs, '"limit_includes" is the only accepted kwargs. Eat your heart out 2.7'
# Copy the behavior of .select_related(None)
if fields == (None,):
for field in clone._includes.keys():
clone.query._annotations.pop('__{}'.format(field.name), None) # depends on [control=['for'], data=['field']]
clone._includes.clear()
return clone # depends on [control=['if'], data=[]]
# Parse everything the way django handles joins/select related
# Including multiple child fields ie .include(field1__field2, field1__field3)
# turns into {field1: {field2: {}, field3: {}}
for name in fields:
(ctx, model) = (clone._includes, clone.model)
for spl in name.split('__'):
field = model._meta.get_field(spl)
if isinstance(field, ForeignObjectRel) and field.is_hidden():
raise ValueError('Hidden field "{!r}" has no descriptor and therefore cannot be included'.format(field)) # depends on [control=['if'], data=[]]
model = field.related_model
ctx = ctx.setdefault(field, OrderedDict()) # depends on [control=['for'], data=['spl']] # depends on [control=['for'], data=['name']]
for field in clone._includes.keys():
clone._include(field) # depends on [control=['for'], data=['field']]
return clone
|
def clean(self, initial_epoch):
        """Drop metric documents that a resumed run would overwrite.

        Deletes every row in the ``metrics`` collection belonging to this
        run whose ``epoch_idx`` is strictly greater than ``initial_epoch``.
        """
        selector = {
            'run_name': self.model_config.run_name,
            'epoch_idx': {'$gt': initial_epoch},
        }
        self.db.metrics.delete_many(selector)
|
def function[clean, parameter[self, initial_epoch]]:
constant[ Remove entries from database that would get overwritten ]
call[name[self].db.metrics.delete_many, parameter[dictionary[[<ast.Constant object at 0x7da1b15f09a0>, <ast.Constant object at 0x7da1b15f1ae0>], [<ast.Attribute object at 0x7da1b15f0790>, <ast.Dict object at 0x7da1b15f0d30>]]]]
|
keyword[def] identifier[clean] ( identifier[self] , identifier[initial_epoch] ):
literal[string]
identifier[self] . identifier[db] . identifier[metrics] . identifier[delete_many] ({ literal[string] : identifier[self] . identifier[model_config] . identifier[run_name] , literal[string] :{ literal[string] : identifier[initial_epoch] }})
|
def clean(self, initial_epoch):
""" Remove entries from database that would get overwritten """
self.db.metrics.delete_many({'run_name': self.model_config.run_name, 'epoch_idx': {'$gt': initial_epoch}})
|
def parse_record( self, lines ):
        """
        Parse a TRANSFAC record out of `lines` and return a motif.

        `lines` is an iterable of raw text lines.  Each line starts with
        a prefix field; prefixes are dispatched through
        `self.parse_actions` to populate a TransfacMotif.  Returns the
        motif only when at least one of ID, AC or NA was seen; otherwise
        returns None implicitly.
        """
        # Break lines up: normalize every raw line to [prefix, rest].
        temp_lines = []
        for line in lines:
            fields = line.rstrip( "\r\n" ).split( None, 1 )
            if len( fields ) == 1:
                # Prefix-only line; give it an empty payload.
                fields.append( "" )
            temp_lines.append( fields )
        lines = temp_lines
        # Fill in motif from lines
        motif = TransfacMotif()
        current_line = 0
        while 1:
            # Done parsing if no more lines to consume
            if current_line >= len( lines ):
                break
            # Remove prefix and first separator from line
            prefix, rest = lines[ current_line ]
            # No action for this prefix, just ignore the line
            if prefix not in self.parse_actions:
                current_line += 1
                continue
            # Get action for line.  `action` is (kind, attribute_name);
            # the sequential `if` tests below are mutually exclusive
            # because action[0] is a single kind string.
            action = self.parse_actions[ prefix ]
            # Store a single line value
            if action[0] == "store_single":
                key = action[1]
                setattr( motif, key, rest )
                current_line += 1
            # Add a single line value to a list (created lazily)
            if action[0] == "store_single_list":
                key = action[1]
                if not getattr( motif, key ):
                    setattr( motif, key, [] )
                getattr( motif, key ).append( rest )
                current_line += 1
            # Add a single 'k=v' line value to a dictionary (created lazily)
            if action[0] == "store_single_key_value":
                key = action[1]
                k, v = rest.strip().split( '=', 1 )
                if not getattr( motif, key ):
                    setattr( motif, key, {} )
                getattr( motif, key )[k] = v
                current_line += 1
            # Store a block of text: consume every consecutive line that
            # shares this prefix and join their payloads with newlines.
            if action[0] == "store_block":
                key = action[1]
                value = []
                while current_line < len( lines ) and lines[ current_line ][0] == prefix:
                    value.append( lines[current_line][1] )
                    current_line += 1
                setattr( motif, key, str.join( "\n", value ) )
            # Store a matrix
            if action[0] == "store_matrix":
                # First line is alphabet
                alphabet = rest.split()
                alphabet_size = len( alphabet )
                rows = []
                pattern = ""
                current_line += 1
                # Next lines are the rows of the matrix (we allow 0 rows)
                while current_line < len( lines ):
                    prefix, rest = lines[ current_line ]
                    # Prefix should be a two digit 0 padded row number
                    if not prefix.isdigit():
                        break
                    # The first `alphabet_size` fields are the row values
                    values = rest.split()
                    rows.append( [ float(_) for _ in values[:alphabet_size] ] )
                    # TRANSFAC includes an extra column with the IUPAC code
                    if len( values ) > alphabet_size:
                        pattern += values[alphabet_size]
                    current_line += 1
                # Only store the pattern if it is the correct length (meaning
                # that every row had an extra field)
                # NOTE(review): `pattern` is validated here but never
                # attached to the motif — possibly intentional; verify.
                if len( pattern ) != len( rows ):
                    pattern = None
                matrix = FrequencyMatrix.from_rows( alphabet, rows )
                setattr( motif, action[1], matrix )
        # Only return a motif if we saw at least ID or AC or NA
        if motif.id or motif.accession or motif.name:
            return motif
|
def function[parse_record, parameter[self, lines]]:
constant[
Parse a TRANSFAC record out of `lines` and return a motif.
]
variable[temp_lines] assign[=] list[[]]
for taget[name[line]] in starred[name[lines]] begin[:]
variable[fields] assign[=] call[call[name[line].rstrip, parameter[constant[
]]].split, parameter[constant[None], constant[1]]]
if compare[call[name[len], parameter[name[fields]]] equal[==] constant[1]] begin[:]
call[name[fields].append, parameter[constant[]]]
call[name[temp_lines].append, parameter[name[fields]]]
variable[lines] assign[=] name[temp_lines]
variable[motif] assign[=] call[name[TransfacMotif], parameter[]]
variable[current_line] assign[=] constant[0]
while constant[1] begin[:]
if compare[name[current_line] greater_or_equal[>=] call[name[len], parameter[name[lines]]]] begin[:]
break
<ast.Tuple object at 0x7da1b0f38ee0> assign[=] call[name[lines]][name[current_line]]
if compare[name[prefix] <ast.NotIn object at 0x7da2590d7190> name[self].parse_actions] begin[:]
<ast.AugAssign object at 0x7da1b0f3a320>
continue
variable[action] assign[=] call[name[self].parse_actions][name[prefix]]
if compare[call[name[action]][constant[0]] equal[==] constant[store_single]] begin[:]
variable[key] assign[=] call[name[action]][constant[1]]
call[name[setattr], parameter[name[motif], name[key], name[rest]]]
<ast.AugAssign object at 0x7da1b0f3bd90>
if compare[call[name[action]][constant[0]] equal[==] constant[store_single_list]] begin[:]
variable[key] assign[=] call[name[action]][constant[1]]
if <ast.UnaryOp object at 0x7da1b0ef2140> begin[:]
call[name[setattr], parameter[name[motif], name[key], list[[]]]]
call[call[name[getattr], parameter[name[motif], name[key]]].append, parameter[name[rest]]]
<ast.AugAssign object at 0x7da1b0ef14b0>
if compare[call[name[action]][constant[0]] equal[==] constant[store_single_key_value]] begin[:]
variable[key] assign[=] call[name[action]][constant[1]]
<ast.Tuple object at 0x7da1b0ef1630> assign[=] call[call[name[rest].strip, parameter[]].split, parameter[constant[=], constant[1]]]
if <ast.UnaryOp object at 0x7da1b0ef2560> begin[:]
call[name[setattr], parameter[name[motif], name[key], dictionary[[], []]]]
call[call[name[getattr], parameter[name[motif], name[key]]]][name[k]] assign[=] name[v]
<ast.AugAssign object at 0x7da1b0ef2860>
if compare[call[name[action]][constant[0]] equal[==] constant[store_block]] begin[:]
variable[key] assign[=] call[name[action]][constant[1]]
variable[value] assign[=] list[[]]
while <ast.BoolOp object at 0x7da1b0d43df0> begin[:]
call[name[value].append, parameter[call[call[name[lines]][name[current_line]]][constant[1]]]]
<ast.AugAssign object at 0x7da1b0d40d60>
call[name[setattr], parameter[name[motif], name[key], call[name[str].join, parameter[constant[
], name[value]]]]]
if compare[call[name[action]][constant[0]] equal[==] constant[store_matrix]] begin[:]
variable[alphabet] assign[=] call[name[rest].split, parameter[]]
variable[alphabet_size] assign[=] call[name[len], parameter[name[alphabet]]]
variable[rows] assign[=] list[[]]
variable[pattern] assign[=] constant[]
<ast.AugAssign object at 0x7da1b0d40d30>
while compare[name[current_line] less[<] call[name[len], parameter[name[lines]]]] begin[:]
<ast.Tuple object at 0x7da1b0d42440> assign[=] call[name[lines]][name[current_line]]
if <ast.UnaryOp object at 0x7da1b0d43ee0> begin[:]
break
variable[values] assign[=] call[name[rest].split, parameter[]]
call[name[rows].append, parameter[<ast.ListComp object at 0x7da1b0d40040>]]
if compare[call[name[len], parameter[name[values]]] greater[>] name[alphabet_size]] begin[:]
<ast.AugAssign object at 0x7da1b0d42b90>
<ast.AugAssign object at 0x7da1b0d40130>
if compare[call[name[len], parameter[name[pattern]]] not_equal[!=] call[name[len], parameter[name[rows]]]] begin[:]
variable[pattern] assign[=] constant[None]
variable[matrix] assign[=] call[name[FrequencyMatrix].from_rows, parameter[name[alphabet], name[rows]]]
call[name[setattr], parameter[name[motif], call[name[action]][constant[1]], name[matrix]]]
if <ast.BoolOp object at 0x7da1b0d43f40> begin[:]
return[name[motif]]
|
keyword[def] identifier[parse_record] ( identifier[self] , identifier[lines] ):
literal[string]
identifier[temp_lines] =[]
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[fields] = identifier[line] . identifier[rstrip] ( literal[string] ). identifier[split] ( keyword[None] , literal[int] )
keyword[if] identifier[len] ( identifier[fields] )== literal[int] :
identifier[fields] . identifier[append] ( literal[string] )
identifier[temp_lines] . identifier[append] ( identifier[fields] )
identifier[lines] = identifier[temp_lines]
identifier[motif] = identifier[TransfacMotif] ()
identifier[current_line] = literal[int]
keyword[while] literal[int] :
keyword[if] identifier[current_line] >= identifier[len] ( identifier[lines] ):
keyword[break]
identifier[prefix] , identifier[rest] = identifier[lines] [ identifier[current_line] ]
keyword[if] identifier[prefix] keyword[not] keyword[in] identifier[self] . identifier[parse_actions] :
identifier[current_line] += literal[int]
keyword[continue]
identifier[action] = identifier[self] . identifier[parse_actions] [ identifier[prefix] ]
keyword[if] identifier[action] [ literal[int] ]== literal[string] :
identifier[key] = identifier[action] [ literal[int] ]
identifier[setattr] ( identifier[motif] , identifier[key] , identifier[rest] )
identifier[current_line] += literal[int]
keyword[if] identifier[action] [ literal[int] ]== literal[string] :
identifier[key] = identifier[action] [ literal[int] ]
keyword[if] keyword[not] identifier[getattr] ( identifier[motif] , identifier[key] ):
identifier[setattr] ( identifier[motif] , identifier[key] ,[])
identifier[getattr] ( identifier[motif] , identifier[key] ). identifier[append] ( identifier[rest] )
identifier[current_line] += literal[int]
keyword[if] identifier[action] [ literal[int] ]== literal[string] :
identifier[key] = identifier[action] [ literal[int] ]
identifier[k] , identifier[v] = identifier[rest] . identifier[strip] (). identifier[split] ( literal[string] , literal[int] )
keyword[if] keyword[not] identifier[getattr] ( identifier[motif] , identifier[key] ):
identifier[setattr] ( identifier[motif] , identifier[key] ,{})
identifier[getattr] ( identifier[motif] , identifier[key] )[ identifier[k] ]= identifier[v]
identifier[current_line] += literal[int]
keyword[if] identifier[action] [ literal[int] ]== literal[string] :
identifier[key] = identifier[action] [ literal[int] ]
identifier[value] =[]
keyword[while] identifier[current_line] < identifier[len] ( identifier[lines] ) keyword[and] identifier[lines] [ identifier[current_line] ][ literal[int] ]== identifier[prefix] :
identifier[value] . identifier[append] ( identifier[lines] [ identifier[current_line] ][ literal[int] ])
identifier[current_line] += literal[int]
identifier[setattr] ( identifier[motif] , identifier[key] , identifier[str] . identifier[join] ( literal[string] , identifier[value] ))
keyword[if] identifier[action] [ literal[int] ]== literal[string] :
identifier[alphabet] = identifier[rest] . identifier[split] ()
identifier[alphabet_size] = identifier[len] ( identifier[alphabet] )
identifier[rows] =[]
identifier[pattern] = literal[string]
identifier[current_line] += literal[int]
keyword[while] identifier[current_line] < identifier[len] ( identifier[lines] ):
identifier[prefix] , identifier[rest] = identifier[lines] [ identifier[current_line] ]
keyword[if] keyword[not] identifier[prefix] . identifier[isdigit] ():
keyword[break]
identifier[values] = identifier[rest] . identifier[split] ()
identifier[rows] . identifier[append] ([ identifier[float] ( identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[values] [: identifier[alphabet_size] ]])
keyword[if] identifier[len] ( identifier[values] )> identifier[alphabet_size] :
identifier[pattern] += identifier[values] [ identifier[alphabet_size] ]
identifier[current_line] += literal[int]
keyword[if] identifier[len] ( identifier[pattern] )!= identifier[len] ( identifier[rows] ):
identifier[pattern] = keyword[None]
identifier[matrix] = identifier[FrequencyMatrix] . identifier[from_rows] ( identifier[alphabet] , identifier[rows] )
identifier[setattr] ( identifier[motif] , identifier[action] [ literal[int] ], identifier[matrix] )
keyword[if] identifier[motif] . identifier[id] keyword[or] identifier[motif] . identifier[accession] keyword[or] identifier[motif] . identifier[name] :
keyword[return] identifier[motif]
|
def parse_record(self, lines):
"""
Parse a TRANSFAC record out of `lines` and return a motif.
"""
# Break lines up
temp_lines = []
for line in lines:
fields = line.rstrip('\r\n').split(None, 1)
if len(fields) == 1:
fields.append('') # depends on [control=['if'], data=[]]
temp_lines.append(fields) # depends on [control=['for'], data=['line']]
lines = temp_lines
# Fill in motif from lines
motif = TransfacMotif()
current_line = 0
while 1:
# Done parsing if no more lines to consume
if current_line >= len(lines):
break # depends on [control=['if'], data=[]]
# Remove prefix and first separator from line
(prefix, rest) = lines[current_line]
# No action for this prefix, just ignore the line
if prefix not in self.parse_actions:
current_line += 1
continue # depends on [control=['if'], data=[]]
# Get action for line
action = self.parse_actions[prefix]
# Store a single line value
if action[0] == 'store_single':
key = action[1]
setattr(motif, key, rest)
current_line += 1 # depends on [control=['if'], data=[]]
# Add a single line value to a list
if action[0] == 'store_single_list':
key = action[1]
if not getattr(motif, key):
setattr(motif, key, []) # depends on [control=['if'], data=[]]
getattr(motif, key).append(rest)
current_line += 1 # depends on [control=['if'], data=[]]
# Add a single line value to a dictionary
if action[0] == 'store_single_key_value':
key = action[1]
(k, v) = rest.strip().split('=', 1)
if not getattr(motif, key):
setattr(motif, key, {}) # depends on [control=['if'], data=[]]
getattr(motif, key)[k] = v
current_line += 1 # depends on [control=['if'], data=[]]
# Store a block of text
if action[0] == 'store_block':
key = action[1]
value = []
while current_line < len(lines) and lines[current_line][0] == prefix:
value.append(lines[current_line][1])
current_line += 1 # depends on [control=['while'], data=[]]
setattr(motif, key, str.join('\n', value)) # depends on [control=['if'], data=[]]
# Store a matrix
if action[0] == 'store_matrix':
# First line is alphabet
alphabet = rest.split()
alphabet_size = len(alphabet)
rows = []
pattern = ''
current_line += 1
# Next lines are the rows of the matrix (we allow 0 rows)
while current_line < len(lines):
(prefix, rest) = lines[current_line]
# Prefix should be a two digit 0 padded row number
if not prefix.isdigit():
break # depends on [control=['if'], data=[]]
# The first `alphabet_size` fields are the row values
values = rest.split()
rows.append([float(_) for _ in values[:alphabet_size]])
# TRANSFAC includes an extra column with the IUPAC code
if len(values) > alphabet_size:
pattern += values[alphabet_size] # depends on [control=['if'], data=['alphabet_size']]
current_line += 1 # depends on [control=['while'], data=['current_line']]
# Only store the pattern if it is the correct length (meaning
# that every row had an extra field)
if len(pattern) != len(rows):
pattern = None # depends on [control=['if'], data=[]]
matrix = FrequencyMatrix.from_rows(alphabet, rows)
setattr(motif, action[1], matrix) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# Only return a motif if we saw at least ID or AC or NA
if motif.id or motif.accession or motif.name:
return motif # depends on [control=['if'], data=[]]
|
def colorful_print(raw):
    '''print colorful text in terminal.'''
    highlight_pending = True
    in_detail_section = False
    for text in raw.split('\n'):
        if text:
            if highlight_pending:
                # The first non-empty line is the headline: highlight it once.
                highlight_pending = False
                print(colored(text, 'white', 'on_green') + '\n')
                continue
            if text.startswith('例'):
                # Example sentences are printed without any coloring.
                print(text + '\n')
                continue
            if text.startswith('【'):
                # A bracketed heading is highlighted and flips the palette
                # used for all following body lines.
                print(colored(text, 'white', 'on_green') + '\n')
                in_detail_section = True
                continue
        # Plain body text (including empty lines): cyan after a heading,
        # yellow before it.
        if in_detail_section:
            print(colored(text, 'cyan') + '\n')
        else:
            print(colored(text + '\n', 'yellow'))
|
def function[colorful_print, parameter[raw]]:
constant[print colorful text in terminal.]
variable[lines] assign[=] call[name[raw].split, parameter[constant[
]]]
variable[colorful] assign[=] constant[True]
variable[detail] assign[=] constant[False]
for taget[name[line]] in starred[name[lines]] begin[:]
if name[line] begin[:]
if name[colorful] begin[:]
variable[colorful] assign[=] constant[False]
call[name[print], parameter[binary_operation[call[name[colored], parameter[name[line], constant[white], constant[on_green]]] + constant[
]]]]
continue
if <ast.UnaryOp object at 0x7da1b07febf0> begin[:]
call[name[print], parameter[call[name[colored], parameter[binary_operation[name[line] + constant[
]], constant[yellow]]]]]
|
keyword[def] identifier[colorful_print] ( identifier[raw] ):
literal[string]
identifier[lines] = identifier[raw] . identifier[split] ( literal[string] )
identifier[colorful] = keyword[True]
identifier[detail] = keyword[False]
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[if] identifier[line] :
keyword[if] identifier[colorful] :
identifier[colorful] = keyword[False]
identifier[print] ( identifier[colored] ( identifier[line] , literal[string] , literal[string] )+ literal[string] )
keyword[continue]
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[print] ( identifier[line] + literal[string] )
keyword[continue]
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[print] ( identifier[colored] ( identifier[line] , literal[string] , literal[string] )+ literal[string] )
identifier[detail] = keyword[True]
keyword[continue]
keyword[if] keyword[not] identifier[detail] :
identifier[print] ( identifier[colored] ( identifier[line] + literal[string] , literal[string] ))
keyword[else] :
identifier[print] ( identifier[colored] ( identifier[line] , literal[string] )+ literal[string] )
|
def colorful_print(raw):
"""print colorful text in terminal."""
lines = raw.split('\n')
colorful = True
detail = False
for line in lines:
if line:
if colorful:
colorful = False
print(colored(line, 'white', 'on_green') + '\n')
continue # depends on [control=['if'], data=[]]
elif line.startswith('例'):
print(line + '\n')
continue # depends on [control=['if'], data=[]]
elif line.startswith('【'):
print(colored(line, 'white', 'on_green') + '\n')
detail = True
continue # depends on [control=['if'], data=[]]
if not detail:
print(colored(line + '\n', 'yellow')) # depends on [control=['if'], data=[]]
else:
print(colored(line, 'cyan') + '\n') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
|
def _create_invokeScript(self, network_file_path, commands, files_map):
        """invokeScript: Configure zLinux os network.

        The generated invokeScript is included in network.doscript; it
        moves each staged network configuration file to its destination,
        runs znetconfig to apply the configuration, then removes itself.

        :param network_file_path: directory in which to write the script
        :param commands: shell commands to place before the mv block
        :param files_map: iterable of dicts with 'source_file' and
            'target_path' keys describing files to move into place
        """
        LOG.debug('Creating invokeScript shell in the folder %s'
                  % network_file_path)
        script_name = "invokeScript.sh"
        shebang = "#!/bin/bash \n"
        parts = [commands]
        for entry in files_map:
            # potential risk: whether target_path exist
            parts.append('mv ' + entry['source_file'] + ' ' +
                         entry['target_path'] + '\n')
        parts.append('sleep 2\n')
        parts.append('/bin/bash /tmp/znetconfig.sh\n')
        parts.append('rm -rf invokeScript.sh\n')
        script_path = os.path.join(network_file_path, script_name)
        with open(script_path, "w") as handle:
            handle.write(shebang)
            handle.write(''.join(parts))
|
def function[_create_invokeScript, parameter[self, network_file_path, commands, files_map]]:
constant[invokeScript: Configure zLinux os network
invokeScript is included in the network.doscript, it is used to put
the network configuration file to the directory where it belongs and
call znetconfig to configure the network
]
call[name[LOG].debug, parameter[binary_operation[constant[Creating invokeScript shell in the folder %s] <ast.Mod object at 0x7da2590d6920> name[network_file_path]]]]
variable[invokeScript] assign[=] constant[invokeScript.sh]
variable[conf] assign[=] constant[#!/bin/bash
]
variable[command] assign[=] name[commands]
for taget[name[file]] in starred[name[files_map]] begin[:]
variable[target_path] assign[=] call[name[file]][constant[target_path]]
variable[source_file] assign[=] call[name[file]][constant[source_file]]
<ast.AugAssign object at 0x7da20e9b3ca0>
<ast.AugAssign object at 0x7da20e9b0400>
<ast.AugAssign object at 0x7da20e9b0f40>
<ast.AugAssign object at 0x7da20e9b2200>
variable[scriptfile] assign[=] call[name[os].path.join, parameter[name[network_file_path], name[invokeScript]]]
with call[name[open], parameter[name[scriptfile], constant[w]]] begin[:]
call[name[f].write, parameter[name[conf]]]
call[name[f].write, parameter[name[command]]]
|
keyword[def] identifier[_create_invokeScript] ( identifier[self] , identifier[network_file_path] , identifier[commands] ,
identifier[files_map] ):
literal[string]
identifier[LOG] . identifier[debug] ( literal[string]
% identifier[network_file_path] )
identifier[invokeScript] = literal[string]
identifier[conf] = literal[string]
identifier[command] = identifier[commands]
keyword[for] identifier[file] keyword[in] identifier[files_map] :
identifier[target_path] = identifier[file] [ literal[string] ]
identifier[source_file] = identifier[file] [ literal[string] ]
identifier[command] += literal[string] + identifier[source_file] + literal[string] + identifier[target_path] + literal[string]
identifier[command] += literal[string]
identifier[command] += literal[string]
identifier[command] += literal[string]
identifier[scriptfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[network_file_path] , identifier[invokeScript] )
keyword[with] identifier[open] ( identifier[scriptfile] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[conf] )
identifier[f] . identifier[write] ( identifier[command] )
|
def _create_invokeScript(self, network_file_path, commands, files_map):
"""invokeScript: Configure zLinux os network
invokeScript is included in the network.doscript, it is used to put
the network configuration file to the directory where it belongs and
call znetconfig to configure the network
"""
LOG.debug('Creating invokeScript shell in the folder %s' % network_file_path)
invokeScript = 'invokeScript.sh'
conf = '#!/bin/bash \n'
command = commands
for file in files_map:
target_path = file['target_path']
source_file = file['source_file']
# potential risk: whether target_path exist
command += 'mv ' + source_file + ' ' + target_path + '\n' # depends on [control=['for'], data=['file']]
command += 'sleep 2\n'
command += '/bin/bash /tmp/znetconfig.sh\n'
command += 'rm -rf invokeScript.sh\n'
scriptfile = os.path.join(network_file_path, invokeScript)
with open(scriptfile, 'w') as f:
f.write(conf)
f.write(command) # depends on [control=['with'], data=['f']]
|
def make_optimize_action(self, model, session=None, var_list=None, **kwargs):
        """
        Build Optimization action task with Tensorflow optimizer.
        :param model: GPflow model.
        :param session: Tensorflow session.
        :param var_list: List of Tensorflow variables to train.
        :param feed_dict: Tensorflow feed_dict dictionary.
        :param kwargs: Extra parameters passed to `make_optimize_tensor`.
        :return: Optimization action.
        """
        # Reject anything that is not a concrete GPflow model up front.
        if model is None or not isinstance(model, Model):
            raise ValueError('Unknown type passed for optimization.')
        session = model.enquire_session(session)
        # 'feed_dict' is consumed here; the rest of kwargs goes to
        # make_optimize_tensor below.
        extra_feeds = self._gen_feed_dict(model, kwargs.pop('feed_dict', None))
        run_kwargs = {'feed_dict': extra_feeds} if extra_feeds is not None else {}
        tensor = self.make_optimize_tensor(model, session, var_list=var_list, **kwargs)
        optimization = Optimization()
        optimization.with_optimizer(self)
        optimization.with_model(model)
        optimization.with_optimizer_tensor(tensor)
        optimization.with_run_kwargs(**run_kwargs)
        return optimization
|
def function[make_optimize_action, parameter[self, model, session, var_list]]:
constant[
Build Optimization action task with Tensorflow optimizer.
:param model: GPflow model.
:param session: Tensorflow session.
:param var_list: List of Tensorflow variables to train.
:param feed_dict: Tensorflow feed_dict dictionary.
:param kwargs: Extra parameters passed to `make_optimize_tensor`.
:return: Optimization action.
]
if <ast.BoolOp object at 0x7da1b1f484c0> begin[:]
<ast.Raise object at 0x7da1b1f48af0>
variable[session] assign[=] call[name[model].enquire_session, parameter[name[session]]]
variable[feed_dict] assign[=] call[name[kwargs].pop, parameter[constant[feed_dict], constant[None]]]
variable[feed_dict_update] assign[=] call[name[self]._gen_feed_dict, parameter[name[model], name[feed_dict]]]
variable[run_kwargs] assign[=] <ast.IfExp object at 0x7da1b1f48400>
variable[optimizer_tensor] assign[=] call[name[self].make_optimize_tensor, parameter[name[model], name[session]]]
variable[opt] assign[=] call[name[Optimization], parameter[]]
call[name[opt].with_optimizer, parameter[name[self]]]
call[name[opt].with_model, parameter[name[model]]]
call[name[opt].with_optimizer_tensor, parameter[name[optimizer_tensor]]]
call[name[opt].with_run_kwargs, parameter[]]
return[name[opt]]
|
keyword[def] identifier[make_optimize_action] ( identifier[self] , identifier[model] , identifier[session] = keyword[None] , identifier[var_list] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[model] keyword[is] keyword[None] keyword[or] keyword[not] identifier[isinstance] ( identifier[model] , identifier[Model] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[session] = identifier[model] . identifier[enquire_session] ( identifier[session] )
identifier[feed_dict] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[feed_dict_update] = identifier[self] . identifier[_gen_feed_dict] ( identifier[model] , identifier[feed_dict] )
identifier[run_kwargs] ={} keyword[if] identifier[feed_dict_update] keyword[is] keyword[None] keyword[else] { literal[string] : identifier[feed_dict_update] }
identifier[optimizer_tensor] = identifier[self] . identifier[make_optimize_tensor] ( identifier[model] , identifier[session] , identifier[var_list] = identifier[var_list] ,** identifier[kwargs] )
identifier[opt] = identifier[Optimization] ()
identifier[opt] . identifier[with_optimizer] ( identifier[self] )
identifier[opt] . identifier[with_model] ( identifier[model] )
identifier[opt] . identifier[with_optimizer_tensor] ( identifier[optimizer_tensor] )
identifier[opt] . identifier[with_run_kwargs] (** identifier[run_kwargs] )
keyword[return] identifier[opt]
|
def make_optimize_action(self, model, session=None, var_list=None, **kwargs):
"""
Build Optimization action task with Tensorflow optimizer.
:param model: GPflow model.
:param session: Tensorflow session.
:param var_list: List of Tensorflow variables to train.
:param feed_dict: Tensorflow feed_dict dictionary.
:param kwargs: Extra parameters passed to `make_optimize_tensor`.
:return: Optimization action.
"""
if model is None or not isinstance(model, Model):
raise ValueError('Unknown type passed for optimization.') # depends on [control=['if'], data=[]]
session = model.enquire_session(session)
feed_dict = kwargs.pop('feed_dict', None)
feed_dict_update = self._gen_feed_dict(model, feed_dict)
run_kwargs = {} if feed_dict_update is None else {'feed_dict': feed_dict_update}
optimizer_tensor = self.make_optimize_tensor(model, session, var_list=var_list, **kwargs)
opt = Optimization()
opt.with_optimizer(self)
opt.with_model(model)
opt.with_optimizer_tensor(optimizer_tensor)
opt.with_run_kwargs(**run_kwargs)
return opt
|
def metasay(ctx, inputfile, item):
    """Moo some dataset metadata to stdout.

    Python module: rio-metasay
    (https://github.com/sgillies/rio-plugin-example).
    """
    # Read the profile, letting the context manager close the dataset
    # before anything is printed.
    with rasterio.open(inputfile) as dataset:
        profile = dataset.profile
    click.echo(moothedata(profile, key=item))
|
def function[metasay, parameter[ctx, inputfile, item]]:
constant[Moo some dataset metadata to stdout.
Python module: rio-metasay
(https://github.com/sgillies/rio-plugin-example).
]
with call[name[rasterio].open, parameter[name[inputfile]]] begin[:]
variable[meta] assign[=] name[src].profile
call[name[click].echo, parameter[call[name[moothedata], parameter[name[meta]]]]]
|
keyword[def] identifier[metasay] ( identifier[ctx] , identifier[inputfile] , identifier[item] ):
literal[string]
keyword[with] identifier[rasterio] . identifier[open] ( identifier[inputfile] ) keyword[as] identifier[src] :
identifier[meta] = identifier[src] . identifier[profile]
identifier[click] . identifier[echo] ( identifier[moothedata] ( identifier[meta] , identifier[key] = identifier[item] ))
|
def metasay(ctx, inputfile, item):
"""Moo some dataset metadata to stdout.
Python module: rio-metasay
(https://github.com/sgillies/rio-plugin-example).
"""
with rasterio.open(inputfile) as src:
meta = src.profile # depends on [control=['with'], data=['src']]
click.echo(moothedata(meta, key=item))
|
def get_default_reference(self, method):
    """
    Returns the default reference for a method.
    :arg method: name of a method
    :type method: :class:`str`
    :return: reference, or ``None`` if no default is registered
    :rtype: :class:`Reference <pyxray.descriptor.Reference>` or :class:`str`
    """
    # Only known methods may be looked up; anything else is an error.
    if method in self._available_methods:
        return self._default_references.get(method)
    raise ValueError('Unknown method: {0}'.format(method))
|
def function[get_default_reference, parameter[self, method]]:
constant[
Returns the default reference for a method.
:arg method: name of a method
:type method: :class:`str`
:return: reference
:rtype: :class:`Reference <pyxray.descriptor.Reference>` or :class:`str`
]
if compare[name[method] <ast.NotIn object at 0x7da2590d7190> name[self]._available_methods] begin[:]
<ast.Raise object at 0x7da2041da8f0>
return[call[name[self]._default_references.get, parameter[name[method]]]]
|
keyword[def] identifier[get_default_reference] ( identifier[self] , identifier[method] ):
literal[string]
keyword[if] identifier[method] keyword[not] keyword[in] identifier[self] . identifier[_available_methods] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[method] ))
keyword[return] identifier[self] . identifier[_default_references] . identifier[get] ( identifier[method] )
|
def get_default_reference(self, method):
"""
Returns the default reference for a method.
:arg method: name of a method
:type method: :class:`str`
:return: reference
:rtype: :class:`Reference <pyxray.descriptor.Reference>` or :class:`str`
"""
if method not in self._available_methods:
raise ValueError('Unknown method: {0}'.format(method)) # depends on [control=['if'], data=['method']]
return self._default_references.get(method)
|
def root(path, root):
    """Return *path* unchanged if it is absolute; otherwise join it onto *root*."""
    return path if os.path.isabs(path) else os.path.join(root, path)
|
def function[root, parameter[path, root]]:
constant[ If 'path' is relative, it is rooted at 'root'. Otherwise, it's unchanged.
]
if call[name[os].path.isabs, parameter[name[path]]] begin[:]
return[name[path]]
|
keyword[def] identifier[root] ( identifier[path] , identifier[root] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[path] ):
keyword[return] identifier[path]
keyword[else] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[path] )
|
def root(path, root):
""" If 'path' is relative, it is rooted at 'root'. Otherwise, it's unchanged.
"""
if os.path.isabs(path):
return path # depends on [control=['if'], data=[]]
else:
return os.path.join(root, path)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.