code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def getRow(self, row, numberRows=None):
    """ Returns the specified row of the region (if the raster is set)

    If numberRows is provided (and is in 1-9), uses that instead of the
    raster's row count.

    :param row: 0-based row index; negative values count back from the
                last row, like sequence indexing.
    :param numberRows: Optional override for the number of rows.
    :return: A Region covering the requested row, or ``self`` when no
             raster has been configured.
    """
    row = int(row)
    # No raster configured -> nothing to subdivide; return whole region.
    if self._raster[0] == 0 or self._raster[1] == 0:
        return self
    if numberRows is None or numberRows < 1 or numberRows > 9:
        numberRows = self._raster[0]
    rowHeight = self.h / numberRows
    if row < 0:
        # If row is negative, count backwards from the end (-1 is the
        # last row). The original subtracted (numberRows - row), which
        # pushed the index *further* out of range; adding is correct.
        row = numberRows + row
    if row < 0:
        # Still out of range below -> clamp to the last row
        return Region(self.x, self.y+self.h-rowHeight, self.w, rowHeight)
    elif row >= numberRows:
        # Out of range above -> clamp to the first row. The original
        # used ">", so row == numberRows yielded a region starting just
        # below the bottom edge of this region.
        return Region(self.x, self.y, self.w, rowHeight)
    return Region(self.x, self.y + (row * rowHeight), self.w, rowHeight)
constant[ Returns the specified row of the region (if the raster is set)
If numberRows is provided, uses that instead of the raster
]
variable[row] assign[=] call[name[int], parameter[name[row]]]
if <ast.BoolOp object at 0x7da1b1111270> begin[:]
return[name[self]]
if <ast.BoolOp object at 0x7da1b1111870> begin[:]
variable[numberRows] assign[=] call[name[self]._raster][constant[0]]
variable[rowHeight] assign[=] binary_operation[name[self].h / name[numberRows]]
if compare[name[row] less[<] constant[0]] begin[:]
variable[row] assign[=] binary_operation[name[numberRows] - name[row]]
if compare[name[row] less[<] constant[0]] begin[:]
return[call[name[Region], parameter[name[self].x, binary_operation[binary_operation[name[self].y + name[self].h] - name[rowHeight]], name[self].w, name[rowHeight]]]]
return[call[name[Region], parameter[name[self].x, binary_operation[name[self].y + binary_operation[name[row] * name[rowHeight]]], name[self].w, name[rowHeight]]]] | keyword[def] identifier[getRow] ( identifier[self] , identifier[row] , identifier[numberRows] = keyword[None] ):
literal[string]
identifier[row] = identifier[int] ( identifier[row] )
keyword[if] identifier[self] . identifier[_raster] [ literal[int] ]== literal[int] keyword[or] identifier[self] . identifier[_raster] [ literal[int] ]== literal[int] :
keyword[return] identifier[self]
keyword[if] identifier[numberRows] keyword[is] keyword[None] keyword[or] identifier[numberRows] < literal[int] keyword[or] identifier[numberRows] > literal[int] :
identifier[numberRows] = identifier[self] . identifier[_raster] [ literal[int] ]
identifier[rowHeight] = identifier[self] . identifier[h] / identifier[numberRows]
keyword[if] identifier[row] < literal[int] :
identifier[row] = identifier[numberRows] - identifier[row]
keyword[if] identifier[row] < literal[int] :
keyword[return] identifier[Region] ( identifier[self] . identifier[x] , identifier[self] . identifier[y] + identifier[self] . identifier[h] - identifier[rowHeight] , identifier[self] . identifier[w] , identifier[rowHeight] )
keyword[elif] identifier[row] > identifier[numberRows] :
keyword[return] identifier[Region] ( identifier[self] . identifier[x] , identifier[self] . identifier[y] , identifier[self] . identifier[w] , identifier[rowHeight] )
keyword[return] identifier[Region] ( identifier[self] . identifier[x] , identifier[self] . identifier[y] +( identifier[row] * identifier[rowHeight] ), identifier[self] . identifier[w] , identifier[rowHeight] ) | def getRow(self, row, numberRows=None):
""" Returns the specified row of the region (if the raster is set)
If numberRows is provided, uses that instead of the raster
"""
row = int(row)
if self._raster[0] == 0 or self._raster[1] == 0:
return self # depends on [control=['if'], data=[]]
if numberRows is None or numberRows < 1 or numberRows > 9:
numberRows = self._raster[0] # depends on [control=['if'], data=[]]
rowHeight = self.h / numberRows
if row < 0:
# If row is negative, count backwards from the end
row = numberRows - row
if row < 0:
# Bad row index, return last row
return Region(self.x, self.y + self.h - rowHeight, self.w, rowHeight) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['row']]
elif row > numberRows:
# Bad row index, return first row
return Region(self.x, self.y, self.w, rowHeight) # depends on [control=['if'], data=[]]
return Region(self.x, self.y + row * rowHeight, self.w, rowHeight) |
def unzip(filename):
    """Extract the given zip archive into the SVM directory (~/.svm).

    :param filename: path of the zip archive to extract
    :return: True on success
    """
    archive = zipfile.ZipFile(filename, "r")
    try:
        archive.extractall(Spark.svm_path())
    finally:
        archive.close()
    return True
constant[
Unzips specified file into ~/.svm
:param filename:
:return:
]
with call[name[zipfile].ZipFile, parameter[name[filename], constant[r]]] begin[:]
call[name[zip_ref].extractall, parameter[call[name[Spark].svm_path, parameter[]]]]
return[constant[True]] | keyword[def] identifier[unzip] ( identifier[filename] ):
literal[string]
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[filename] , literal[string] ) keyword[as] identifier[zip_ref] :
identifier[zip_ref] . identifier[extractall] ( identifier[Spark] . identifier[svm_path] ())
keyword[return] keyword[True] | def unzip(filename):
"""
Unzips specified file into ~/.svm
:param filename:
:return:
"""
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall(Spark.svm_path()) # depends on [control=['with'], data=['zip_ref']]
return True |
def load_contents(self):
    """Read ``<name>.csv`` and insert each line as a row in the database.

    Double-quote characters are removed from every line before it is
    split on commas and handed to ``make_row``.
    """
    with open(self.name + ".csv") as source:
        for raw_line in source.readlines():
            cleaned = raw_line.replace("\"", "").strip()
            self.put_row(make_row(self.columns, cleaned.split(',')))
constant[
Loads contents of Database from a filename database.csv.
]
with call[name[open], parameter[binary_operation[name[self].name + constant[.csv]]]] begin[:]
variable[list_of_rows] assign[=] call[name[f].readlines, parameter[]]
variable[list_of_rows] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da18bc729e0>, call[name[map], parameter[<ast.Lambda object at 0x7da18bc72bf0>, name[list_of_rows]]]]]
for taget[name[row]] in starred[name[list_of_rows]] begin[:]
call[name[self].put_row, parameter[call[name[make_row], parameter[name[self].columns, call[name[row].split, parameter[constant[,]]]]]]] | keyword[def] identifier[load_contents] ( identifier[self] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[name] + literal[string] ) keyword[as] identifier[f] :
identifier[list_of_rows] = identifier[f] . identifier[readlines] ()
identifier[list_of_rows] = identifier[map] (
keyword[lambda] identifier[x] : identifier[x] . identifier[strip] (),
identifier[map] (
keyword[lambda] identifier[x] : identifier[x] . identifier[replace] ( literal[string] , literal[string] ),
identifier[list_of_rows]
)
)
keyword[for] identifier[row] keyword[in] identifier[list_of_rows] :
identifier[self] . identifier[put_row] ( identifier[make_row] ( identifier[self] . identifier[columns] , identifier[row] . identifier[split] ( literal[string] ))) | def load_contents(self):
"""
Loads contents of Database from a filename database.csv.
"""
with open(self.name + '.csv') as f:
list_of_rows = f.readlines() # depends on [control=['with'], data=['f']]
list_of_rows = map(lambda x: x.strip(), map(lambda x: x.replace('"', ''), list_of_rows))
for row in list_of_rows:
self.put_row(make_row(self.columns, row.split(','))) # depends on [control=['for'], data=['row']] |
def get_metadata(url, validate_cert=True):
    """
    Gets the metadata XML from the provided URL
    :param url: Url where the XML of the Identity Provider Metadata is published.
    :type url: string
    :param validate_cert: If the url uses https schema, that flag enables or not the verification of the associated certificate.
    :type validate_cert: bool
    :returns: metadata XML
    :rtype: string
    """
    if validate_cert:
        response = urllib2.urlopen(url)
    else:
        # Build an SSL context that skips certificate/hostname checks.
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        response = urllib2.urlopen(url, context=context)
    xml = response.read()
    valid = False
    if xml:
        try:
            root = fromstring(xml, forbid_dtd=True)
            # The document is an IdP metadata file only if it declares
            # at least one IDPSSODescriptor element.
            if OneLogin_Saml2_Utils.query(root, '//md:IDPSSODescriptor'):
                valid = True
        except Exception:
            # Unparseable XML is treated the same as non-IdP XML.
            pass
    if not valid:
        raise Exception('Not valid IdP XML found from URL: %s' % (url))
    return xml
constant[
Gets the metadata XML from the provided URL
:param url: Url where the XML of the Identity Provider Metadata is published.
:type url: string
:param validate_cert: If the url uses https schema, that flag enables or not the verification of the associated certificate.
:type validate_cert: bool
:returns: metadata XML
:rtype: string
]
variable[valid] assign[=] constant[False]
if name[validate_cert] begin[:]
variable[response] assign[=] call[name[urllib2].urlopen, parameter[name[url]]]
variable[xml] assign[=] call[name[response].read, parameter[]]
if name[xml] begin[:]
<ast.Try object at 0x7da1b17988e0>
if <ast.UnaryOp object at 0x7da1b179ab00> begin[:]
<ast.Raise object at 0x7da1b1799ea0>
return[name[xml]] | keyword[def] identifier[get_metadata] ( identifier[url] , identifier[validate_cert] = keyword[True] ):
literal[string]
identifier[valid] = keyword[False]
keyword[if] identifier[validate_cert] :
identifier[response] = identifier[urllib2] . identifier[urlopen] ( identifier[url] )
keyword[else] :
identifier[ctx] = identifier[ssl] . identifier[create_default_context] ()
identifier[ctx] . identifier[check_hostname] = keyword[False]
identifier[ctx] . identifier[verify_mode] = identifier[ssl] . identifier[CERT_NONE]
identifier[response] = identifier[urllib2] . identifier[urlopen] ( identifier[url] , identifier[context] = identifier[ctx] )
identifier[xml] = identifier[response] . identifier[read] ()
keyword[if] identifier[xml] :
keyword[try] :
identifier[dom] = identifier[fromstring] ( identifier[xml] , identifier[forbid_dtd] = keyword[True] )
identifier[idp_descriptor_nodes] = identifier[OneLogin_Saml2_Utils] . identifier[query] ( identifier[dom] , literal[string] )
keyword[if] identifier[idp_descriptor_nodes] :
identifier[valid] = keyword[True]
keyword[except] identifier[Exception] :
keyword[pass]
keyword[if] keyword[not] identifier[valid] :
keyword[raise] identifier[Exception] ( literal[string] %( identifier[url] ))
keyword[return] identifier[xml] | def get_metadata(url, validate_cert=True):
"""
Gets the metadata XML from the provided URL
:param url: Url where the XML of the Identity Provider Metadata is published.
:type url: string
:param validate_cert: If the url uses https schema, that flag enables or not the verification of the associated certificate.
:type validate_cert: bool
:returns: metadata XML
:rtype: string
"""
valid = False
if validate_cert:
response = urllib2.urlopen(url) # depends on [control=['if'], data=[]]
else:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
response = urllib2.urlopen(url, context=ctx)
xml = response.read()
if xml:
try:
dom = fromstring(xml, forbid_dtd=True)
idp_descriptor_nodes = OneLogin_Saml2_Utils.query(dom, '//md:IDPSSODescriptor')
if idp_descriptor_nodes:
valid = True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if not valid:
raise Exception('Not valid IdP XML found from URL: %s' % url) # depends on [control=['if'], data=[]]
return xml |
def did_composer_install(dir):
    '''
    Test to see if the vendor directory exists in this directory

    dir
        Directory location of the composer.json file

    CLI Example:

    .. code-block:: bash

        salt '*' composer.did_composer_install /var/www/application
    '''
    # The original local was named ``lockFile``, a misnomer: the path
    # being tested is the vendor directory created by ``composer
    # install``, not composer.lock.
    vendor_dir = "{0}/vendor".format(dir)
    # Return the existence test directly instead of if/True/False.
    return os.path.exists(vendor_dir)
constant[
Test to see if the vendor directory exists in this directory
dir
Directory location of the composer.json file
CLI Example:
.. code-block:: bash
salt '*' composer.did_composer_install /var/www/application
]
variable[lockFile] assign[=] call[constant[{0}/vendor].format, parameter[name[dir]]]
if call[name[os].path.exists, parameter[name[lockFile]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[did_composer_install] ( identifier[dir] ):
literal[string]
identifier[lockFile] = literal[string] . identifier[format] ( identifier[dir] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[lockFile] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def did_composer_install(dir):
"""
Test to see if the vendor directory exists in this directory
dir
Directory location of the composer.json file
CLI Example:
.. code-block:: bash
salt '*' composer.did_composer_install /var/www/application
"""
lockFile = '{0}/vendor'.format(dir)
if os.path.exists(lockFile):
return True # depends on [control=['if'], data=[]]
return False |
def append(self, tag):
    '''
    append - Append an item to this tag collection

    Also records the tag's uid in the collection's ``uids`` set —
    presumably to support fast membership checks / de-duplication
    elsewhere in the collection class (confirm against its other
    methods).

    @param tag - an AdvancedTag
    '''
    list.append(self, tag)
    self.uids.add(tag.uid)
constant[
append - Append an item to this tag collection
@param tag - an AdvancedTag
]
call[name[list].append, parameter[name[self], name[tag]]]
call[name[self].uids.add, parameter[name[tag].uid]] | keyword[def] identifier[append] ( identifier[self] , identifier[tag] ):
literal[string]
identifier[list] . identifier[append] ( identifier[self] , identifier[tag] )
identifier[self] . identifier[uids] . identifier[add] ( identifier[tag] . identifier[uid] ) | def append(self, tag):
"""
append - Append an item to this tag collection
@param tag - an AdvancedTag
"""
list.append(self, tag)
self.uids.add(tag.uid) |
def call(self, additional_fields, restriction, shape, depth, max_items, offset):
    """
    Find subfolders of a folder.

    :param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
    :param shape: The set of attributes to return
    :param depth: How deep in the folder structure to search for folders
    :param max_items: The maximum number of items to return
    :param offset: the offset relative to the first item in the item collection. Usually 0.
    :return: XML elements for the matching folders
    """
    from .folders import Folder
    # All requested folders must live under a single root hierarchy.
    hierarchy_roots = {folder.root for folder in self.folders}
    if len(hierarchy_roots) != 1:
        raise ValueError('FindFolder must be called with folders in the same root hierarchy (%r)'
                         % hierarchy_roots)
    common_root = hierarchy_roots.pop()
    paged_results = self._paged_call(
        payload_func=self.get_payload,
        max_items=max_items,
        additional_fields=additional_fields,
        restriction=restriction,
        shape=shape,
        depth=depth,
        page_size=self.chunk_size,
        offset=offset,
    )
    for item in paged_results:
        # Errors from paging are yielded through unchanged; everything
        # else is a folder XML element.
        if isinstance(item, Exception):
            yield item
        else:
            yield Folder.from_xml(elem=item, root=common_root)
constant[
Find subfolders of a folder.
:param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
:param shape: The set of attributes to return
:param depth: How deep in the folder structure to search for folders
:param max_items: The maximum number of items to return
:param offset: the offset relative to the first item in the item collection. Usually 0.
:return: XML elements for the matching folders
]
from relative_module[folders] import module[Folder]
variable[roots] assign[=] <ast.SetComp object at 0x7da20e954250>
if compare[call[name[len], parameter[name[roots]]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da20e955c00>
variable[root] assign[=] call[name[roots].pop, parameter[]]
for taget[name[elem]] in starred[call[name[self]._paged_call, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[elem], name[Exception]]] begin[:]
<ast.Yield object at 0x7da207f03be0>
continue
<ast.Yield object at 0x7da207f00ac0> | keyword[def] identifier[call] ( identifier[self] , identifier[additional_fields] , identifier[restriction] , identifier[shape] , identifier[depth] , identifier[max_items] , identifier[offset] ):
literal[string]
keyword[from] . identifier[folders] keyword[import] identifier[Folder]
identifier[roots] ={ identifier[f] . identifier[root] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[folders] }
keyword[if] identifier[len] ( identifier[roots] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[roots] )
identifier[root] = identifier[roots] . identifier[pop] ()
keyword[for] identifier[elem] keyword[in] identifier[self] . identifier[_paged_call] ( identifier[payload_func] = identifier[self] . identifier[get_payload] , identifier[max_items] = identifier[max_items] ,** identifier[dict] (
identifier[additional_fields] = identifier[additional_fields] ,
identifier[restriction] = identifier[restriction] ,
identifier[shape] = identifier[shape] ,
identifier[depth] = identifier[depth] ,
identifier[page_size] = identifier[self] . identifier[chunk_size] ,
identifier[offset] = identifier[offset] ,
)):
keyword[if] identifier[isinstance] ( identifier[elem] , identifier[Exception] ):
keyword[yield] identifier[elem]
keyword[continue]
keyword[yield] identifier[Folder] . identifier[from_xml] ( identifier[elem] = identifier[elem] , identifier[root] = identifier[root] ) | def call(self, additional_fields, restriction, shape, depth, max_items, offset):
"""
Find subfolders of a folder.
:param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
:param shape: The set of attributes to return
:param depth: How deep in the folder structure to search for folders
:param max_items: The maximum number of items to return
:param offset: the offset relative to the first item in the item collection. Usually 0.
:return: XML elements for the matching folders
"""
from .folders import Folder
roots = {f.root for f in self.folders}
if len(roots) != 1:
raise ValueError('FindFolder must be called with folders in the same root hierarchy (%r)' % roots) # depends on [control=['if'], data=[]]
root = roots.pop()
for elem in self._paged_call(payload_func=self.get_payload, max_items=max_items, **dict(additional_fields=additional_fields, restriction=restriction, shape=shape, depth=depth, page_size=self.chunk_size, offset=offset)):
if isinstance(elem, Exception):
yield elem
continue # depends on [control=['if'], data=[]]
yield Folder.from_xml(elem=elem, root=root) # depends on [control=['for'], data=['elem']] |
def _parse_connection_string(connstr):
"""
MSSQL style connection string parser
Returns normalized dictionary of connection string parameters
"""
res = {}
for item in connstr.split(';'):
item = item.strip()
if not item:
continue
key, value = item.split('=', 1)
key = key.strip().lower().replace(' ', '_')
value = value.strip()
res[key] = value
return res | def function[_parse_connection_string, parameter[connstr]]:
constant[
MSSQL style connection string parser
Returns normalized dictionary of connection string parameters
]
variable[res] assign[=] dictionary[[], []]
for taget[name[item]] in starred[call[name[connstr].split, parameter[constant[;]]]] begin[:]
variable[item] assign[=] call[name[item].strip, parameter[]]
if <ast.UnaryOp object at 0x7da1b054ae30> begin[:]
continue
<ast.Tuple object at 0x7da1b05499c0> assign[=] call[name[item].split, parameter[constant[=], constant[1]]]
variable[key] assign[=] call[call[call[name[key].strip, parameter[]].lower, parameter[]].replace, parameter[constant[ ], constant[_]]]
variable[value] assign[=] call[name[value].strip, parameter[]]
call[name[res]][name[key]] assign[=] name[value]
return[name[res]] | keyword[def] identifier[_parse_connection_string] ( identifier[connstr] ):
literal[string]
identifier[res] ={}
keyword[for] identifier[item] keyword[in] identifier[connstr] . identifier[split] ( literal[string] ):
identifier[item] = identifier[item] . identifier[strip] ()
keyword[if] keyword[not] identifier[item] :
keyword[continue]
identifier[key] , identifier[value] = identifier[item] . identifier[split] ( literal[string] , literal[int] )
identifier[key] = identifier[key] . identifier[strip] (). identifier[lower] (). identifier[replace] ( literal[string] , literal[string] )
identifier[value] = identifier[value] . identifier[strip] ()
identifier[res] [ identifier[key] ]= identifier[value]
keyword[return] identifier[res] | def _parse_connection_string(connstr):
"""
MSSQL style connection string parser
Returns normalized dictionary of connection string parameters
"""
res = {}
for item in connstr.split(';'):
item = item.strip()
if not item:
continue # depends on [control=['if'], data=[]]
(key, value) = item.split('=', 1)
key = key.strip().lower().replace(' ', '_')
value = value.strip()
res[key] = value # depends on [control=['for'], data=['item']]
return res |
def create_followup(self, post, content, anonymous=False):
    """Create a follow-up on a post `post`.

    It seems like if the post has `<p>` tags, then it's treated as HTML,
    but is treated as text otherwise. You'll want to provide `content`
    accordingly.

    :type post: dict|str|int
    :param post: Either the post dict returned by another API method, or
        the `cid` field of that post.
    :type content: str
    :param content: The content of the followup.
    :type anonymous: bool
    :param anonymous: Whether or not to post anonymously.
    :rtype: dict
    :returns: Dictionary with information about the created follow-up.
    """
    try:
        cid = post["id"]
    except (KeyError, TypeError):
        # ``post`` is already a cid: subscripting a str/int with "id"
        # raises TypeError (previously uncaught, so the documented
        # str/int call form crashed); a dict without "id" raises
        # KeyError.
        cid = post
    params = {
        "cid": cid,
        "type": "followup",
        # For followups, the content is actually put into the subject.
        "subject": content,
        "content": "",
        "anonymous": "yes" if anonymous else "no",
    }
    return self._rpc.content_create(params)
constant[Create a follow-up on a post `post`.
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:type content: str
:param content: The content of the followup.
:type anonymous: bool
:param anonymous: Whether or not to post anonymously.
:rtype: dict
:returns: Dictionary with information about the created follow-up.
]
<ast.Try object at 0x7da2044c1750>
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0feda80>, <ast.Constant object at 0x7da1b0fef220>, <ast.Constant object at 0x7da1b0fec490>, <ast.Constant object at 0x7da1b0fecac0>, <ast.Constant object at 0x7da1b0fefbb0>], [<ast.Name object at 0x7da1b0fed870>, <ast.Constant object at 0x7da1b0fee650>, <ast.Name object at 0x7da1b0fef7f0>, <ast.Constant object at 0x7da1b0f61120>, <ast.IfExp object at 0x7da1b0f62110>]]
return[call[name[self]._rpc.content_create, parameter[name[params]]]] | keyword[def] identifier[create_followup] ( identifier[self] , identifier[post] , identifier[content] , identifier[anonymous] = keyword[False] ):
literal[string]
keyword[try] :
identifier[cid] = identifier[post] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[cid] = identifier[post]
identifier[params] ={
literal[string] : identifier[cid] ,
literal[string] : literal[string] ,
literal[string] : identifier[content] ,
literal[string] : literal[string] ,
literal[string] : literal[string] keyword[if] identifier[anonymous] keyword[else] literal[string] ,
}
keyword[return] identifier[self] . identifier[_rpc] . identifier[content_create] ( identifier[params] ) | def create_followup(self, post, content, anonymous=False):
"""Create a follow-up on a post `post`.
It seems like if the post has `<p>` tags, then it's treated as HTML,
but is treated as text otherwise. You'll want to provide `content`
accordingly.
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:type content: str
:param content: The content of the followup.
:type anonymous: bool
:param anonymous: Whether or not to post anonymously.
:rtype: dict
:returns: Dictionary with information about the created follow-up.
"""
try:
cid = post['id'] # depends on [control=['try'], data=[]]
except KeyError:
cid = post # depends on [control=['except'], data=[]]
# For followups, the content is actually put into the subject.
params = {'cid': cid, 'type': 'followup', 'subject': content, 'content': '', 'anonymous': 'yes' if anonymous else 'no'}
return self._rpc.content_create(params) |
def generate_hcard(template=None, **kwargs):
    """Generate a hCard document.

    Template specific key-value pairs need to be passed as ``kwargs``, see classes.

    :arg template: Ready template to fill with args, for example "diaspora" (optional)
    :returns: HTML document (str)
    """
    # Only the diaspora template is implemented so far.
    if template != "diaspora":
        raise NotImplementedError()
    return DiasporaHCard(**kwargs).render()
constant[Generate a hCard document.
Template specific key-value pairs need to be passed as ``kwargs``, see classes.
:arg template: Ready template to fill with args, for example "diaspora" (optional)
:returns: HTML document (str)
]
if compare[name[template] equal[==] constant[diaspora]] begin[:]
variable[hcard] assign[=] call[name[DiasporaHCard], parameter[]]
return[call[name[hcard].render, parameter[]]] | keyword[def] identifier[generate_hcard] ( identifier[template] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[template] == literal[string] :
identifier[hcard] = identifier[DiasporaHCard] (** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] ()
keyword[return] identifier[hcard] . identifier[render] () | def generate_hcard(template=None, **kwargs):
"""Generate a hCard document.
Template specific key-value pairs need to be passed as ``kwargs``, see classes.
:arg template: Ready template to fill with args, for example "diaspora" (optional)
:returns: HTML document (str)
"""
if template == 'diaspora':
hcard = DiasporaHCard(**kwargs) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError()
return hcard.render() |
def calculate_oobatake_dG(seq, temp):
    """Free energy of unfolding (dG) computed by the Oobatake method.

    NOTE(review): the original summary said units of cal/mol while its
    Returns section said J/mol — the actual unit depends on what the
    dH/dS helpers return and should be confirmed.

    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence
        temp (float): Temperature in degrees C

    Returns:
        float: Free energy of unfolding dG
    """
    enthalpy = calculate_oobatake_dH(seq, temp)
    entropy = calculate_oobatake_dS(seq, temp)
    kelvin = temp + 273.15
    # 563.552 - a correction for N- and C-terminal group (approximated
    # from 7 examples in the paper)
    return (enthalpy - kelvin * entropy) - 563.552
constant[Get free energy of unfolding (dG) using Oobatake method in units cal/mol.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
temp (float): Temperature in degrees C
Returns:
float: Free energy of unfolding dG (J/mol)
]
variable[dH] assign[=] call[name[calculate_oobatake_dH], parameter[name[seq], name[temp]]]
variable[dS] assign[=] call[name[calculate_oobatake_dS], parameter[name[seq], name[temp]]]
variable[dG] assign[=] binary_operation[name[dH] - binary_operation[binary_operation[name[temp] + constant[273.15]] * name[dS]]]
return[binary_operation[name[dG] - constant[563.552]]] | keyword[def] identifier[calculate_oobatake_dG] ( identifier[seq] , identifier[temp] ):
literal[string]
identifier[dH] = identifier[calculate_oobatake_dH] ( identifier[seq] , identifier[temp] )
identifier[dS] = identifier[calculate_oobatake_dS] ( identifier[seq] , identifier[temp] )
identifier[dG] = identifier[dH] -( identifier[temp] + literal[int] )* identifier[dS]
keyword[return] identifier[dG] - literal[int] | def calculate_oobatake_dG(seq, temp):
"""Get free energy of unfolding (dG) using Oobatake method in units cal/mol.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
temp (float): Temperature in degrees C
Returns:
float: Free energy of unfolding dG (J/mol)
"""
dH = calculate_oobatake_dH(seq, temp)
dS = calculate_oobatake_dS(seq, temp)
dG = dH - (temp + 273.15) * dS
# 563.552 - a correction for N- and C-terminal group (approximated from 7 examples in the paper)
return dG - 563.552 |
def paintEvent(self, event):
    """
    Handles the drawing for this widget and its selection region.

    :param event | <QPaintEvent>
    """
    # Dashed red outline around the selection rectangle.
    outline = QPen(Qt.DashLine)
    outline.setColor(QColor('red'))
    # Semi-transparent black fill.
    fill = QColor('black')
    fill.setAlpha(100)
    with XPainter(self) as painter:
        painter.setPen(outline)
        painter.setBrush(fill)
        painter.drawRect(self._region)
painter.drawRect(self._region) | def function[paintEvent, parameter[self, event]]:
constant[
Handles the drawing for this widget and its selection region.
:param event | <QPaintEvent>
]
variable[pen] assign[=] call[name[QPen], parameter[name[Qt].DashLine]]
call[name[pen].setColor, parameter[call[name[QColor], parameter[constant[red]]]]]
with call[name[XPainter], parameter[name[self]]] begin[:]
call[name[painter].setPen, parameter[name[pen]]]
variable[clr] assign[=] call[name[QColor], parameter[constant[black]]]
call[name[clr].setAlpha, parameter[constant[100]]]
call[name[painter].setBrush, parameter[name[clr]]]
call[name[painter].drawRect, parameter[name[self]._region]] | keyword[def] identifier[paintEvent] ( identifier[self] , identifier[event] ):
literal[string]
identifier[pen] = identifier[QPen] ( identifier[Qt] . identifier[DashLine] )
identifier[pen] . identifier[setColor] ( identifier[QColor] ( literal[string] ))
keyword[with] identifier[XPainter] ( identifier[self] ) keyword[as] identifier[painter] :
identifier[painter] . identifier[setPen] ( identifier[pen] )
identifier[clr] = identifier[QColor] ( literal[string] )
identifier[clr] . identifier[setAlpha] ( literal[int] )
identifier[painter] . identifier[setBrush] ( identifier[clr] )
identifier[painter] . identifier[drawRect] ( identifier[self] . identifier[_region] ) | def paintEvent(self, event):
"""
Handles the drawing for this widget and its selection region.
:param event | <QPaintEvent>
"""
pen = QPen(Qt.DashLine)
pen.setColor(QColor('red'))
with XPainter(self) as painter:
painter.setPen(pen)
clr = QColor('black')
clr.setAlpha(100)
painter.setBrush(clr)
painter.drawRect(self._region) # depends on [control=['with'], data=['painter']] |
def save(self, model):
    """
    Attach a model instance to the parent models.

    :param model: The model instance to attach
    :type model: Model

    :rtype: Model
    """
    # Point the child's foreign key at this parent, then persist it.
    model.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
    return model if model.save() else False
constant[
Attach a model instance to the parent models.
:param model: The model instance to attach
:type model: Model
:rtype: Model
]
call[name[model].set_attribute, parameter[call[name[self].get_plain_foreign_key, parameter[]], call[name[self].get_parent_key, parameter[]]]]
if call[name[model].save, parameter[]] begin[:]
return[name[model]]
return[constant[False]] | keyword[def] identifier[save] ( identifier[self] , identifier[model] ):
literal[string]
identifier[model] . identifier[set_attribute] ( identifier[self] . identifier[get_plain_foreign_key] (), identifier[self] . identifier[get_parent_key] ())
keyword[if] identifier[model] . identifier[save] ():
keyword[return] identifier[model]
keyword[return] keyword[False] | def save(self, model):
"""
Attach a model instance to the parent models.
:param model: The model instance to attach
:type model: Model
:rtype: Model
"""
model.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
if model.save():
return model # depends on [control=['if'], data=[]]
return False |
def _preset(self, name, args, kwargs):
"""Generic preset function, marks a parameter or config for presetting."""
if self.f_contains(name, shortcuts=False):
raise ValueError('Parameter `%s` is already part of your trajectory, use the normal'
'accessing routine to change config.' % name)
else:
self._changed_default_parameters[name] = (args, kwargs) | def function[_preset, parameter[self, name, args, kwargs]]:
constant[Generic preset function, marks a parameter or config for presetting.]
if call[name[self].f_contains, parameter[name[name]]] begin[:]
<ast.Raise object at 0x7da1b02a4340> | keyword[def] identifier[_preset] ( identifier[self] , identifier[name] , identifier[args] , identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[f_contains] ( identifier[name] , identifier[shortcuts] = keyword[False] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[name] )
keyword[else] :
identifier[self] . identifier[_changed_default_parameters] [ identifier[name] ]=( identifier[args] , identifier[kwargs] ) | def _preset(self, name, args, kwargs):
"""Generic preset function, marks a parameter or config for presetting."""
if self.f_contains(name, shortcuts=False):
raise ValueError('Parameter `%s` is already part of your trajectory, use the normalaccessing routine to change config.' % name) # depends on [control=['if'], data=[]]
else:
self._changed_default_parameters[name] = (args, kwargs) |
def get_mapping(version=1, exported_at=None, app_name=None):
"""
Return Heroku Connect mapping for the entire project.
Args:
version (int): Version of the Heroku Connect mapping, default: ``1``.
exported_at (datetime.datetime): Time the export was created, default is ``now()``.
app_name (str): Name of Heroku application associated with Heroku Connect the add-on.
Returns:
dict: Heroku Connect mapping.
Note:
The version does not need to be incremented. Exports from the Heroku Connect
website will always have the version number ``1``.
"""
if exported_at is None:
exported_at = timezone.now()
app_name = app_name or settings.HEROKU_CONNECT_APP_NAME
return {
'version': version,
'connection': {
'organization_id': settings.HEROKU_CONNECT_ORGANIZATION_ID,
'app_name': app_name,
'exported_at': exported_at.isoformat(),
},
'mappings': [
model.get_heroku_connect_mapping()
for model in get_heroku_connect_models()
]
} | def function[get_mapping, parameter[version, exported_at, app_name]]:
constant[
Return Heroku Connect mapping for the entire project.
Args:
version (int): Version of the Heroku Connect mapping, default: ``1``.
exported_at (datetime.datetime): Time the export was created, default is ``now()``.
app_name (str): Name of Heroku application associated with Heroku Connect the add-on.
Returns:
dict: Heroku Connect mapping.
Note:
The version does not need to be incremented. Exports from the Heroku Connect
website will always have the version number ``1``.
]
if compare[name[exported_at] is constant[None]] begin[:]
variable[exported_at] assign[=] call[name[timezone].now, parameter[]]
variable[app_name] assign[=] <ast.BoolOp object at 0x7da1b25057b0>
return[dictionary[[<ast.Constant object at 0x7da1b2507400>, <ast.Constant object at 0x7da1b2505600>, <ast.Constant object at 0x7da1b2505390>], [<ast.Name object at 0x7da1b2506950>, <ast.Dict object at 0x7da1b2507850>, <ast.ListComp object at 0x7da1b2505720>]]] | keyword[def] identifier[get_mapping] ( identifier[version] = literal[int] , identifier[exported_at] = keyword[None] , identifier[app_name] = keyword[None] ):
literal[string]
keyword[if] identifier[exported_at] keyword[is] keyword[None] :
identifier[exported_at] = identifier[timezone] . identifier[now] ()
identifier[app_name] = identifier[app_name] keyword[or] identifier[settings] . identifier[HEROKU_CONNECT_APP_NAME]
keyword[return] {
literal[string] : identifier[version] ,
literal[string] :{
literal[string] : identifier[settings] . identifier[HEROKU_CONNECT_ORGANIZATION_ID] ,
literal[string] : identifier[app_name] ,
literal[string] : identifier[exported_at] . identifier[isoformat] (),
},
literal[string] :[
identifier[model] . identifier[get_heroku_connect_mapping] ()
keyword[for] identifier[model] keyword[in] identifier[get_heroku_connect_models] ()
]
} | def get_mapping(version=1, exported_at=None, app_name=None):
"""
Return Heroku Connect mapping for the entire project.
Args:
version (int): Version of the Heroku Connect mapping, default: ``1``.
exported_at (datetime.datetime): Time the export was created, default is ``now()``.
app_name (str): Name of Heroku application associated with Heroku Connect the add-on.
Returns:
dict: Heroku Connect mapping.
Note:
The version does not need to be incremented. Exports from the Heroku Connect
website will always have the version number ``1``.
"""
if exported_at is None:
exported_at = timezone.now() # depends on [control=['if'], data=['exported_at']]
app_name = app_name or settings.HEROKU_CONNECT_APP_NAME
return {'version': version, 'connection': {'organization_id': settings.HEROKU_CONNECT_ORGANIZATION_ID, 'app_name': app_name, 'exported_at': exported_at.isoformat()}, 'mappings': [model.get_heroku_connect_mapping() for model in get_heroku_connect_models()]} |
def _check_ruby(ret, ruby, user=None):
'''
Check that ruby is installed
'''
match_version = True
match_micro_version = False
micro_version_regex = re.compile(r'-([0-9]{4}\.[0-9]{2}|p[0-9]+)$')
if micro_version_regex.search(ruby):
match_micro_version = True
if re.search('^[a-z]+$', ruby):
match_version = False
ruby = re.sub('^ruby-', '', ruby)
for impl, version, default in __salt__['rvm.list'](runas=user):
if impl != 'ruby':
version = '{impl}-{version}'.format(impl=impl, version=version)
if not match_micro_version:
version = micro_version_regex.sub('', version)
if not match_version:
version = re.sub('-.*', '', version)
if version == ruby:
ret['result'] = True
ret['comment'] = 'Requested ruby exists.'
ret['default'] = default
break
return ret | def function[_check_ruby, parameter[ret, ruby, user]]:
constant[
Check that ruby is installed
]
variable[match_version] assign[=] constant[True]
variable[match_micro_version] assign[=] constant[False]
variable[micro_version_regex] assign[=] call[name[re].compile, parameter[constant[-([0-9]{4}\.[0-9]{2}|p[0-9]+)$]]]
if call[name[micro_version_regex].search, parameter[name[ruby]]] begin[:]
variable[match_micro_version] assign[=] constant[True]
if call[name[re].search, parameter[constant[^[a-z]+$], name[ruby]]] begin[:]
variable[match_version] assign[=] constant[False]
variable[ruby] assign[=] call[name[re].sub, parameter[constant[^ruby-], constant[], name[ruby]]]
for taget[tuple[[<ast.Name object at 0x7da18ede4370>, <ast.Name object at 0x7da18ede5210>, <ast.Name object at 0x7da18ede64d0>]]] in starred[call[call[name[__salt__]][constant[rvm.list]], parameter[]]] begin[:]
if compare[name[impl] not_equal[!=] constant[ruby]] begin[:]
variable[version] assign[=] call[constant[{impl}-{version}].format, parameter[]]
if <ast.UnaryOp object at 0x7da18ede58a0> begin[:]
variable[version] assign[=] call[name[micro_version_regex].sub, parameter[constant[], name[version]]]
if <ast.UnaryOp object at 0x7da1b1c15960> begin[:]
variable[version] assign[=] call[name[re].sub, parameter[constant[-.*], constant[], name[version]]]
if compare[name[version] equal[==] name[ruby]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] constant[Requested ruby exists.]
call[name[ret]][constant[default]] assign[=] name[default]
break
return[name[ret]] | keyword[def] identifier[_check_ruby] ( identifier[ret] , identifier[ruby] , identifier[user] = keyword[None] ):
literal[string]
identifier[match_version] = keyword[True]
identifier[match_micro_version] = keyword[False]
identifier[micro_version_regex] = identifier[re] . identifier[compile] ( literal[string] )
keyword[if] identifier[micro_version_regex] . identifier[search] ( identifier[ruby] ):
identifier[match_micro_version] = keyword[True]
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[ruby] ):
identifier[match_version] = keyword[False]
identifier[ruby] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[ruby] )
keyword[for] identifier[impl] , identifier[version] , identifier[default] keyword[in] identifier[__salt__] [ literal[string] ]( identifier[runas] = identifier[user] ):
keyword[if] identifier[impl] != literal[string] :
identifier[version] = literal[string] . identifier[format] ( identifier[impl] = identifier[impl] , identifier[version] = identifier[version] )
keyword[if] keyword[not] identifier[match_micro_version] :
identifier[version] = identifier[micro_version_regex] . identifier[sub] ( literal[string] , identifier[version] )
keyword[if] keyword[not] identifier[match_version] :
identifier[version] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[version] )
keyword[if] identifier[version] == identifier[ruby] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= identifier[default]
keyword[break]
keyword[return] identifier[ret] | def _check_ruby(ret, ruby, user=None):
"""
Check that ruby is installed
"""
match_version = True
match_micro_version = False
micro_version_regex = re.compile('-([0-9]{4}\\.[0-9]{2}|p[0-9]+)$')
if micro_version_regex.search(ruby):
match_micro_version = True # depends on [control=['if'], data=[]]
if re.search('^[a-z]+$', ruby):
match_version = False # depends on [control=['if'], data=[]]
ruby = re.sub('^ruby-', '', ruby)
for (impl, version, default) in __salt__['rvm.list'](runas=user):
if impl != 'ruby':
version = '{impl}-{version}'.format(impl=impl, version=version) # depends on [control=['if'], data=['impl']]
if not match_micro_version:
version = micro_version_regex.sub('', version) # depends on [control=['if'], data=[]]
if not match_version:
version = re.sub('-.*', '', version) # depends on [control=['if'], data=[]]
if version == ruby:
ret['result'] = True
ret['comment'] = 'Requested ruby exists.'
ret['default'] = default
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return ret |
def username_end_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
end_time = ET.SubElement(username, "end-time")
end_time.text = kwargs.pop('end_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[username_end_time, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[username] assign[=] call[name[ET].SubElement, parameter[name[config], constant[username]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[username], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[end_time] assign[=] call[name[ET].SubElement, parameter[name[username], constant[end-time]]]
name[end_time].text assign[=] call[name[kwargs].pop, parameter[constant[end_time]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[username_end_time] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[username] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[username] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[end_time] = identifier[ET] . identifier[SubElement] ( identifier[username] , literal[string] )
identifier[end_time] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def username_end_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
username = ET.SubElement(config, 'username', xmlns='urn:brocade.com:mgmt:brocade-aaa')
name_key = ET.SubElement(username, 'name')
name_key.text = kwargs.pop('name')
end_time = ET.SubElement(username, 'end-time')
end_time.text = kwargs.pop('end_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def parent(self):
'''We use parent for some initial data'''
if not hasattr(self, '_parent'):
if 'parent' in self.kwargs:
try:
self._parent = Page.objects.get(id=self.kwargs["parent"])
except Exception as e:
raise e
else:
if hasattr(self.request, 'leonardo_page'):
self._parent = self.request.leonardo_page
else:
return None
return self._parent | def function[parent, parameter[self]]:
constant[We use parent for some initial data]
if <ast.UnaryOp object at 0x7da1b0f5ada0> begin[:]
if compare[constant[parent] in name[self].kwargs] begin[:]
<ast.Try object at 0x7da1b0ef04c0>
return[name[self]._parent] | keyword[def] identifier[parent] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[if] literal[string] keyword[in] identifier[self] . identifier[kwargs] :
keyword[try] :
identifier[self] . identifier[_parent] = identifier[Page] . identifier[objects] . identifier[get] ( identifier[id] = identifier[self] . identifier[kwargs] [ literal[string] ])
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[e]
keyword[else] :
keyword[if] identifier[hasattr] ( identifier[self] . identifier[request] , literal[string] ):
identifier[self] . identifier[_parent] = identifier[self] . identifier[request] . identifier[leonardo_page]
keyword[else] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[_parent] | def parent(self):
"""We use parent for some initial data"""
if not hasattr(self, '_parent'):
if 'parent' in self.kwargs:
try:
self._parent = Page.objects.get(id=self.kwargs['parent']) # depends on [control=['try'], data=[]]
except Exception as e:
raise e # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
elif hasattr(self.request, 'leonardo_page'):
self._parent = self.request.leonardo_page # depends on [control=['if'], data=[]]
else:
return None # depends on [control=['if'], data=[]]
return self._parent |
async def SetFilesystemInfo(self, filesystems):
'''
filesystems : typing.Sequence[~Filesystem]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='StorageProvisioner',
request='SetFilesystemInfo',
version=3,
params=_params)
_params['filesystems'] = filesystems
reply = await self.rpc(msg)
return reply | <ast.AsyncFunctionDef object at 0x7da1b0ef9d20> | keyword[async] keyword[def] identifier[SetFilesystemInfo] ( identifier[self] , identifier[filesystems] ):
literal[string]
identifier[_params] = identifier[dict] ()
identifier[msg] = identifier[dict] ( identifier[type] = literal[string] ,
identifier[request] = literal[string] ,
identifier[version] = literal[int] ,
identifier[params] = identifier[_params] )
identifier[_params] [ literal[string] ]= identifier[filesystems]
identifier[reply] = keyword[await] identifier[self] . identifier[rpc] ( identifier[msg] )
keyword[return] identifier[reply] | async def SetFilesystemInfo(self, filesystems):
"""
filesystems : typing.Sequence[~Filesystem]
Returns -> typing.Sequence[~ErrorResult]
"""
# map input types to rpc msg
_params = dict()
msg = dict(type='StorageProvisioner', request='SetFilesystemInfo', version=3, params=_params)
_params['filesystems'] = filesystems
reply = await self.rpc(msg)
return reply |
def extract_from_urllib3():
"""
Undo monkey-patching by :func:`inject_into_urllib3`.
"""
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_SECURETRANSPORT = False
util.ssl_.IS_SECURETRANSPORT = False | def function[extract_from_urllib3, parameter[]]:
constant[
Undo monkey-patching by :func:`inject_into_urllib3`.
]
name[util].ssl_.SSLContext assign[=] name[orig_util_SSLContext]
name[util].HAS_SNI assign[=] name[orig_util_HAS_SNI]
name[util].ssl_.HAS_SNI assign[=] name[orig_util_HAS_SNI]
name[util].IS_SECURETRANSPORT assign[=] constant[False]
name[util].ssl_.IS_SECURETRANSPORT assign[=] constant[False] | keyword[def] identifier[extract_from_urllib3] ():
literal[string]
identifier[util] . identifier[ssl_] . identifier[SSLContext] = identifier[orig_util_SSLContext]
identifier[util] . identifier[HAS_SNI] = identifier[orig_util_HAS_SNI]
identifier[util] . identifier[ssl_] . identifier[HAS_SNI] = identifier[orig_util_HAS_SNI]
identifier[util] . identifier[IS_SECURETRANSPORT] = keyword[False]
identifier[util] . identifier[ssl_] . identifier[IS_SECURETRANSPORT] = keyword[False] | def extract_from_urllib3():
"""
Undo monkey-patching by :func:`inject_into_urllib3`.
"""
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_SECURETRANSPORT = False
util.ssl_.IS_SECURETRANSPORT = False |
def match(command, options, args):
"""disambiguate a command (expanding, for eg, lis into list) and validate the number of args passed for it"""
build = ""
possible = commands
for l in command:
build += l
possible = filter(lambda w: w.startswith(build), possible)
if len(possible) == 0:
raise ArgError("Command invalid: %s" % command)
if len(possible) > 1:
raise ArgError("Ambiguous command: %s" % command)
command = possible.pop()
if not num_args[command](len(args)):
raise ArgError("Bad number of args for command %s" % command)
return command | def function[match, parameter[command, options, args]]:
constant[disambiguate a command (expanding, for eg, lis into list) and validate the number of args passed for it]
variable[build] assign[=] constant[]
variable[possible] assign[=] name[commands]
for taget[name[l]] in starred[name[command]] begin[:]
<ast.AugAssign object at 0x7da20e9b16f0>
variable[possible] assign[=] call[name[filter], parameter[<ast.Lambda object at 0x7da20e9b3010>, name[possible]]]
if compare[call[name[len], parameter[name[possible]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20e9b2320>
if compare[call[name[len], parameter[name[possible]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da20e9b2cb0>
variable[command] assign[=] call[name[possible].pop, parameter[]]
if <ast.UnaryOp object at 0x7da20e9b1ab0> begin[:]
<ast.Raise object at 0x7da20e9b3220>
return[name[command]] | keyword[def] identifier[match] ( identifier[command] , identifier[options] , identifier[args] ):
literal[string]
identifier[build] = literal[string]
identifier[possible] = identifier[commands]
keyword[for] identifier[l] keyword[in] identifier[command] :
identifier[build] += identifier[l]
identifier[possible] = identifier[filter] ( keyword[lambda] identifier[w] : identifier[w] . identifier[startswith] ( identifier[build] ), identifier[possible] )
keyword[if] identifier[len] ( identifier[possible] )== literal[int] :
keyword[raise] identifier[ArgError] ( literal[string] % identifier[command] )
keyword[if] identifier[len] ( identifier[possible] )> literal[int] :
keyword[raise] identifier[ArgError] ( literal[string] % identifier[command] )
identifier[command] = identifier[possible] . identifier[pop] ()
keyword[if] keyword[not] identifier[num_args] [ identifier[command] ]( identifier[len] ( identifier[args] )):
keyword[raise] identifier[ArgError] ( literal[string] % identifier[command] )
keyword[return] identifier[command] | def match(command, options, args):
"""disambiguate a command (expanding, for eg, lis into list) and validate the number of args passed for it"""
build = ''
possible = commands
for l in command:
build += l
possible = filter(lambda w: w.startswith(build), possible) # depends on [control=['for'], data=['l']]
if len(possible) == 0:
raise ArgError('Command invalid: %s' % command) # depends on [control=['if'], data=[]]
if len(possible) > 1:
raise ArgError('Ambiguous command: %s' % command) # depends on [control=['if'], data=[]]
command = possible.pop()
if not num_args[command](len(args)):
raise ArgError('Bad number of args for command %s' % command) # depends on [control=['if'], data=[]]
return command |
def get_sdb_path(self, sdb):
"""Return the path for a SDB"""
sdb_id = self.get_sdb_id(sdb)
sdb_resp = get_with_retry(
self.cerberus_url + '/v1/safe-deposit-box/' + sdb_id + '/',
headers=self.HEADERS
)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()['path'] | def function[get_sdb_path, parameter[self, sdb]]:
constant[Return the path for a SDB]
variable[sdb_id] assign[=] call[name[self].get_sdb_id, parameter[name[sdb]]]
variable[sdb_resp] assign[=] call[name[get_with_retry], parameter[binary_operation[binary_operation[binary_operation[name[self].cerberus_url + constant[/v1/safe-deposit-box/]] + name[sdb_id]] + constant[/]]]]
call[name[throw_if_bad_response], parameter[name[sdb_resp]]]
return[call[call[name[sdb_resp].json, parameter[]]][constant[path]]] | keyword[def] identifier[get_sdb_path] ( identifier[self] , identifier[sdb] ):
literal[string]
identifier[sdb_id] = identifier[self] . identifier[get_sdb_id] ( identifier[sdb] )
identifier[sdb_resp] = identifier[get_with_retry] (
identifier[self] . identifier[cerberus_url] + literal[string] + identifier[sdb_id] + literal[string] ,
identifier[headers] = identifier[self] . identifier[HEADERS]
)
identifier[throw_if_bad_response] ( identifier[sdb_resp] )
keyword[return] identifier[sdb_resp] . identifier[json] ()[ literal[string] ] | def get_sdb_path(self, sdb):
"""Return the path for a SDB"""
sdb_id = self.get_sdb_id(sdb)
sdb_resp = get_with_retry(self.cerberus_url + '/v1/safe-deposit-box/' + sdb_id + '/', headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()['path'] |
def emit(self, action, event):
"""
Send an event to all the client listening for notifications
:param action: Action name
:param event: Event to send
"""
NotificationManager.instance().emit(action, event, project_id=self.id) | def function[emit, parameter[self, action, event]]:
constant[
Send an event to all the client listening for notifications
:param action: Action name
:param event: Event to send
]
call[call[name[NotificationManager].instance, parameter[]].emit, parameter[name[action], name[event]]] | keyword[def] identifier[emit] ( identifier[self] , identifier[action] , identifier[event] ):
literal[string]
identifier[NotificationManager] . identifier[instance] (). identifier[emit] ( identifier[action] , identifier[event] , identifier[project_id] = identifier[self] . identifier[id] ) | def emit(self, action, event):
"""
Send an event to all the client listening for notifications
:param action: Action name
:param event: Event to send
"""
NotificationManager.instance().emit(action, event, project_id=self.id) |
def _fdopen_ver2(self, file_des, mode='r',
bufsize=None): # pylint: disable=unused-argument
"""Returns an open file object connected to the file descriptor
file_des.
Args:
file_des: An integer file descriptor for the file object requested.
mode: Additional file flags. Currently checks to see if the mode
matches the mode of the requested file object.
bufsize: ignored. (Used for signature compliance with
__builtin__.fdopen)
Returns:
File object corresponding to file_des.
Raises:
OSError: if bad file descriptor or incompatible mode is given.
TypeError: if file descriptor is not an integer.
"""
if not is_int_type(file_des):
raise TypeError('an integer is required')
try:
return FakeFileOpen(self.filesystem).call(file_des, mode=mode)
except IOError as exc:
self.filesystem.raise_os_error(exc.errno, exc.filename) | def function[_fdopen_ver2, parameter[self, file_des, mode, bufsize]]:
constant[Returns an open file object connected to the file descriptor
file_des.
Args:
file_des: An integer file descriptor for the file object requested.
mode: Additional file flags. Currently checks to see if the mode
matches the mode of the requested file object.
bufsize: ignored. (Used for signature compliance with
__builtin__.fdopen)
Returns:
File object corresponding to file_des.
Raises:
OSError: if bad file descriptor or incompatible mode is given.
TypeError: if file descriptor is not an integer.
]
if <ast.UnaryOp object at 0x7da18dc04340> begin[:]
<ast.Raise object at 0x7da18dc05810>
<ast.Try object at 0x7da18dc05000> | keyword[def] identifier[_fdopen_ver2] ( identifier[self] , identifier[file_des] , identifier[mode] = literal[string] ,
identifier[bufsize] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[is_int_type] ( identifier[file_des] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[try] :
keyword[return] identifier[FakeFileOpen] ( identifier[self] . identifier[filesystem] ). identifier[call] ( identifier[file_des] , identifier[mode] = identifier[mode] )
keyword[except] identifier[IOError] keyword[as] identifier[exc] :
identifier[self] . identifier[filesystem] . identifier[raise_os_error] ( identifier[exc] . identifier[errno] , identifier[exc] . identifier[filename] ) | def _fdopen_ver2(self, file_des, mode='r', bufsize=None): # pylint: disable=unused-argument
'Returns an open file object connected to the file descriptor\n file_des.\n\n Args:\n file_des: An integer file descriptor for the file object requested.\n mode: Additional file flags. Currently checks to see if the mode\n matches the mode of the requested file object.\n bufsize: ignored. (Used for signature compliance with\n __builtin__.fdopen)\n\n Returns:\n File object corresponding to file_des.\n\n Raises:\n OSError: if bad file descriptor or incompatible mode is given.\n TypeError: if file descriptor is not an integer.\n '
if not is_int_type(file_des):
raise TypeError('an integer is required') # depends on [control=['if'], data=[]]
try:
return FakeFileOpen(self.filesystem).call(file_des, mode=mode) # depends on [control=['try'], data=[]]
except IOError as exc:
self.filesystem.raise_os_error(exc.errno, exc.filename) # depends on [control=['except'], data=['exc']] |
def dictapply(d, fn):
"""
apply a function to all non-dict values in a dictionary
"""
for k, v in d.items():
if isinstance(v, dict):
v = dictapply(v, fn)
else:
d[k] = fn(v)
return d | def function[dictapply, parameter[d, fn]]:
constant[
apply a function to all non-dict values in a dictionary
]
for taget[tuple[[<ast.Name object at 0x7da1b1830730>, <ast.Name object at 0x7da1b1832050>]]] in starred[call[name[d].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[dict]]] begin[:]
variable[v] assign[=] call[name[dictapply], parameter[name[v], name[fn]]]
return[name[d]] | keyword[def] identifier[dictapply] ( identifier[d] , identifier[fn] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[dict] ):
identifier[v] = identifier[dictapply] ( identifier[v] , identifier[fn] )
keyword[else] :
identifier[d] [ identifier[k] ]= identifier[fn] ( identifier[v] )
keyword[return] identifier[d] | def dictapply(d, fn):
"""
apply a function to all non-dict values in a dictionary
"""
for (k, v) in d.items():
if isinstance(v, dict):
v = dictapply(v, fn) # depends on [control=['if'], data=[]]
else:
d[k] = fn(v) # depends on [control=['for'], data=[]]
return d |
async def make_response(self, request, response):
"""Convert a handler result to web response."""
while iscoroutine(response):
response = await response
if isinstance(response, StreamResponse):
return response
if isinstance(response, str):
return Response(text=response, content_type='text/html')
if isinstance(response, bytes):
return Response(body=response, content_type='text/html')
return Response(text=json.dumps(response), content_type='application/json') | <ast.AsyncFunctionDef object at 0x7da18ede4370> | keyword[async] keyword[def] identifier[make_response] ( identifier[self] , identifier[request] , identifier[response] ):
literal[string]
keyword[while] identifier[iscoroutine] ( identifier[response] ):
identifier[response] = keyword[await] identifier[response]
keyword[if] identifier[isinstance] ( identifier[response] , identifier[StreamResponse] ):
keyword[return] identifier[response]
keyword[if] identifier[isinstance] ( identifier[response] , identifier[str] ):
keyword[return] identifier[Response] ( identifier[text] = identifier[response] , identifier[content_type] = literal[string] )
keyword[if] identifier[isinstance] ( identifier[response] , identifier[bytes] ):
keyword[return] identifier[Response] ( identifier[body] = identifier[response] , identifier[content_type] = literal[string] )
keyword[return] identifier[Response] ( identifier[text] = identifier[json] . identifier[dumps] ( identifier[response] ), identifier[content_type] = literal[string] ) | async def make_response(self, request, response):
"""Convert a handler result to web response."""
while iscoroutine(response):
response = await response # depends on [control=['while'], data=[]]
if isinstance(response, StreamResponse):
return response # depends on [control=['if'], data=[]]
if isinstance(response, str):
return Response(text=response, content_type='text/html') # depends on [control=['if'], data=[]]
if isinstance(response, bytes):
return Response(body=response, content_type='text/html') # depends on [control=['if'], data=[]]
return Response(text=json.dumps(response), content_type='application/json') |
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
    """
    Fit spline curve for given x, y values
    Args:
        x: x-values
        y: y-values
        step: step size for interpolation
        val_min: minimum value of result
        val_max: maximum value of result
        kind: for scipy.interpolate.interp1d
            Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
            ‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
            refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
            ‘next’ simply return the previous or next value of the point) or as an integer specifying
            the order of the spline interpolator to use. Default is ‘linear’.
        **kwargs: additional parameters for interp1d
    Returns:
        pd.Series: fitted curve
    Examples:
        >>> x = pd.Series([1, 2, 3])
        >>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
        >>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
        >>> r.round(2).index.tolist()
        [1.0, 1.5, 2.0, 2.5, 3.0]
        >>> r.round(2).tolist()
        [3.0, 4.05, 7.39, 12.73, 18.0]
        >>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
        >>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
        >>> r_df.round(2)
               a     b
        1.00   3.00  3.00
        1.50   4.05  3.00
        2.00   7.39  3.00
        2.50  12.73  3.50
        3.00  20.09  4.00
    """
    from scipy.interpolate import interp1d
    from collections import OrderedDict
    if isinstance(y, pd.DataFrame):
        # Interpolate each column independently.  Forward **kwargs so options
        # such as ``fill_value='extrapolate'`` are honored for DataFrame input
        # as well (previously they were silently dropped on this recursion).
        return pd.DataFrame(OrderedDict([(col, spline_curve(
            x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind, **kwargs
        )) for col in y.columns]))
    fitted_curve = interp1d(x, y, kind=kind, **kwargs)
    # + step/2 guards against float round-off so x.max() itself is included.
    new_x = np.arange(x.min(), x.max() + step / 2., step=step)
    return pd.Series(
        new_x, index=new_x, name=y.name if hasattr(y, 'name') else None
    ).apply(fitted_curve).clip(val_min, val_max)
constant[
Fit spline curve for given x, y values
Args:
x: x-values
y: y-values
step: step size for interpolation
val_min: minimum value of result
val_max: maximum value of result
kind: for scipy.interpolate.interp1d
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
‘next’ simply return the previous or next value of the point) or as an integer specifying
the order of the spline interpolator to use. Default is ‘linear’.
**kwargs: additional parameters for interp1d
Returns:
pd.Series: fitted curve
Examples:
>>> x = pd.Series([1, 2, 3])
>>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
>>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
>>> r.round(2).index.tolist()
[1.0, 1.5, 2.0, 2.5, 3.0]
>>> r.round(2).tolist()
[3.0, 4.05, 7.39, 12.73, 18.0]
>>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
>>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
>>> r_df.round(2)
a b
1.00 3.00 3.00
1.50 4.05 3.00
2.00 7.39 3.00
2.50 12.73 3.50
3.00 20.09 4.00
]
from relative_module[scipy.interpolate] import module[interp1d]
from relative_module[collections] import module[OrderedDict]
if call[name[isinstance], parameter[name[y], name[pd].DataFrame]] begin[:]
return[call[name[pd].DataFrame, parameter[call[name[OrderedDict], parameter[<ast.ListComp object at 0x7da1b0b55f00>]]]]]
variable[fitted_curve] assign[=] call[name[interp1d], parameter[name[x], name[y]]]
variable[new_x] assign[=] call[name[np].arange, parameter[call[name[x].min, parameter[]], binary_operation[call[name[x].max, parameter[]] + binary_operation[name[step] / constant[2.0]]]]]
return[call[call[call[name[pd].Series, parameter[name[new_x]]].apply, parameter[name[fitted_curve]]].clip, parameter[name[val_min], name[val_max]]]] | keyword[def] identifier[spline_curve] ( identifier[x] , identifier[y] , identifier[step] , identifier[val_min] = literal[int] , identifier[val_max] = keyword[None] , identifier[kind] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[scipy] . identifier[interpolate] keyword[import] identifier[interp1d]
keyword[from] identifier[collections] keyword[import] identifier[OrderedDict]
keyword[if] identifier[isinstance] ( identifier[y] , identifier[pd] . identifier[DataFrame] ):
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[OrderedDict] ([( identifier[col] , identifier[spline_curve] (
identifier[x] , identifier[y] . identifier[loc] [:, identifier[col] ], identifier[step] = identifier[step] , identifier[val_min] = identifier[val_min] , identifier[val_max] = identifier[val_max] , identifier[kind] = identifier[kind]
)) keyword[for] identifier[col] keyword[in] identifier[y] . identifier[columns] ]))
identifier[fitted_curve] = identifier[interp1d] ( identifier[x] , identifier[y] , identifier[kind] = identifier[kind] ,** identifier[kwargs] )
identifier[new_x] = identifier[np] . identifier[arange] ( identifier[x] . identifier[min] (), identifier[x] . identifier[max] ()+ identifier[step] / literal[int] , identifier[step] = identifier[step] )
keyword[return] identifier[pd] . identifier[Series] (
identifier[new_x] , identifier[index] = identifier[new_x] , identifier[name] = identifier[y] . identifier[name] keyword[if] identifier[hasattr] ( identifier[y] , literal[string] ) keyword[else] keyword[None]
). identifier[apply] ( identifier[fitted_curve] ). identifier[clip] ( identifier[val_min] , identifier[val_max] ) | def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
"""
Fit spline curve for given x, y values
Args:
x: x-values
y: y-values
step: step size for interpolation
val_min: minimum value of result
val_max: maximum value of result
kind: for scipy.interpolate.interp1d
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
‘next’ simply return the previous or next value of the point) or as an integer specifying
the order of the spline interpolator to use. Default is ‘linear’.
**kwargs: additional parameters for interp1d
Returns:
pd.Series: fitted curve
Examples:
>>> x = pd.Series([1, 2, 3])
>>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
>>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
>>> r.round(2).index.tolist()
[1.0, 1.5, 2.0, 2.5, 3.0]
>>> r.round(2).tolist()
[3.0, 4.05, 7.39, 12.73, 18.0]
>>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
>>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
>>> r_df.round(2)
a b
1.00 3.00 3.00
1.50 4.05 3.00
2.00 7.39 3.00
2.50 12.73 3.50
3.00 20.09 4.00
"""
from scipy.interpolate import interp1d
from collections import OrderedDict
if isinstance(y, pd.DataFrame):
return pd.DataFrame(OrderedDict([(col, spline_curve(x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind)) for col in y.columns])) # depends on [control=['if'], data=[]]
fitted_curve = interp1d(x, y, kind=kind, **kwargs)
new_x = np.arange(x.min(), x.max() + step / 2.0, step=step)
return pd.Series(new_x, index=new_x, name=y.name if hasattr(y, 'name') else None).apply(fitted_curve).clip(val_min, val_max) |
def ip_to_url(ip_addr):
    """
    Resolve a hostname based off an IP address.
    This is very limited and will
    probably not return any results if it is a shared IP address or an
    address with improperly setup DNS records.
    .. code:: python
        reusables.ip_to_url('93.184.216.34')  # example.com
        # None
        reusables.ip_to_url('8.8.8.8')
        # 'google-public-dns-a.google.com'
    :param ip_addr: IP address to resolve to hostname
    :return: string of hostname or None
    """
    try:
        hostname, _aliases, _addresses = socket.gethostbyaddr(ip_addr)
    except (socket.gaierror, socket.herror):
        # Reverse lookup failed (no PTR record, unknown host, ...).
        logger.exception("Could not resolve hostname")
        return None
    return hostname
constant[
Resolve a hostname based off an IP address.
This is very limited and will
probably not return any results if it is a shared IP address or an
address with improperly setup DNS records.
.. code:: python
reusables.ip_to_url('93.184.216.34') # example.com
# None
reusables.ip_to_url('8.8.8.8')
# 'google-public-dns-a.google.com'
:param ip_addr: IP address to resolve to hostname
:return: string of hostname or None
]
<ast.Try object at 0x7da18f00f5e0> | keyword[def] identifier[ip_to_url] ( identifier[ip_addr] ):
literal[string]
keyword[try] :
keyword[return] identifier[socket] . identifier[gethostbyaddr] ( identifier[ip_addr] )[ literal[int] ]
keyword[except] ( identifier[socket] . identifier[gaierror] , identifier[socket] . identifier[herror] ):
identifier[logger] . identifier[exception] ( literal[string] ) | def ip_to_url(ip_addr):
"""
Resolve a hostname based off an IP address.
This is very limited and will
probably not return any results if it is a shared IP address or an
address with improperly setup DNS records.
.. code:: python
reusables.ip_to_url('93.184.216.34') # example.com
# None
reusables.ip_to_url('8.8.8.8')
# 'google-public-dns-a.google.com'
:param ip_addr: IP address to resolve to hostname
:return: string of hostname or None
"""
try:
return socket.gethostbyaddr(ip_addr)[0] # depends on [control=['try'], data=[]]
except (socket.gaierror, socket.herror):
logger.exception('Could not resolve hostname') # depends on [control=['except'], data=[]] |
def stanza_factory(element, return_path = None, language = None):
    """Creates Iq, Message or Presence object for XML stanza `element`
    :Parameters:
        - `element`: the stanza XML element
        - `return_path`: object through which responses to this stanza should
          be sent (will be weakly referenced by the stanza object).
        - `language`: default language for the stanza
    :Types:
        - `element`: :etree:`ElementTree.Element`
        - `return_path`: `StanzaRoute`
        - `language`: `unicode`
    """
    tag = element.tag
    # Match either a bare local name or a namespace-qualified "{ns}name" tag.
    for local_name, stanza_class in (
            ("iq", Iq), ("message", Message), ("presence", Presence)):
        if tag == local_name or tag.endswith("}" + local_name):
            return stanza_class(element, return_path = return_path,
                                language = language)
    # Unknown stanza type: fall back to the generic Stanza wrapper.
    return Stanza(element, return_path = return_path, language = language)
constant[Creates Iq, Message or Presence object for XML stanza `element`
:Parameters:
- `element`: the stanza XML element
- `return_path`: object through which responses to this stanza should
be sent (will be weakly referenced by the stanza object).
- `language`: default language for the stanza
:Types:
- `element`: :etree:`ElementTree.Element`
- `return_path`: `StanzaRoute`
- `language`: `unicode`
]
variable[tag] assign[=] name[element].tag
if <ast.BoolOp object at 0x7da20c9927d0> begin[:]
return[call[name[Iq], parameter[name[element]]]]
if <ast.BoolOp object at 0x7da20c9922c0> begin[:]
return[call[name[Message], parameter[name[element]]]]
if <ast.BoolOp object at 0x7da20c991ea0> begin[:]
return[call[name[Presence], parameter[name[element]]]] | keyword[def] identifier[stanza_factory] ( identifier[element] , identifier[return_path] = keyword[None] , identifier[language] = keyword[None] ):
literal[string]
identifier[tag] = identifier[element] . identifier[tag]
keyword[if] identifier[tag] . identifier[endswith] ( literal[string] ) keyword[or] identifier[tag] == literal[string] :
keyword[return] identifier[Iq] ( identifier[element] , identifier[return_path] = identifier[return_path] , identifier[language] = identifier[language] )
keyword[if] identifier[tag] . identifier[endswith] ( literal[string] ) keyword[or] identifier[tag] == literal[string] :
keyword[return] identifier[Message] ( identifier[element] , identifier[return_path] = identifier[return_path] , identifier[language] = identifier[language] )
keyword[if] identifier[tag] . identifier[endswith] ( literal[string] ) keyword[or] identifier[tag] == literal[string] :
keyword[return] identifier[Presence] ( identifier[element] , identifier[return_path] = identifier[return_path] , identifier[language] = identifier[language] )
keyword[else] :
keyword[return] identifier[Stanza] ( identifier[element] , identifier[return_path] = identifier[return_path] , identifier[language] = identifier[language] ) | def stanza_factory(element, return_path=None, language=None):
"""Creates Iq, Message or Presence object for XML stanza `element`
:Parameters:
- `element`: the stanza XML element
- `return_path`: object through which responses to this stanza should
be sent (will be weakly referenced by the stanza object).
- `language`: default language for the stanza
:Types:
- `element`: :etree:`ElementTree.Element`
- `return_path`: `StanzaRoute`
- `language`: `unicode`
"""
tag = element.tag
if tag.endswith('}iq') or tag == 'iq':
return Iq(element, return_path=return_path, language=language) # depends on [control=['if'], data=[]]
if tag.endswith('}message') or tag == 'message':
return Message(element, return_path=return_path, language=language) # depends on [control=['if'], data=[]]
if tag.endswith('}presence') or tag == 'presence':
return Presence(element, return_path=return_path, language=language) # depends on [control=['if'], data=[]]
else:
return Stanza(element, return_path=return_path, language=language) |
def get_det_oid(self, det_id):
    """Convert detector serialnumber to string representation (OID)"""
    matching_oids = self.detectors.loc[
        self.detectors.SERIALNUMBER == det_id, "OID"]
    if matching_oids.empty:
        # Unknown serial number: log loudly and signal "not found".
        log.critical("No OID found for det ID '{}'".format(det_id))
        return None
    return matching_oids.iloc[0]
constant[Convert detector serialnumber to string representation (OID)]
<ast.Try object at 0x7da207f9b430> | keyword[def] identifier[get_det_oid] ( identifier[self] , identifier[det_id] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[detectors] [ identifier[self] . identifier[detectors] . identifier[SERIALNUMBER] == identifier[det_id]
]. identifier[OID] . identifier[iloc] [ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[log] . identifier[critical] ( literal[string] . identifier[format] ( identifier[det_id] ))
keyword[return] keyword[None] | def get_det_oid(self, det_id):
"""Convert detector serialnumber to string representation (OID)"""
try:
return self.detectors[self.detectors.SERIALNUMBER == det_id].OID.iloc[0] # depends on [control=['try'], data=[]]
except IndexError:
log.critical("No OID found for det ID '{}'".format(det_id))
return None # depends on [control=['except'], data=[]] |
def _wet_message_received(self, msg):
    """Notify dry/wet subscribers that the sensor reports a wet state."""
    wet_state = LeakSensorState.WET
    for notify in self._dry_wet_callbacks:
        notify(wet_state)
    self._update_subscribers(0x13)
constant[Report a wet state.]
for taget[name[callback]] in starred[name[self]._dry_wet_callbacks] begin[:]
call[name[callback], parameter[name[LeakSensorState].WET]]
call[name[self]._update_subscribers, parameter[constant[19]]] | keyword[def] identifier[_wet_message_received] ( identifier[self] , identifier[msg] ):
literal[string]
keyword[for] identifier[callback] keyword[in] identifier[self] . identifier[_dry_wet_callbacks] :
identifier[callback] ( identifier[LeakSensorState] . identifier[WET] )
identifier[self] . identifier[_update_subscribers] ( literal[int] ) | def _wet_message_received(self, msg):
"""Report a wet state."""
for callback in self._dry_wet_callbacks:
callback(LeakSensorState.WET) # depends on [control=['for'], data=['callback']]
self._update_subscribers(19) |
def dp_from_p(p, ps, p_top=0., p_bot=1.1e5):
    """Get level thickness of pressure data, incorporating surface pressure.
    Level edges are defined as halfway between the levels, as well as the user-
    specified uppermost and lowermost values. The dp of levels whose bottom
    pressure is less than the surface pressure is not changed by ps, since they
    don't intersect the surface. If ps is in between a level's top and bottom
    pressures, then its dp becomes the pressure difference between its top and
    ps. If ps is less than a level's top and bottom pressures, then that level
    is underground and its values are masked.
    Note that postprocessing routines (e.g. at GFDL) typically mask out data
    wherever the surface pressure is less than the level's given value, not the
    level's upper edge. This masks out more levels than the
    """
    p_str = get_dim_name(p, (internal_names.PLEVEL_STR, 'plev'))
    p_vals = to_pascal(p.values.copy())
    # Layer edges are halfway between the given pressure levels.
    p_edges_interior = 0.5*(p_vals[:-1] + p_vals[1:])
    p_edges = np.concatenate(([p_bot], p_edges_interior, [p_top]))
    p_edge_above = p_edges[1:]
    p_edge_below = p_edges[:-1]
    dp = p_edge_below - p_edge_above
    # BUG FIX: the previous check ``all(np.sign(dp))`` passed for *negative*
    # thicknesses, because np.sign returns -1 (truthy) there; only an exact
    # zero was caught.  Require every layer thickness to be strictly positive.
    if not np.all(dp > 0):
        raise ValueError("dp array not all > 0 : {}".format(dp))
    # Pressure difference between ps and the upper edge of each pressure level.
    p_edge_above_xr = xr.DataArray(p_edge_above, dims=p.dims, coords=p.coords)
    dp_to_sfc = ps - p_edge_above_xr
    # Find the level adjacent to the masked, under-ground levels: a sign change
    # in (ps - p) between consecutive levels marks where ps intersects a layer.
    change = xr.DataArray(np.zeros(dp_to_sfc.shape), dims=dp_to_sfc.dims,
                          coords=dp_to_sfc.coords)
    change[{p_str: slice(1, None)}] = np.diff(
        np.sign(ps - to_pascal(p.copy()))
    )
    # Where ps intersects a layer, use the distance from ps to the layer top;
    # elsewhere keep the plain edge-to-edge thickness.
    dp_combined = xr.DataArray(np.where(change, dp_to_sfc, dp),
                               dims=dp_to_sfc.dims, coords=dp_to_sfc.coords)
    # Mask levels that are under ground.
    above_ground = ps > to_pascal(p.copy())
    above_ground[p_str] = p[p_str]
    dp_with_ps = dp_combined.where(above_ground)
    # Revert to original dim order.
    possible_dim_orders = [
        (internal_names.TIME_STR, p_str, internal_names.LAT_STR,
         internal_names.LON_STR),
        (internal_names.TIME_STR, p_str, internal_names.LAT_STR),
        (internal_names.TIME_STR, p_str, internal_names.LON_STR),
        (internal_names.TIME_STR, p_str),
        (p_str, internal_names.LAT_STR, internal_names.LON_STR),
        (p_str, internal_names.LAT_STR),
        (p_str, internal_names.LON_STR),
        (p_str,),
    ]
    for dim_order in possible_dim_orders:
        try:
            return dp_with_ps.transpose(*dim_order)
        except ValueError:
            logging.debug("Failed transpose to dims: {}".format(dim_order))
    else:
        # for/else: no candidate order matched; return in current dim order.
        logging.debug("No transpose was successful.")
        return dp_with_ps
constant[Get level thickness of pressure data, incorporating surface pressure.
Level edges are defined as halfway between the levels, as well as the user-
specified uppermost and lowermost values. The dp of levels whose bottom
pressure is less than the surface pressure is not changed by ps, since they
don't intersect the surface. If ps is in between a level's top and bottom
pressures, then its dp becomes the pressure difference between its top and
ps. If ps is less than a level's top and bottom pressures, then that level
is underground and its values are masked.
Note that postprocessing routines (e.g. at GFDL) typically mask out data
wherever the surface pressure is less than the level's given value, not the
level's upper edge. This masks out more levels than the
]
variable[p_str] assign[=] call[name[get_dim_name], parameter[name[p], tuple[[<ast.Attribute object at 0x7da204566f80>, <ast.Constant object at 0x7da204564ac0>]]]]
variable[p_vals] assign[=] call[name[to_pascal], parameter[call[name[p].values.copy, parameter[]]]]
variable[p_edges_interior] assign[=] binary_operation[constant[0.5] * binary_operation[call[name[p_vals]][<ast.Slice object at 0x7da204564ca0>] + call[name[p_vals]][<ast.Slice object at 0x7da2045671c0>]]]
variable[p_edges] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.List object at 0x7da2045669b0>, <ast.Name object at 0x7da204567cd0>, <ast.List object at 0x7da204567b50>]]]]
variable[p_edge_above] assign[=] call[name[p_edges]][<ast.Slice object at 0x7da204567910>]
variable[p_edge_below] assign[=] call[name[p_edges]][<ast.Slice object at 0x7da2045646a0>]
variable[dp] assign[=] binary_operation[name[p_edge_below] - name[p_edge_above]]
if <ast.UnaryOp object at 0x7da2045648b0> begin[:]
<ast.Raise object at 0x7da2045640a0>
variable[p_edge_above_xr] assign[=] call[name[xr].DataArray, parameter[name[p_edge_above]]]
variable[dp_to_sfc] assign[=] binary_operation[name[ps] - name[p_edge_above_xr]]
variable[change] assign[=] call[name[xr].DataArray, parameter[call[name[np].zeros, parameter[name[dp_to_sfc].shape]]]]
call[name[change]][dictionary[[<ast.Name object at 0x7da1b04d2410>], [<ast.Call object at 0x7da1b04d24d0>]]] assign[=] call[name[np].diff, parameter[call[name[np].sign, parameter[binary_operation[name[ps] - call[name[to_pascal], parameter[call[name[p].copy, parameter[]]]]]]]]]
variable[dp_combined] assign[=] call[name[xr].DataArray, parameter[call[name[np].where, parameter[name[change], name[dp_to_sfc], name[dp]]]]]
variable[above_ground] assign[=] compare[name[ps] greater[>] call[name[to_pascal], parameter[call[name[p].copy, parameter[]]]]]
call[name[above_ground]][name[p_str]] assign[=] call[name[p]][name[p_str]]
variable[dp_with_ps] assign[=] call[name[dp_combined].where, parameter[name[above_ground]]]
variable[possible_dim_orders] assign[=] list[[<ast.Tuple object at 0x7da1b04d08e0>, <ast.Tuple object at 0x7da1b04d06d0>, <ast.Tuple object at 0x7da1b04d1d50>, <ast.Tuple object at 0x7da1b04d0550>, <ast.Tuple object at 0x7da1b04d1c90>, <ast.Tuple object at 0x7da1b04d1b70>, <ast.Tuple object at 0x7da1b04d1ab0>, <ast.Tuple object at 0x7da1b04d17b0>]]
for taget[name[dim_order]] in starred[name[possible_dim_orders]] begin[:]
<ast.Try object at 0x7da1b04d1870> | keyword[def] identifier[dp_from_p] ( identifier[p] , identifier[ps] , identifier[p_top] = literal[int] , identifier[p_bot] = literal[int] ):
literal[string]
identifier[p_str] = identifier[get_dim_name] ( identifier[p] ,( identifier[internal_names] . identifier[PLEVEL_STR] , literal[string] ))
identifier[p_vals] = identifier[to_pascal] ( identifier[p] . identifier[values] . identifier[copy] ())
identifier[p_edges_interior] = literal[int] *( identifier[p_vals] [:- literal[int] ]+ identifier[p_vals] [ literal[int] :])
identifier[p_edges] = identifier[np] . identifier[concatenate] (([ identifier[p_bot] ], identifier[p_edges_interior] ,[ identifier[p_top] ]))
identifier[p_edge_above] = identifier[p_edges] [ literal[int] :]
identifier[p_edge_below] = identifier[p_edges] [:- literal[int] ]
identifier[dp] = identifier[p_edge_below] - identifier[p_edge_above]
keyword[if] keyword[not] identifier[all] ( identifier[np] . identifier[sign] ( identifier[dp] )):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[dp] ))
identifier[p_edge_above_xr] = identifier[xr] . identifier[DataArray] ( identifier[p_edge_above] , identifier[dims] = identifier[p] . identifier[dims] , identifier[coords] = identifier[p] . identifier[coords] )
identifier[dp_to_sfc] = identifier[ps] - identifier[p_edge_above_xr]
identifier[change] = identifier[xr] . identifier[DataArray] ( identifier[np] . identifier[zeros] ( identifier[dp_to_sfc] . identifier[shape] ), identifier[dims] = identifier[dp_to_sfc] . identifier[dims] ,
identifier[coords] = identifier[dp_to_sfc] . identifier[coords] )
identifier[change] [{ identifier[p_str] : identifier[slice] ( literal[int] , keyword[None] )}]= identifier[np] . identifier[diff] (
identifier[np] . identifier[sign] ( identifier[ps] - identifier[to_pascal] ( identifier[p] . identifier[copy] ()))
)
identifier[dp_combined] = identifier[xr] . identifier[DataArray] ( identifier[np] . identifier[where] ( identifier[change] , identifier[dp_to_sfc] , identifier[dp] ),
identifier[dims] = identifier[dp_to_sfc] . identifier[dims] , identifier[coords] = identifier[dp_to_sfc] . identifier[coords] )
identifier[above_ground] = identifier[ps] > identifier[to_pascal] ( identifier[p] . identifier[copy] ())
identifier[above_ground] [ identifier[p_str] ]= identifier[p] [ identifier[p_str] ]
identifier[dp_with_ps] = identifier[dp_combined] . identifier[where] ( identifier[above_ground] )
identifier[possible_dim_orders] =[
( identifier[internal_names] . identifier[TIME_STR] , identifier[p_str] , identifier[internal_names] . identifier[LAT_STR] ,
identifier[internal_names] . identifier[LON_STR] ),
( identifier[internal_names] . identifier[TIME_STR] , identifier[p_str] , identifier[internal_names] . identifier[LAT_STR] ),
( identifier[internal_names] . identifier[TIME_STR] , identifier[p_str] , identifier[internal_names] . identifier[LON_STR] ),
( identifier[internal_names] . identifier[TIME_STR] , identifier[p_str] ),
( identifier[p_str] , identifier[internal_names] . identifier[LAT_STR] , identifier[internal_names] . identifier[LON_STR] ),
( identifier[p_str] , identifier[internal_names] . identifier[LAT_STR] ),
( identifier[p_str] , identifier[internal_names] . identifier[LON_STR] ),
( identifier[p_str] ,),
]
keyword[for] identifier[dim_order] keyword[in] identifier[possible_dim_orders] :
keyword[try] :
keyword[return] identifier[dp_with_ps] . identifier[transpose] (* identifier[dim_order] )
keyword[except] identifier[ValueError] :
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[dim_order] ))
keyword[else] :
identifier[logging] . identifier[debug] ( literal[string] )
keyword[return] identifier[dp_with_ps] | def dp_from_p(p, ps, p_top=0.0, p_bot=110000.0):
"""Get level thickness of pressure data, incorporating surface pressure.
Level edges are defined as halfway between the levels, as well as the user-
specified uppermost and lowermost values. The dp of levels whose bottom
pressure is less than the surface pressure is not changed by ps, since they
don't intersect the surface. If ps is in between a level's top and bottom
pressures, then its dp becomes the pressure difference between its top and
ps. If ps is less than a level's top and bottom pressures, then that level
is underground and its values are masked.
Note that postprocessing routines (e.g. at GFDL) typically mask out data
wherever the surface pressure is less than the level's given value, not the
level's upper edge. This masks out more levels than the
"""
p_str = get_dim_name(p, (internal_names.PLEVEL_STR, 'plev'))
p_vals = to_pascal(p.values.copy())
# Layer edges are halfway between the given pressure levels.
p_edges_interior = 0.5 * (p_vals[:-1] + p_vals[1:])
p_edges = np.concatenate(([p_bot], p_edges_interior, [p_top]))
p_edge_above = p_edges[1:]
p_edge_below = p_edges[:-1]
dp = p_edge_below - p_edge_above
if not all(np.sign(dp)):
raise ValueError('dp array not all > 0 : {}'.format(dp)) # depends on [control=['if'], data=[]]
# Pressure difference between ps and the upper edge of each pressure level.
p_edge_above_xr = xr.DataArray(p_edge_above, dims=p.dims, coords=p.coords)
dp_to_sfc = ps - p_edge_above_xr
# Find the level adjacent to the masked, under-ground levels.
change = xr.DataArray(np.zeros(dp_to_sfc.shape), dims=dp_to_sfc.dims, coords=dp_to_sfc.coords)
change[{p_str: slice(1, None)}] = np.diff(np.sign(ps - to_pascal(p.copy())))
dp_combined = xr.DataArray(np.where(change, dp_to_sfc, dp), dims=dp_to_sfc.dims, coords=dp_to_sfc.coords)
# Mask levels that are under ground.
above_ground = ps > to_pascal(p.copy())
above_ground[p_str] = p[p_str]
dp_with_ps = dp_combined.where(above_ground)
# Revert to original dim order.
possible_dim_orders = [(internal_names.TIME_STR, p_str, internal_names.LAT_STR, internal_names.LON_STR), (internal_names.TIME_STR, p_str, internal_names.LAT_STR), (internal_names.TIME_STR, p_str, internal_names.LON_STR), (internal_names.TIME_STR, p_str), (p_str, internal_names.LAT_STR, internal_names.LON_STR), (p_str, internal_names.LAT_STR), (p_str, internal_names.LON_STR), (p_str,)]
for dim_order in possible_dim_orders:
try:
return dp_with_ps.transpose(*dim_order) # depends on [control=['try'], data=[]]
except ValueError:
logging.debug('Failed transpose to dims: {}'.format(dim_order)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['dim_order']]
else:
logging.debug('No transpose was successful.')
return dp_with_ps |
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
    """
    Pad the heatmaps on their top/right/bottom/left side.
    Parameters
    ----------
    top : int, optional
        Number of pixels to add at the top of the heatmaps. Must be 0 or greater.
    right : int, optional
        Number of pixels to add at the right of the heatmaps. Must be 0 or greater.
    bottom : int, optional
        Number of pixels to add at the bottom of the heatmaps. Must be 0 or greater.
    left : int, optional
        Number of pixels to add at the left of the heatmaps. Must be 0 or greater.
    mode : string, optional
        Padding mode to use. See :func:`numpy.pad` for details.
    cval : number, optional
        Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
    Returns
    -------
    imgaug.HeatmapsOnImage
        Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
    """
    padded_arr = ia.pad(
        self.arr_0to1,
        top=top, right=right, bottom=bottom, left=left,
        mode=mode, cval=cval)
    return HeatmapsOnImage.from_0to1(
        padded_arr,
        shape=self.shape,
        min_value=self.min_value,
        max_value=self.max_value)
constant[
Pad the heatmaps on their top/right/bottom/left side.
Parameters
----------
top : int, optional
Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.
mode : string, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
Returns
-------
imgaug.HeatmapsOnImage
Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
]
variable[arr_0to1_padded] assign[=] call[name[ia].pad, parameter[name[self].arr_0to1]]
return[call[name[HeatmapsOnImage].from_0to1, parameter[name[arr_0to1_padded]]]] | keyword[def] identifier[pad] ( identifier[self] , identifier[top] = literal[int] , identifier[right] = literal[int] , identifier[bottom] = literal[int] , identifier[left] = literal[int] , identifier[mode] = literal[string] , identifier[cval] = literal[int] ):
literal[string]
identifier[arr_0to1_padded] = identifier[ia] . identifier[pad] ( identifier[self] . identifier[arr_0to1] , identifier[top] = identifier[top] , identifier[right] = identifier[right] , identifier[bottom] = identifier[bottom] , identifier[left] = identifier[left] , identifier[mode] = identifier[mode] , identifier[cval] = identifier[cval] )
keyword[return] identifier[HeatmapsOnImage] . identifier[from_0to1] ( identifier[arr_0to1_padded] , identifier[shape] = identifier[self] . identifier[shape] , identifier[min_value] = identifier[self] . identifier[min_value] ,
identifier[max_value] = identifier[self] . identifier[max_value] ) | def pad(self, top=0, right=0, bottom=0, left=0, mode='constant', cval=0.0):
"""
Pad the heatmaps on their top/right/bottom/left side.
Parameters
----------
top : int, optional
Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.
mode : string, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
Returns
-------
imgaug.HeatmapsOnImage
Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
"""
arr_0to1_padded = ia.pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value) |
def add(self, logical_id, property, value):
        """
        Record that the resource with the given `logical_id` supports `property`,
        and that a reference to `logical_id.property` resolves to `value`.

        Example:
            "MyApi.Deployment" -> "MyApiDeployment1234567890"

        :param logical_id: Logical ID of the resource (Ex: MyLambdaFunction)
        :param property: Property on the resource that can be referenced (Ex: Alias)
        :param value: Value that this reference resolves to.
        :return: nothing
        """
        # Both halves of the "LogicalId.Property" key must be non-empty.
        if not (logical_id and property):
            raise ValueError("LogicalId and property must be a non-empty string")
        # Only non-empty strings are valid resolution targets.
        if not (value and isinstance(value, string_types)):
            raise ValueError("Property value must be a non-empty string")
        # Lazily create the per-resource bucket of references.
        self._refs.setdefault(logical_id, {})
        # Each property may be registered at most once per resource.
        if property in self._refs[logical_id]:
            raise ValueError("Cannot add second reference value to {}.{} property".format(logical_id, property))
self._refs[logical_id][property] = value | def function[add, parameter[self, logical_id, property, value]]:
constant[
Add the information that resource with given `logical_id` supports the given `property`, and that a reference
to `logical_id.property` resolves to given `value.
Example:
"MyApi.Deployment" -> "MyApiDeployment1234567890"
:param logical_id: Logical ID of the resource (Ex: MyLambdaFunction)
:param property: Property on the resource that can be referenced (Ex: Alias)
:param value: Value that this reference resolves to.
:return: nothing
]
if <ast.BoolOp object at 0x7da20e955a50> begin[:]
<ast.Raise object at 0x7da20e954a90>
if <ast.BoolOp object at 0x7da20e954430> begin[:]
<ast.Raise object at 0x7da1b1e15780>
if compare[name[logical_id] <ast.NotIn object at 0x7da2590d7190> name[self]._refs] begin[:]
call[name[self]._refs][name[logical_id]] assign[=] dictionary[[], []]
if compare[name[property] in call[name[self]._refs][name[logical_id]]] begin[:]
<ast.Raise object at 0x7da1b1e152d0>
call[call[name[self]._refs][name[logical_id]]][name[property]] assign[=] name[value] | keyword[def] identifier[add] ( identifier[self] , identifier[logical_id] , identifier[property] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[logical_id] keyword[or] keyword[not] identifier[property] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[value] keyword[or] keyword[not] identifier[isinstance] ( identifier[value] , identifier[string_types] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[logical_id] keyword[not] keyword[in] identifier[self] . identifier[_refs] :
identifier[self] . identifier[_refs] [ identifier[logical_id] ]={}
keyword[if] identifier[property] keyword[in] identifier[self] . identifier[_refs] [ identifier[logical_id] ]:
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[logical_id] , identifier[property] ))
identifier[self] . identifier[_refs] [ identifier[logical_id] ][ identifier[property] ]= identifier[value] | def add(self, logical_id, property, value):
"""
Add the information that resource with given `logical_id` supports the given `property`, and that a reference
to `logical_id.property` resolves to given `value.
Example:
"MyApi.Deployment" -> "MyApiDeployment1234567890"
:param logical_id: Logical ID of the resource (Ex: MyLambdaFunction)
:param property: Property on the resource that can be referenced (Ex: Alias)
:param value: Value that this reference resolves to.
:return: nothing
"""
if not logical_id or not property:
raise ValueError('LogicalId and property must be a non-empty string') # depends on [control=['if'], data=[]]
if not value or not isinstance(value, string_types):
raise ValueError('Property value must be a non-empty string') # depends on [control=['if'], data=[]]
if logical_id not in self._refs:
self._refs[logical_id] = {} # depends on [control=['if'], data=['logical_id']]
if property in self._refs[logical_id]:
raise ValueError('Cannot add second reference value to {}.{} property'.format(logical_id, property)) # depends on [control=['if'], data=['property']]
self._refs[logical_id][property] = value |
def _determine_keys(dictionary):
"""Determine the different kinds of keys."""
optional = {}
defaults = {}
mandatory = {}
types = {}
for key, value in dictionary.items():
if isinstance(key, Optional):
optional[key.value] = parse_schema(value)
if isinstance(value, BaseSchema) and\
value.default is not UNSPECIFIED:
defaults[key.value] = (value.default, value.null_values)
continue # pragma: nocover
if type(key) is type:
types[key] = parse_schema(value)
continue
mandatory[key] = parse_schema(value)
return mandatory, optional, types, defaults | def function[_determine_keys, parameter[dictionary]]:
constant[Determine the different kinds of keys.]
variable[optional] assign[=] dictionary[[], []]
variable[defaults] assign[=] dictionary[[], []]
variable[mandatory] assign[=] dictionary[[], []]
variable[types] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18eb546a0>, <ast.Name object at 0x7da18eb540a0>]]] in starred[call[name[dictionary].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[key], name[Optional]]] begin[:]
call[name[optional]][name[key].value] assign[=] call[name[parse_schema], parameter[name[value]]]
if <ast.BoolOp object at 0x7da2046212d0> begin[:]
call[name[defaults]][name[key].value] assign[=] tuple[[<ast.Attribute object at 0x7da204620700>, <ast.Attribute object at 0x7da204623130>]]
continue
if compare[call[name[type], parameter[name[key]]] is name[type]] begin[:]
call[name[types]][name[key]] assign[=] call[name[parse_schema], parameter[name[value]]]
continue
call[name[mandatory]][name[key]] assign[=] call[name[parse_schema], parameter[name[value]]]
return[tuple[[<ast.Name object at 0x7da204621c00>, <ast.Name object at 0x7da204623d60>, <ast.Name object at 0x7da204621810>, <ast.Name object at 0x7da204620730>]]] | keyword[def] identifier[_determine_keys] ( identifier[dictionary] ):
literal[string]
identifier[optional] ={}
identifier[defaults] ={}
identifier[mandatory] ={}
identifier[types] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[dictionary] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[key] , identifier[Optional] ):
identifier[optional] [ identifier[key] . identifier[value] ]= identifier[parse_schema] ( identifier[value] )
keyword[if] identifier[isinstance] ( identifier[value] , identifier[BaseSchema] ) keyword[and] identifier[value] . identifier[default] keyword[is] keyword[not] identifier[UNSPECIFIED] :
identifier[defaults] [ identifier[key] . identifier[value] ]=( identifier[value] . identifier[default] , identifier[value] . identifier[null_values] )
keyword[continue]
keyword[if] identifier[type] ( identifier[key] ) keyword[is] identifier[type] :
identifier[types] [ identifier[key] ]= identifier[parse_schema] ( identifier[value] )
keyword[continue]
identifier[mandatory] [ identifier[key] ]= identifier[parse_schema] ( identifier[value] )
keyword[return] identifier[mandatory] , identifier[optional] , identifier[types] , identifier[defaults] | def _determine_keys(dictionary):
"""Determine the different kinds of keys."""
optional = {}
defaults = {}
mandatory = {}
types = {}
for (key, value) in dictionary.items():
if isinstance(key, Optional):
optional[key.value] = parse_schema(value)
if isinstance(value, BaseSchema) and value.default is not UNSPECIFIED:
defaults[key.value] = (value.default, value.null_values) # depends on [control=['if'], data=[]]
continue # pragma: nocover # depends on [control=['if'], data=[]]
if type(key) is type:
types[key] = parse_schema(value)
continue # depends on [control=['if'], data=[]]
mandatory[key] = parse_schema(value) # depends on [control=['for'], data=[]]
return (mandatory, optional, types, defaults) |
    def main(self):
        """
        Run the methods required to create the genesippr report summary image.

        Builds the underlying dataframe first, then renders the figure into
        ``self.outputfolder`` using the 'genesippr'/'report' settings.
        """
        # Assemble the dataframe the summary figure is drawn from.
        self.dataframe_setup()
        # Render the summary image; ``fail`` presumably controls highlighting of
        # failing samples — TODO confirm against figure_populate's signature.
        self.figure_populate(self.outputfolder,
                             self.image_report,
                             self.header_list,
                             self.samples,
                             'genesippr',
                             'report',
                             fail=self.fail) | def function[main, parameter[self]]:
constant[
Run the methods required to create the genesippr report summary image
]
call[name[self].dataframe_setup, parameter[]]
call[name[self].figure_populate, parameter[name[self].outputfolder, name[self].image_report, name[self].header_list, name[self].samples, constant[genesippr], constant[report]]] | keyword[def] identifier[main] ( identifier[self] ):
literal[string]
identifier[self] . identifier[dataframe_setup] ()
identifier[self] . identifier[figure_populate] ( identifier[self] . identifier[outputfolder] ,
identifier[self] . identifier[image_report] ,
identifier[self] . identifier[header_list] ,
identifier[self] . identifier[samples] ,
literal[string] ,
literal[string] ,
identifier[fail] = identifier[self] . identifier[fail] ) | def main(self):
"""
Run the methods required to create the genesippr report summary image
"""
self.dataframe_setup()
self.figure_populate(self.outputfolder, self.image_report, self.header_list, self.samples, 'genesippr', 'report', fail=self.fail) |
def com_google_fonts_check_glyf_unused_data(ttFont):
  """Is there any unused data at the end of the glyf table?"""
  try:
    # Number of glyphs the font declares via its glyph order.
    expected_glyphs = len(ttFont.getGlyphOrder())
    # Number of glyphs fontTools actually parsed out of the glyf table;
    # accessing ttFont['glyf'] triggers the parse and may raise TTLibError.
    actual_glyphs = len(ttFont['glyf'].glyphs)
    diff = actual_glyphs - expected_glyphs
    if diff < 0:
      # Fewer parsed glyphs than declared — NOTE(review): reported here as
      # unreachable trailing data; confirm this matches fontTools' behavior.
      yield FAIL, Message("unreachable-data",
                          ("Glyf table has unreachable data at the end of "
                           " the table. Expected glyf table length {}"
                           " (from loca table), got length"
                           " {} (difference: {})").format(
                            expected_glyphs, actual_glyphs, diff))
    elif not diff: # diff == 0: glyph counts agree; a positive diff falls through to the exception below
      yield PASS, "There is no unused data at the end of the glyf table."
    else:
      raise Exception("Bug: fontTools did not raise an expected exception.")
  except fontTools.ttLib.TTLibError as error:
    # fontTools raises TTLibError while parsing when loca points past the end
    # of the glyf table; only that specific message is treated as a check FAIL.
    if "not enough 'glyf' table data" in format(error):
      yield FAIL, Message("missing-data",
                          ("Loca table references data beyond"
                           " the end of the glyf table."
                           " Expected glyf table length {}"
                           " (from loca table).").format(expected_glyphs))
    else:
      raise Exception("Bug: Unexpected fontTools exception.") | def function[com_google_fonts_check_glyf_unused_data, parameter[ttFont]]:
constant[Is there any unused data at the end of the glyf table?]
<ast.Try object at 0x7da1b1251810> | keyword[def] identifier[com_google_fonts_check_glyf_unused_data] ( identifier[ttFont] ):
literal[string]
keyword[try] :
identifier[expected_glyphs] = identifier[len] ( identifier[ttFont] . identifier[getGlyphOrder] ())
identifier[actual_glyphs] = identifier[len] ( identifier[ttFont] [ literal[string] ]. identifier[glyphs] )
identifier[diff] = identifier[actual_glyphs] - identifier[expected_glyphs]
keyword[if] identifier[diff] < literal[int] :
keyword[yield] identifier[FAIL] , identifier[Message] ( literal[string] ,
( literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] (
identifier[expected_glyphs] , identifier[actual_glyphs] , identifier[diff] ))
keyword[elif] keyword[not] identifier[diff] :
keyword[yield] identifier[PASS] , literal[string]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[except] identifier[fontTools] . identifier[ttLib] . identifier[TTLibError] keyword[as] identifier[error] :
keyword[if] literal[string] keyword[in] identifier[format] ( identifier[error] ):
keyword[yield] identifier[FAIL] , identifier[Message] ( literal[string] ,
( literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[expected_glyphs] ))
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] ) | def com_google_fonts_check_glyf_unused_data(ttFont):
"""Is there any unused data at the end of the glyf table?"""
try:
expected_glyphs = len(ttFont.getGlyphOrder())
actual_glyphs = len(ttFont['glyf'].glyphs)
diff = actual_glyphs - expected_glyphs
if diff < 0:
yield (FAIL, Message('unreachable-data', 'Glyf table has unreachable data at the end of the table. Expected glyf table length {} (from loca table), got length {} (difference: {})'.format(expected_glyphs, actual_glyphs, diff))) # depends on [control=['if'], data=['diff']]
elif not diff: # negative diff -> exception below
yield (PASS, 'There is no unused data at the end of the glyf table.') # depends on [control=['if'], data=[]]
else:
raise Exception('Bug: fontTools did not raise an expected exception.') # depends on [control=['try'], data=[]]
except fontTools.ttLib.TTLibError as error:
if "not enough 'glyf' table data" in format(error):
yield (FAIL, Message('missing-data', 'Loca table references data beyond the end of the glyf table. Expected glyf table length {} (from loca table).'.format(expected_glyphs))) # depends on [control=['if'], data=[]]
else:
raise Exception('Bug: Unexpected fontTools exception.') # depends on [control=['except'], data=['error']] |
def task_verify(self, task):
'''
return False if any of 'taskid', 'project', 'url' is not in task dict
or project in not in task_queue
'''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.projects:
logger.error('unknown project: %s', task['project'])
return False
project = self.projects[task['project']]
if not project.active:
logger.error('project %s not started, please set status to RUNNING or DEBUG',
task['project'])
return False
return True | def function[task_verify, parameter[self, task]]:
constant[
return False if any of 'taskid', 'project', 'url' is not in task dict
or project in not in task_queue
]
for taget[name[each]] in starred[tuple[[<ast.Constant object at 0x7da1b208eaa0>, <ast.Constant object at 0x7da1b208d990>, <ast.Constant object at 0x7da1b208fa60>]]] begin[:]
if <ast.BoolOp object at 0x7da1b208f580> begin[:]
call[name[logger].error, parameter[constant[%s not in task: %.200r], name[each], name[task]]]
return[constant[False]]
if compare[call[name[task]][constant[project]] <ast.NotIn object at 0x7da2590d7190> name[self].projects] begin[:]
call[name[logger].error, parameter[constant[unknown project: %s], call[name[task]][constant[project]]]]
return[constant[False]]
variable[project] assign[=] call[name[self].projects][call[name[task]][constant[project]]]
if <ast.UnaryOp object at 0x7da1b1fe5930> begin[:]
call[name[logger].error, parameter[constant[project %s not started, please set status to RUNNING or DEBUG], call[name[task]][constant[project]]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[task_verify] ( identifier[self] , identifier[task] ):
literal[string]
keyword[for] identifier[each] keyword[in] ( literal[string] , literal[string] , literal[string] ,):
keyword[if] identifier[each] keyword[not] keyword[in] identifier[task] keyword[or] keyword[not] identifier[task] [ identifier[each] ]:
identifier[logger] . identifier[error] ( literal[string] , identifier[each] , identifier[task] )
keyword[return] keyword[False]
keyword[if] identifier[task] [ literal[string] ] keyword[not] keyword[in] identifier[self] . identifier[projects] :
identifier[logger] . identifier[error] ( literal[string] , identifier[task] [ literal[string] ])
keyword[return] keyword[False]
identifier[project] = identifier[self] . identifier[projects] [ identifier[task] [ literal[string] ]]
keyword[if] keyword[not] identifier[project] . identifier[active] :
identifier[logger] . identifier[error] ( literal[string] ,
identifier[task] [ literal[string] ])
keyword[return] keyword[False]
keyword[return] keyword[True] | def task_verify(self, task):
"""
return False if any of 'taskid', 'project', 'url' is not in task dict
or project in not in task_queue
"""
for each in ('taskid', 'project', 'url'):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['each']]
if task['project'] not in self.projects:
logger.error('unknown project: %s', task['project'])
return False # depends on [control=['if'], data=[]]
project = self.projects[task['project']]
if not project.active:
logger.error('project %s not started, please set status to RUNNING or DEBUG', task['project'])
return False # depends on [control=['if'], data=[]]
return True |
def tag(self, image, repository, tag=None, force=False):
"""
Tag an image into a repository. Similar to the ``docker tag`` command.
Args:
image (str): The image to tag
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Returns:
(bool): ``True`` if successful
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {
'tag': tag,
'repo': repository,
'force': 1 if force else 0
}
url = self._url("/images/{0}/tag", image)
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201 | def function[tag, parameter[self, image, repository, tag, force]]:
constant[
Tag an image into a repository. Similar to the ``docker tag`` command.
Args:
image (str): The image to tag
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Returns:
(bool): ``True`` if successful
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9ae00>, <ast.Constant object at 0x7da18dc9a260>, <ast.Constant object at 0x7da18dc990c0>], [<ast.Name object at 0x7da18dc9af20>, <ast.Name object at 0x7da18dc9a3e0>, <ast.IfExp object at 0x7da18dc986d0>]]
variable[url] assign[=] call[name[self]._url, parameter[constant[/images/{0}/tag], name[image]]]
variable[res] assign[=] call[name[self]._post, parameter[name[url]]]
call[name[self]._raise_for_status, parameter[name[res]]]
return[compare[name[res].status_code equal[==] constant[201]]] | keyword[def] identifier[tag] ( identifier[self] , identifier[image] , identifier[repository] , identifier[tag] = keyword[None] , identifier[force] = keyword[False] ):
literal[string]
identifier[params] ={
literal[string] : identifier[tag] ,
literal[string] : identifier[repository] ,
literal[string] : literal[int] keyword[if] identifier[force] keyword[else] literal[int]
}
identifier[url] = identifier[self] . identifier[_url] ( literal[string] , identifier[image] )
identifier[res] = identifier[self] . identifier[_post] ( identifier[url] , identifier[params] = identifier[params] )
identifier[self] . identifier[_raise_for_status] ( identifier[res] )
keyword[return] identifier[res] . identifier[status_code] == literal[int] | def tag(self, image, repository, tag=None, force=False):
"""
Tag an image into a repository. Similar to the ``docker tag`` command.
Args:
image (str): The image to tag
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Returns:
(bool): ``True`` if successful
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {'tag': tag, 'repo': repository, 'force': 1 if force else 0}
url = self._url('/images/{0}/tag', image)
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201 |
def get_parameter_negative_warning(model_type, model_params, parameter):
    """ Return a list holding a single warning when the named model parameter
    is negative, or an empty list otherwise.

    Parameters
    ----------
    model_type : :any:`str`
        Model type (e.g., ``'cdd_hdd'``).
    model_params : :any:`dict`
        Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
    parameter : :any:`str`
        The name of the parameter, e.g., ``'intercept'``.

    Returns
    -------
    warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
        Empty list or list of single warning.
    """
    warnings = []
    # Missing parameters default to 0 and therefore never trigger a warning.
    fitted_value = model_params.get(parameter, 0)
    if fitted_value < 0:
        qualified_name = "eemeter.caltrack_daily.{model_type}.{parameter}_negative".format(
            model_type=model_type, parameter=parameter
        )
        description = "Model fit {parameter} parameter is negative. Candidate model rejected.".format(
            parameter=parameter
        )
        warnings.append(
            EEMeterWarning(
                qualified_name=qualified_name,
                description=description,
                data=model_params,
            )
        )
return warnings | def function[get_parameter_negative_warning, parameter[model_type, model_params, parameter]]:
constant[ Return an empty list or a single warning wrapped in a list indicating
whether model parameter is negative.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
]
variable[warnings] assign[=] list[[]]
if compare[call[name[model_params].get, parameter[name[parameter], constant[0]]] less[<] constant[0]] begin[:]
call[name[warnings].append, parameter[call[name[EEMeterWarning], parameter[]]]]
return[name[warnings]] | keyword[def] identifier[get_parameter_negative_warning] ( identifier[model_type] , identifier[model_params] , identifier[parameter] ):
literal[string]
identifier[warnings] =[]
keyword[if] identifier[model_params] . identifier[get] ( identifier[parameter] , literal[int] )< literal[int] :
identifier[warnings] . identifier[append] (
identifier[EEMeterWarning] (
identifier[qualified_name] =(
literal[string] . identifier[format] (
identifier[model_type] = identifier[model_type] , identifier[parameter] = identifier[parameter]
)
),
identifier[description] =(
literal[string] . identifier[format] (
identifier[parameter] = identifier[parameter]
)
),
identifier[data] = identifier[model_params] ,
)
)
keyword[return] identifier[warnings] | def get_parameter_negative_warning(model_type, model_params, parameter):
""" Return an empty list or a single warning wrapped in a list indicating
whether model parameter is negative.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
if model_params.get(parameter, 0) < 0:
warnings.append(EEMeterWarning(qualified_name='eemeter.caltrack_daily.{model_type}.{parameter}_negative'.format(model_type=model_type, parameter=parameter), description='Model fit {parameter} parameter is negative. Candidate model rejected.'.format(parameter=parameter), data=model_params)) # depends on [control=['if'], data=[]]
return warnings |
def logger_init(level):
    """
    Initialize the logger for this thread.

    Sets the log level to ERROR (0), WARNING (1), INFO (2), or DEBUG (3),
    depending on the argument `level`.
    """
    # ``level`` indexes one of the four severities below.
    levellist = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
    fmt = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
           '-35s %(lineno) -5d: %(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(fmt))
    # Attach to the root logger so module-level loggers inherit the handler.
    logger = logging.root
    logger.addHandler(stream_handler)
logger.setLevel(levellist[level]) | def function[logger_init, parameter[level]]:
constant[
Initialize the logger for this thread.
Sets the log level to ERROR (0), WARNING (1), INFO (2), or DEBUG (3),
depending on the argument `level`.
]
variable[levellist] assign[=] list[[<ast.Attribute object at 0x7da18dc072e0>, <ast.Attribute object at 0x7da18dc06ce0>, <ast.Attribute object at 0x7da18dc07580>, <ast.Attribute object at 0x7da18dc07af0>]]
variable[handler] assign[=] call[name[logging].StreamHandler, parameter[]]
variable[fmt] assign[=] constant[%(levelname) -10s %(asctime)s %(name) -30s %(funcName) -35s %(lineno) -5d: %(message)s]
call[name[handler].setFormatter, parameter[call[name[logging].Formatter, parameter[name[fmt]]]]]
variable[logger] assign[=] name[logging].root
call[name[logger].addHandler, parameter[name[handler]]]
call[name[logger].setLevel, parameter[call[name[levellist]][name[level]]]] | keyword[def] identifier[logger_init] ( identifier[level] ):
literal[string]
identifier[levellist] =[ identifier[logging] . identifier[ERROR] , identifier[logging] . identifier[WARNING] , identifier[logging] . identifier[INFO] , identifier[logging] . identifier[DEBUG] ]
identifier[handler] = identifier[logging] . identifier[StreamHandler] ()
identifier[fmt] =( literal[string]
literal[string] )
identifier[handler] . identifier[setFormatter] ( identifier[logging] . identifier[Formatter] ( identifier[fmt] ))
identifier[logger] = identifier[logging] . identifier[root]
identifier[logger] . identifier[addHandler] ( identifier[handler] )
identifier[logger] . identifier[setLevel] ( identifier[levellist] [ identifier[level] ]) | def logger_init(level):
"""
Initialize the logger for this thread.
Sets the log level to ERROR (0), WARNING (1), INFO (2), or DEBUG (3),
depending on the argument `level`.
"""
levellist = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
handler = logging.StreamHandler()
fmt = '%(levelname) -10s %(asctime)s %(name) -30s %(funcName) -35s %(lineno) -5d: %(message)s'
handler.setFormatter(logging.Formatter(fmt))
logger = logging.root
logger.addHandler(handler)
logger.setLevel(levellist[level]) |
def delete_file(self, sass_filename, sass_fileurl):
"""
Delete a *.css file, but only if it has been generated through a SASS/SCSS file.
"""
if self.use_static_root:
destpath = os.path.join(self.static_root, os.path.splitext(sass_fileurl)[0] + '.css')
else:
destpath = os.path.splitext(sass_filename)[0] + '.css'
if os.path.isfile(destpath):
os.remove(destpath)
self.processed_files.append(sass_filename)
if self.verbosity > 1:
self.stdout.write("Deleted '{0}'\n".format(destpath)) | def function[delete_file, parameter[self, sass_filename, sass_fileurl]]:
constant[
Delete a *.css file, but only if it has been generated through a SASS/SCSS file.
]
if name[self].use_static_root begin[:]
variable[destpath] assign[=] call[name[os].path.join, parameter[name[self].static_root, binary_operation[call[call[name[os].path.splitext, parameter[name[sass_fileurl]]]][constant[0]] + constant[.css]]]]
if call[name[os].path.isfile, parameter[name[destpath]]] begin[:]
call[name[os].remove, parameter[name[destpath]]]
call[name[self].processed_files.append, parameter[name[sass_filename]]]
if compare[name[self].verbosity greater[>] constant[1]] begin[:]
call[name[self].stdout.write, parameter[call[constant[Deleted '{0}'
].format, parameter[name[destpath]]]]] | keyword[def] identifier[delete_file] ( identifier[self] , identifier[sass_filename] , identifier[sass_fileurl] ):
literal[string]
keyword[if] identifier[self] . identifier[use_static_root] :
identifier[destpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[static_root] , identifier[os] . identifier[path] . identifier[splitext] ( identifier[sass_fileurl] )[ literal[int] ]+ literal[string] )
keyword[else] :
identifier[destpath] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[sass_filename] )[ literal[int] ]+ literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[destpath] ):
identifier[os] . identifier[remove] ( identifier[destpath] )
identifier[self] . identifier[processed_files] . identifier[append] ( identifier[sass_filename] )
keyword[if] identifier[self] . identifier[verbosity] > literal[int] :
identifier[self] . identifier[stdout] . identifier[write] ( literal[string] . identifier[format] ( identifier[destpath] )) | def delete_file(self, sass_filename, sass_fileurl):
"""
Delete a *.css file, but only if it has been generated through a SASS/SCSS file.
"""
if self.use_static_root:
destpath = os.path.join(self.static_root, os.path.splitext(sass_fileurl)[0] + '.css') # depends on [control=['if'], data=[]]
else:
destpath = os.path.splitext(sass_filename)[0] + '.css'
if os.path.isfile(destpath):
os.remove(destpath)
self.processed_files.append(sass_filename)
if self.verbosity > 1:
self.stdout.write("Deleted '{0}'\n".format(destpath)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def put_settings(self, sensors=[], actuators=[], auth_token=None,
endpoint=None, blink=None, discovery=None,
dht_sensors=[], ds18b20_sensors=[]):
""" Sync settings to the Konnected device """
url = self.base_url + '/settings'
payload = {
"sensors": sensors,
"actuators": actuators,
"dht_sensors": dht_sensors,
"ds18b20_sensors": ds18b20_sensors,
"token": auth_token,
"apiUrl": endpoint
}
if blink is not None:
payload['blink'] = blink
if discovery is not None:
payload['discovery'] = discovery
try:
r = requests.put(url, json=payload, timeout=10)
return r.ok
except RequestException as err:
raise Client.ClientError(err) | def function[put_settings, parameter[self, sensors, actuators, auth_token, endpoint, blink, discovery, dht_sensors, ds18b20_sensors]]:
constant[ Sync settings to the Konnected device ]
variable[url] assign[=] binary_operation[name[self].base_url + constant[/settings]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b26aee60>, <ast.Constant object at 0x7da1b26ad5d0>, <ast.Constant object at 0x7da1b26aecb0>, <ast.Constant object at 0x7da1b26afa60>, <ast.Constant object at 0x7da1b26ad420>, <ast.Constant object at 0x7da20e954190>], [<ast.Name object at 0x7da20e955060>, <ast.Name object at 0x7da20e955db0>, <ast.Name object at 0x7da20e9567a0>, <ast.Name object at 0x7da20e9559f0>, <ast.Name object at 0x7da20e9543d0>, <ast.Name object at 0x7da20e9579d0>]]
if compare[name[blink] is_not constant[None]] begin[:]
call[name[payload]][constant[blink]] assign[=] name[blink]
if compare[name[discovery] is_not constant[None]] begin[:]
call[name[payload]][constant[discovery]] assign[=] name[discovery]
<ast.Try object at 0x7da20e956410> | keyword[def] identifier[put_settings] ( identifier[self] , identifier[sensors] =[], identifier[actuators] =[], identifier[auth_token] = keyword[None] ,
identifier[endpoint] = keyword[None] , identifier[blink] = keyword[None] , identifier[discovery] = keyword[None] ,
identifier[dht_sensors] =[], identifier[ds18b20_sensors] =[]):
literal[string]
identifier[url] = identifier[self] . identifier[base_url] + literal[string]
identifier[payload] ={
literal[string] : identifier[sensors] ,
literal[string] : identifier[actuators] ,
literal[string] : identifier[dht_sensors] ,
literal[string] : identifier[ds18b20_sensors] ,
literal[string] : identifier[auth_token] ,
literal[string] : identifier[endpoint]
}
keyword[if] identifier[blink] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[blink]
keyword[if] identifier[discovery] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[discovery]
keyword[try] :
identifier[r] = identifier[requests] . identifier[put] ( identifier[url] , identifier[json] = identifier[payload] , identifier[timeout] = literal[int] )
keyword[return] identifier[r] . identifier[ok]
keyword[except] identifier[RequestException] keyword[as] identifier[err] :
keyword[raise] identifier[Client] . identifier[ClientError] ( identifier[err] ) | def put_settings(self, sensors=[], actuators=[], auth_token=None, endpoint=None, blink=None, discovery=None, dht_sensors=[], ds18b20_sensors=[]):
""" Sync settings to the Konnected device """
url = self.base_url + '/settings'
payload = {'sensors': sensors, 'actuators': actuators, 'dht_sensors': dht_sensors, 'ds18b20_sensors': ds18b20_sensors, 'token': auth_token, 'apiUrl': endpoint}
if blink is not None:
payload['blink'] = blink # depends on [control=['if'], data=['blink']]
if discovery is not None:
payload['discovery'] = discovery # depends on [control=['if'], data=['discovery']]
try:
r = requests.put(url, json=payload, timeout=10)
return r.ok # depends on [control=['try'], data=[]]
except RequestException as err:
raise Client.ClientError(err) # depends on [control=['except'], data=['err']] |
def _createMagConversionDict():
    """Load magnitude_conversion.dat (table A% of 1995ApJS..101..117K) into a
    dict keyed by spectral class.
    """
    conversion_stream = resource_stream(__name__, 'data/magnitude_conversion.dat')
    table = np.loadtxt(conversion_stream, '|S5')
    is_py3 = sys.hexversion >= 0x03000000
    magDict = {}
    for entry in table:
        if is_py3:
            # numpy yields bytes under Python 3; decode so keys/values are
            # plain strings rather than b'...' byte strings.
            spectral_class = entry[1].decode("utf-8")
            values = [field.decode("utf-8") for field in entry[3:]]
        else:
            spectral_class = entry[1]
            values = entry[3:]
        magDict[spectral_class] = values
    return magDict
constant[ loads magnitude_conversion.dat which is table A% 1995ApJS..101..117K
]
variable[magnitude_conversion_filepath] assign[=] call[name[resource_stream], parameter[name[__name__], constant[data/magnitude_conversion.dat]]]
variable[raw_table] assign[=] call[name[np].loadtxt, parameter[name[magnitude_conversion_filepath], constant[|S5]]]
variable[magDict] assign[=] dictionary[[], []]
for taget[name[row]] in starred[name[raw_table]] begin[:]
if compare[name[sys].hexversion greater_or_equal[>=] constant[50331648]] begin[:]
variable[starClass] assign[=] call[call[name[row]][constant[1]].decode, parameter[constant[utf-8]]]
variable[tableData] assign[=] <ast.ListComp object at 0x7da18ede5e10>
call[name[magDict]][name[starClass]] assign[=] name[tableData]
return[name[magDict]] | keyword[def] identifier[_createMagConversionDict] ():
literal[string]
identifier[magnitude_conversion_filepath] = identifier[resource_stream] ( identifier[__name__] , literal[string] )
identifier[raw_table] = identifier[np] . identifier[loadtxt] ( identifier[magnitude_conversion_filepath] , literal[string] )
identifier[magDict] ={}
keyword[for] identifier[row] keyword[in] identifier[raw_table] :
keyword[if] identifier[sys] . identifier[hexversion] >= literal[int] :
identifier[starClass] = identifier[row] [ literal[int] ]. identifier[decode] ( literal[string] )
identifier[tableData] =[ identifier[x] . identifier[decode] ( literal[string] ) keyword[for] identifier[x] keyword[in] identifier[row] [ literal[int] :]]
keyword[else] :
identifier[starClass] = identifier[row] [ literal[int] ]
identifier[tableData] = identifier[row] [ literal[int] :]
identifier[magDict] [ identifier[starClass] ]= identifier[tableData]
keyword[return] identifier[magDict] | def _createMagConversionDict():
""" loads magnitude_conversion.dat which is table A% 1995ApJS..101..117K
"""
magnitude_conversion_filepath = resource_stream(__name__, 'data/magnitude_conversion.dat')
raw_table = np.loadtxt(magnitude_conversion_filepath, '|S5')
magDict = {}
for row in raw_table:
if sys.hexversion >= 50331648:
starClass = row[1].decode('utf-8') # otherwise we get byte ints or b' caused by 2to3
tableData = [x.decode('utf-8') for x in row[3:]] # depends on [control=['if'], data=[]]
else:
starClass = row[1]
tableData = row[3:]
magDict[starClass] = tableData # depends on [control=['for'], data=['row']]
return magDict |
def _get_fit(self, time: int) -> typing.Tuple[np.ndarray, np.ndarray, float, float]:
"""
Fit regression model to data
:param: time (column of data)
:return: predicted points
:return: residuals
:return: mean residual
:return: error
"""
rawdata = self.averagedata[:, time]
domain = np.arange(len(rawdata))
datalength = len(domain)
coefficients = np.zeros((datalength, self.function_number + 2))
coefficients[:, 0] = 1
coefficients[:, 1] = domain
for i in range(self.function_number):
coefficients[:, 2 + i] = self._gaussian_function(datalength, domain, 1, i)
betas = linalg.inv(coefficients.transpose().dot(coefficients)).dot(coefficients.transpose().dot(rawdata))
predicted_values = coefficients.dot(betas)
residuals = rawdata - predicted_values
error = np.sqrt(residuals.transpose().dot(residuals) / (datalength - (self.function_number + 2)))
return predicted_values, residuals, residuals.mean(), error | def function[_get_fit, parameter[self, time]]:
constant[
Fit regression model to data
:param: time (column of data)
:return: predicted points
:return: residuals
:return: mean residual
:return: error
]
variable[rawdata] assign[=] call[name[self].averagedata][tuple[[<ast.Slice object at 0x7da1b1365fc0>, <ast.Name object at 0x7da1b13645b0>]]]
variable[domain] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[rawdata]]]]]
variable[datalength] assign[=] call[name[len], parameter[name[domain]]]
variable[coefficients] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b1367be0>, <ast.BinOp object at 0x7da1b1366d70>]]]]
call[name[coefficients]][tuple[[<ast.Slice object at 0x7da1b13658d0>, <ast.Constant object at 0x7da1b1367490>]]] assign[=] constant[1]
call[name[coefficients]][tuple[[<ast.Slice object at 0x7da1b1366bf0>, <ast.Constant object at 0x7da1b1365810>]]] assign[=] name[domain]
for taget[name[i]] in starred[call[name[range], parameter[name[self].function_number]]] begin[:]
call[name[coefficients]][tuple[[<ast.Slice object at 0x7da1b1367f40>, <ast.BinOp object at 0x7da1b1365f60>]]] assign[=] call[name[self]._gaussian_function, parameter[name[datalength], name[domain], constant[1], name[i]]]
variable[betas] assign[=] call[call[name[linalg].inv, parameter[call[call[name[coefficients].transpose, parameter[]].dot, parameter[name[coefficients]]]]].dot, parameter[call[call[name[coefficients].transpose, parameter[]].dot, parameter[name[rawdata]]]]]
variable[predicted_values] assign[=] call[name[coefficients].dot, parameter[name[betas]]]
variable[residuals] assign[=] binary_operation[name[rawdata] - name[predicted_values]]
variable[error] assign[=] call[name[np].sqrt, parameter[binary_operation[call[call[name[residuals].transpose, parameter[]].dot, parameter[name[residuals]]] / binary_operation[name[datalength] - binary_operation[name[self].function_number + constant[2]]]]]]
return[tuple[[<ast.Name object at 0x7da1b1367df0>, <ast.Name object at 0x7da1b1367f10>, <ast.Call object at 0x7da1b13674f0>, <ast.Name object at 0x7da1b13657e0>]]] | keyword[def] identifier[_get_fit] ( identifier[self] , identifier[time] : identifier[int] )-> identifier[typing] . identifier[Tuple] [ identifier[np] . identifier[ndarray] , identifier[np] . identifier[ndarray] , identifier[float] , identifier[float] ]:
literal[string]
identifier[rawdata] = identifier[self] . identifier[averagedata] [:, identifier[time] ]
identifier[domain] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[rawdata] ))
identifier[datalength] = identifier[len] ( identifier[domain] )
identifier[coefficients] = identifier[np] . identifier[zeros] (( identifier[datalength] , identifier[self] . identifier[function_number] + literal[int] ))
identifier[coefficients] [:, literal[int] ]= literal[int]
identifier[coefficients] [:, literal[int] ]= identifier[domain]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[function_number] ):
identifier[coefficients] [:, literal[int] + identifier[i] ]= identifier[self] . identifier[_gaussian_function] ( identifier[datalength] , identifier[domain] , literal[int] , identifier[i] )
identifier[betas] = identifier[linalg] . identifier[inv] ( identifier[coefficients] . identifier[transpose] (). identifier[dot] ( identifier[coefficients] )). identifier[dot] ( identifier[coefficients] . identifier[transpose] (). identifier[dot] ( identifier[rawdata] ))
identifier[predicted_values] = identifier[coefficients] . identifier[dot] ( identifier[betas] )
identifier[residuals] = identifier[rawdata] - identifier[predicted_values]
identifier[error] = identifier[np] . identifier[sqrt] ( identifier[residuals] . identifier[transpose] (). identifier[dot] ( identifier[residuals] )/( identifier[datalength] -( identifier[self] . identifier[function_number] + literal[int] )))
keyword[return] identifier[predicted_values] , identifier[residuals] , identifier[residuals] . identifier[mean] (), identifier[error] | def _get_fit(self, time: int) -> typing.Tuple[np.ndarray, np.ndarray, float, float]:
"""
Fit regression model to data
:param: time (column of data)
:return: predicted points
:return: residuals
:return: mean residual
:return: error
"""
rawdata = self.averagedata[:, time]
domain = np.arange(len(rawdata))
datalength = len(domain)
coefficients = np.zeros((datalength, self.function_number + 2))
coefficients[:, 0] = 1
coefficients[:, 1] = domain
for i in range(self.function_number):
coefficients[:, 2 + i] = self._gaussian_function(datalength, domain, 1, i) # depends on [control=['for'], data=['i']]
betas = linalg.inv(coefficients.transpose().dot(coefficients)).dot(coefficients.transpose().dot(rawdata))
predicted_values = coefficients.dot(betas)
residuals = rawdata - predicted_values
error = np.sqrt(residuals.transpose().dot(residuals) / (datalength - (self.function_number + 2)))
return (predicted_values, residuals, residuals.mean(), error) |
def login(self, principal, loginProperties):
    """
    Log in by sending the request and blocking on the server's reply.

    Parameters:
     - principal: identity to authenticate as
     - loginProperties: additional login options/credentials
    """
    self.send_login(principal, loginProperties)
    return self.recv_login()
constant[
Parameters:
- principal
- loginProperties
]
call[name[self].send_login, parameter[name[principal], name[loginProperties]]]
return[call[name[self].recv_login, parameter[]]] | keyword[def] identifier[login] ( identifier[self] , identifier[principal] , identifier[loginProperties] ):
literal[string]
identifier[self] . identifier[send_login] ( identifier[principal] , identifier[loginProperties] )
keyword[return] identifier[self] . identifier[recv_login] () | def login(self, principal, loginProperties):
"""
Parameters:
- principal
- loginProperties
"""
self.send_login(principal, loginProperties)
return self.recv_login() |
def links_to_dynamic(self, ext):
    """Return true if 'ext' links to a dynamic lib in the same package"""
    # XXX this should check to ensure the lib is actually being built
    # XXX as dynamic, and not just using a locally-found version or a
    # XXX static-compiled version
    # A real set instead of dict.fromkeys-as-set, and any() instead of a
    # manual loop with early return.
    libnames = {lib._full_name for lib in self.shlibs}
    # Package prefix of the extension, including the trailing dot.
    pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
    return any(pkg + libname in libnames for libname in ext.libraries)
constant[Return true if 'ext' links to a dynamic lib in the same package]
variable[libnames] assign[=] call[name[dict].fromkeys, parameter[<ast.ListComp object at 0x7da18ede5d80>]]
variable[pkg] assign[=] call[constant[.].join, parameter[binary_operation[call[call[name[ext]._full_name.split, parameter[constant[.]]]][<ast.Slice object at 0x7da18ede5120>] + list[[<ast.Constant object at 0x7da18ede7400>]]]]]
for taget[name[libname]] in starred[name[ext].libraries] begin[:]
if compare[binary_operation[name[pkg] + name[libname]] in name[libnames]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[links_to_dynamic] ( identifier[self] , identifier[ext] ):
literal[string]
identifier[libnames] = identifier[dict] . identifier[fromkeys] ([ identifier[lib] . identifier[_full_name] keyword[for] identifier[lib] keyword[in] identifier[self] . identifier[shlibs] ])
identifier[pkg] = literal[string] . identifier[join] ( identifier[ext] . identifier[_full_name] . identifier[split] ( literal[string] )[:- literal[int] ]+[ literal[string] ])
keyword[for] identifier[libname] keyword[in] identifier[ext] . identifier[libraries] :
keyword[if] identifier[pkg] + identifier[libname] keyword[in] identifier[libnames] : keyword[return] keyword[True]
keyword[return] keyword[False] | def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
for libname in ext.libraries:
if pkg + libname in libnames:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['libname']]
return False |
def worker(f):
    """
    Decorator making a task abortable. When the dispatcher cancels the
    wrapped coroutine, the decorator replies with the FTP codes that report
    a successful interrupt.
    ::
        >>> @worker
        ... async def worker(self, connection, rest):
        ...     ...
    """
    @functools.wraps(f)
    async def abortable(cls, connection, rest):
        try:
            await f(cls, connection, rest)
        except asyncio.CancelledError:
            # Cancelled by the dispatcher: acknowledge the abort.
            connection.response("426", "transfer aborted")
            connection.response("226", "abort successful")
    return abortable
constant[
Decorator. Abortable worker. If wrapped task will be cancelled by
dispatcher, decorator will send ftp codes of successful interrupt.
::
>>> @worker
... async def worker(self, connection, rest):
... ...
]
<ast.AsyncFunctionDef object at 0x7da1b00b0370>
return[name[wrapper]] | keyword[def] identifier[worker] ( identifier[f] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[f] )
keyword[async] keyword[def] identifier[wrapper] ( identifier[cls] , identifier[connection] , identifier[rest] ):
keyword[try] :
keyword[await] identifier[f] ( identifier[cls] , identifier[connection] , identifier[rest] )
keyword[except] identifier[asyncio] . identifier[CancelledError] :
identifier[connection] . identifier[response] ( literal[string] , literal[string] )
identifier[connection] . identifier[response] ( literal[string] , literal[string] )
keyword[return] identifier[wrapper] | def worker(f):
"""
Decorator. Abortable worker. If wrapped task will be cancelled by
dispatcher, decorator will send ftp codes of successful interrupt.
::
>>> @worker
... async def worker(self, connection, rest):
... ...
"""
@functools.wraps(f)
async def wrapper(cls, connection, rest):
try:
await f(cls, connection, rest) # depends on [control=['try'], data=[]]
except asyncio.CancelledError:
connection.response('426', 'transfer aborted')
connection.response('226', 'abort successful') # depends on [control=['except'], data=[]]
return wrapper |
def get_residue_id_to_type_map(self):
    '''Returns a dictionary mapping 6-character residue IDs (Chain, residue number, insertion code e.g. "A 123B") to the
       corresponding one-letter amino acid.
       Caveat: This function ignores occupancy - this function should be called once occupancy has been dealt with appropriately.'''
    residue_id_to_type = {}
    for atom_line in self.parsed_lines['ATOM  ']:
        residue_name = atom_line[17:20]
        # Only C-alpha records of allowed residue types contribute.
        if atom_line[13:16] != 'CA ' or residue_name not in allowed_PDB_residues_types:
            continue
        one_letter = residue_type_3to1_map.get(residue_name) or protonated_residue_type_3to1_map.get(residue_name)
        residue_id_to_type[atom_line[21:27]] = one_letter
    return residue_id_to_type
constant[Returns a dictionary mapping 6-character residue IDs (Chain, residue number, insertion code e.g. "A 123B") to the
corresponding one-letter amino acid.
Caveat: This function ignores occupancy - this function should be called once occupancy has been dealt with appropriately.]
variable[resid2type] assign[=] dictionary[[], []]
variable[atomlines] assign[=] call[name[self].parsed_lines][constant[ATOM ]]
for taget[name[line]] in starred[name[atomlines]] begin[:]
variable[resname] assign[=] call[name[line]][<ast.Slice object at 0x7da20c6a8160>]
if <ast.BoolOp object at 0x7da20c6aace0> begin[:]
call[name[resid2type]][call[name[line]][<ast.Slice object at 0x7da18f812b60>]] assign[=] <ast.BoolOp object at 0x7da18f8126e0>
return[name[resid2type]] | keyword[def] identifier[get_residue_id_to_type_map] ( identifier[self] ):
literal[string]
identifier[resid2type] ={}
identifier[atomlines] = identifier[self] . identifier[parsed_lines] [ literal[string] ]
keyword[for] identifier[line] keyword[in] identifier[atomlines] :
identifier[resname] = identifier[line] [ literal[int] : literal[int] ]
keyword[if] identifier[resname] keyword[in] identifier[allowed_PDB_residues_types] keyword[and] identifier[line] [ literal[int] : literal[int] ]== literal[string] :
identifier[resid2type] [ identifier[line] [ literal[int] : literal[int] ]]= identifier[residue_type_3to1_map] . identifier[get] ( identifier[resname] ) keyword[or] identifier[protonated_residue_type_3to1_map] . identifier[get] ( identifier[resname] )
keyword[return] identifier[resid2type] | def get_residue_id_to_type_map(self):
"""Returns a dictionary mapping 6-character residue IDs (Chain, residue number, insertion code e.g. "A 123B") to the
corresponding one-letter amino acid.
Caveat: This function ignores occupancy - this function should be called once occupancy has been dealt with appropriately."""
resid2type = {}
atomlines = self.parsed_lines['ATOM ']
for line in atomlines:
resname = line[17:20]
if resname in allowed_PDB_residues_types and line[13:16] == 'CA ':
resid2type[line[21:27]] = residue_type_3to1_map.get(resname) or protonated_residue_type_3to1_map.get(resname) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return resid2type |
def get_all_current_trains(self, train_type=None, direction=None):
    """Returns all trains that are due to start in the next 10 minutes
    @param train_type: ['mainline', 'suburban', 'dart']
    """
    if train_type:
        url = self.api_base_url + 'getCurrentTrainsXML_WithTrainType'
        params = {
            'TrainType': STATION_TYPE_TO_CODE_DICT[train_type]
        }
    else:
        url = self.api_base_url + 'getCurrentTrainsXML'
        params = None
    response = requests.get(
        url, params=params, timeout=10)
    if response.status_code != 200:
        # Treat any non-OK HTTP status as "no trains".
        return []
    trains = self._parse_all_train_data(response.content)
    # Optionally filter by travel direction.
    return trains if direction is None else self._prune_trains(trains, direction=direction)
constant[Returns all trains that are due to start in the next 10 minutes
@param train_type: ['mainline', 'suburban', 'dart']
]
variable[params] assign[=] constant[None]
if name[train_type] begin[:]
variable[url] assign[=] binary_operation[name[self].api_base_url + constant[getCurrentTrainsXML_WithTrainType]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2878a60>], [<ast.Subscript object at 0x7da1b2878700>]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[name[response].status_code not_equal[!=] constant[200]] begin[:]
return[list[[]]]
variable[trains] assign[=] call[name[self]._parse_all_train_data, parameter[name[response].content]]
if compare[name[direction] is_not constant[None]] begin[:]
return[call[name[self]._prune_trains, parameter[name[trains]]]]
return[name[trains]] | keyword[def] identifier[get_all_current_trains] ( identifier[self] , identifier[train_type] = keyword[None] , identifier[direction] = keyword[None] ):
literal[string]
identifier[params] = keyword[None]
keyword[if] identifier[train_type] :
identifier[url] = identifier[self] . identifier[api_base_url] + literal[string]
identifier[params] ={
literal[string] : identifier[STATION_TYPE_TO_CODE_DICT] [ identifier[train_type] ]
}
keyword[else] :
identifier[url] = identifier[self] . identifier[api_base_url] + literal[string]
identifier[response] = identifier[requests] . identifier[get] (
identifier[url] , identifier[params] = identifier[params] , identifier[timeout] = literal[int] )
keyword[if] identifier[response] . identifier[status_code] != literal[int] :
keyword[return] []
identifier[trains] = identifier[self] . identifier[_parse_all_train_data] ( identifier[response] . identifier[content] )
keyword[if] identifier[direction] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_prune_trains] ( identifier[trains] , identifier[direction] = identifier[direction] )
keyword[return] identifier[trains] | def get_all_current_trains(self, train_type=None, direction=None):
"""Returns all trains that are due to start in the next 10 minutes
@param train_type: ['mainline', 'suburban', 'dart']
"""
params = None
if train_type:
url = self.api_base_url + 'getCurrentTrainsXML_WithTrainType'
params = {'TrainType': STATION_TYPE_TO_CODE_DICT[train_type]} # depends on [control=['if'], data=[]]
else:
url = self.api_base_url + 'getCurrentTrainsXML'
response = requests.get(url, params=params, timeout=10)
if response.status_code != 200:
return [] # depends on [control=['if'], data=[]]
trains = self._parse_all_train_data(response.content)
if direction is not None:
return self._prune_trains(trains, direction=direction) # depends on [control=['if'], data=['direction']]
return trains |
def _assemble_complex(stmt):
    """Assemble Complex statements into text."""
    agent_strs = [_assemble_agent_str(member) for member in stmt.members]
    first, rest = agent_strs[0], agent_strs[1:]
    return _make_sentence('%s binds %s' % (first, _join_list(rest)))
constant[Assemble Complex statements into text.]
variable[member_strs] assign[=] <ast.ListComp object at 0x7da207f025f0>
variable[stmt_str] assign[=] binary_operation[binary_operation[call[name[member_strs]][constant[0]] + constant[ binds ]] + call[name[_join_list], parameter[call[name[member_strs]][<ast.Slice object at 0x7da207f008e0>]]]]
return[call[name[_make_sentence], parameter[name[stmt_str]]]] | keyword[def] identifier[_assemble_complex] ( identifier[stmt] ):
literal[string]
identifier[member_strs] =[ identifier[_assemble_agent_str] ( identifier[m] ) keyword[for] identifier[m] keyword[in] identifier[stmt] . identifier[members] ]
identifier[stmt_str] = identifier[member_strs] [ literal[int] ]+ literal[string] + identifier[_join_list] ( identifier[member_strs] [ literal[int] :])
keyword[return] identifier[_make_sentence] ( identifier[stmt_str] ) | def _assemble_complex(stmt):
"""Assemble Complex statements into text."""
member_strs = [_assemble_agent_str(m) for m in stmt.members]
stmt_str = member_strs[0] + ' binds ' + _join_list(member_strs[1:])
return _make_sentence(stmt_str) |
def run(bam_file, sample, out_dir):
    """Standard QC metrics for chipseq"""
    metrics = {}
    peaks_file = sample.get("peaks_files", {}).get("main")
    if peaks_file:
        metrics.update(_reads_in_peaks(bam_file, peaks_file, sample))
    return metrics
constant[Standard QC metrics for chipseq]
variable[out] assign[=] dictionary[[], []]
variable[peaks] assign[=] call[call[name[sample].get, parameter[constant[peaks_files], dictionary[[], []]]].get, parameter[constant[main]]]
if name[peaks] begin[:]
call[name[out].update, parameter[call[name[_reads_in_peaks], parameter[name[bam_file], name[peaks], name[sample]]]]]
return[name[out]] | keyword[def] identifier[run] ( identifier[bam_file] , identifier[sample] , identifier[out_dir] ):
literal[string]
identifier[out] ={}
identifier[peaks] = identifier[sample] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] )
keyword[if] identifier[peaks] :
identifier[out] . identifier[update] ( identifier[_reads_in_peaks] ( identifier[bam_file] , identifier[peaks] , identifier[sample] ))
keyword[return] identifier[out] | def run(bam_file, sample, out_dir):
"""Standard QC metrics for chipseq"""
out = {}
# if "rchipqc" in dd.get_tools_on(sample):
# out = chipqc(bam_file, sample, out_dir)
peaks = sample.get('peaks_files', {}).get('main')
if peaks:
out.update(_reads_in_peaks(bam_file, peaks, sample)) # depends on [control=['if'], data=[]]
return out |
def rect_helper(x0, y0, x1, y1):
    """Rectangle helper: coerce corner coordinates to ints and order them so
    (x0, y0) is the minimum corner and (x1, y1) the maximum."""
    x0, y0, x1, y1 = force_int(x0, y0, x1, y1)
    x0, x1 = min(x0, x1), max(x0, x1)
    y0, y1 = min(y0, y1), max(y0, y1)
    return x0, y0, x1, y1
constant[Rectangle helper]
<ast.Tuple object at 0x7da1b0948970> assign[=] call[name[force_int], parameter[name[x0], name[y0], name[x1], name[y1]]]
if compare[name[x0] greater[>] name[x1]] begin[:]
<ast.Tuple object at 0x7da1b0948c70> assign[=] tuple[[<ast.Name object at 0x7da1b094bf40>, <ast.Name object at 0x7da1b094ac50>]]
if compare[name[y0] greater[>] name[y1]] begin[:]
<ast.Tuple object at 0x7da1b094b550> assign[=] tuple[[<ast.Name object at 0x7da1b0948520>, <ast.Name object at 0x7da1b0948430>]]
return[tuple[[<ast.Name object at 0x7da1b0948790>, <ast.Name object at 0x7da1b094b3d0>, <ast.Name object at 0x7da1b094a6e0>, <ast.Name object at 0x7da1b0949930>]]] | keyword[def] identifier[rect_helper] ( identifier[x0] , identifier[y0] , identifier[x1] , identifier[y1] ):
literal[string]
identifier[x0] , identifier[y0] , identifier[x1] , identifier[y1] = identifier[force_int] ( identifier[x0] , identifier[y0] , identifier[x1] , identifier[y1] )
keyword[if] identifier[x0] > identifier[x1] :
identifier[x0] , identifier[x1] = identifier[x1] , identifier[x0]
keyword[if] identifier[y0] > identifier[y1] :
identifier[y0] , identifier[y1] = identifier[y1] , identifier[y0]
keyword[return] identifier[x0] , identifier[y0] , identifier[x1] , identifier[y1] | def rect_helper(x0, y0, x1, y1):
"""Rectangle helper"""
(x0, y0, x1, y1) = force_int(x0, y0, x1, y1)
if x0 > x1:
(x0, x1) = (x1, x0) # depends on [control=['if'], data=['x0', 'x1']]
if y0 > y1:
(y0, y1) = (y1, y0) # depends on [control=['if'], data=['y0', 'y1']]
return (x0, y0, x1, y1) |
def _cost_func(x, kernel_options, tuning_options, runner, results, cache):
    """ Cost function used by minimize

    Compiles and benchmarks the kernel configuration encoded by position x,
    memoizing the measured time in `cache` under both the raw position and
    the snapped configuration so revisits return immediately.

    :param x: position in the (possibly scaled) parameter space
    :param kernel_options: options describing the kernel being tuned
    :param tuning_options: tuning options (tune_params, scaling, eps,
        restrictions, verbose)
    :param runner: runner used to compile and benchmark one configuration
    :param results: list collecting benchmark results; appended in place
    :param cache: dict mapping stringified configurations to measured time
    :return: measured kernel time, or the penalty value 1e20 for restricted
        or failed configurations
    """
    error_time = 1e20  # large penalty so minimizers steer away from bad configs
    logging.debug('_cost_func called')
    logging.debug('x: ' + str(x))
    # First cache lookup keyed on the raw (unsnapped) position.
    x_key = ",".join([str(i) for i in x])
    if x_key in cache:
        return cache[x_key]
    #snap values in x to nearest actual value for each parameter, unscale x if needed
    if tuning_options.scaling:
        params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps)
    else:
        params = snap_to_nearest_config(x, tuning_options.tune_params)
    logging.debug('params ' + str(params))
    # Second cache lookup keyed on the snapped configuration: distinct raw
    # positions may snap to the same configuration.
    x_int = ",".join([str(i) for i in params])
    if x_int in cache:
        return cache[x_int]
    #check if this is a legal (non-restricted) parameter instance
    if tuning_options.restrictions:
        legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose)
        if not legal:
            # Cache the penalty under both keys so neither is re-evaluated.
            cache[x_int] = error_time
            cache[x_key] = error_time
            return error_time
    #compile and benchmark this instance
    res, _ = runner.run([params], kernel_options, tuning_options)
    #append to tuning results
    if res:
        results.append(res[0])
        cache[x_int] = res[0]['time']
        cache[x_key] = res[0]['time']
        return res[0]['time']
    # Compilation or benchmarking failed: penalize this configuration.
    cache[x_int] = error_time
    cache[x_key] = error_time
    return error_time
constant[ Cost function used by minimize ]
variable[error_time] assign[=] constant[1e+20]
call[name[logging].debug, parameter[constant[_cost_func called]]]
call[name[logging].debug, parameter[binary_operation[constant[x: ] + call[name[str], parameter[name[x]]]]]]
variable[x_key] assign[=] call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b04164a0>]]
if compare[name[x_key] in name[cache]] begin[:]
return[call[name[cache]][name[x_key]]]
if name[tuning_options].scaling begin[:]
variable[params] assign[=] call[name[unscale_and_snap_to_nearest], parameter[name[x], name[tuning_options].tune_params, name[tuning_options].eps]]
call[name[logging].debug, parameter[binary_operation[constant[params ] + call[name[str], parameter[name[params]]]]]]
variable[x_int] assign[=] call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b0417af0>]]
if compare[name[x_int] in name[cache]] begin[:]
return[call[name[cache]][name[x_int]]]
if name[tuning_options].restrictions begin[:]
variable[legal] assign[=] call[name[util].check_restrictions, parameter[name[tuning_options].restrictions, name[params], call[name[tuning_options].tune_params.keys, parameter[]], name[tuning_options].verbose]]
if <ast.UnaryOp object at 0x7da1b0416c20> begin[:]
call[name[cache]][name[x_int]] assign[=] name[error_time]
call[name[cache]][name[x_key]] assign[=] name[error_time]
return[name[error_time]]
<ast.Tuple object at 0x7da1b0415a80> assign[=] call[name[runner].run, parameter[list[[<ast.Name object at 0x7da1b0417b20>]], name[kernel_options], name[tuning_options]]]
if name[res] begin[:]
call[name[results].append, parameter[call[name[res]][constant[0]]]]
call[name[cache]][name[x_int]] assign[=] call[call[name[res]][constant[0]]][constant[time]]
call[name[cache]][name[x_key]] assign[=] call[call[name[res]][constant[0]]][constant[time]]
return[call[call[name[res]][constant[0]]][constant[time]]]
call[name[cache]][name[x_int]] assign[=] name[error_time]
call[name[cache]][name[x_key]] assign[=] name[error_time]
return[name[error_time]] | keyword[def] identifier[_cost_func] ( identifier[x] , identifier[kernel_options] , identifier[tuning_options] , identifier[runner] , identifier[results] , identifier[cache] ):
literal[string]
identifier[error_time] = literal[int]
identifier[logging] . identifier[debug] ( literal[string] )
identifier[logging] . identifier[debug] ( literal[string] + identifier[str] ( identifier[x] ))
identifier[x_key] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[x] ])
keyword[if] identifier[x_key] keyword[in] identifier[cache] :
keyword[return] identifier[cache] [ identifier[x_key] ]
keyword[if] identifier[tuning_options] . identifier[scaling] :
identifier[params] = identifier[unscale_and_snap_to_nearest] ( identifier[x] , identifier[tuning_options] . identifier[tune_params] , identifier[tuning_options] . identifier[eps] )
keyword[else] :
identifier[params] = identifier[snap_to_nearest_config] ( identifier[x] , identifier[tuning_options] . identifier[tune_params] )
identifier[logging] . identifier[debug] ( literal[string] + identifier[str] ( identifier[params] ))
identifier[x_int] = literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[params] ])
keyword[if] identifier[x_int] keyword[in] identifier[cache] :
keyword[return] identifier[cache] [ identifier[x_int] ]
keyword[if] identifier[tuning_options] . identifier[restrictions] :
identifier[legal] = identifier[util] . identifier[check_restrictions] ( identifier[tuning_options] . identifier[restrictions] , identifier[params] , identifier[tuning_options] . identifier[tune_params] . identifier[keys] (), identifier[tuning_options] . identifier[verbose] )
keyword[if] keyword[not] identifier[legal] :
identifier[cache] [ identifier[x_int] ]= identifier[error_time]
identifier[cache] [ identifier[x_key] ]= identifier[error_time]
keyword[return] identifier[error_time]
identifier[res] , identifier[_] = identifier[runner] . identifier[run] ([ identifier[params] ], identifier[kernel_options] , identifier[tuning_options] )
keyword[if] identifier[res] :
identifier[results] . identifier[append] ( identifier[res] [ literal[int] ])
identifier[cache] [ identifier[x_int] ]= identifier[res] [ literal[int] ][ literal[string] ]
identifier[cache] [ identifier[x_key] ]= identifier[res] [ literal[int] ][ literal[string] ]
keyword[return] identifier[res] [ literal[int] ][ literal[string] ]
identifier[cache] [ identifier[x_int] ]= identifier[error_time]
identifier[cache] [ identifier[x_key] ]= identifier[error_time]
keyword[return] identifier[error_time] | def _cost_func(x, kernel_options, tuning_options, runner, results, cache):
""" Cost function used by minimize """
error_time = 1e+20
logging.debug('_cost_func called')
logging.debug('x: ' + str(x))
x_key = ','.join([str(i) for i in x])
if x_key in cache:
return cache[x_key] # depends on [control=['if'], data=['x_key', 'cache']]
#snap values in x to nearest actual value for each parameter unscale x if needed
if tuning_options.scaling:
params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps) # depends on [control=['if'], data=[]]
else:
params = snap_to_nearest_config(x, tuning_options.tune_params)
logging.debug('params ' + str(params))
x_int = ','.join([str(i) for i in params])
if x_int in cache:
return cache[x_int] # depends on [control=['if'], data=['x_int', 'cache']]
#check if this is a legal (non-restricted) parameter instance
if tuning_options.restrictions:
legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose)
if not legal:
cache[x_int] = error_time
cache[x_key] = error_time
return error_time # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
#compile and benchmark this instance
(res, _) = runner.run([params], kernel_options, tuning_options)
#append to tuning results
if res:
results.append(res[0])
cache[x_int] = res[0]['time']
cache[x_key] = res[0]['time']
return res[0]['time'] # depends on [control=['if'], data=[]]
cache[x_int] = error_time
cache[x_key] = error_time
return error_time |
def linexand(listoflists, columnlist, valuelist):
    """
    Returns the rows of a list of lists where col (from columnlist) = val
    (from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]).
    len(columnlist) must equal len(valuelist).

    Usage:   linexand (listoflists,columnlist,valuelist)
    Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i
    """
    # Accept a single bare column index / value by wrapping it in a list.
    if not isinstance(columnlist, (list, tuple)):
        columnlist = [columnlist]
    if not isinstance(valuelist, (list, tuple)):
        valuelist = [valuelist]
    # Compare the values directly instead of assembling Python source text
    # and eval()-ing it: the old approach broke on string values containing
    # quote characters and was an arbitrary-code-execution hazard, and it
    # only handled numbers and plain strings.  Direct comparison works for
    # any comparable value and always returns a list (the eval'd filter()
    # returned a list only on Python 2).
    return [row for row in listoflists
            if all(row[col] == val
                   for col, val in zip(columnlist, valuelist))]
constant[
Returns the rows of a list of lists where col (from columnlist) = val
(from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]).
len(columnlist) must equal len(valuelist).
Usage: linexand (listoflists,columnlist,valuelist)
Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i
]
if compare[call[name[type], parameter[name[columnlist]]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Name object at 0x7da1b0ebf400>, <ast.Name object at 0x7da1b0ebdf30>]]] begin[:]
variable[columnlist] assign[=] list[[<ast.Name object at 0x7da1b0ebd1e0>]]
if compare[call[name[type], parameter[name[valuelist]]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Name object at 0x7da1b0ebfa00>, <ast.Name object at 0x7da1b0ebdab0>]]] begin[:]
variable[valuelist] assign[=] list[[<ast.Name object at 0x7da1b0ebd2a0>]]
variable[criterion] assign[=] constant[]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[columnlist]]]]]] begin[:]
if compare[call[name[type], parameter[call[name[valuelist]][name[i]]]] equal[==] name[StringType]] begin[:]
variable[critval] assign[=] binary_operation[binary_operation[constant['] + call[name[valuelist]][name[i]]] + constant[']]
variable[criterion] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[criterion] + constant[ x[]] + call[name[str], parameter[call[name[columnlist]][name[i]]]]] + constant[]==]] + name[critval]] + constant[ and]]
variable[criterion] assign[=] call[name[criterion]][<ast.Slice object at 0x7da1b0ef1e10>]
variable[function] assign[=] binary_operation[binary_operation[constant[filter(lambda x: ] + name[criterion]] + constant[,listoflists)]]
variable[lines] assign[=] call[name[eval], parameter[name[function]]]
return[name[lines]] | keyword[def] identifier[linexand] ( identifier[listoflists] , identifier[columnlist] , identifier[valuelist] ):
literal[string]
keyword[if] identifier[type] ( identifier[columnlist] ) keyword[not] keyword[in] [ identifier[ListType] , identifier[TupleType] ]:
identifier[columnlist] =[ identifier[columnlist] ]
keyword[if] identifier[type] ( identifier[valuelist] ) keyword[not] keyword[in] [ identifier[ListType] , identifier[TupleType] ]:
identifier[valuelist] =[ identifier[valuelist] ]
identifier[criterion] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[columnlist] )):
keyword[if] identifier[type] ( identifier[valuelist] [ identifier[i] ])== identifier[StringType] :
identifier[critval] = literal[string] + identifier[valuelist] [ identifier[i] ]+ literal[string]
keyword[else] :
identifier[critval] = identifier[str] ( identifier[valuelist] [ identifier[i] ])
identifier[criterion] = identifier[criterion] + literal[string] + identifier[str] ( identifier[columnlist] [ identifier[i] ])+ literal[string] + identifier[critval] + literal[string]
identifier[criterion] = identifier[criterion] [ literal[int] :- literal[int] ]
identifier[function] = literal[string] + identifier[criterion] + literal[string]
identifier[lines] = identifier[eval] ( identifier[function] )
keyword[return] identifier[lines] | def linexand(listoflists, columnlist, valuelist):
"""
Returns the rows of a list of lists where col (from columnlist) = val
(from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]).
len(columnlist) must equal len(valuelist).
Usage: linexand (listoflists,columnlist,valuelist)
Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i
"""
if type(columnlist) not in [ListType, TupleType]:
columnlist = [columnlist] # depends on [control=['if'], data=[]]
if type(valuelist) not in [ListType, TupleType]:
valuelist = [valuelist] # depends on [control=['if'], data=[]]
criterion = ''
for i in range(len(columnlist)):
if type(valuelist[i]) == StringType:
critval = "'" + valuelist[i] + "'" # depends on [control=['if'], data=[]]
else:
critval = str(valuelist[i])
criterion = criterion + ' x[' + str(columnlist[i]) + ']==' + critval + ' and' # depends on [control=['for'], data=['i']]
criterion = criterion[0:-3] # remove the "and" after the last crit
function = 'filter(lambda x: ' + criterion + ',listoflists)'
lines = eval(function)
return lines |
def _sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd,
                 queryStartInSubject, queryEndInSubject, hsp, queryLen,
                 subjectGaps, queryGaps, localDict):
    """
    Run consistency checks on the offsets computed for an HSP and report
    any failure via _debugPrint.

    @param subjectStart: The 0-based C{int} start offset of the match in the
        subject.
    @param subjectEnd: The 0-based C{int} end offset of the match in the
        subject.
    @param queryStart: The 0-based C{int} start offset of the match in the
        query.
    @param queryEnd: The 0-based C{int} end offset of the match in the query.
    @param queryStartInSubject: The 0-based C{int} offset of where the query
        starts in the subject.
    @param queryEndInSubject: The 0-based C{int} offset of where the query
        ends in the subject.
    @param hsp: The HSP C{dict} passed to normalizeHSP.
    @param queryLen: the C{int} length of the query sequence.
    @param subjectGaps: the C{int} number of gaps in the subject.
    @param queryGaps: the C{int} number of gaps in the query.
    @param localDict: A C{dict} of local variables from our caller (as
        produced by locals()).
    """
    # The subject range must be non-empty and forward-oriented.
    if subjectStart >= subjectEnd:
        _debugPrint(hsp, queryLen, localDict, 'subjectStart >= subjectEnd')

    # With gaps counted, subject and query must cover the same length.
    gappedSubjectLength = (subjectEnd - subjectStart) + subjectGaps
    gappedQueryLength = (queryEnd - queryStart) + queryGaps
    if gappedSubjectLength != gappedQueryLength:
        _debugPrint(hsp, queryLen, localDict,
                    'Including gaps, subject match length (%d) != Query match '
                    'length (%d)' % (gappedSubjectLength,
                                     gappedQueryLength))

    # The query's extent in the subject must fully enclose the matched
    # subject region on both ends.
    if queryStartInSubject > subjectStart:
        _debugPrint(hsp, queryLen, localDict,
                    'queryStartInSubject (%d) > subjectStart (%d)' %
                    (queryStartInSubject, subjectStart))
    if queryEndInSubject < subjectEnd:
        _debugPrint(hsp, queryLen, localDict,
                    'queryEndInSubject (%d) < subjectEnd (%d)' %
                    (queryEndInSubject, subjectEnd))
constant[
Perform some sanity checks on an HSP. Call _debugPrint on any error.
@param subjectStart: The 0-based C{int} start offset of the match in the
subject.
@param subjectEnd: The 0-based C{int} end offset of the match in the
subject.
@param queryStart: The 0-based C{int} start offset of the match in the
query.
@param queryEnd: The 0-based C{int} end offset of the match in the query.
@param queryStartInSubject: The 0-based C{int} offset of where the query
starts in the subject.
@param queryEndInSubject: The 0-based C{int} offset of where the query
ends in the subject.
@param hsp: The HSP C{dict} passed to normalizeHSP.
@param queryLen: the C{int} length of the query sequence.
@param subjectGaps: the C{int} number of gaps in the subject.
@param queryGaps: the C{int} number of gaps in the query.
@param localDict: A C{dict} of local variables from our caller (as
produced by locals()).
]
if compare[name[subjectStart] greater_or_equal[>=] name[subjectEnd]] begin[:]
call[name[_debugPrint], parameter[name[hsp], name[queryLen], name[localDict], constant[subjectStart >= subjectEnd]]]
variable[subjectMatchLength] assign[=] binary_operation[name[subjectEnd] - name[subjectStart]]
variable[queryMatchLength] assign[=] binary_operation[name[queryEnd] - name[queryStart]]
variable[subjectMatchLengthWithGaps] assign[=] binary_operation[name[subjectMatchLength] + name[subjectGaps]]
variable[queryMatchLengthWithGaps] assign[=] binary_operation[name[queryMatchLength] + name[queryGaps]]
if compare[name[subjectMatchLengthWithGaps] not_equal[!=] name[queryMatchLengthWithGaps]] begin[:]
call[name[_debugPrint], parameter[name[hsp], name[queryLen], name[localDict], binary_operation[constant[Including gaps, subject match length (%d) != Query match length (%d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0cda9b0>, <ast.Name object at 0x7da1b0cda980>]]]]]
if compare[name[queryStartInSubject] greater[>] name[subjectStart]] begin[:]
call[name[_debugPrint], parameter[name[hsp], name[queryLen], name[localDict], binary_operation[constant[queryStartInSubject (%d) > subjectStart (%d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0cda6b0>, <ast.Name object at 0x7da1b0cda680>]]]]]
if compare[name[queryEndInSubject] less[<] name[subjectEnd]] begin[:]
call[name[_debugPrint], parameter[name[hsp], name[queryLen], name[localDict], binary_operation[constant[queryEndInSubject (%d) < subjectEnd (%d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0cda3b0>, <ast.Name object at 0x7da1b0cda380>]]]]] | keyword[def] identifier[_sanityCheck] ( identifier[subjectStart] , identifier[subjectEnd] , identifier[queryStart] , identifier[queryEnd] ,
identifier[queryStartInSubject] , identifier[queryEndInSubject] , identifier[hsp] , identifier[queryLen] ,
identifier[subjectGaps] , identifier[queryGaps] , identifier[localDict] ):
literal[string]
keyword[if] identifier[subjectStart] >= identifier[subjectEnd] :
identifier[_debugPrint] ( identifier[hsp] , identifier[queryLen] , identifier[localDict] , literal[string] )
identifier[subjectMatchLength] = identifier[subjectEnd] - identifier[subjectStart]
identifier[queryMatchLength] = identifier[queryEnd] - identifier[queryStart]
identifier[subjectMatchLengthWithGaps] = identifier[subjectMatchLength] + identifier[subjectGaps]
identifier[queryMatchLengthWithGaps] = identifier[queryMatchLength] + identifier[queryGaps]
keyword[if] identifier[subjectMatchLengthWithGaps] != identifier[queryMatchLengthWithGaps] :
identifier[_debugPrint] ( identifier[hsp] , identifier[queryLen] , identifier[localDict] ,
literal[string]
literal[string] %( identifier[subjectMatchLengthWithGaps] ,
identifier[queryMatchLengthWithGaps] ))
keyword[if] identifier[queryStartInSubject] > identifier[subjectStart] :
identifier[_debugPrint] ( identifier[hsp] , identifier[queryLen] , identifier[localDict] ,
literal[string] %
( identifier[queryStartInSubject] , identifier[subjectStart] ))
keyword[if] identifier[queryEndInSubject] < identifier[subjectEnd] :
identifier[_debugPrint] ( identifier[hsp] , identifier[queryLen] , identifier[localDict] ,
literal[string] %
( identifier[queryEndInSubject] , identifier[subjectEnd] )) | def _sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd, queryStartInSubject, queryEndInSubject, hsp, queryLen, subjectGaps, queryGaps, localDict):
"""
Perform some sanity checks on an HSP. Call _debugPrint on any error.
@param subjectStart: The 0-based C{int} start offset of the match in the
subject.
@param subjectEnd: The 0-based C{int} end offset of the match in the
subject.
@param queryStart: The 0-based C{int} start offset of the match in the
query.
@param queryEnd: The 0-based C{int} end offset of the match in the query.
@param queryStartInSubject: The 0-based C{int} offset of where the query
starts in the subject.
@param queryEndInSubject: The 0-based C{int} offset of where the query
ends in the subject.
@param hsp: The HSP C{dict} passed to normalizeHSP.
@param queryLen: the C{int} length of the query sequence.
@param subjectGaps: the C{int} number of gaps in the subject.
@param queryGaps: the C{int} number of gaps in the query.
@param localDict: A C{dict} of local variables from our caller (as
produced by locals()).
"""
# Subject indices must always be ascending.
if subjectStart >= subjectEnd:
_debugPrint(hsp, queryLen, localDict, 'subjectStart >= subjectEnd') # depends on [control=['if'], data=[]]
subjectMatchLength = subjectEnd - subjectStart
queryMatchLength = queryEnd - queryStart
# Sanity check that the length of the matches in the subject and query
# are identical, taking into account gaps in both.
subjectMatchLengthWithGaps = subjectMatchLength + subjectGaps
queryMatchLengthWithGaps = queryMatchLength + queryGaps
if subjectMatchLengthWithGaps != queryMatchLengthWithGaps:
_debugPrint(hsp, queryLen, localDict, 'Including gaps, subject match length (%d) != Query match length (%d)' % (subjectMatchLengthWithGaps, queryMatchLengthWithGaps)) # depends on [control=['if'], data=['subjectMatchLengthWithGaps', 'queryMatchLengthWithGaps']]
if queryStartInSubject > subjectStart:
_debugPrint(hsp, queryLen, localDict, 'queryStartInSubject (%d) > subjectStart (%d)' % (queryStartInSubject, subjectStart)) # depends on [control=['if'], data=['queryStartInSubject', 'subjectStart']]
if queryEndInSubject < subjectEnd:
_debugPrint(hsp, queryLen, localDict, 'queryEndInSubject (%d) < subjectEnd (%d)' % (queryEndInSubject, subjectEnd)) # depends on [control=['if'], data=['queryEndInSubject', 'subjectEnd']] |
def flags(self, index: QModelIndex):
        """Every cell is selectable; cells in editable columns are editable too."""
        # Short-circuit keeps the header lookup from running when editing is
        # disabled model-wide (same evaluation order as a chained `and`).
        editable = (self.IS_EDITABLE and
                    self.header[index.column()] in self.EDITABLE_FIELDS)
        if not editable:
            return super().flags(index) | Qt.ItemIsSelectable
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
constant[All fields are selectable]
if <ast.BoolOp object at 0x7da1b11d35b0> begin[:]
return[binary_operation[binary_operation[name[Qt].ItemIsEnabled <ast.BitOr object at 0x7da2590d6aa0> name[Qt].ItemIsSelectable] <ast.BitOr object at 0x7da2590d6aa0> name[Qt].ItemIsEditable]] | keyword[def] identifier[flags] ( identifier[self] , identifier[index] : identifier[QModelIndex] ):
literal[string]
keyword[if] identifier[self] . identifier[IS_EDITABLE] keyword[and] identifier[self] . identifier[header] [ identifier[index] . identifier[column] ()] keyword[in] identifier[self] . identifier[EDITABLE_FIELDS] :
keyword[return] identifier[Qt] . identifier[ItemIsEnabled] | identifier[Qt] . identifier[ItemIsSelectable] | identifier[Qt] . identifier[ItemIsEditable]
keyword[else] :
keyword[return] identifier[super] (). identifier[flags] ( identifier[index] )| identifier[Qt] . identifier[ItemIsSelectable] | def flags(self, index: QModelIndex):
"""All fields are selectable"""
if self.IS_EDITABLE and self.header[index.column()] in self.EDITABLE_FIELDS:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable # depends on [control=['if'], data=[]]
else:
return super().flags(index) | Qt.ItemIsSelectable |
def is_pid_healthy(pid):
    '''
    This is a health check that will confirm the PID is running
    and executed by salt.

    If psutil is available:
        * all architectures are checked

    if psutil is not available:
        * Linux/Solaris/etc: archs with `/proc/cmdline` available are checked
        * AIX/Windows: assume PID is healthy and return True
    '''
    if HAS_PSUTIL:
        try:
            proc = psutil.Process(pid)
        except psutil.NoSuchProcess:
            log.warning("PID %s is no longer running.", pid)
            return False
        # Guard against PID recycling: the live process must actually be a
        # salt process, judged from its command line.
        return any('salt' in cmd for cmd in proc.cmdline())

    if salt.utils.platform.is_aix() or salt.utils.platform.is_windows():
        # No /proc filesystem to inspect on these platforms.
        return True

    if not salt.utils.process.os_is_running(pid):
        log.warning("PID %s is no longer running.", pid)
        return False

    # BUG FIX: the path was previously built relative ('proc/<pid>/cmdline'),
    # which only resolved correctly when the CWD happened to be '/'.  Use the
    # absolute /proc path so the check works regardless of CWD.
    cmdline_file = os.path.join('/proc', str(pid), 'cmdline')
    try:
        with salt.utils.files.fopen(cmdline_file, 'rb') as fp_:
            return b'salt' in fp_.read()
    except (OSError, IOError) as err:
        log.error("There was a problem reading proc file: %s", err)
        return False
constant[
This is a health check that will confirm the PID is running
and executed by salt.
If pusutil is available:
* all architectures are checked
if psutil is not available:
* Linux/Solaris/etc: archs with `/proc/cmdline` available are checked
* AIX/Windows: assume PID is healhty and return True
]
if name[HAS_PSUTIL] begin[:]
<ast.Try object at 0x7da20c7c8f40>
return[call[name[any], parameter[<ast.ListComp object at 0x7da20c7c90c0>]]]
if <ast.BoolOp object at 0x7da20c7cbdc0> begin[:]
return[constant[True]]
if <ast.UnaryOp object at 0x7da20c7c9b40> begin[:]
call[name[log].warning, parameter[constant[PID %s is no longer running.], name[pid]]]
return[constant[False]]
variable[cmdline_file] assign[=] call[name[os].path.join, parameter[constant[proc], call[name[str], parameter[name[pid]]], constant[cmdline]]]
<ast.Try object at 0x7da20c7c9ea0> | keyword[def] identifier[is_pid_healthy] ( identifier[pid] ):
literal[string]
keyword[if] identifier[HAS_PSUTIL] :
keyword[try] :
identifier[proc] = identifier[psutil] . identifier[Process] ( identifier[pid] )
keyword[except] identifier[psutil] . identifier[NoSuchProcess] :
identifier[log] . identifier[warning] ( literal[string] , identifier[pid] )
keyword[return] keyword[False]
keyword[return] identifier[any] ([ literal[string] keyword[in] identifier[cmd] keyword[for] identifier[cmd] keyword[in] identifier[proc] . identifier[cmdline] ()])
keyword[if] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_aix] () keyword[or] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_windows] ():
keyword[return] keyword[True]
keyword[if] keyword[not] identifier[salt] . identifier[utils] . identifier[process] . identifier[os_is_running] ( identifier[pid] ):
identifier[log] . identifier[warning] ( literal[string] , identifier[pid] )
keyword[return] keyword[False]
identifier[cmdline_file] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[str] ( identifier[pid] ), literal[string] )
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[cmdline_file] , literal[string] ) keyword[as] identifier[fp_] :
keyword[return] literal[string] keyword[in] identifier[fp_] . identifier[read] ()
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[err] :
identifier[log] . identifier[error] ( literal[string] , identifier[err] )
keyword[return] keyword[False] | def is_pid_healthy(pid):
"""
This is a health check that will confirm the PID is running
and executed by salt.
If pusutil is available:
* all architectures are checked
if psutil is not available:
* Linux/Solaris/etc: archs with `/proc/cmdline` available are checked
* AIX/Windows: assume PID is healhty and return True
"""
if HAS_PSUTIL:
try:
proc = psutil.Process(pid) # depends on [control=['try'], data=[]]
except psutil.NoSuchProcess:
log.warning('PID %s is no longer running.', pid)
return False # depends on [control=['except'], data=[]]
return any(['salt' in cmd for cmd in proc.cmdline()]) # depends on [control=['if'], data=[]]
if salt.utils.platform.is_aix() or salt.utils.platform.is_windows():
return True # depends on [control=['if'], data=[]]
if not salt.utils.process.os_is_running(pid):
log.warning('PID %s is no longer running.', pid)
return False # depends on [control=['if'], data=[]]
cmdline_file = os.path.join('proc', str(pid), 'cmdline')
try:
with salt.utils.files.fopen(cmdline_file, 'rb') as fp_:
return b'salt' in fp_.read() # depends on [control=['with'], data=['fp_']] # depends on [control=['try'], data=[]]
except (OSError, IOError) as err:
log.error('There was a problem reading proc file: %s', err)
return False # depends on [control=['except'], data=['err']] |
def gradient(self, q, t=0.):
        """
        Compute the gradient of the potential at the given position(s).

        Parameters
        ----------
        q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
            The position to compute the value of the potential. If the
            input position object has no units (i.e. is an `~numpy.ndarray`),
            it is assumed to be in the same unit system as the potential.

        Returns
        -------
        grad : `~astropy.units.Quantity`
            The gradient of the potential. Will have the same shape as
            the input position.
        """
        q = self._remove_units_prepare_shape(q)
        orig_shape, q = self._get_c_valid_arr(q)
        t = self._validate_prepare_time(t, q)
        # The raw gradient is in length/time**2; attach that unit, restore
        # the caller's array shape, then convert to the configured
        # acceleration unit.
        grad_unit = self.units['length'] / self.units['time'] ** 2
        raw_grad = self._gradient(q, t=t)
        grad = raw_grad.T.reshape(orig_shape) * grad_unit
        return grad.to(self.units['acceleration'])
constant[
Compute the gradient of the potential at the given position(s).
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
The position to compute the value of the potential. If the
input position object has no units (i.e. is an `~numpy.ndarray`),
it is assumed to be in the same unit system as the potential.
Returns
-------
grad : `~astropy.units.Quantity`
The gradient of the potential. Will have the same shape as
the input position.
]
variable[q] assign[=] call[name[self]._remove_units_prepare_shape, parameter[name[q]]]
<ast.Tuple object at 0x7da1b0e3a860> assign[=] call[name[self]._get_c_valid_arr, parameter[name[q]]]
variable[t] assign[=] call[name[self]._validate_prepare_time, parameter[name[t], name[q]]]
variable[ret_unit] assign[=] binary_operation[call[name[self].units][constant[length]] / binary_operation[call[name[self].units][constant[time]] ** constant[2]]]
return[call[binary_operation[call[call[name[self]._gradient, parameter[name[q]]].T.reshape, parameter[name[orig_shape]]] * name[ret_unit]].to, parameter[call[name[self].units][constant[acceleration]]]]] | keyword[def] identifier[gradient] ( identifier[self] , identifier[q] , identifier[t] = literal[int] ):
literal[string]
identifier[q] = identifier[self] . identifier[_remove_units_prepare_shape] ( identifier[q] )
identifier[orig_shape] , identifier[q] = identifier[self] . identifier[_get_c_valid_arr] ( identifier[q] )
identifier[t] = identifier[self] . identifier[_validate_prepare_time] ( identifier[t] , identifier[q] )
identifier[ret_unit] = identifier[self] . identifier[units] [ literal[string] ]/ identifier[self] . identifier[units] [ literal[string] ]** literal[int]
keyword[return] ( identifier[self] . identifier[_gradient] ( identifier[q] , identifier[t] = identifier[t] ). identifier[T] . identifier[reshape] ( identifier[orig_shape] )* identifier[ret_unit] ). identifier[to] ( identifier[self] . identifier[units] [ literal[string] ]) | def gradient(self, q, t=0.0):
"""
Compute the gradient of the potential at the given position(s).
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
The position to compute the value of the potential. If the
input position object has no units (i.e. is an `~numpy.ndarray`),
it is assumed to be in the same unit system as the potential.
Returns
-------
grad : `~astropy.units.Quantity`
The gradient of the potential. Will have the same shape as
the input position.
"""
q = self._remove_units_prepare_shape(q)
(orig_shape, q) = self._get_c_valid_arr(q)
t = self._validate_prepare_time(t, q)
ret_unit = self.units['length'] / self.units['time'] ** 2
return (self._gradient(q, t=t).T.reshape(orig_shape) * ret_unit).to(self.units['acceleration']) |
def _get_or_create_config(path, prompt=True):
    """
    Get or create a Tarbell configuration directory.
    """
    config_dir = os.path.dirname(path)
    config_name = os.path.basename(path)
    try:
        os.makedirs(config_dir)
    except OSError:
        # Directory already exists (or cannot be created); any real problem
        # will surface from open() below.
        pass
    try:
        with open(path, 'r+') as f:
            if os.path.isfile(path):
                puts("{0} already exists, backing up".format(colored.green(path)))
                _backup(config_dir, config_name)
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary tags from untrusted input; confirm whether
            # yaml.safe_load would suffice for this config file.
            return yaml.load(f)
    except IOError:
        # No existing config (or it is unreadable): start with an empty one.
        return {}
constant[
Get or create a Tarbell configuration directory.
]
variable[dirname] assign[=] call[name[os].path.dirname, parameter[name[path]]]
variable[filename] assign[=] call[name[os].path.basename, parameter[name[path]]]
<ast.Try object at 0x7da1b1a1fd90>
<ast.Try object at 0x7da1b1a1cd60> | keyword[def] identifier[_get_or_create_config] ( identifier[path] , identifier[prompt] = keyword[True] ):
literal[string]
identifier[dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[path] )
identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[path] )
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[dirname] )
keyword[except] identifier[OSError] :
keyword[pass]
keyword[try] :
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ):
identifier[puts] ( literal[string] . identifier[format] ( identifier[colored] . identifier[green] ( identifier[path] )))
identifier[_backup] ( identifier[dirname] , identifier[filename] )
keyword[return] identifier[yaml] . identifier[load] ( identifier[f] )
keyword[except] identifier[IOError] :
keyword[return] {} | def _get_or_create_config(path, prompt=True):
"""
Get or create a Tarbell configuration directory.
"""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
try:
os.makedirs(dirname) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
try:
with open(path, 'r+') as f:
if os.path.isfile(path):
puts('{0} already exists, backing up'.format(colored.green(path)))
_backup(dirname, filename) # depends on [control=['if'], data=[]]
return yaml.load(f) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except IOError:
return {} # depends on [control=['except'], data=[]] |
def toLily(self):
        '''
        Method which converts the object instance, its attributes and children
        to a string of lilypond code.

        :return: str of lilypond code
        '''
        lilystring = "\\version \"2.18.2\" \n"
        partstrings = []
        ids_loaded = []
        groupings = []
        if len(self.groups) > 0:
            # here we need to do some set union theory
            lstring, groupings, ids_loaded = self.handleGroups()
            lilystring += lstring
        # Render every part that was not already emitted as part of a group.
        children = [
            child for child in self.GetSortedChildren() if child not in ids_loaded]
        for child in children:
            part = self.getPart(child)
            # part.toLily() yields an indexable pair: element [0] is emitted
            # immediately (definitions), element [1] is collected for the
            # simultaneous-music body below.
            partstring = part.toLily()
            lilystring += partstring[0]
            partstrings.append(partstring[1])
        lilystring += self.item.toLily()
        # Join the collected strings directly; the original wrapped each list
        # in a redundant identity comprehension before joining.
        lilystring += "<<"
        lilystring += "".join(groupings)
        lilystring += "".join(partstrings)
        lilystring += ">>"
        return lilystring
constant[
Method which converts the object instance, its attributes and children to a string of lilypond code
:return: str of lilypond code
]
variable[lilystring] assign[=] constant[\version "2.18.2"
]
variable[partstrings] assign[=] list[[]]
variable[ids_loaded] assign[=] list[[]]
variable[groupings] assign[=] list[[]]
if compare[call[name[len], parameter[name[self].groups]] greater[>] constant[0]] begin[:]
<ast.Tuple object at 0x7da1b2373df0> assign[=] call[name[self].handleGroups, parameter[]]
<ast.AugAssign object at 0x7da1b2370940>
variable[children] assign[=] <ast.ListComp object at 0x7da1b2371960>
for taget[name[child]] in starred[name[children]] begin[:]
variable[part] assign[=] call[name[self].getPart, parameter[name[child]]]
variable[partstring] assign[=] call[name[part].toLily, parameter[]]
<ast.AugAssign object at 0x7da1b23722f0>
call[name[partstrings].append, parameter[call[name[partstring]][constant[1]]]]
<ast.AugAssign object at 0x7da1b2370fa0>
<ast.AugAssign object at 0x7da1b2372800>
<ast.AugAssign object at 0x7da1b23718d0>
<ast.AugAssign object at 0x7da1b2373af0>
<ast.AugAssign object at 0x7da1b2371000>
return[name[lilystring]] | keyword[def] identifier[toLily] ( identifier[self] ):
literal[string]
identifier[lilystring] = literal[string]
identifier[partstrings] =[]
identifier[ids_loaded] =[]
identifier[groupings] =[]
keyword[if] identifier[len] ( identifier[self] . identifier[groups] )> literal[int] :
identifier[lstring] , identifier[groupings] , identifier[ids_loaded] = identifier[self] . identifier[handleGroups] ()
identifier[lilystring] += identifier[lstring]
identifier[children] =[
identifier[child] keyword[for] identifier[child] keyword[in] identifier[self] . identifier[GetSortedChildren] () keyword[if] identifier[child] keyword[not] keyword[in] identifier[ids_loaded] ]
keyword[for] identifier[child] keyword[in] identifier[children] :
identifier[part] = identifier[self] . identifier[getPart] ( identifier[child] )
identifier[partstring] = identifier[part] . identifier[toLily] ()
identifier[lilystring] += identifier[partstring] [ literal[int] ]
identifier[partstrings] . identifier[append] ( identifier[partstring] [ literal[int] ])
identifier[lilystring] += identifier[self] . identifier[item] . identifier[toLily] ()
identifier[lilystring] += literal[string]
identifier[lilystring] += literal[string] . identifier[join] ([ identifier[gstring] keyword[for] identifier[gstring] keyword[in] identifier[groupings] ])
identifier[lilystring] += literal[string] . identifier[join] ([ identifier[partstring] keyword[for] identifier[partstring] keyword[in] identifier[partstrings] ])
identifier[lilystring] += literal[string]
keyword[return] identifier[lilystring] | def toLily(self):
"""
Method which converts the object instance, its attributes and children to a string of lilypond code
:return: str of lilypond code
"""
lilystring = '\\version "2.18.2" \n'
partstrings = []
ids_loaded = []
groupings = []
if len(self.groups) > 0:
# here we need to do some set union theory
(lstring, groupings, ids_loaded) = self.handleGroups()
lilystring += lstring # depends on [control=['if'], data=[]]
children = [child for child in self.GetSortedChildren() if child not in ids_loaded]
for child in children:
part = self.getPart(child)
partstring = part.toLily()
lilystring += partstring[0]
partstrings.append(partstring[1]) # depends on [control=['for'], data=['child']]
lilystring += self.item.toLily()
lilystring += '<<'
lilystring += ''.join([gstring for gstring in groupings])
lilystring += ''.join([partstring for partstring in partstrings])
lilystring += '>>'
return lilystring |
def get_all_indirect_statements(self):
    """Collect all indirect increases/decreases BEL statements.

    The result is stored in ``self.all_indirect_stmts`` as a list of
    strings.  The SPARQL query matches every statement whose relationship
    predicate is either Increases or Decreases.
    """
    query = prefixes + """
        SELECT ?stmt
        WHERE {
            ?stmt a belvoc:Statement .
            {
                { ?stmt belvoc:hasRelationship belvoc:Increases . }
                UNION
                { ?stmt belvoc:hasRelationship belvoc:Decreases . }
            }
        }
        """
    results = self.g.query(query)
    self.all_indirect_stmts = [strip_statement(row[0]) for row in results]
constant[Get all indirect increases/decreases BEL statements.
This method stores the results of the query in self.all_indirect_stmts
as a list of strings. The SPARQL query used to find indirect BEL
statements searches for all statements whose predicate is either
Increases or Decreases.
]
variable[q_stmts] assign[=] binary_operation[name[prefixes] + constant[
SELECT ?stmt
WHERE {
?stmt a belvoc:Statement .
{
{ ?stmt belvoc:hasRelationship belvoc:Increases . }
UNION
{ ?stmt belvoc:hasRelationship belvoc:Decreases . }
}
}
]]
variable[res_stmts] assign[=] call[name[self].g.query, parameter[name[q_stmts]]]
name[self].all_indirect_stmts assign[=] <ast.ListComp object at 0x7da18dc987c0> | keyword[def] identifier[get_all_indirect_statements] ( identifier[self] ):
literal[string]
identifier[q_stmts] = identifier[prefixes] + literal[string]
identifier[res_stmts] = identifier[self] . identifier[g] . identifier[query] ( identifier[q_stmts] )
identifier[self] . identifier[all_indirect_stmts] =[ identifier[strip_statement] ( identifier[stmt] [ literal[int] ]) keyword[for] identifier[stmt] keyword[in] identifier[res_stmts] ] | def get_all_indirect_statements(self):
"""Get all indirect increases/decreases BEL statements.
This method stores the results of the query in self.all_indirect_stmts
as a list of strings. The SPARQL query used to find indirect BEL
statements searches for all statements whose predicate is either
Increases or Decreases.
"""
q_stmts = prefixes + '\n SELECT ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n {\n { ?stmt belvoc:hasRelationship belvoc:Increases . }\n UNION\n { ?stmt belvoc:hasRelationship belvoc:Decreases . }\n }\n }\n '
res_stmts = self.g.query(q_stmts)
self.all_indirect_stmts = [strip_statement(stmt[0]) for stmt in res_stmts] |
def tileAddress(self, zoom, point):
    """Return the ``[col, row]`` address of the tile containing *point*.

    :param zoom: index into ``self.RESOLUTIONS``
    :param point: ``[x, y]`` coordinate inside the grid extent
    :return: list ``[col, row]`` of integer tile indices
    :raises ValueError: if ``self.originCorner`` is not a recognized corner
    """
    [x, y] = point
    assert x <= self.MAXX and x >= self.MINX
    assert y <= self.MAXY and y >= self.MINY
    assert zoom in range(0, len(self.RESOLUTIONS))
    tileS = self.tileSize(zoom)
    offsetX = abs(x - self.MINX)
    if self.originCorner == 'bottom-left':
        offsetY = abs(y - self.MINY)
    elif self.originCorner == 'top-left':
        offsetY = abs(self.MAXY - y)
    else:
        # Previously an unrecognized corner crashed later with a NameError
        # on offsetY; fail fast with a clear message instead.
        raise ValueError(
            "Unknown originCorner: {0}".format(self.originCorner))
    col = offsetX / tileS
    row = offsetY / tileS
    # We are exactly on the edge of a tile and the extent: snap to the
    # last tile inside the extent instead of an out-of-range index.
    if x in (self.MINX, self.MAXX) and col.is_integer():
        col = max(0, col - 1)
    if y in (self.MINY, self.MAXY) and row.is_integer():
        row = max(0, row - 1)
    return [
        int(math.floor(col)),
        int(math.floor(row))
    ]
constant[Returns a tile address based on a zoom level and a point in the tile]
<ast.List object at 0x7da1b1857220> assign[=] name[point]
assert[<ast.BoolOp object at 0x7da1b1854cd0>]
assert[<ast.BoolOp object at 0x7da1b1857460>]
assert[compare[name[zoom] in call[name[range], parameter[constant[0], call[name[len], parameter[name[self].RESOLUTIONS]]]]]]
variable[tileS] assign[=] call[name[self].tileSize, parameter[name[zoom]]]
variable[offsetX] assign[=] call[name[abs], parameter[binary_operation[name[x] - name[self].MINX]]]
if compare[name[self].originCorner equal[==] constant[bottom-left]] begin[:]
variable[offsetY] assign[=] call[name[abs], parameter[binary_operation[name[y] - name[self].MINY]]]
variable[col] assign[=] binary_operation[name[offsetX] / name[tileS]]
variable[row] assign[=] binary_operation[name[offsetY] / name[tileS]]
if <ast.BoolOp object at 0x7da1b1854790> begin[:]
variable[col] assign[=] call[name[max], parameter[constant[0], binary_operation[name[col] - constant[1]]]]
if <ast.BoolOp object at 0x7da1b1857880> begin[:]
variable[row] assign[=] call[name[max], parameter[constant[0], binary_operation[name[row] - constant[1]]]]
return[list[[<ast.Call object at 0x7da1b1855270>, <ast.Call object at 0x7da1b1855a80>]]] | keyword[def] identifier[tileAddress] ( identifier[self] , identifier[zoom] , identifier[point] ):
literal[string]
[ identifier[x] , identifier[y] ]= identifier[point]
keyword[assert] identifier[x] <= identifier[self] . identifier[MAXX] keyword[and] identifier[x] >= identifier[self] . identifier[MINX]
keyword[assert] identifier[y] <= identifier[self] . identifier[MAXY] keyword[and] identifier[y] >= identifier[self] . identifier[MINY]
keyword[assert] identifier[zoom] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[self] . identifier[RESOLUTIONS] ))
identifier[tileS] = identifier[self] . identifier[tileSize] ( identifier[zoom] )
identifier[offsetX] = identifier[abs] ( identifier[x] - identifier[self] . identifier[MINX] )
keyword[if] identifier[self] . identifier[originCorner] == literal[string] :
identifier[offsetY] = identifier[abs] ( identifier[y] - identifier[self] . identifier[MINY] )
keyword[elif] identifier[self] . identifier[originCorner] == literal[string] :
identifier[offsetY] = identifier[abs] ( identifier[self] . identifier[MAXY] - identifier[y] )
identifier[col] = identifier[offsetX] / identifier[tileS]
identifier[row] = identifier[offsetY] / identifier[tileS]
keyword[if] identifier[x] keyword[in] ( identifier[self] . identifier[MINX] , identifier[self] . identifier[MAXX] ) keyword[and] identifier[col] . identifier[is_integer] ():
identifier[col] = identifier[max] ( literal[int] , identifier[col] - literal[int] )
keyword[if] identifier[y] keyword[in] ( identifier[self] . identifier[MINY] , identifier[self] . identifier[MAXY] ) keyword[and] identifier[row] . identifier[is_integer] ():
identifier[row] = identifier[max] ( literal[int] , identifier[row] - literal[int] )
keyword[return] [
identifier[int] ( identifier[math] . identifier[floor] ( identifier[col] )),
identifier[int] ( identifier[math] . identifier[floor] ( identifier[row] ))
] | def tileAddress(self, zoom, point):
"""Returns a tile address based on a zoom level and a point in the tile"""
[x, y] = point
assert x <= self.MAXX and x >= self.MINX
assert y <= self.MAXY and y >= self.MINY
assert zoom in range(0, len(self.RESOLUTIONS))
tileS = self.tileSize(zoom)
offsetX = abs(x - self.MINX)
if self.originCorner == 'bottom-left':
offsetY = abs(y - self.MINY) # depends on [control=['if'], data=[]]
elif self.originCorner == 'top-left':
offsetY = abs(self.MAXY - y) # depends on [control=['if'], data=[]]
col = offsetX / tileS
row = offsetY / tileS
# We are exactly on the edge of a tile and the extent
if x in (self.MINX, self.MAXX) and col.is_integer():
col = max(0, col - 1) # depends on [control=['if'], data=[]]
if y in (self.MINY, self.MAXY) and row.is_integer():
row = max(0, row - 1) # depends on [control=['if'], data=[]]
return [int(math.floor(col)), int(math.floor(row))] |
def sum(self, attribute_selector, state_keeper=None):
    """Apply a rolling sum operator to the stream.

    Attributes:
        sum_attribute_index (int): The index of the attribute to sum
            (assuming tuple records).
    """
    return self.__register(
        Operator(
            _generate_uuid(),
            OpType.Sum,
            "Sum",
            _sum,
            other=attribute_selector,
            state_actor=state_keeper,
            num_instances=self.env.config.parallelism,
        )
    )
constant[Applies a rolling sum operator to the stream.
Attributes:
sum_attribute_index (int): The index of the attribute to sum
(assuming tuple records).
]
variable[op] assign[=] call[name[Operator], parameter[call[name[_generate_uuid], parameter[]], name[OpType].Sum, constant[Sum], name[_sum]]]
return[call[name[self].__register, parameter[name[op]]]] | keyword[def] identifier[sum] ( identifier[self] , identifier[attribute_selector] , identifier[state_keeper] = keyword[None] ):
literal[string]
identifier[op] = identifier[Operator] (
identifier[_generate_uuid] (),
identifier[OpType] . identifier[Sum] ,
literal[string] ,
identifier[_sum] ,
identifier[other] = identifier[attribute_selector] ,
identifier[state_actor] = identifier[state_keeper] ,
identifier[num_instances] = identifier[self] . identifier[env] . identifier[config] . identifier[parallelism] )
keyword[return] identifier[self] . identifier[__register] ( identifier[op] ) | def sum(self, attribute_selector, state_keeper=None):
"""Applies a rolling sum operator to the stream.
Attributes:
sum_attribute_index (int): The index of the attribute to sum
(assuming tuple records).
"""
op = Operator(_generate_uuid(), OpType.Sum, 'Sum', _sum, other=attribute_selector, state_actor=state_keeper, num_instances=self.env.config.parallelism)
return self.__register(op) |
def _authenticate(self):
"""An innocent call to check that the credentials are okay."""
response = self._get("/v1/domains/{0}".format(self.domain))
self.domain_id = response["domain"]["id"] | def function[_authenticate, parameter[self]]:
constant[An innocent call to check that the credentials are okay.]
variable[response] assign[=] call[name[self]._get, parameter[call[constant[/v1/domains/{0}].format, parameter[name[self].domain]]]]
name[self].domain_id assign[=] call[call[name[response]][constant[domain]]][constant[id]] | keyword[def] identifier[_authenticate] ( identifier[self] ):
literal[string]
identifier[response] = identifier[self] . identifier[_get] ( literal[string] . identifier[format] ( identifier[self] . identifier[domain] ))
identifier[self] . identifier[domain_id] = identifier[response] [ literal[string] ][ literal[string] ] | def _authenticate(self):
"""An innocent call to check that the credentials are okay."""
response = self._get('/v1/domains/{0}'.format(self.domain))
self.domain_id = response['domain']['id'] |
def get_network(self):
    """ Identify the connected network. This call returns a
        dictionary with keys chain_id, core_symbol and prefix

        :raises Exception: if the chain id does not match any known network
    """
    props = self.get_chain_properties()
    chain_id = props["chain_id"]
    # Iterate values only -- the chain-name key was previously bound to an
    # unused variable.
    for chain in known_chains.values():
        if chain["chain_id"] == chain_id:
            return chain
    raise Exception("Connecting to unknown network!")
constant[ Identify the connected network. This call returns a
dictionary with keys chain_id, core_symbol and prefix
]
variable[props] assign[=] call[name[self].get_chain_properties, parameter[]]
variable[chain_id] assign[=] call[name[props]][constant[chain_id]]
for taget[tuple[[<ast.Name object at 0x7da1b106f370>, <ast.Name object at 0x7da1b106d570>]]] in starred[call[name[known_chains].items, parameter[]]] begin[:]
if compare[call[name[v]][constant[chain_id]] equal[==] name[chain_id]] begin[:]
return[name[v]]
<ast.Raise object at 0x7da1b106f310> | keyword[def] identifier[get_network] ( identifier[self] ):
literal[string]
identifier[props] = identifier[self] . identifier[get_chain_properties] ()
identifier[chain_id] = identifier[props] [ literal[string] ]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[known_chains] . identifier[items] ():
keyword[if] identifier[v] [ literal[string] ]== identifier[chain_id] :
keyword[return] identifier[v]
keyword[raise] identifier[Exception] ( literal[string] ) | def get_network(self):
""" Identify the connected network. This call returns a
dictionary with keys chain_id, core_symbol and prefix
"""
props = self.get_chain_properties()
chain_id = props['chain_id']
for (k, v) in known_chains.items():
if v['chain_id'] == chain_id:
return v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
raise Exception('Connecting to unknown network!') |
def script():
    """Run the command-line script.

    Parses the command line, prints all textual tags of each given audio
    file and (unless ``--batch`` is given) offers to remove unsupported
    tag properties from each file.
    """
    parser = argparse.ArgumentParser(description="Print all textual tags of one or more audio files.")
    parser.add_argument("-b", "--batch", help="disable user interaction", action="store_true")
    parser.add_argument("file", nargs="+", help="file(s) to print tags of")
    args = parser.parse_args()
    # raw_input was renamed to input in Python 3; the choice is loop-invariant,
    # so pick it once instead of once per file.
    inputFunction = raw_input if sys.version_info[0] == 2 else input
    for filename in args.file:
        if isinstance(filename, bytes):
            filename = filename.decode(sys.getfilesystemencoding())
        line = "TAGS OF '{0}'".format(os.path.basename(filename))
        print("*" * len(line))
        print(line)
        print("*" * len(line))
        audioFile = taglib.File(filename)
        tags = audioFile.tags
        if tags:
            # Pad every key to the longest key so the '=' signs line up.
            maxKeyLen = max(len(key) for key in tags)
            for key, values in tags.items():
                for value in values:
                    # Nested format spec instead of assembling the format
                    # string by concatenation.
                    print("{0:{1}} = {2}".format(key, maxKeyLen, value))
        if audioFile.unsupported:
            print('Unsupported tag elements: ' + "; ".join(audioFile.unsupported))
        if not args.batch and inputFunction("remove unsupported properties? [yN] ").lower() in ["y", "yes"]:
            audioFile.removeUnsupportedProperties(audioFile.unsupported)
            audioFile.save()
constant[Run the command-line script.]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[-b], constant[--batch]]]
call[name[parser].add_argument, parameter[constant[file]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
for taget[name[filename]] in starred[name[args].file] begin[:]
if call[name[isinstance], parameter[name[filename], name[bytes]]] begin[:]
variable[filename] assign[=] call[name[filename].decode, parameter[call[name[sys].getfilesystemencoding, parameter[]]]]
variable[line] assign[=] call[constant[TAGS OF '{0}'].format, parameter[call[name[os].path.basename, parameter[name[filename]]]]]
call[name[print], parameter[binary_operation[constant[*] * call[name[len], parameter[name[line]]]]]]
call[name[print], parameter[name[line]]]
call[name[print], parameter[binary_operation[constant[*] * call[name[len], parameter[name[line]]]]]]
variable[audioFile] assign[=] call[name[taglib].File, parameter[name[filename]]]
variable[tags] assign[=] name[audioFile].tags
if compare[call[name[len], parameter[name[tags]]] greater[>] constant[0]] begin[:]
variable[maxKeyLen] assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da18fe91f30>]]
for taget[tuple[[<ast.Name object at 0x7da18fe92290>, <ast.Name object at 0x7da18fe93940>]]] in starred[call[name[tags].items, parameter[]]] begin[:]
for taget[name[value]] in starred[name[values]] begin[:]
call[name[print], parameter[call[binary_operation[binary_operation[constant[{0:] + call[name[str], parameter[name[maxKeyLen]]]] + constant[} = {1}]].format, parameter[name[key], name[value]]]]]
if compare[call[name[len], parameter[name[audioFile].unsupported]] greater[>] constant[0]] begin[:]
call[name[print], parameter[binary_operation[constant[Unsupported tag elements: ] + call[constant[; ].join, parameter[name[audioFile].unsupported]]]]]
if compare[call[name[sys].version_info][constant[0]] equal[==] constant[2]] begin[:]
variable[inputFunction] assign[=] name[raw_input]
if <ast.BoolOp object at 0x7da2044c3cd0> begin[:]
call[name[audioFile].removeUnsupportedProperties, parameter[name[audioFile].unsupported]]
call[name[audioFile].save, parameter[]] | keyword[def] identifier[script] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[help] = literal[string] , identifier[action] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[help] = literal[string] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
keyword[for] identifier[filename] keyword[in] identifier[args] . identifier[file] :
keyword[if] identifier[isinstance] ( identifier[filename] , identifier[bytes] ):
identifier[filename] = identifier[filename] . identifier[decode] ( identifier[sys] . identifier[getfilesystemencoding] ())
identifier[line] = literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] ))
identifier[print] ( literal[string] * identifier[len] ( identifier[line] ))
identifier[print] ( identifier[line] )
identifier[print] ( literal[string] * identifier[len] ( identifier[line] ))
identifier[audioFile] = identifier[taglib] . identifier[File] ( identifier[filename] )
identifier[tags] = identifier[audioFile] . identifier[tags]
keyword[if] identifier[len] ( identifier[tags] )> literal[int] :
identifier[maxKeyLen] = identifier[max] ( identifier[len] ( identifier[key] ) keyword[for] identifier[key] keyword[in] identifier[tags] . identifier[keys] ())
keyword[for] identifier[key] , identifier[values] keyword[in] identifier[tags] . identifier[items] ():
keyword[for] identifier[value] keyword[in] identifier[values] :
identifier[print] (( literal[string] + identifier[str] ( identifier[maxKeyLen] )+ literal[string] ). identifier[format] ( identifier[key] , identifier[value] ))
keyword[if] identifier[len] ( identifier[audioFile] . identifier[unsupported] )> literal[int] :
identifier[print] ( literal[string] + literal[string] . identifier[join] ( identifier[audioFile] . identifier[unsupported] ))
keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] :
identifier[inputFunction] = identifier[raw_input]
keyword[else] :
identifier[inputFunction] = identifier[input]
keyword[if] keyword[not] identifier[args] . identifier[batch] keyword[and] identifier[inputFunction] ( literal[string] ). identifier[lower] () keyword[in] [ literal[string] , literal[string] ]:
identifier[audioFile] . identifier[removeUnsupportedProperties] ( identifier[audioFile] . identifier[unsupported] )
identifier[audioFile] . identifier[save] () | def script():
"""Run the command-line script."""
parser = argparse.ArgumentParser(description='Print all textual tags of one or more audio files.')
parser.add_argument('-b', '--batch', help='disable user interaction', action='store_true')
parser.add_argument('file', nargs='+', help='file(s) to print tags of')
args = parser.parse_args()
for filename in args.file:
if isinstance(filename, bytes):
filename = filename.decode(sys.getfilesystemencoding()) # depends on [control=['if'], data=[]]
line = "TAGS OF '{0}'".format(os.path.basename(filename))
print('*' * len(line))
print(line)
print('*' * len(line))
audioFile = taglib.File(filename)
tags = audioFile.tags
if len(tags) > 0:
maxKeyLen = max((len(key) for key in tags.keys()))
for (key, values) in tags.items():
for value in values:
print(('{0:' + str(maxKeyLen) + '} = {1}').format(key, value)) # depends on [control=['for'], data=['value']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if len(audioFile.unsupported) > 0:
print('Unsupported tag elements: ' + '; '.join(audioFile.unsupported))
if sys.version_info[0] == 2:
inputFunction = raw_input # depends on [control=['if'], data=[]]
else:
inputFunction = input
if not args.batch and inputFunction('remove unsupported properties? [yN] ').lower() in ['y', 'yes']:
audioFile.removeUnsupportedProperties(audioFile.unsupported)
audioFile.save() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']] |
def predicate_type(self, pred: URIRef) -> URIRef:
    """
    Look up the rdfs:range of the given predicate in the ontology graph.

    :param pred: predicate to map
    :return: the range URI of ``pred``
    """
    range_uri = self._o.value(pred, RDFS.range)
    return range_uri
constant[
Return the type of pred
:param pred: predicate to map
:return:
]
return[call[name[self]._o.value, parameter[name[pred], name[RDFS].range]]] | keyword[def] identifier[predicate_type] ( identifier[self] , identifier[pred] : identifier[URIRef] )-> identifier[URIRef] :
literal[string]
keyword[return] identifier[self] . identifier[_o] . identifier[value] ( identifier[pred] , identifier[RDFS] . identifier[range] ) | def predicate_type(self, pred: URIRef) -> URIRef:
"""
Return the type of pred
:param pred: predicate to map
:return:
"""
return self._o.value(pred, RDFS.range) |
def extract_columns_from_table_definition_file(xmltag, table_definition_file):
    """
    Extract all columns mentioned in the result tag of a table definition file.

    @param xmltag: XML element whose <column> children describe the columns
    @param table_definition_file: path of the table-definition file,
        used to resolve relative hrefs
    @return: list of Column objects
    """
    def handle_path(path):
        """Convert path from a path relative to table-definition file."""
        # Empty values and absolute URLs pass through unchanged;
        # startswith accepts a tuple of prefixes.
        if not path or path.startswith(("http://", "https://")):
            return path
        return os.path.join(os.path.dirname(table_definition_file), path)

    columns = []
    for c in xmltag.findall('column'):
        scale_factor = c.get("scaleFactor")
        display_unit = c.get("displayUnit")
        source_unit = c.get("sourceUnit")
        new_column = Column(c.get("title"), c.text, c.get("numberOfDigits"),
                            handle_path(c.get("href")), None, display_unit, source_unit,
                            scale_factor, c.get("relevantForDiff"), c.get("displayTitle"))
        columns.append(new_column)
    return columns
constant[
Extract all columns mentioned in the result tag of a table definition file.
]
def function[handle_path, parameter[path]]:
constant[Convert path from a path relative to table-definition file.]
if <ast.BoolOp object at 0x7da1b2347f10> begin[:]
return[name[path]]
return[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[table_definition_file]]], name[path]]]]
variable[columns] assign[=] call[name[list], parameter[]]
for taget[name[c]] in starred[call[name[xmltag].findall, parameter[constant[column]]]] begin[:]
variable[scale_factor] assign[=] call[name[c].get, parameter[constant[scaleFactor]]]
variable[display_unit] assign[=] call[name[c].get, parameter[constant[displayUnit]]]
variable[source_unit] assign[=] call[name[c].get, parameter[constant[sourceUnit]]]
variable[new_column] assign[=] call[name[Column], parameter[call[name[c].get, parameter[constant[title]]], name[c].text, call[name[c].get, parameter[constant[numberOfDigits]]], call[name[handle_path], parameter[call[name[c].get, parameter[constant[href]]]]], constant[None], name[display_unit], name[source_unit], name[scale_factor], call[name[c].get, parameter[constant[relevantForDiff]]], call[name[c].get, parameter[constant[displayTitle]]]]]
call[name[columns].append, parameter[name[new_column]]]
return[name[columns]] | keyword[def] identifier[extract_columns_from_table_definition_file] ( identifier[xmltag] , identifier[table_definition_file] ):
literal[string]
keyword[def] identifier[handle_path] ( identifier[path] ):
literal[string]
keyword[if] keyword[not] identifier[path] keyword[or] identifier[path] . identifier[startswith] ( literal[string] ) keyword[or] identifier[path] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[path]
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[table_definition_file] ), identifier[path] )
identifier[columns] = identifier[list] ()
keyword[for] identifier[c] keyword[in] identifier[xmltag] . identifier[findall] ( literal[string] ):
identifier[scale_factor] = identifier[c] . identifier[get] ( literal[string] )
identifier[display_unit] = identifier[c] . identifier[get] ( literal[string] )
identifier[source_unit] = identifier[c] . identifier[get] ( literal[string] )
identifier[new_column] = identifier[Column] ( identifier[c] . identifier[get] ( literal[string] ), identifier[c] . identifier[text] , identifier[c] . identifier[get] ( literal[string] ),
identifier[handle_path] ( identifier[c] . identifier[get] ( literal[string] )), keyword[None] , identifier[display_unit] , identifier[source_unit] ,
identifier[scale_factor] , identifier[c] . identifier[get] ( literal[string] ), identifier[c] . identifier[get] ( literal[string] ))
identifier[columns] . identifier[append] ( identifier[new_column] )
keyword[return] identifier[columns] | def extract_columns_from_table_definition_file(xmltag, table_definition_file):
"""
Extract all columns mentioned in the result tag of a table definition file.
"""
def handle_path(path):
"""Convert path from a path relative to table-definition file."""
if not path or path.startswith('http://') or path.startswith('https://'):
return path # depends on [control=['if'], data=[]]
return os.path.join(os.path.dirname(table_definition_file), path)
columns = list()
for c in xmltag.findall('column'):
scale_factor = c.get('scaleFactor')
display_unit = c.get('displayUnit')
source_unit = c.get('sourceUnit')
new_column = Column(c.get('title'), c.text, c.get('numberOfDigits'), handle_path(c.get('href')), None, display_unit, source_unit, scale_factor, c.get('relevantForDiff'), c.get('displayTitle'))
columns.append(new_column) # depends on [control=['for'], data=['c']]
return columns |
def get_potential_files(self, ignore_list):
    """
    Get a listing of files for the appropriate task which may or may
    not be locked and/or done.

    @param ignore_list: filenames to exclude from the result
    @return: list of candidate filenames
    """
    # For the reals task, fake ('fk'-prefixed) files are excluded.
    # (Explicit conditional instead of the fragile `and/or` ternary hack.)
    if self.taskid == tasks.suffixes.get(tasks.REALS_TASK, ''):
        exclude_prefix = 'fk'
    else:
        exclude_prefix = None
    filenames = [filename for filename in
                 self.directory_context.get_listing(self.taskid, exclude_prefix=exclude_prefix)
                 if filename not in ignore_list and
                 filename not in self._done and
                 filename not in self._already_fetched]
    # if the extension is .mpc. then we look for the largest numbered MPC file.
    # look for the largest numbered MPC file only.
    if self.taskid == tasks.suffixes.get(tasks.TRACK_TASK, ''):
        basenames = {}
        for filename in filenames:
            fullname = os.path.splitext(filename)[0]
            if fullname in basenames:
                continue
            basename = os.path.splitext(fullname)[0]
            # only do the 'maximum' search when the 2nd extension is an integer value
            try:
                idx = int(filename.split('.')[-2])
                if idx > basenames.get(basename, 0):
                    basenames[basename] = idx
            except (ValueError, IndexError):
                # Not a numbered file (non-integer or missing 2nd extension):
                # keep it in the list verbatim.  Narrowed from a bare except
                # that silently swallowed every error.
                basenames[fullname] = ''
        filenames = []
        for basename in basenames:
            # sometimes the version is empty, so no '.' is needed
            version = basenames[basename]
            version = ".{}".format(version) if version != '' else version
            filenames.append("{}{}{}".format(basename, version, self.taskid))
    return filenames
constant[
Get a listing of files for the appropriate task which may or may
not be locked and/or done.
]
variable[exclude_prefix] assign[=] <ast.BoolOp object at 0x7da1b1a76410>
variable[filenames] assign[=] <ast.ListComp object at 0x7da1b1a74190>
if compare[name[self].taskid equal[==] call[name[tasks].suffixes.get, parameter[name[tasks].TRACK_TASK, constant[]]]] begin[:]
variable[basenames] assign[=] dictionary[[], []]
for taget[name[filename]] in starred[name[filenames]] begin[:]
variable[fullname] assign[=] call[call[name[os].path.splitext, parameter[name[filename]]]][constant[0]]
if compare[name[fullname] in name[basenames]] begin[:]
continue
variable[basename] assign[=] call[call[name[os].path.splitext, parameter[name[fullname]]]][constant[0]]
<ast.Try object at 0x7da1b1a75a20>
variable[filenames] assign[=] list[[]]
for taget[name[basename]] in starred[name[basenames]] begin[:]
variable[version] assign[=] call[name[basenames]][name[basename]]
variable[version] assign[=] <ast.BoolOp object at 0x7da1b19da2c0>
call[name[filenames].append, parameter[call[constant[{}{}{}].format, parameter[name[basename], name[version], name[self].taskid]]]]
return[name[filenames]] | keyword[def] identifier[get_potential_files] ( identifier[self] , identifier[ignore_list] ):
literal[string]
identifier[exclude_prefix] = identifier[self] . identifier[taskid] == identifier[tasks] . identifier[suffixes] . identifier[get] ( identifier[tasks] . identifier[REALS_TASK] , literal[string] ) keyword[and] literal[string] keyword[or] keyword[None]
identifier[filenames] =[ identifier[filename] keyword[for] identifier[filename] keyword[in]
identifier[self] . identifier[directory_context] . identifier[get_listing] ( identifier[self] . identifier[taskid] , identifier[exclude_prefix] = identifier[exclude_prefix] )
keyword[if] identifier[filename] keyword[not] keyword[in] identifier[ignore_list] keyword[and]
identifier[filename] keyword[not] keyword[in] identifier[self] . identifier[_done] keyword[and]
identifier[filename] keyword[not] keyword[in] identifier[self] . identifier[_already_fetched] ]
keyword[if] identifier[self] . identifier[taskid] == identifier[tasks] . identifier[suffixes] . identifier[get] ( identifier[tasks] . identifier[TRACK_TASK] , literal[string] ):
identifier[basenames] ={}
keyword[for] identifier[filename] keyword[in] identifier[filenames] :
identifier[fullname] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[ literal[int] ]
keyword[if] identifier[fullname] keyword[in] identifier[basenames] :
keyword[continue]
identifier[basename] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[fullname] )[ literal[int] ]
keyword[try] :
identifier[idx] = identifier[int] ( identifier[filename] . identifier[split] ( literal[string] )[- literal[int] ])
keyword[if] identifier[idx] > identifier[basenames] . identifier[get] ( identifier[basename] , literal[int] ):
identifier[basenames] [ identifier[basename] ]= identifier[idx]
keyword[except] :
identifier[basenames] [ identifier[fullname] ]= literal[string]
identifier[filenames] =[]
keyword[for] identifier[basename] keyword[in] identifier[basenames] :
identifier[version] = identifier[basenames] [ identifier[basename] ]
identifier[version] = identifier[len] ( identifier[str] ( identifier[version] ))> literal[int] keyword[and] literal[string] . identifier[format] ( identifier[version] ) keyword[or] identifier[version]
identifier[filenames] . identifier[append] ( literal[string] . identifier[format] ( identifier[basename] , identifier[version] , identifier[self] . identifier[taskid] ))
keyword[return] identifier[filenames] | def get_potential_files(self, ignore_list):
"""
Get a listing of files for the appropriate task which may or may
not be locked and/or done.
"""
exclude_prefix = self.taskid == tasks.suffixes.get(tasks.REALS_TASK, '') and 'fk' or None
filenames = [filename for filename in self.directory_context.get_listing(self.taskid, exclude_prefix=exclude_prefix) if filename not in ignore_list and filename not in self._done and (filename not in self._already_fetched)]
# if the extension is .mpc. then we look for the largest numbered MPC file.
# look for the largest numbered MPC file only.
if self.taskid == tasks.suffixes.get(tasks.TRACK_TASK, ''):
basenames = {}
for filename in filenames:
fullname = os.path.splitext(filename)[0]
if fullname in basenames:
continue # depends on [control=['if'], data=[]]
basename = os.path.splitext(fullname)[0]
# only do the 'maximum' search when the 2nd extension is an integer value
try:
idx = int(filename.split('.')[-2])
if idx > basenames.get(basename, 0):
basenames[basename] = idx # depends on [control=['if'], data=['idx']] # depends on [control=['try'], data=[]]
except:
# since we failed, just keep the file in the list
basenames[fullname] = '' # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['filename']]
filenames = []
for basename in basenames:
# sometimes the version is empty, so no '.' is needed
version = basenames[basename]
version = len(str(version)) > 0 and '.{}'.format(version) or version
filenames.append('{}{}{}'.format(basename, version, self.taskid)) # depends on [control=['for'], data=['basename']] # depends on [control=['if'], data=[]]
# print basename, basenames[basename], filenames[-1]
return filenames |
def clear_selection(self):
    """Deselect any selected text, leaving the caret where it is."""
    tc = self.textCursor()
    tc.clearSelection()
    # Re-install the cursor so the widget repaints without a selection.
    self.setTextCursor(tc)
constant[Clear current selection]
variable[cursor] assign[=] call[name[self].textCursor, parameter[]]
call[name[cursor].clearSelection, parameter[]]
call[name[self].setTextCursor, parameter[name[cursor]]] | keyword[def] identifier[clear_selection] ( identifier[self] ):
literal[string]
identifier[cursor] = identifier[self] . identifier[textCursor] ()
identifier[cursor] . identifier[clearSelection] ()
identifier[self] . identifier[setTextCursor] ( identifier[cursor] ) | def clear_selection(self):
"""Clear current selection"""
cursor = self.textCursor()
cursor.clearSelection()
self.setTextCursor(cursor) |
def sanitize_cloud(cloud: str) -> str:
    """
    Fix rare cloud layer issues

    Two repairs are applied to the character at index 3:
    a letter ``O`` typed for zero is replaced (``FEWO03`` -> ``FEW003``),
    and any other stray modifier letter is moved to the end of the group
    (``BKNC015`` -> ``BKN015C``). Digits and ``/`` are left untouched.
    """
    if len(cloud) < 4:
        return cloud
    marker = cloud[3]
    if marker.isdigit() or marker == '/':
        return cloud
    if marker == 'O':
        # Bad "O": FEWO03 -> FEW003
        return cloud[:3] + '0' + cloud[4:]
    # Move modifiers to end: BKNC015 -> BKN015C
    return cloud[:3] + cloud[4:] + marker
constant[
Fix rare cloud layer issues
]
if compare[call[name[len], parameter[name[cloud]]] less[<] constant[4]] begin[:]
return[name[cloud]]
if <ast.BoolOp object at 0x7da18f721810> begin[:]
if compare[call[name[cloud]][constant[3]] equal[==] constant[O]] begin[:]
variable[cloud] assign[=] binary_operation[binary_operation[call[name[cloud]][<ast.Slice object at 0x7da18f722b90>] + constant[0]] + call[name[cloud]][<ast.Slice object at 0x7da18f722b00>]]
return[name[cloud]] | keyword[def] identifier[sanitize_cloud] ( identifier[cloud] : identifier[str] )-> identifier[str] :
literal[string]
keyword[if] identifier[len] ( identifier[cloud] )< literal[int] :
keyword[return] identifier[cloud]
keyword[if] keyword[not] identifier[cloud] [ literal[int] ]. identifier[isdigit] () keyword[and] identifier[cloud] [ literal[int] ]!= literal[string] :
keyword[if] identifier[cloud] [ literal[int] ]== literal[string] :
identifier[cloud] = identifier[cloud] [: literal[int] ]+ literal[string] + identifier[cloud] [ literal[int] :]
keyword[else] :
identifier[cloud] = identifier[cloud] [: literal[int] ]+ identifier[cloud] [ literal[int] :]+ identifier[cloud] [ literal[int] ]
keyword[return] identifier[cloud] | def sanitize_cloud(cloud: str) -> str:
"""
Fix rare cloud layer issues
"""
if len(cloud) < 4:
return cloud # depends on [control=['if'], data=[]]
if not cloud[3].isdigit() and cloud[3] != '/':
if cloud[3] == 'O':
cloud = cloud[:3] + '0' + cloud[4:] # Bad "O": FEWO03 -> FEW003 # depends on [control=['if'], data=[]]
else: # Move modifiers to end: BKNC015 -> BKN015C
cloud = cloud[:3] + cloud[4:] + cloud[3] # depends on [control=['if'], data=[]]
return cloud |
def verify_jar_checksums(self, jar_file, strict=True):
    """
    Verify checksums, present in the manifest, against the JAR content.

    :param jar_file: path (or file-like object) of the JAR to verify
    :param strict: when True, entries that carry no ``*-Digest`` key in
        their manifest section are reported as failures; when False such
        entries are skipped
    :return: list of entries for which verification has failed
    """
    verify_failures = []
    # Close the archive deterministically (the original leaked the handle).
    with ZipFile(jar_file) as zip_file:
        for filename in zip_file.namelist():
            if file_skips_verification(filename):
                continue
            file_section = self.create_section(filename, overwrite=False)
            digests = file_section.keys_with_suffix("-Digest")
            if not digests:
                # BUG FIX: previously an entry with no digests fell
                # through to the for/else below and was flagged as a
                # failure even with strict=False, making ``strict`` a
                # no-op. Non-strict mode now tolerates missing digests.
                if strict:
                    verify_failures.append(filename)
                continue
            # Read the entry once; the same bytes feed every digest check.
            content = zip_file.read(filename)
            for java_digest in digests:
                read_digest = file_section.get(java_digest + "-Digest")
                calculated_digest = b64_encoded_digest(
                    content, _get_digest(java_digest))
                if calculated_digest == read_digest:
                    # found a match
                    break
            else:
                # for all the digests, not one of them matched. Add
                # this filename to the error list
                verify_failures.append(filename)
    return verify_failures
constant[
Verify checksums, present in the manifest, against the JAR content.
:return: list of entries for which verification has failed
]
variable[verify_failures] assign[=] list[[]]
variable[zip_file] assign[=] call[name[ZipFile], parameter[name[jar_file]]]
for taget[name[filename]] in starred[call[name[zip_file].namelist, parameter[]]] begin[:]
if call[name[file_skips_verification], parameter[name[filename]]] begin[:]
continue
variable[file_section] assign[=] call[name[self].create_section, parameter[name[filename]]]
variable[digests] assign[=] call[name[file_section].keys_with_suffix, parameter[constant[-Digest]]]
if <ast.BoolOp object at 0x7da1b0b19ff0> begin[:]
call[name[verify_failures].append, parameter[name[filename]]]
continue
for taget[name[java_digest]] in starred[name[digests]] begin[:]
variable[read_digest] assign[=] call[name[file_section].get, parameter[binary_operation[name[java_digest] + constant[-Digest]]]]
variable[calculated_digest] assign[=] call[name[b64_encoded_digest], parameter[call[name[zip_file].read, parameter[name[filename]]], call[name[_get_digest], parameter[name[java_digest]]]]]
if compare[name[calculated_digest] equal[==] name[read_digest]] begin[:]
break
return[name[verify_failures]] | keyword[def] identifier[verify_jar_checksums] ( identifier[self] , identifier[jar_file] , identifier[strict] = keyword[True] ):
literal[string]
identifier[verify_failures] =[]
identifier[zip_file] = identifier[ZipFile] ( identifier[jar_file] )
keyword[for] identifier[filename] keyword[in] identifier[zip_file] . identifier[namelist] ():
keyword[if] identifier[file_skips_verification] ( identifier[filename] ):
keyword[continue]
identifier[file_section] = identifier[self] . identifier[create_section] ( identifier[filename] , identifier[overwrite] = keyword[False] )
identifier[digests] = identifier[file_section] . identifier[keys_with_suffix] ( literal[string] )
keyword[if] keyword[not] identifier[digests] keyword[and] identifier[strict] :
identifier[verify_failures] . identifier[append] ( identifier[filename] )
keyword[continue]
keyword[for] identifier[java_digest] keyword[in] identifier[digests] :
identifier[read_digest] = identifier[file_section] . identifier[get] ( identifier[java_digest] + literal[string] )
identifier[calculated_digest] = identifier[b64_encoded_digest] (
identifier[zip_file] . identifier[read] ( identifier[filename] ),
identifier[_get_digest] ( identifier[java_digest] ))
keyword[if] identifier[calculated_digest] == identifier[read_digest] :
keyword[break]
keyword[else] :
identifier[verify_failures] . identifier[append] ( identifier[filename] )
keyword[return] identifier[verify_failures] | def verify_jar_checksums(self, jar_file, strict=True):
"""
Verify checksums, present in the manifest, against the JAR content.
:return: list of entries for which verification has failed
"""
verify_failures = []
zip_file = ZipFile(jar_file)
for filename in zip_file.namelist():
if file_skips_verification(filename):
continue # depends on [control=['if'], data=[]]
file_section = self.create_section(filename, overwrite=False)
digests = file_section.keys_with_suffix('-Digest')
if not digests and strict:
verify_failures.append(filename)
continue # depends on [control=['if'], data=[]]
for java_digest in digests:
read_digest = file_section.get(java_digest + '-Digest')
calculated_digest = b64_encoded_digest(zip_file.read(filename), _get_digest(java_digest))
if calculated_digest == read_digest:
# found a match
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['java_digest']]
else:
# for all the digests, not one of them matched. Add
# this filename to the error list
verify_failures.append(filename) # depends on [control=['for'], data=['filename']]
return verify_failures |
def feed(self, from_date=None, from_offset=None, category=None,
         latest_items=None, arthur_items=None, filter_classified=None):
    """ Feed data in Elastic from Perceval or Arthur

    Items come from exactly one source: the Perceval archive, a list of
    pre-fetched Arthur items, or a live Perceval fetch. Live fetches may
    be incremental by date or by offset (mutually exclusive), depending
    on which parameters the backend's ``fetch`` method supports.

    :param from_date: fetch items updated after this date; mutually
        exclusive with ``from_offset``
    :param from_offset: fetch items starting at this offset; mutually
        exclusive with ``from_date``
    :param category: Perceval item category to fetch, when not None
    :param latest_items: backend-specific value to fetch only the latest
        items, overriding incremental date/offset
    :param arthur_items: items already fetched by Arthur; fed directly,
        bypassing Perceval
    :param filter_classified: when not None, ask Perceval to filter
        classified fields from the items
    :raises RuntimeError: if both ``from_date`` and ``from_offset`` are
        given
    """
    # Archived or Arthur-provided items bypass the live fetch entirely.
    if self.fetch_archive:
        items = self.perceval_backend.fetch_from_archive()
        self.feed_items(items)
        return
    elif arthur_items:
        items = arthur_items
        self.feed_items(items)
        return
    if from_date and from_offset:
        # BUG FIX: message previously read "Can't not feed ..." (an
        # accidental double negative).
        raise RuntimeError("Can't feed using from_date and from_offset.")
    # We need to filter by repository to support several repositories
    # in the same raw index
    filters_ = [get_repository_filter(self.perceval_backend,
                                      self.get_connector_name())]
    # Check if backend supports from_date
    signature = inspect.signature(self.perceval_backend.fetch)
    last_update = None
    if 'from_date' in signature.parameters:
        if from_date:
            last_update = from_date
        else:
            # Resume from the newest item already stored in ES.
            self.last_update = self.get_last_update_from_es(filters_=filters_)
            last_update = self.last_update
        logger.info("Incremental from: %s", last_update)
    offset = None
    if 'offset' in signature.parameters:
        if from_offset:
            offset = from_offset
        else:
            offset = self.elastic.get_last_offset("offset", filters_=filters_)
        if offset is not None:
            logger.info("Incremental from: %i offset", offset)
        else:
            logger.info("Not incremental")
    params = {}
    # category and filter_classified params are shared
    # by all Perceval backends
    if category is not None:
        params['category'] = category
    if filter_classified is not None:
        params['filter_classified'] = filter_classified
    # latest items, from_date and offset cannot be used together,
    # thus, the params dictionary is filled with the param available
    # and Perceval is executed
    if latest_items:
        params['latest_items'] = latest_items
        items = self.perceval_backend.fetch(**params)
    elif last_update:
        # Perceval expects naive datetimes for from_date.
        last_update = last_update.replace(tzinfo=None)
        params['from_date'] = last_update
        items = self.perceval_backend.fetch(**params)
    elif offset is not None:
        params['offset'] = offset
        items = self.perceval_backend.fetch(**params)
    else:
        items = self.perceval_backend.fetch(**params)
    self.feed_items(items)
    self.update_items()
constant[ Feed data in Elastic from Perceval or Arthur ]
if name[self].fetch_archive begin[:]
variable[items] assign[=] call[name[self].perceval_backend.fetch_from_archive, parameter[]]
call[name[self].feed_items, parameter[name[items]]]
return[None]
if <ast.BoolOp object at 0x7da18dc99690> begin[:]
<ast.Raise object at 0x7da18dc98d90>
variable[filters_] assign[=] list[[<ast.Call object at 0x7da18dc9ad10>]]
variable[signature] assign[=] call[name[inspect].signature, parameter[name[self].perceval_backend.fetch]]
variable[last_update] assign[=] constant[None]
if compare[constant[from_date] in name[signature].parameters] begin[:]
if name[from_date] begin[:]
variable[last_update] assign[=] name[from_date]
call[name[logger].info, parameter[constant[Incremental from: %s], name[last_update]]]
variable[offset] assign[=] constant[None]
if compare[constant[offset] in name[signature].parameters] begin[:]
if name[from_offset] begin[:]
variable[offset] assign[=] name[from_offset]
if compare[name[offset] is_not constant[None]] begin[:]
call[name[logger].info, parameter[constant[Incremental from: %i offset], name[offset]]]
variable[params] assign[=] dictionary[[], []]
if compare[name[category] is_not constant[None]] begin[:]
call[name[params]][constant[category]] assign[=] name[category]
if compare[name[filter_classified] is_not constant[None]] begin[:]
call[name[params]][constant[filter_classified]] assign[=] name[filter_classified]
if name[latest_items] begin[:]
call[name[params]][constant[latest_items]] assign[=] name[latest_items]
variable[items] assign[=] call[name[self].perceval_backend.fetch, parameter[]]
call[name[self].feed_items, parameter[name[items]]]
call[name[self].update_items, parameter[]] | keyword[def] identifier[feed] ( identifier[self] , identifier[from_date] = keyword[None] , identifier[from_offset] = keyword[None] , identifier[category] = keyword[None] ,
identifier[latest_items] = keyword[None] , identifier[arthur_items] = keyword[None] , identifier[filter_classified] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[fetch_archive] :
identifier[items] = identifier[self] . identifier[perceval_backend] . identifier[fetch_from_archive] ()
identifier[self] . identifier[feed_items] ( identifier[items] )
keyword[return]
keyword[elif] identifier[arthur_items] :
identifier[items] = identifier[arthur_items]
identifier[self] . identifier[feed_items] ( identifier[items] )
keyword[return]
keyword[if] identifier[from_date] keyword[and] identifier[from_offset] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[filters_] =[ identifier[get_repository_filter] ( identifier[self] . identifier[perceval_backend] ,
identifier[self] . identifier[get_connector_name] ())]
identifier[signature] = identifier[inspect] . identifier[signature] ( identifier[self] . identifier[perceval_backend] . identifier[fetch] )
identifier[last_update] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[signature] . identifier[parameters] :
keyword[if] identifier[from_date] :
identifier[last_update] = identifier[from_date]
keyword[else] :
identifier[self] . identifier[last_update] = identifier[self] . identifier[get_last_update_from_es] ( identifier[filters_] = identifier[filters_] )
identifier[last_update] = identifier[self] . identifier[last_update]
identifier[logger] . identifier[info] ( literal[string] , identifier[last_update] )
identifier[offset] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[signature] . identifier[parameters] :
keyword[if] identifier[from_offset] :
identifier[offset] = identifier[from_offset]
keyword[else] :
identifier[offset] = identifier[self] . identifier[elastic] . identifier[get_last_offset] ( literal[string] , identifier[filters_] = identifier[filters_] )
keyword[if] identifier[offset] keyword[is] keyword[not] keyword[None] :
identifier[logger] . identifier[info] ( literal[string] , identifier[offset] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[params] ={}
keyword[if] identifier[category] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[category]
keyword[if] identifier[filter_classified] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[filter_classified]
keyword[if] identifier[latest_items] :
identifier[params] [ literal[string] ]= identifier[latest_items]
identifier[items] = identifier[self] . identifier[perceval_backend] . identifier[fetch] (** identifier[params] )
keyword[elif] identifier[last_update] :
identifier[last_update] = identifier[last_update] . identifier[replace] ( identifier[tzinfo] = keyword[None] )
identifier[params] [ literal[string] ]= identifier[last_update]
identifier[items] = identifier[self] . identifier[perceval_backend] . identifier[fetch] (** identifier[params] )
keyword[elif] identifier[offset] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[offset]
identifier[items] = identifier[self] . identifier[perceval_backend] . identifier[fetch] (** identifier[params] )
keyword[else] :
identifier[items] = identifier[self] . identifier[perceval_backend] . identifier[fetch] (** identifier[params] )
identifier[self] . identifier[feed_items] ( identifier[items] )
identifier[self] . identifier[update_items] () | def feed(self, from_date=None, from_offset=None, category=None, latest_items=None, arthur_items=None, filter_classified=None):
""" Feed data in Elastic from Perceval or Arthur """
if self.fetch_archive:
items = self.perceval_backend.fetch_from_archive()
self.feed_items(items)
return # depends on [control=['if'], data=[]]
elif arthur_items:
items = arthur_items
self.feed_items(items)
return # depends on [control=['if'], data=[]]
if from_date and from_offset:
raise RuntimeError("Can't not feed using from_date and from_offset.") # depends on [control=['if'], data=[]]
# We need to filter by repository to support several repositories
# in the same raw index
filters_ = [get_repository_filter(self.perceval_backend, self.get_connector_name())]
# Check if backend supports from_date
signature = inspect.signature(self.perceval_backend.fetch)
last_update = None
if 'from_date' in signature.parameters:
if from_date:
last_update = from_date # depends on [control=['if'], data=[]]
else:
self.last_update = self.get_last_update_from_es(filters_=filters_)
last_update = self.last_update
logger.info('Incremental from: %s', last_update) # depends on [control=['if'], data=[]]
offset = None
if 'offset' in signature.parameters:
if from_offset:
offset = from_offset # depends on [control=['if'], data=[]]
else:
offset = self.elastic.get_last_offset('offset', filters_=filters_)
if offset is not None:
logger.info('Incremental from: %i offset', offset) # depends on [control=['if'], data=['offset']]
else:
logger.info('Not incremental') # depends on [control=['if'], data=[]]
params = {}
# category and filter_classified params are shared
# by all Perceval backends
if category is not None:
params['category'] = category # depends on [control=['if'], data=['category']]
if filter_classified is not None:
params['filter_classified'] = filter_classified # depends on [control=['if'], data=['filter_classified']]
# latest items, from_date and offset cannot be used together,
# thus, the params dictionary is filled with the param available
# and Perceval is executed
if latest_items:
params['latest_items'] = latest_items
items = self.perceval_backend.fetch(**params) # depends on [control=['if'], data=[]]
elif last_update:
last_update = last_update.replace(tzinfo=None)
params['from_date'] = last_update
items = self.perceval_backend.fetch(**params) # depends on [control=['if'], data=[]]
elif offset is not None:
params['offset'] = offset
items = self.perceval_backend.fetch(**params) # depends on [control=['if'], data=['offset']]
else:
items = self.perceval_backend.fetch(**params)
self.feed_items(items)
self.update_items() |
def split_bin_edges(edges, npts=2):
    """Subdivide an array of bins by splitting each bin into ``npts``
    subintervals.
    Parameters
    ----------
    edges : `~numpy.ndarray`
        Bin edge array.
    npts : int
        Number of intervals into which each bin will be subdivided.
    Returns
    -------
    edges : `~numpy.ndarray`
        Subdivided bin edge array.
    """
    if npts < 2:
        return edges
    # Interpolate npts+1 evenly spaced points across every bin, then
    # flatten and deduplicate (shared endpoints appear twice).
    lo = edges[:-1, None]
    hi = edges[1:, None]
    fracs = np.linspace(0.0, 1.0, npts + 1)[None, :]
    subdivided = lo + (hi - lo) * fracs
    return np.unique(np.ravel(subdivided))
constant[Subdivide an array of bins by splitting each bin into ``npts``
subintervals.
Parameters
----------
edges : `~numpy.ndarray`
Bin edge array.
npts : int
Number of intervals into which each bin will be subdivided.
Returns
-------
edges : `~numpy.ndarray`
Subdivided bin edge array.
]
if compare[name[npts] less[<] constant[2]] begin[:]
return[name[edges]]
variable[x] assign[=] binary_operation[call[name[edges]][tuple[[<ast.Slice object at 0x7da204620070>, <ast.Constant object at 0x7da20c6c6200>]]] + binary_operation[binary_operation[call[name[edges]][tuple[[<ast.Slice object at 0x7da20c6c7c40>, <ast.Constant object at 0x7da20c6c7a00>]]] - call[name[edges]][tuple[[<ast.Slice object at 0x7da20c6c5f00>, <ast.Constant object at 0x7da20c6c6470>]]]] * call[call[name[np].linspace, parameter[constant[0.0], constant[1.0], binary_operation[name[npts] + constant[1]]]]][tuple[[<ast.Constant object at 0x7da20c6c5900>, <ast.Slice object at 0x7da20c6c78e0>]]]]]
return[call[name[np].unique, parameter[call[name[np].ravel, parameter[name[x]]]]]] | keyword[def] identifier[split_bin_edges] ( identifier[edges] , identifier[npts] = literal[int] ):
literal[string]
keyword[if] identifier[npts] < literal[int] :
keyword[return] identifier[edges]
identifier[x] =( identifier[edges] [:- literal[int] , keyword[None] ]+
( identifier[edges] [ literal[int] :, keyword[None] ]- identifier[edges] [:- literal[int] , keyword[None] ])*
identifier[np] . identifier[linspace] ( literal[int] , literal[int] , identifier[npts] + literal[int] )[ keyword[None] ,:])
keyword[return] identifier[np] . identifier[unique] ( identifier[np] . identifier[ravel] ( identifier[x] )) | def split_bin_edges(edges, npts=2):
"""Subdivide an array of bins by splitting each bin into ``npts``
subintervals.
Parameters
----------
edges : `~numpy.ndarray`
Bin edge array.
npts : int
Number of intervals into which each bin will be subdivided.
Returns
-------
edges : `~numpy.ndarray`
Subdivided bin edge array.
"""
if npts < 2:
return edges # depends on [control=['if'], data=[]]
x = edges[:-1, None] + (edges[1:, None] - edges[:-1, None]) * np.linspace(0.0, 1.0, npts + 1)[None, :]
return np.unique(np.ravel(x)) |
def record_manifest(self):
    """
    Called after a deployment to record any data necessary to detect changes
    for a future deployment.
    """
    data = super(GitTrackerSatchel, self).record_manifest()
    # Remember the commit deployed now so the next run can diff against it.
    data[CURRENT_COMMIT] = self.get_current_commit()
    return data
constant[
Called after a deployment to record any data necessary to detect changes
for a future deployment.
]
variable[manifest] assign[=] call[call[name[super], parameter[name[GitTrackerSatchel], name[self]]].record_manifest, parameter[]]
call[name[manifest]][name[CURRENT_COMMIT]] assign[=] call[name[self].get_current_commit, parameter[]]
return[name[manifest]] | keyword[def] identifier[record_manifest] ( identifier[self] ):
literal[string]
identifier[manifest] = identifier[super] ( identifier[GitTrackerSatchel] , identifier[self] ). identifier[record_manifest] ()
identifier[manifest] [ identifier[CURRENT_COMMIT] ]= identifier[self] . identifier[get_current_commit] ()
keyword[return] identifier[manifest] | def record_manifest(self):
"""
Called after a deployment to record any data necessary to detect changes
for a future deployment.
"""
manifest = super(GitTrackerSatchel, self).record_manifest()
manifest[CURRENT_COMMIT] = self.get_current_commit()
return manifest |
def from_rgb(cls, r: int, g: int, b: int) -> 'ColorCode':
    """ Return a ColorCode from a RGB tuple. """
    code = cls()
    code._init_rgb(r, g, b)
    return code
constant[ Return a ColorCode from a RGB tuple. ]
variable[c] assign[=] call[name[cls], parameter[]]
call[name[c]._init_rgb, parameter[name[r], name[g], name[b]]]
return[name[c]] | keyword[def] identifier[from_rgb] ( identifier[cls] , identifier[r] : identifier[int] , identifier[g] : identifier[int] , identifier[b] : identifier[int] )-> literal[string] :
literal[string]
identifier[c] = identifier[cls] ()
identifier[c] . identifier[_init_rgb] ( identifier[r] , identifier[g] , identifier[b] )
keyword[return] identifier[c] | def from_rgb(cls, r: int, g: int, b: int) -> 'ColorCode':
""" Return a ColorCode from a RGB tuple. """
c = cls()
c._init_rgb(r, g, b)
return c |
def getPartitionList(self):
    """Returns list of partitions.
    @return: List of (disk,partition) pairs.
    """
    if self._partList is None:
        # Flatten the {disk: [partitions]} mapping into pairs and cache it.
        self._partList = [(disk, part)
                          for (disk, parts)
                          in self.getPartitionDict().iteritems()
                          for part in parts]
    return self._partList
return self._partList | def function[getPartitionList, parameter[self]]:
constant[Returns list of partitions.
@return: List of (disk,partition) pairs.
]
if compare[name[self]._partList is constant[None]] begin[:]
name[self]._partList assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b10426e0>, <ast.Name object at 0x7da1b10420e0>]]] in starred[call[call[name[self].getPartitionDict, parameter[]].iteritems, parameter[]]] begin[:]
for taget[name[part]] in starred[name[parts]] begin[:]
call[name[self]._partList.append, parameter[tuple[[<ast.Name object at 0x7da1b1042560>, <ast.Name object at 0x7da1b1043940>]]]]
return[name[self]._partList] | keyword[def] identifier[getPartitionList] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_partList] keyword[is] keyword[None] :
identifier[self] . identifier[_partList] =[]
keyword[for] ( identifier[disk] , identifier[parts] ) keyword[in] identifier[self] . identifier[getPartitionDict] (). identifier[iteritems] ():
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[self] . identifier[_partList] . identifier[append] (( identifier[disk] , identifier[part] ))
keyword[return] identifier[self] . identifier[_partList] | def getPartitionList(self):
"""Returns list of partitions.
@return: List of (disk,partition) pairs.
"""
if self._partList is None:
self._partList = []
for (disk, parts) in self.getPartitionDict().iteritems():
for part in parts:
self._partList.append((disk, part)) # depends on [control=['for'], data=['part']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return self._partList |
def check_output(self, cmd):
    """Wrapper for subprocess.check_output: run ``cmd`` and return its
    output, raising CommandError on a non-zero exit status."""
    status, output = self._exec(cmd)
    if status != 0:
        raise CommandError(self)
    return output
constant[Wrapper for subprocess.check_output.]
<ast.Tuple object at 0x7da1b26ac430> assign[=] call[name[self]._exec, parameter[name[cmd]]]
if <ast.UnaryOp object at 0x7da1b26ae200> begin[:]
<ast.Raise object at 0x7da1b26afe80>
return[name[output]] | keyword[def] identifier[check_output] ( identifier[self] , identifier[cmd] ):
literal[string]
identifier[ret] , identifier[output] = identifier[self] . identifier[_exec] ( identifier[cmd] )
keyword[if] keyword[not] identifier[ret] == literal[int] :
keyword[raise] identifier[CommandError] ( identifier[self] )
keyword[return] identifier[output] | def check_output(self, cmd):
"""Wrapper for subprocess.check_output."""
(ret, output) = self._exec(cmd)
if not ret == 0:
raise CommandError(self) # depends on [control=['if'], data=[]]
return output |
def get_jid(returner, jid):
    '''
    Return the information for a specified job id
    CLI Example:
    .. code-block:: bash
        salt '*' ret.get_jid redis 20421104181954700505
    '''
    # Load every configured returner module, then dispatch to the
    # requested one's get_jid function.
    loaded = salt.loader.returners(__opts__, __salt__)
    get_jid_fn = loaded['{0}.get_jid'.format(returner)]
    return get_jid_fn(jid)
return returners['{0}.get_jid'.format(returner)](jid) | def function[get_jid, parameter[returner, jid]]:
constant[
Return the information for a specified job id
CLI Example:
.. code-block:: bash
salt '*' ret.get_jid redis 20421104181954700505
]
variable[returners] assign[=] call[name[salt].loader.returners, parameter[name[__opts__], name[__salt__]]]
return[call[call[name[returners]][call[constant[{0}.get_jid].format, parameter[name[returner]]]], parameter[name[jid]]]] | keyword[def] identifier[get_jid] ( identifier[returner] , identifier[jid] ):
literal[string]
identifier[returners] = identifier[salt] . identifier[loader] . identifier[returners] ( identifier[__opts__] , identifier[__salt__] )
keyword[return] identifier[returners] [ literal[string] . identifier[format] ( identifier[returner] )]( identifier[jid] ) | def get_jid(returner, jid):
"""
Return the information for a specified job id
CLI Example:
.. code-block:: bash
salt '*' ret.get_jid redis 20421104181954700505
"""
returners = salt.loader.returners(__opts__, __salt__)
return returners['{0}.get_jid'.format(returner)](jid) |
def apply(self, func, axis=0, subset=None, **kwargs):
    """
    Apply a function column-wise, row-wise, or table-wise,
    updating the HTML representation with the result.
    Parameters
    ----------
    func : function
        ``func`` should take a Series or DataFrame (depending
        on ``axis``), and return an object with the same shape.
        Must return a DataFrame with identical index and
        column labels when ``axis=None``
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        apply to each column (``axis=0`` or ``'index'``), to each row
        (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
        with ``axis=None``.
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``
    Returns
    -------
    self : Styler
    Notes
    -----
    The output shape of ``func`` should match the input, i.e. if
    ``x`` is the input row, column, or table (depending on ``axis``),
    then ``func(x).shape == x.shape`` should be true.
    This is similar to ``DataFrame.apply``, except that ``axis=None``
    applies the function to the entire DataFrame at once,
    rather than column-wise or row-wise.
    Examples
    --------
    >>> def highlight_max(x):
    ...     return ['background-color: yellow' if v == x.max() else ''
                    for v in x]
    ...
    >>> df = pd.DataFrame(np.random.randn(5, 2))
    >>> df.style.apply(highlight_max)
    """
    # Styling is lazy: queue a deferred call to ``_apply`` with its
    # arguments; it runs when the styles are actually rendered.
    bound_method = lambda instance: getattr(instance, '_apply')
    task = (bound_method, (func, axis, subset), kwargs)
    self._todo.append(task)
    return self
constant[
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
... return ['background-color: yellow' if v == x.max() else ''
for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
]
call[name[self]._todo.append, parameter[tuple[[<ast.Lambda object at 0x7da18c4cef80>, <ast.Tuple object at 0x7da18c4cfdf0>, <ast.Name object at 0x7da18c4cc3a0>]]]]
return[name[self]] | keyword[def] identifier[apply] ( identifier[self] , identifier[func] , identifier[axis] = literal[int] , identifier[subset] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_todo] . identifier[append] (( keyword[lambda] identifier[instance] : identifier[getattr] ( identifier[instance] , literal[string] ),
( identifier[func] , identifier[axis] , identifier[subset] ), identifier[kwargs] ))
keyword[return] identifier[self] | def apply(self, func, axis=0, subset=None, **kwargs):
"""
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
... return ['background-color: yellow' if v == x.max() else ''
for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
"""
self._todo.append((lambda instance: getattr(instance, '_apply'), (func, axis, subset), kwargs))
return self |
def parse_value_instancewithpath(self, tup_tree):
    """
    Parse a VALUE.INSTANCEWITHPATH element: a single CIMInstance together
    with the information defining its absolute object path.
    ::
        <!ELEMENT VALUE.INSTANCEWITHPATH (INSTANCEPATH, INSTANCE)>

    Returns the parsed CIMInstance with its ``path`` attribute set.
    Raises CIMXMLParseError when the element does not have exactly the
    two required children.
    """
    self.check_node(tup_tree, 'VALUE.INSTANCEWITHPATH')
    children = kids(tup_tree)
    if len(children) != 2:
        raise CIMXMLParseError(
            _format("Element {0!A} has invalid number of child elements "
                    "{1!A} (expecting two child elements "
                    "(INSTANCEPATH, INSTANCE))",
                    name(tup_tree), children),
            conn_id=self.conn_id)
    path_child, instance_child = children
    # Parse the path first, matching the DTD child order.
    parsed_path = self.parse_instancepath(path_child)
    parsed_instance = self.parse_instance(instance_child)
    parsed_instance.path = parsed_path
    return parsed_instance
constant[
The VALUE.INSTANCEWITHPATH is used to define a value that comprises
a single CIMInstance with additional information that defines the
absolute path to that object.
::
<!ELEMENT VALUE.INSTANCEWITHPATH (INSTANCEPATH, INSTANCE)>
]
call[name[self].check_node, parameter[name[tup_tree], constant[VALUE.INSTANCEWITHPATH]]]
variable[k] assign[=] call[name[kids], parameter[name[tup_tree]]]
if compare[call[name[len], parameter[name[k]]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da18bcc94e0>
variable[inst_path] assign[=] call[name[self].parse_instancepath, parameter[call[name[k]][constant[0]]]]
variable[instance] assign[=] call[name[self].parse_instance, parameter[call[name[k]][constant[1]]]]
name[instance].path assign[=] name[inst_path]
return[name[instance]] | keyword[def] identifier[parse_value_instancewithpath] ( identifier[self] , identifier[tup_tree] ):
literal[string]
identifier[self] . identifier[check_node] ( identifier[tup_tree] , literal[string] )
identifier[k] = identifier[kids] ( identifier[tup_tree] )
keyword[if] identifier[len] ( identifier[k] )!= literal[int] :
keyword[raise] identifier[CIMXMLParseError] (
identifier[_format] ( literal[string]
literal[string]
literal[string] ,
identifier[name] ( identifier[tup_tree] ), identifier[k] ),
identifier[conn_id] = identifier[self] . identifier[conn_id] )
identifier[inst_path] = identifier[self] . identifier[parse_instancepath] ( identifier[k] [ literal[int] ])
identifier[instance] = identifier[self] . identifier[parse_instance] ( identifier[k] [ literal[int] ])
identifier[instance] . identifier[path] = identifier[inst_path]
keyword[return] identifier[instance] | def parse_value_instancewithpath(self, tup_tree):
"""
The VALUE.INSTANCEWITHPATH is used to define a value that comprises
a single CIMInstance with additional information that defines the
absolute path to that object.
::
<!ELEMENT VALUE.INSTANCEWITHPATH (INSTANCEPATH, INSTANCE)>
"""
self.check_node(tup_tree, 'VALUE.INSTANCEWITHPATH')
k = kids(tup_tree)
if len(k) != 2:
raise CIMXMLParseError(_format('Element {0!A} has invalid number of child elements {1!A} (expecting two child elements (INSTANCEPATH, INSTANCE))', name(tup_tree), k), conn_id=self.conn_id) # depends on [control=['if'], data=[]]
inst_path = self.parse_instancepath(k[0])
instance = self.parse_instance(k[1])
instance.path = inst_path
return instance |
def select_file(self, edit, filters=None):
    """Open a file-selection dialog and put the chosen path into *edit*.

    The dialog starts in the directory of the path currently shown in
    *edit*, falling back to the cwd/home directory when that is not a
    valid directory. *filters* defaults to showing all files. Nothing
    happens when the user cancels the dialog.
    """
    start_dir = osp.dirname(to_text_string(edit.text()))
    if not osp.isdir(start_dir):
        start_dir = getcwd_or_home()
    if filters is None:
        filters = _("All files (*)")
    selected, _selfilter = getopenfilename(
        self, _("Select file"), start_dir, filters)
    if selected:
        edit.setText(selected)
constant[Select File]
variable[basedir] assign[=] call[name[osp].dirname, parameter[call[name[to_text_string], parameter[call[name[edit].text, parameter[]]]]]]
if <ast.UnaryOp object at 0x7da18f00cfd0> begin[:]
variable[basedir] assign[=] call[name[getcwd_or_home], parameter[]]
if compare[name[filters] is constant[None]] begin[:]
variable[filters] assign[=] call[name[_], parameter[constant[All files (*)]]]
variable[title] assign[=] call[name[_], parameter[constant[Select file]]]
<ast.Tuple object at 0x7da20e957790> assign[=] call[name[getopenfilename], parameter[name[self], name[title], name[basedir], name[filters]]]
if name[filename] begin[:]
call[name[edit].setText, parameter[name[filename]]] | keyword[def] identifier[select_file] ( identifier[self] , identifier[edit] , identifier[filters] = keyword[None] ):
literal[string]
identifier[basedir] = identifier[osp] . identifier[dirname] ( identifier[to_text_string] ( identifier[edit] . identifier[text] ()))
keyword[if] keyword[not] identifier[osp] . identifier[isdir] ( identifier[basedir] ):
identifier[basedir] = identifier[getcwd_or_home] ()
keyword[if] identifier[filters] keyword[is] keyword[None] :
identifier[filters] = identifier[_] ( literal[string] )
identifier[title] = identifier[_] ( literal[string] )
identifier[filename] , identifier[_selfilter] = identifier[getopenfilename] ( identifier[self] , identifier[title] , identifier[basedir] , identifier[filters] )
keyword[if] identifier[filename] :
identifier[edit] . identifier[setText] ( identifier[filename] ) | def select_file(self, edit, filters=None):
"""Select File"""
basedir = osp.dirname(to_text_string(edit.text()))
if not osp.isdir(basedir):
basedir = getcwd_or_home() # depends on [control=['if'], data=[]]
if filters is None:
filters = _('All files (*)') # depends on [control=['if'], data=['filters']]
title = _('Select file')
(filename, _selfilter) = getopenfilename(self, title, basedir, filters)
if filename:
edit.setText(filename) # depends on [control=['if'], data=[]] |
def reverse_list_recursive(head):
    """Reverse a singly linked list recursively.

    :type head: ListNode
    :rtype: ListNode -- the new head (the old tail), or ``head`` itself
        for an empty or single-node list.
    """
    # Base cases: an empty list or a single node is its own reversal.
    if head is None or head.next is None:
        return head
    rest = head.next
    # Detach the current head; it becomes the tail of the reversed list.
    head.next = None
    new_head = reverse_list_recursive(rest)
    # The old second node is now the reversed sublist's tail -- hook the
    # old head onto it.
    rest.next = head
    return new_head
constant[
:type head: ListNode
:rtype: ListNode
]
if <ast.BoolOp object at 0x7da1b1e984c0> begin[:]
return[name[head]]
variable[p] assign[=] name[head].next
name[head].next assign[=] constant[None]
variable[revrest] assign[=] call[name[reverse_list_recursive], parameter[name[p]]]
name[p].next assign[=] name[head]
return[name[revrest]] | keyword[def] identifier[reverse_list_recursive] ( identifier[head] ):
literal[string]
keyword[if] identifier[head] keyword[is] keyword[None] keyword[or] identifier[head] . identifier[next] keyword[is] keyword[None] :
keyword[return] identifier[head]
identifier[p] = identifier[head] . identifier[next]
identifier[head] . identifier[next] = keyword[None]
identifier[revrest] = identifier[reverse_list_recursive] ( identifier[p] )
identifier[p] . identifier[next] = identifier[head]
keyword[return] identifier[revrest] | def reverse_list_recursive(head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None or head.next is None:
return head # depends on [control=['if'], data=[]]
p = head.next
head.next = None
revrest = reverse_list_recursive(p)
p.next = head
return revrest |
def warsaw_up_to_warsaw(C, parameters=None, sectors=None):
    """Translate from the 'Warsaw up' basis to the Warsaw basis.

    Parameters used:
    - `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix
      (defined as the mismatch between left-handed quark mass matrix
      diagonalization matrices).
    """
    arrays = smeftutil.wcxf2arrays_symmetrized(C)
    par = default_parameters.copy()
    if parameters is not None:
        # Caller-supplied parameter values override the defaults.
        par.update(parameters)
    # Only the left-handed quark fields are rotated (by the CKM matrix);
    # all other rotation matrices are the identity.
    identity = np.eye(3)
    ckm = ckmutil.ckm.ckm_tree(par["Vus"], par["Vub"], par["Vcb"],
                               par["delta"])
    rotated = smeftutil.flavor_rotation(arrays, ckm, identity, identity,
                                        identity, identity)
    wcs = smeftutil.arrays2wcxf_nonred(rotated)
    # Restrict the output to coefficients defined in the Warsaw basis;
    # a set makes the membership tests O(1).
    allowed = set(wcxf.Basis['SMEFT', 'Warsaw'].all_wcs)
    return {key: val for key, val in wcs.items() if key in allowed}
constant[Translate from the 'Warsaw up' basis to the Warsaw basis.
Parameters used:
- `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined
as the mismatch between left-handed quark mass matrix diagonalization
matrices).
]
variable[C_in] assign[=] call[name[smeftutil].wcxf2arrays_symmetrized, parameter[name[C]]]
variable[p] assign[=] call[name[default_parameters].copy, parameter[]]
if compare[name[parameters] is_not constant[None]] begin[:]
call[name[p].update, parameter[name[parameters]]]
variable[Uu] assign[=] call[name[np].eye, parameter[constant[3]]]
variable[V] assign[=] call[name[ckmutil].ckm.ckm_tree, parameter[call[name[p]][constant[Vus]], call[name[p]][constant[Vub]], call[name[p]][constant[Vcb]], call[name[p]][constant[delta]]]]
variable[Uq] assign[=] name[V]
variable[C_out] assign[=] call[name[smeftutil].flavor_rotation, parameter[name[C_in], name[Uq], name[Uu], name[Ud], name[Ul], name[Ue]]]
variable[C_out] assign[=] call[name[smeftutil].arrays2wcxf_nonred, parameter[name[C_out]]]
variable[warsaw] assign[=] call[name[wcxf].Basis][tuple[[<ast.Constant object at 0x7da1b1b69000>, <ast.Constant object at 0x7da1b1b695a0>]]]
variable[all_wcs] assign[=] call[name[set], parameter[name[warsaw].all_wcs]]
return[<ast.DictComp object at 0x7da1b1b6ace0>] | keyword[def] identifier[warsaw_up_to_warsaw] ( identifier[C] , identifier[parameters] = keyword[None] , identifier[sectors] = keyword[None] ):
literal[string]
identifier[C_in] = identifier[smeftutil] . identifier[wcxf2arrays_symmetrized] ( identifier[C] )
identifier[p] = identifier[default_parameters] . identifier[copy] ()
keyword[if] identifier[parameters] keyword[is] keyword[not] keyword[None] :
identifier[p] . identifier[update] ( identifier[parameters] )
identifier[Uu] = identifier[Ud] = identifier[Ul] = identifier[Ue] = identifier[np] . identifier[eye] ( literal[int] )
identifier[V] = identifier[ckmutil] . identifier[ckm] . identifier[ckm_tree] ( identifier[p] [ literal[string] ], identifier[p] [ literal[string] ], identifier[p] [ literal[string] ], identifier[p] [ literal[string] ])
identifier[Uq] = identifier[V]
identifier[C_out] = identifier[smeftutil] . identifier[flavor_rotation] ( identifier[C_in] , identifier[Uq] , identifier[Uu] , identifier[Ud] , identifier[Ul] , identifier[Ue] )
identifier[C_out] = identifier[smeftutil] . identifier[arrays2wcxf_nonred] ( identifier[C_out] )
identifier[warsaw] = identifier[wcxf] . identifier[Basis] [ literal[string] , literal[string] ]
identifier[all_wcs] = identifier[set] ( identifier[warsaw] . identifier[all_wcs] )
keyword[return] { identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[C_out] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[all_wcs] } | def warsaw_up_to_warsaw(C, parameters=None, sectors=None):
"""Translate from the 'Warsaw up' basis to the Warsaw basis.
Parameters used:
- `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined
as the mismatch between left-handed quark mass matrix diagonalization
matrices).
"""
C_in = smeftutil.wcxf2arrays_symmetrized(C)
p = default_parameters.copy()
if parameters is not None:
# if parameters are passed in, overwrite the default values
p.update(parameters) # depends on [control=['if'], data=['parameters']]
Uu = Ud = Ul = Ue = np.eye(3)
V = ckmutil.ckm.ckm_tree(p['Vus'], p['Vub'], p['Vcb'], p['delta'])
Uq = V
C_out = smeftutil.flavor_rotation(C_in, Uq, Uu, Ud, Ul, Ue)
C_out = smeftutil.arrays2wcxf_nonred(C_out)
warsaw = wcxf.Basis['SMEFT', 'Warsaw']
all_wcs = set(warsaw.all_wcs) # to speed up lookup
return {k: v for (k, v) in C_out.items() if k in all_wcs} |
def _scroll_down(self, cli):
    """Scroll the window content down by one line.

    Does nothing when the last content line is already visible. When
    the cursor sits inside the configured top scroll-offset margin it
    is moved down along with the scroll so it stays visible.
    """
    info = self.render_info
    # Already showing the bottom of the content: nothing to scroll.
    if info.content_height - info.window_height <= self.vertical_scroll:
        return
    if info.cursor_position.y <= info.configured_scroll_offsets.top:
        self.content.move_cursor_down(cli)
    self.vertical_scroll += 1
constant[ Scroll window down. ]
variable[info] assign[=] name[self].render_info
if compare[name[self].vertical_scroll less[<] binary_operation[name[info].content_height - name[info].window_height]] begin[:]
if compare[name[info].cursor_position.y less_or_equal[<=] name[info].configured_scroll_offsets.top] begin[:]
call[name[self].content.move_cursor_down, parameter[name[cli]]]
<ast.AugAssign object at 0x7da2043459c0> | keyword[def] identifier[_scroll_down] ( identifier[self] , identifier[cli] ):
literal[string]
identifier[info] = identifier[self] . identifier[render_info]
keyword[if] identifier[self] . identifier[vertical_scroll] < identifier[info] . identifier[content_height] - identifier[info] . identifier[window_height] :
keyword[if] identifier[info] . identifier[cursor_position] . identifier[y] <= identifier[info] . identifier[configured_scroll_offsets] . identifier[top] :
identifier[self] . identifier[content] . identifier[move_cursor_down] ( identifier[cli] )
identifier[self] . identifier[vertical_scroll] += literal[int] | def _scroll_down(self, cli):
""" Scroll window down. """
info = self.render_info
if self.vertical_scroll < info.content_height - info.window_height:
if info.cursor_position.y <= info.configured_scroll_offsets.top:
self.content.move_cursor_down(cli) # depends on [control=['if'], data=[]]
self.vertical_scroll += 1 # depends on [control=['if'], data=[]] |
def sanitize(self):
    '''
    Check if the current settings conform to the LISP specifications and
    fix where possible.

    Raises ValueError for any field that cannot be fixed in place.
    '''
    # Record TTL: 32-bit unsigned minutes value. 0 means "remove the
    # entry immediately", 0xffffffff means "receiver decides locally".
    if not isinstance(self.ttl, numbers.Integral) \
            or not 0 <= self.ttl <= 0xffffffff:
        raise ValueError('Invalid TTL')
    # ACT: 3-bit negative Map-Reply action, meaningful only when the
    # record carries no locators. With locators present the action is
    # forced to No-Action. Defined values: 0 No-Action, 1
    # Natively-Forward, 2 Send-Map-Request, 3 Drop.
    if self.locator_records:
        self.action = self.ACT_NO_ACTION
    valid_actions = (self.ACT_NO_ACTION,
                     self.ACT_NATIVELY_FORWARD,
                     self.ACT_SEND_MAP_REQUEST,
                     self.ACT_DROP)
    if self.action not in valid_actions:
        raise ValueError('Invalid Negative Map-Reply action')
    # A: Authoritative bit. 1 when sent by an ETR, 0 when a Map-Server
    # proxy-replies for a LISP site.
    if not isinstance(self.authoritative, bool):
        raise ValueError('Authoritative flag must be a boolean')
    # Map-Version Number: 12-bit value; 0 conveys no versioning
    # information.
    if not isinstance(self.map_version, numbers.Integral) \
            or not 0 <= self.map_version < 2 ** 12:
        raise ValueError('Invalid map version')
    # EID-prefix: IPv4 (4 octets) or IPv6 (16 octets) address family.
    if not isinstance(self.eid_prefix, (IPv4Network, IPv6Network)):
        raise ValueError('EID prefix must be IPv4 or IPv6')
    # Validate every locator record, then sort the Locator-Set in
    # ascending order (IPv4 locators sort below IPv6) -- the spec
    # requires every ETR originating a Map-Reply to use an identical
    # ordering.
    for locator in self.locator_records:
        if not isinstance(locator, LocatorRecord):
            raise ValueError('Invalid Locator record')
        locator.sanitize()
    self.locator_records.sort(key=LocatorRecord.sort_key)
constant[
Check if the current settings conform to the LISP specifications and
fix where possible.
]
if <ast.BoolOp object at 0x7da1b0a81690> begin[:]
<ast.Raise object at 0x7da1b0a80fa0>
if name[self].locator_records begin[:]
name[self].action assign[=] name[self].ACT_NO_ACTION
if compare[name[self].action <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Attribute object at 0x7da1b0a83c10>, <ast.Attribute object at 0x7da1b0a81750>, <ast.Attribute object at 0x7da1b0a83070>, <ast.Attribute object at 0x7da1b0a81210>]]] begin[:]
<ast.Raise object at 0x7da1b0a80460>
if <ast.UnaryOp object at 0x7da1b0a839d0> begin[:]
<ast.Raise object at 0x7da1b0a82830>
if <ast.BoolOp object at 0x7da1b0a82860> begin[:]
<ast.Raise object at 0x7da1b0a82980>
if <ast.UnaryOp object at 0x7da1b0a81b70> begin[:]
<ast.Raise object at 0x7da1b0a80a60>
for taget[name[locator_record]] in starred[name[self].locator_records] begin[:]
if <ast.UnaryOp object at 0x7da1b0a80ca0> begin[:]
<ast.Raise object at 0x7da1b0a83190>
call[name[locator_record].sanitize, parameter[]]
call[name[self].locator_records.sort, parameter[]] | keyword[def] identifier[sanitize] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[ttl] , identifier[numbers] . identifier[Integral] ) keyword[or] identifier[self] . identifier[ttl] < literal[int] keyword[or] identifier[self] . identifier[ttl] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[locator_records] :
identifier[self] . identifier[action] = identifier[self] . identifier[ACT_NO_ACTION]
keyword[if] identifier[self] . identifier[action] keyword[not] keyword[in] ( identifier[self] . identifier[ACT_NO_ACTION] ,
identifier[self] . identifier[ACT_NATIVELY_FORWARD] ,
identifier[self] . identifier[ACT_SEND_MAP_REQUEST] ,
identifier[self] . identifier[ACT_DROP] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[authoritative] , identifier[bool] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[map_version] , identifier[numbers] . identifier[Integral] ) keyword[or] identifier[self] . identifier[map_version] < literal[int] keyword[or] identifier[self] . identifier[map_version] >= literal[int] ** literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[eid_prefix] ,( identifier[IPv4Network] , identifier[IPv6Network] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[locator_record] keyword[in] identifier[self] . identifier[locator_records] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[locator_record] , identifier[LocatorRecord] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[locator_record] . identifier[sanitize] ()
identifier[self] . identifier[locator_records] . identifier[sort] ( identifier[key] = identifier[LocatorRecord] . identifier[sort_key] ) | def sanitize(self):
"""
Check if the current settings conform to the LISP specifications and
fix where possible.
"""
# Record TTL: The time in minutes the recipient of the Map-Reply will
# store the mapping. If the TTL is 0, the entry SHOULD be removed
# from the cache immediately. If the value is 0xffffffff, the
# recipient can decide locally how long to store the mapping.
if not isinstance(self.ttl, numbers.Integral) or self.ttl < 0 or self.ttl > 4294967295:
raise ValueError('Invalid TTL') # depends on [control=['if'], data=[]]
# ACT: This 3-bit field describes negative Map-Reply actions. In any
# other message type, these bits are set to 0 and ignored on
# receipt. These bits are used only when the 'Locator Count' field
# is set to 0. The action bits are encoded only in Map-Reply
# messages. The actions defined are used by an ITR or PITR when a
# destination EID matches a negative mapping cache entry.
# Unassigned values should cause a map-cache entry to be created
# and, when packets match this negative cache entry, they will be
# dropped. The current assigned values are:
#
# (0) No-Action: The map-cache is kept alive and no packet
# encapsulation occurs.
#
# (1) Natively-Forward: The packet is not encapsulated or dropped
# but natively forwarded.
#
# (2) Send-Map-Request: The packet invokes sending a Map-Request.
#
# (3) Drop: A packet that matches this map-cache entry is dropped.
# An ICMP Unreachable message SHOULD be sent.
if self.locator_records:
self.action = self.ACT_NO_ACTION # depends on [control=['if'], data=[]]
if self.action not in (self.ACT_NO_ACTION, self.ACT_NATIVELY_FORWARD, self.ACT_SEND_MAP_REQUEST, self.ACT_DROP):
raise ValueError('Invalid Negative Map-Reply action') # depends on [control=['if'], data=[]]
# A: The Authoritative bit, when sent is always set to 1 by an ETR.
# When a Map-Server is proxy Map-Replying [LISP-MS] for a LISP site,
# the Authoritative bit is set to 0. This indicates to requesting
# ITRs that the Map-Reply was not originated by a LISP node managed
# at the site that owns the EID-prefix.
if not isinstance(self.authoritative, bool):
raise ValueError('Authoritative flag must be a boolean') # depends on [control=['if'], data=[]]
# Map-Version Number: When this 12-bit value is non-zero the Map-Reply
# sender is informing the ITR what the version number is for the
# EID-record contained in the Map-Reply. The ETR can allocate this
# number internally but MUST coordinate this value with other ETRs
# for the site. When this value is 0, there is no versioning
# information conveyed. The Map-Version Number can be included in
# Map-Request and Map-Register messages. See Section 6.6.3 for more
# details.
if not isinstance(self.map_version, numbers.Integral) or self.map_version < 0 or self.map_version >= 2 ** 12:
raise ValueError('Invalid map version') # depends on [control=['if'], data=[]]
# EID-prefix: 4 octets if an IPv4 address-family, 16 octets if an IPv6
# address-family.
if not isinstance(self.eid_prefix, (IPv4Network, IPv6Network)):
raise ValueError('EID prefix must be IPv4 or IPv6') # depends on [control=['if'], data=[]]
# Check locator records
for locator_record in self.locator_records:
if not isinstance(locator_record, LocatorRecord):
raise ValueError('Invalid Locator record') # depends on [control=['if'], data=[]]
locator_record.sanitize() # depends on [control=['for'], data=['locator_record']]
# For each Map-Reply record, the list of Locators in a Locator-Set MUST
# appear in the same order for each ETR that originates a Map-Reply
# message. The Locator-Set MUST be sorted in order of ascending IP
# address where an IPv4 locator address is considered numerically 'less
# than' an IPv6 locator address.
self.locator_records.sort(key=LocatorRecord.sort_key) |
def sendfile(self, data, zlib_compress=None, compress_level=6):
    """Send data from a file object.

    :param data: file-like object to read from; rewound to the start
        first when it supports ``seek()``.
    :param zlib_compress: when truthy, the stream is deflated with zlib
        before being sent.
    :param compress_level: zlib compression level (1-9, default 6),
        used only when *zlib_compress* is set.
    """
    if hasattr(data, 'seek'):
        data.seek(0)
    chunk_size = CHUNK_SIZE
    compressor = None
    if zlib_compress:
        chunk_size = BLOCK_SIZE
        compressor = compressobj(compress_level)
    while True:
        binarydata = data.read(chunk_size)
        # Bug fix: the original compared against '' to detect EOF, which
        # never matches the b'' returned by binary streams in Python 3
        # and caused an infinite loop. A truthiness test covers both
        # '' (text mode) and b'' (binary mode).
        if not binarydata:
            break
        if compressor is not None:
            binarydata = compressor.compress(binarydata)
            if not binarydata:
                # The compressor buffered the input; nothing to send yet.
                continue
        self.send(binarydata)
    if compressor is not None:
        # Flush whatever the compressor still holds, in BLOCK_SIZE chunks.
        remaining = compressor.flush()
        while remaining:
            self.send(remaining[:BLOCK_SIZE])
            remaining = remaining[BLOCK_SIZE:]
constant[Send data from a file object]
if call[name[hasattr], parameter[name[data], constant[seek]]] begin[:]
call[name[data].seek, parameter[constant[0]]]
variable[chunk_size] assign[=] name[CHUNK_SIZE]
if name[zlib_compress] begin[:]
variable[chunk_size] assign[=] name[BLOCK_SIZE]
variable[compressor] assign[=] call[name[compressobj], parameter[name[compress_level]]]
while constant[1] begin[:]
variable[binarydata] assign[=] call[name[data].read, parameter[name[chunk_size]]]
if compare[name[binarydata] equal[==] constant[]] begin[:]
break
if name[zlib_compress] begin[:]
variable[binarydata] assign[=] call[name[compressor].compress, parameter[name[binarydata]]]
if <ast.UnaryOp object at 0x7da1b0a2d720> begin[:]
continue
call[name[self].send, parameter[name[binarydata]]]
if name[zlib_compress] begin[:]
variable[remaining] assign[=] call[name[compressor].flush, parameter[]]
while name[remaining] begin[:]
variable[binarydata] assign[=] call[name[remaining]][<ast.Slice object at 0x7da1b0a2f0d0>]
variable[remaining] assign[=] call[name[remaining]][<ast.Slice object at 0x7da1b0a2e980>]
call[name[self].send, parameter[name[binarydata]]] | keyword[def] identifier[sendfile] ( identifier[self] , identifier[data] , identifier[zlib_compress] = keyword[None] , identifier[compress_level] = literal[int] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[data] . identifier[seek] ( literal[int] )
identifier[chunk_size] = identifier[CHUNK_SIZE]
keyword[if] identifier[zlib_compress] :
identifier[chunk_size] = identifier[BLOCK_SIZE]
identifier[compressor] = identifier[compressobj] ( identifier[compress_level] )
keyword[while] literal[int] :
identifier[binarydata] = identifier[data] . identifier[read] ( identifier[chunk_size] )
keyword[if] identifier[binarydata] == literal[string] :
keyword[break]
keyword[if] identifier[zlib_compress] :
identifier[binarydata] = identifier[compressor] . identifier[compress] ( identifier[binarydata] )
keyword[if] keyword[not] identifier[binarydata] :
keyword[continue]
identifier[self] . identifier[send] ( identifier[binarydata] )
keyword[if] identifier[zlib_compress] :
identifier[remaining] = identifier[compressor] . identifier[flush] ()
keyword[while] identifier[remaining] :
identifier[binarydata] = identifier[remaining] [: identifier[BLOCK_SIZE] ]
identifier[remaining] = identifier[remaining] [ identifier[BLOCK_SIZE] :]
identifier[self] . identifier[send] ( identifier[binarydata] ) | def sendfile(self, data, zlib_compress=None, compress_level=6):
"""Send data from a file object"""
if hasattr(data, 'seek'):
data.seek(0) # depends on [control=['if'], data=[]]
chunk_size = CHUNK_SIZE
if zlib_compress:
chunk_size = BLOCK_SIZE
compressor = compressobj(compress_level) # depends on [control=['if'], data=[]]
while 1:
binarydata = data.read(chunk_size)
if binarydata == '':
break # depends on [control=['if'], data=[]]
if zlib_compress:
binarydata = compressor.compress(binarydata)
if not binarydata:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.send(binarydata) # depends on [control=['while'], data=[]]
if zlib_compress:
remaining = compressor.flush()
while remaining:
binarydata = remaining[:BLOCK_SIZE]
remaining = remaining[BLOCK_SIZE:]
self.send(binarydata) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] |
def change_by(cls, name, num):
    """Change counter of ``name`` by ``num`` (can be negative).

    Raises CounterValueError when the change would make the counter
    negative; otherwise atomically increments (upserting if needed)
    and returns the new value.
    """
    # NOTE(review): this pre-check races with concurrent updates -- the
    # count may change between cls.count() and find_and_modify(); confirm
    # callers tolerate an occasionally stale check.
    if cls.count(name) + num < 0:
        raise CounterValueError('Counter[%s] will be negative '
                                'after %+d.' % (name, num))
    updated = cls.collection.find_and_modify(
        {'name': name},
        {'$inc': {'seq': num}},
        new=True,
        upsert=True,
    )
    return updated['seq']
constant[Change counter of ``name`` by ``num`` (can be negative).]
variable[count] assign[=] call[name[cls].count, parameter[name[name]]]
if compare[binary_operation[name[count] + name[num]] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da20c6a9480>
variable[counter] assign[=] call[name[cls].collection.find_and_modify, parameter[dictionary[[<ast.Constant object at 0x7da20c6aabc0>], [<ast.Name object at 0x7da20c6aabf0>]], dictionary[[<ast.Constant object at 0x7da20c6a8e20>], [<ast.Dict object at 0x7da20c6ab4f0>]]]]
return[call[name[counter]][constant[seq]]] | keyword[def] identifier[change_by] ( identifier[cls] , identifier[name] , identifier[num] ):
literal[string]
identifier[count] = identifier[cls] . identifier[count] ( identifier[name] )
keyword[if] identifier[count] + identifier[num] < literal[int] :
keyword[raise] identifier[CounterValueError] ( literal[string]
literal[string] %( identifier[name] , identifier[num] ))
identifier[counter] = identifier[cls] . identifier[collection] . identifier[find_and_modify] (
{ literal[string] : identifier[name] },
{ literal[string] :{ literal[string] : identifier[num] }},
identifier[new] = keyword[True] ,
identifier[upsert] = keyword[True]
)
keyword[return] identifier[counter] [ literal[string] ] | def change_by(cls, name, num):
"""Change counter of ``name`` by ``num`` (can be negative)."""
count = cls.count(name)
if count + num < 0:
raise CounterValueError('Counter[%s] will be negative after %+d.' % (name, num)) # depends on [control=['if'], data=[]]
counter = cls.collection.find_and_modify({'name': name}, {'$inc': {'seq': num}}, new=True, upsert=True)
return counter['seq'] |
def temp_directory(*args, **kwargs):
    """
    Context manager yielding a fresh directory created with
    ``tempfile.mkdtemp(*args, **kwargs)``; the directory tree is removed
    on exit, even when the managed block raises.
    """
    tmp_path = tempfile.mkdtemp(*args, **kwargs)
    try:
        yield tmp_path
    finally:
        # Always clean up, whether the body finished or raised.
        shutil.rmtree(tmp_path)
shutil.rmtree(path) | def function[temp_directory, parameter[]]:
constant[
Context manager returns a path created by mkdtemp and cleans it up afterwards.
]
variable[path] assign[=] call[name[tempfile].mkdtemp, parameter[<ast.Starred object at 0x7da1b1473910>]]
<ast.Try object at 0x7da1b1470250> | keyword[def] identifier[temp_directory] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[path] = identifier[tempfile] . identifier[mkdtemp] (* identifier[args] ,** identifier[kwargs] )
keyword[try] :
keyword[yield] identifier[path]
keyword[finally] :
identifier[shutil] . identifier[rmtree] ( identifier[path] ) | def temp_directory(*args, **kwargs):
"""
Context manager returns a path created by mkdtemp and cleans it up afterwards.
"""
path = tempfile.mkdtemp(*args, **kwargs)
try:
yield path # depends on [control=['try'], data=[]]
finally:
shutil.rmtree(path) |
def render(hjson_data, saltenv='base', sls='', **kws):
    '''
    Accepts HJSON as a string or as a file object and runs it through the
    HJSON parser.

    :rtype: A Python data structure (``{}`` for effectively empty input)
    '''
    # File-like input: read the whole text out first.
    if not isinstance(hjson_data, six.string_types):
        hjson_data = hjson_data.read()
    # Strip a leading shebang line ('#!...') before parsing.
    if hjson_data.startswith('#!'):
        newline_at = hjson_data.find('\n')
        hjson_data = hjson_data[newline_at + 1:]
    if not hjson_data.strip():
        return {}
    return hjson.loads(hjson_data)
constant[
Accepts HJSON as a string or as a file object and runs it through the HJSON
parser.
:rtype: A Python data structure
]
if <ast.UnaryOp object at 0x7da1b20249a0> begin[:]
variable[hjson_data] assign[=] call[name[hjson_data].read, parameter[]]
if call[name[hjson_data].startswith, parameter[constant[#!]]] begin[:]
variable[hjson_data] assign[=] call[name[hjson_data]][<ast.Slice object at 0x7da1b1f29ed0>]
if <ast.UnaryOp object at 0x7da1b1f29fc0> begin[:]
return[dictionary[[], []]]
return[call[name[hjson].loads, parameter[name[hjson_data]]]] | keyword[def] identifier[render] ( identifier[hjson_data] , identifier[saltenv] = literal[string] , identifier[sls] = literal[string] ,** identifier[kws] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[hjson_data] , identifier[six] . identifier[string_types] ):
identifier[hjson_data] = identifier[hjson_data] . identifier[read] ()
keyword[if] identifier[hjson_data] . identifier[startswith] ( literal[string] ):
identifier[hjson_data] = identifier[hjson_data] [( identifier[hjson_data] . identifier[find] ( literal[string] )+ literal[int] ):]
keyword[if] keyword[not] identifier[hjson_data] . identifier[strip] ():
keyword[return] {}
keyword[return] identifier[hjson] . identifier[loads] ( identifier[hjson_data] ) | def render(hjson_data, saltenv='base', sls='', **kws):
"""
Accepts HJSON as a string or as a file object and runs it through the HJSON
parser.
:rtype: A Python data structure
"""
if not isinstance(hjson_data, six.string_types):
hjson_data = hjson_data.read() # depends on [control=['if'], data=[]]
if hjson_data.startswith('#!'):
hjson_data = hjson_data[hjson_data.find('\n') + 1:] # depends on [control=['if'], data=[]]
if not hjson_data.strip():
return {} # depends on [control=['if'], data=[]]
return hjson.loads(hjson_data) |
def rdfgraph_to_ontol(rg):
    """
    Return an Ontology object from an rdflib graph object
    Status: Incomplete

    :param rg: rdflib Graph; only subjects typed owl:Class and their
        rdfs:label / rdfs:subClassOf triples are translated
    :return: Ontology whose payload wraps a networkx MultiDiGraph
    """
    digraph = networkx.MultiDiGraph()
    from rdflib.namespace import RDF
    # label_map is populated below but never read again here -- kept,
    # presumably for future use (function is marked Incomplete).
    label_map = {}
    # Visit every subject asserted to be an OWL class.
    for c in rg.subjects(RDF.type, OWL.Class):
        # contract_uri_wrap presumably shortens the full URI to a compact
        # id -- defined elsewhere in this module; TODO confirm failure mode.
        cid = contract_uri_wrap(c)
        logging.info("C={}".format(cid))
        # Record the label; if a class has several labels the last one wins.
        for lit in rg.objects(c, RDFS.label):
            label_map[cid] = lit.value
            digraph.add_node(cid, label=lit.value)
        # One edge per rdfs:subClassOf assertion, directed from the
        # superclass id to the subclass id.
        for s in rg.objects(c, RDFS.subClassOf):
            # todo - blank nodes
            sid = contract_uri_wrap(s)
            digraph.add_edge(sid, cid, pred='subClassOf')
    logging.info("G={}".format(digraph))
    payload = {
        'graph': digraph,
        #'xref_graph': xref_graph,
        #'graphdoc': obographdoc,
        #'logical_definitions': logical_definitions
    }
    # NOTE(review): handle is hard-coded to 'wd' (Wikidata?) -- confirm.
    ont = Ontology(handle='wd', payload=payload)
    return ont | def function[rdfgraph_to_ontol, parameter[rg]]:
constant[
Return an Ontology object from an rdflib graph object
Status: Incomplete
]
variable[digraph] assign[=] call[name[networkx].MultiDiGraph, parameter[]]
from relative_module[rdflib.namespace] import module[RDF]
variable[label_map] assign[=] dictionary[[], []]
for taget[name[c]] in starred[call[name[rg].subjects, parameter[name[RDF].type, name[OWL].Class]]] begin[:]
variable[cid] assign[=] call[name[contract_uri_wrap], parameter[name[c]]]
call[name[logging].info, parameter[call[constant[C={}].format, parameter[name[cid]]]]]
for taget[name[lit]] in starred[call[name[rg].objects, parameter[name[c], name[RDFS].label]]] begin[:]
call[name[label_map]][name[cid]] assign[=] name[lit].value
call[name[digraph].add_node, parameter[name[cid]]]
for taget[name[s]] in starred[call[name[rg].objects, parameter[name[c], name[RDFS].subClassOf]]] begin[:]
variable[sid] assign[=] call[name[contract_uri_wrap], parameter[name[s]]]
call[name[digraph].add_edge, parameter[name[sid], name[cid]]]
call[name[logging].info, parameter[call[constant[G={}].format, parameter[name[digraph]]]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b083eb60>], [<ast.Name object at 0x7da1b083c880>]]
variable[ont] assign[=] call[name[Ontology], parameter[]]
return[name[ont]] | keyword[def] identifier[rdfgraph_to_ontol] ( identifier[rg] ):
literal[string]
identifier[digraph] = identifier[networkx] . identifier[MultiDiGraph] ()
keyword[from] identifier[rdflib] . identifier[namespace] keyword[import] identifier[RDF]
identifier[label_map] ={}
keyword[for] identifier[c] keyword[in] identifier[rg] . identifier[subjects] ( identifier[RDF] . identifier[type] , identifier[OWL] . identifier[Class] ):
identifier[cid] = identifier[contract_uri_wrap] ( identifier[c] )
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[cid] ))
keyword[for] identifier[lit] keyword[in] identifier[rg] . identifier[objects] ( identifier[c] , identifier[RDFS] . identifier[label] ):
identifier[label_map] [ identifier[cid] ]= identifier[lit] . identifier[value]
identifier[digraph] . identifier[add_node] ( identifier[cid] , identifier[label] = identifier[lit] . identifier[value] )
keyword[for] identifier[s] keyword[in] identifier[rg] . identifier[objects] ( identifier[c] , identifier[RDFS] . identifier[subClassOf] ):
identifier[sid] = identifier[contract_uri_wrap] ( identifier[s] )
identifier[digraph] . identifier[add_edge] ( identifier[sid] , identifier[cid] , identifier[pred] = literal[string] )
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[digraph] ))
identifier[payload] ={
literal[string] : identifier[digraph] ,
}
identifier[ont] = identifier[Ontology] ( identifier[handle] = literal[string] , identifier[payload] = identifier[payload] )
keyword[return] identifier[ont] | def rdfgraph_to_ontol(rg):
"""
Return an Ontology object from an rdflib graph object
Status: Incomplete
"""
digraph = networkx.MultiDiGraph()
from rdflib.namespace import RDF
label_map = {}
for c in rg.subjects(RDF.type, OWL.Class):
cid = contract_uri_wrap(c)
logging.info('C={}'.format(cid))
for lit in rg.objects(c, RDFS.label):
label_map[cid] = lit.value
digraph.add_node(cid, label=lit.value) # depends on [control=['for'], data=['lit']]
for s in rg.objects(c, RDFS.subClassOf):
# todo - blank nodes
sid = contract_uri_wrap(s)
digraph.add_edge(sid, cid, pred='subClassOf') # depends on [control=['for'], data=['s']] # depends on [control=['for'], data=['c']]
logging.info('G={}'.format(digraph))
#'xref_graph': xref_graph,
#'graphdoc': obographdoc,
#'logical_definitions': logical_definitions
payload = {'graph': digraph}
ont = Ontology(handle='wd', payload=payload)
return ont |
def get_object_type_by_name(object_type_name):
    """Map a git object type name onto the class implementing it.

    :param object_type_name: Member of TYPES
    :return: type suitable to handle the given object type name.
        Use the type to create new instances.
    :raise ValueError: In case object_type_name is unknown"""
    # Imports are deferred into the matching branch, presumably to avoid
    # import cycles between this module and the object implementations.
    if object_type_name == b"commit":
        from . import commit
        return commit.Commit
    if object_type_name == b"tag":
        from . import tag
        return tag.TagObject
    if object_type_name == b"blob":
        from . import blob
        return blob.Blob
    if object_type_name == b"tree":
        from . import tree
        return tree.Tree
    # Nothing matched: reject the unknown type name.
    raise ValueError("Cannot handle unknown object type: %s" % object_type_name)
constant[
:return: type suitable to handle the given object type name.
Use the type to create new instances.
:param object_type_name: Member of TYPES
:raise ValueError: In case object_type_name is unknown]
if compare[name[object_type_name] equal[==] constant[b'commit']] begin[:]
from relative_module[None] import module[commit]
return[name[commit].Commit] | keyword[def] identifier[get_object_type_by_name] ( identifier[object_type_name] ):
literal[string]
keyword[if] identifier[object_type_name] == literal[string] :
keyword[from] . keyword[import] identifier[commit]
keyword[return] identifier[commit] . identifier[Commit]
keyword[elif] identifier[object_type_name] == literal[string] :
keyword[from] . keyword[import] identifier[tag]
keyword[return] identifier[tag] . identifier[TagObject]
keyword[elif] identifier[object_type_name] == literal[string] :
keyword[from] . keyword[import] identifier[blob]
keyword[return] identifier[blob] . identifier[Blob]
keyword[elif] identifier[object_type_name] == literal[string] :
keyword[from] . keyword[import] identifier[tree]
keyword[return] identifier[tree] . identifier[Tree]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[object_type_name] ) | def get_object_type_by_name(object_type_name):
"""
:return: type suitable to handle the given object type name.
Use the type to create new instances.
:param object_type_name: Member of TYPES
:raise ValueError: In case object_type_name is unknown"""
if object_type_name == b'commit':
from . import commit
return commit.Commit # depends on [control=['if'], data=[]]
elif object_type_name == b'tag':
from . import tag
return tag.TagObject # depends on [control=['if'], data=[]]
elif object_type_name == b'blob':
from . import blob
return blob.Blob # depends on [control=['if'], data=[]]
elif object_type_name == b'tree':
from . import tree
return tree.Tree # depends on [control=['if'], data=[]]
else:
raise ValueError('Cannot handle unknown object type: %s' % object_type_name) |
def read_frame(self):
        """Read each block after global header.

        Returns one decoded frame dict and leaves self._file pointing at an
        in-memory copy of just this packet's bytes.

        Structure of record/package header (C):
        typedef struct pcaprec_hdr_s {
        guint32 ts_sec; /* timestamp seconds */
        guint32 ts_usec; /* timestamp microseconds */
        guint32 incl_len; /* number of octets of packet saved in file */
        guint32 orig_len; /* actual length of packet */
        } pcaprec_hdr_t;
        """
        # _scur = self._file.tell()
        # A None read (quiet mode) means the file is exhausted -> end of capture.
        _temp = self._read_unpack(4, lilendian=True, quiet=True)
        if _temp is None:
            raise EOFError
        # Record header fields: timestamp seconds, fractional part, captured
        # length and original length.
        _tsss = _temp
        _tsus = self._read_unpack(4, lilendian=True)
        _ilen = self._read_unpack(4, lilendian=True)
        _olen = self._read_unpack(4, lilendian=True)
        # Fractional field is nanoseconds or microseconds depending on the
        # capture's resolution flag (self._nsec).
        if self._nsec:
            _epch = _tsss + _tsus / 1000000000
        else:
            _epch = _tsss + _tsus / 1000000
        _time = datetime.datetime.fromtimestamp(_epch)
        frame = dict(
            frame_info=dict(
                ts_sec=_tsss,
                ts_usec=_tsus,
                incl_len=_ilen,
                orig_len=_olen,
            ),
            time=_time,
            number=self._fnum,
            time_epoch=_epch,
            # NOTE(review): 'len' is set from incl_len and 'cap_len' from
            # orig_len; the usual pcap convention is the reverse -- confirm.
            len=_ilen,
            cap_len=_olen,
        )
        # load packet data
        length = frame['len']
        bytes_ = self._file.read(length)
        # record file pointer
        # Multiprocessing mode: publish the offset of the next record so
        # another worker can resume reading there (presumed -- confirm).
        if self._mpkt and self._mpfp:
            # print(self._fnum, 'ready')
            self._mpfp.put(self._file.tell())
            self._mpkt.pool += 1
        # make BytesIO from frame packet data
        frame['packet'] = bytes_
        self._file = io.BytesIO(bytes_)
        # frame['packet'] = self._read_packet(header=0, payload=length, discard=True)
        return self._decode_next_layer(frame, length) | def function[read_frame, parameter[self]]:
constant[Read each block after global header.
Structure of record/package header (C):
typedef struct pcaprec_hdr_s {
guint32 ts_sec; /* timestamp seconds */
guint32 ts_usec; /* timestamp microseconds */
guint32 incl_len; /* number of octets of packet saved in file */
guint32 orig_len; /* actual length of packet */
} pcaprec_hdr_t;
]
variable[_temp] assign[=] call[name[self]._read_unpack, parameter[constant[4]]]
if compare[name[_temp] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0674e80>
variable[_tsss] assign[=] name[_temp]
variable[_tsus] assign[=] call[name[self]._read_unpack, parameter[constant[4]]]
variable[_ilen] assign[=] call[name[self]._read_unpack, parameter[constant[4]]]
variable[_olen] assign[=] call[name[self]._read_unpack, parameter[constant[4]]]
if name[self]._nsec begin[:]
variable[_epch] assign[=] binary_operation[name[_tsss] + binary_operation[name[_tsus] / constant[1000000000]]]
variable[_time] assign[=] call[name[datetime].datetime.fromtimestamp, parameter[name[_epch]]]
variable[frame] assign[=] call[name[dict], parameter[]]
variable[length] assign[=] call[name[frame]][constant[len]]
variable[bytes_] assign[=] call[name[self]._file.read, parameter[name[length]]]
if <ast.BoolOp object at 0x7da1b06fd300> begin[:]
call[name[self]._mpfp.put, parameter[call[name[self]._file.tell, parameter[]]]]
<ast.AugAssign object at 0x7da1b06fd0c0>
call[name[frame]][constant[packet]] assign[=] name[bytes_]
name[self]._file assign[=] call[name[io].BytesIO, parameter[name[bytes_]]]
return[call[name[self]._decode_next_layer, parameter[name[frame], name[length]]]] | keyword[def] identifier[read_frame] ( identifier[self] ):
literal[string]
identifier[_temp] = identifier[self] . identifier[_read_unpack] ( literal[int] , identifier[lilendian] = keyword[True] , identifier[quiet] = keyword[True] )
keyword[if] identifier[_temp] keyword[is] keyword[None] :
keyword[raise] identifier[EOFError]
identifier[_tsss] = identifier[_temp]
identifier[_tsus] = identifier[self] . identifier[_read_unpack] ( literal[int] , identifier[lilendian] = keyword[True] )
identifier[_ilen] = identifier[self] . identifier[_read_unpack] ( literal[int] , identifier[lilendian] = keyword[True] )
identifier[_olen] = identifier[self] . identifier[_read_unpack] ( literal[int] , identifier[lilendian] = keyword[True] )
keyword[if] identifier[self] . identifier[_nsec] :
identifier[_epch] = identifier[_tsss] + identifier[_tsus] / literal[int]
keyword[else] :
identifier[_epch] = identifier[_tsss] + identifier[_tsus] / literal[int]
identifier[_time] = identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] ( identifier[_epch] )
identifier[frame] = identifier[dict] (
identifier[frame_info] = identifier[dict] (
identifier[ts_sec] = identifier[_tsss] ,
identifier[ts_usec] = identifier[_tsus] ,
identifier[incl_len] = identifier[_ilen] ,
identifier[orig_len] = identifier[_olen] ,
),
identifier[time] = identifier[_time] ,
identifier[number] = identifier[self] . identifier[_fnum] ,
identifier[time_epoch] = identifier[_epch] ,
identifier[len] = identifier[_ilen] ,
identifier[cap_len] = identifier[_olen] ,
)
identifier[length] = identifier[frame] [ literal[string] ]
identifier[bytes_] = identifier[self] . identifier[_file] . identifier[read] ( identifier[length] )
keyword[if] identifier[self] . identifier[_mpkt] keyword[and] identifier[self] . identifier[_mpfp] :
identifier[self] . identifier[_mpfp] . identifier[put] ( identifier[self] . identifier[_file] . identifier[tell] ())
identifier[self] . identifier[_mpkt] . identifier[pool] += literal[int]
identifier[frame] [ literal[string] ]= identifier[bytes_]
identifier[self] . identifier[_file] = identifier[io] . identifier[BytesIO] ( identifier[bytes_] )
keyword[return] identifier[self] . identifier[_decode_next_layer] ( identifier[frame] , identifier[length] ) | def read_frame(self):
"""Read each block after global header.
Structure of record/package header (C):
typedef struct pcaprec_hdr_s {
guint32 ts_sec; /* timestamp seconds */
guint32 ts_usec; /* timestamp microseconds */
guint32 incl_len; /* number of octets of packet saved in file */
guint32 orig_len; /* actual length of packet */
} pcaprec_hdr_t;
"""
# _scur = self._file.tell()
_temp = self._read_unpack(4, lilendian=True, quiet=True)
if _temp is None:
raise EOFError # depends on [control=['if'], data=[]]
_tsss = _temp
_tsus = self._read_unpack(4, lilendian=True)
_ilen = self._read_unpack(4, lilendian=True)
_olen = self._read_unpack(4, lilendian=True)
if self._nsec:
_epch = _tsss + _tsus / 1000000000 # depends on [control=['if'], data=[]]
else:
_epch = _tsss + _tsus / 1000000
_time = datetime.datetime.fromtimestamp(_epch)
frame = dict(frame_info=dict(ts_sec=_tsss, ts_usec=_tsus, incl_len=_ilen, orig_len=_olen), time=_time, number=self._fnum, time_epoch=_epch, len=_ilen, cap_len=_olen)
# load packet data
length = frame['len']
bytes_ = self._file.read(length)
# record file pointer
if self._mpkt and self._mpfp:
# print(self._fnum, 'ready')
self._mpfp.put(self._file.tell())
self._mpkt.pool += 1 # depends on [control=['if'], data=[]]
# make BytesIO from frame packet data
frame['packet'] = bytes_
self._file = io.BytesIO(bytes_)
# frame['packet'] = self._read_packet(header=0, payload=length, discard=True)
return self._decode_next_layer(frame, length) |
def _sort_tau_by_y(self, y):
        """Sort tau matrix by dependence with variable y.

        Builds an (n_nodes, 3) array of [node index, tau, |tau|] and sorts
        its rows by |tau| in descending order, so the variables most
        strongly associated with y come first.

        Args:
        :param y: index of variable of interest
        :type y: int
        """
        # first column is the variable of interest
        tau_y = self.tau_matrix[:, y]
        # NOTE(review): tau_matrix[:, y] is a numpy view, so writing NaN into
        # the self-correlation here also mutates self.tau_matrix in place --
        # confirm that is intended.
        tau_y[y] = np.NaN
        temp = np.empty([self.n_nodes, 3])
        temp[:, 0] = np.arange(self.n_nodes)
        temp[:, 1] = tau_y
        temp[:, 2] = abs(tau_y)
        # Replace NaNs with a -10 sentinel so they sort after every real
        # |tau| value in the descending order below.
        temp[np.isnan(temp)] = -10
        tau_sorted = temp[temp[:, 2].argsort()[::-1]]
        return tau_sorted | def function[_sort_tau_by_y, parameter[self, y]]:
constant[Sort tau matrix by dependece with variable y.
Args:
:param y: index of variable of intrest
:type y: int
]
variable[tau_y] assign[=] call[name[self].tau_matrix][tuple[[<ast.Slice object at 0x7da1b1e02500>, <ast.Name object at 0x7da1b1e03880>]]]
call[name[tau_y]][name[y]] assign[=] name[np].NaN
variable[temp] assign[=] call[name[np].empty, parameter[list[[<ast.Attribute object at 0x7da1b1e009a0>, <ast.Constant object at 0x7da1b1e00700>]]]]
call[name[temp]][tuple[[<ast.Slice object at 0x7da20e74ae60>, <ast.Constant object at 0x7da20e7486d0>]]] assign[=] call[name[np].arange, parameter[name[self].n_nodes]]
call[name[temp]][tuple[[<ast.Slice object at 0x7da20e74b070>, <ast.Constant object at 0x7da20e748a90>]]] assign[=] name[tau_y]
call[name[temp]][tuple[[<ast.Slice object at 0x7da20e7497b0>, <ast.Constant object at 0x7da20e74baf0>]]] assign[=] call[name[abs], parameter[name[tau_y]]]
call[name[temp]][call[name[np].isnan, parameter[name[temp]]]] assign[=] <ast.UnaryOp object at 0x7da204960d90>
variable[tau_sorted] assign[=] call[name[temp]][call[call[call[name[temp]][tuple[[<ast.Slice object at 0x7da1b2346680>, <ast.Constant object at 0x7da1b2344940>]]].argsort, parameter[]]][<ast.Slice object at 0x7da1b2347eb0>]]
return[name[tau_sorted]] | keyword[def] identifier[_sort_tau_by_y] ( identifier[self] , identifier[y] ):
literal[string]
identifier[tau_y] = identifier[self] . identifier[tau_matrix] [:, identifier[y] ]
identifier[tau_y] [ identifier[y] ]= identifier[np] . identifier[NaN]
identifier[temp] = identifier[np] . identifier[empty] ([ identifier[self] . identifier[n_nodes] , literal[int] ])
identifier[temp] [:, literal[int] ]= identifier[np] . identifier[arange] ( identifier[self] . identifier[n_nodes] )
identifier[temp] [:, literal[int] ]= identifier[tau_y]
identifier[temp] [:, literal[int] ]= identifier[abs] ( identifier[tau_y] )
identifier[temp] [ identifier[np] . identifier[isnan] ( identifier[temp] )]=- literal[int]
identifier[tau_sorted] = identifier[temp] [ identifier[temp] [:, literal[int] ]. identifier[argsort] ()[::- literal[int] ]]
keyword[return] identifier[tau_sorted] | def _sort_tau_by_y(self, y):
"""Sort tau matrix by dependece with variable y.
Args:
:param y: index of variable of intrest
:type y: int
"""
# first column is the variable of interest
tau_y = self.tau_matrix[:, y]
tau_y[y] = np.NaN
temp = np.empty([self.n_nodes, 3])
temp[:, 0] = np.arange(self.n_nodes)
temp[:, 1] = tau_y
temp[:, 2] = abs(tau_y)
temp[np.isnan(temp)] = -10
tau_sorted = temp[temp[:, 2].argsort()[::-1]]
return tau_sorted |
def destroy_ebs_volume(region, volume_id, access_key_id, secret_access_key):
    """ destroys an ebs volume

    :param region: AWS region holding the volume
    :param volume_id: id of the EBS volume to delete
    :param access_key_id: AWS access key id for the connection
    :param secret_access_key: AWS secret access key for the connection

    Silently does nothing when the volume does not exist.
    """
    conn = connect_to_ec2(region, access_key_id, secret_access_key)
    # Guard so delete_volume is only called when the volume is actually
    # there (the EC2 API presumably errors on a missing volume).
    if ebs_volume_exists(region, volume_id, access_key_id, secret_access_key):
        log_yellow('destroying EBS volume ...')
        conn.delete_volume(volume_id) | def function[destroy_ebs_volume, parameter[region, volume_id, access_key_id, secret_access_key]]:
constant[ destroys an ebs volume ]
variable[conn] assign[=] call[name[connect_to_ec2], parameter[name[region], name[access_key_id], name[secret_access_key]]]
if call[name[ebs_volume_exists], parameter[name[region], name[volume_id], name[access_key_id], name[secret_access_key]]] begin[:]
call[name[log_yellow], parameter[constant[destroying EBS volume ...]]]
call[name[conn].delete_volume, parameter[name[volume_id]]] | keyword[def] identifier[destroy_ebs_volume] ( identifier[region] , identifier[volume_id] , identifier[access_key_id] , identifier[secret_access_key] ):
literal[string]
identifier[conn] = identifier[connect_to_ec2] ( identifier[region] , identifier[access_key_id] , identifier[secret_access_key] )
keyword[if] identifier[ebs_volume_exists] ( identifier[region] , identifier[volume_id] , identifier[access_key_id] , identifier[secret_access_key] ):
identifier[log_yellow] ( literal[string] )
identifier[conn] . identifier[delete_volume] ( identifier[volume_id] ) | def destroy_ebs_volume(region, volume_id, access_key_id, secret_access_key):
""" destroys an ebs volume """
conn = connect_to_ec2(region, access_key_id, secret_access_key)
if ebs_volume_exists(region, volume_id, access_key_id, secret_access_key):
log_yellow('destroying EBS volume ...')
conn.delete_volume(volume_id) # depends on [control=['if'], data=[]] |
def _hijacked_run_baton_query(
        self, baton_binary: BatonBinary, program_arguments: List[str]=None, input_data: Any=None) -> List[Dict]:
        """
        Hijacked `run_baton_query` method with hijacking to add the `--recursive` flag to calls to `baton-chmod` that
        originate from code called from frames with the ids in `self._hijack_frame_ids`.
        :param baton_binary: see `BatonRunner.run_baton_query`
        :param program_arguments: see `BatonRunner.run_baton_query`
        :param input_data: see `BatonRunner.run_baton_query`
        :return: see `BatonRunner.run_baton_query`
        """
        if baton_binary == BatonBinary.BATON_CHMOD:
            current_frame = inspect.currentframe()
            # NOTE(review): this helper ignores its `frame` parameter and
            # closes over `frame_back` instead -- it happens to work because
            # it is only ever called with `frame_back`, but looks like a
            # latent bug.
            def frame_code_in_same_file(frame) -> bool:
                return frame_back.f_code.co_filename == current_frame.f_code.co_filename
            frame_back = current_frame.f_back
            assert frame_code_in_same_file(frame_back)
            # Walk up the call stack while the frames stay in this source
            # file; if any of them was registered for hijacking, rewrite the
            # chmod call to be recursive.
            while frame_back is not None and frame_code_in_same_file(frame_back):
                if id(frame_back) in self._hijack_frame_ids:
                    # NOTE(review): program_arguments are replaced (not
                    # extended) by the recursive flag here -- confirm intended.
                    return self._original_run_baton_query(baton_binary, [BATON_CHMOD_RECURSIVE_FLAG], input_data)
                frame_back = frame_back.f_back
        # Default path: delegate unchanged to the original implementation.
        return self._original_run_baton_query(baton_binary, program_arguments, input_data) | def function[_hijacked_run_baton_query, parameter[self, baton_binary, program_arguments, input_data]]:
constant[
Hijacked `run_baton_query` method with hijacking to add the `--recursive` flag to calls to `baton-chmod` that
originate from code called from frames with the ids in `self._hijack_frame_ids`.
:param baton_binary: see `BatonRunner.run_baton_query`
:param program_arguments: see `BatonRunner.run_baton_query`
:param input_data: see `BatonRunner.run_baton_query`
:return: see `BatonRunner.run_baton_query`
]
if compare[name[baton_binary] equal[==] name[BatonBinary].BATON_CHMOD] begin[:]
variable[current_frame] assign[=] call[name[inspect].currentframe, parameter[]]
def function[frame_code_in_same_file, parameter[frame]]:
return[compare[name[frame_back].f_code.co_filename equal[==] name[current_frame].f_code.co_filename]]
variable[frame_back] assign[=] name[current_frame].f_back
assert[call[name[frame_code_in_same_file], parameter[name[frame_back]]]]
while <ast.BoolOp object at 0x7da1b1642740> begin[:]
if compare[call[name[id], parameter[name[frame_back]]] in name[self]._hijack_frame_ids] begin[:]
return[call[name[self]._original_run_baton_query, parameter[name[baton_binary], list[[<ast.Name object at 0x7da207f983a0>]], name[input_data]]]]
variable[frame_back] assign[=] name[frame_back].f_back
return[call[name[self]._original_run_baton_query, parameter[name[baton_binary], name[program_arguments], name[input_data]]]] | keyword[def] identifier[_hijacked_run_baton_query] (
identifier[self] , identifier[baton_binary] : identifier[BatonBinary] , identifier[program_arguments] : identifier[List] [ identifier[str] ]= keyword[None] , identifier[input_data] : identifier[Any] = keyword[None] )-> identifier[List] [ identifier[Dict] ]:
literal[string]
keyword[if] identifier[baton_binary] == identifier[BatonBinary] . identifier[BATON_CHMOD] :
identifier[current_frame] = identifier[inspect] . identifier[currentframe] ()
keyword[def] identifier[frame_code_in_same_file] ( identifier[frame] )-> identifier[bool] :
keyword[return] identifier[frame_back] . identifier[f_code] . identifier[co_filename] == identifier[current_frame] . identifier[f_code] . identifier[co_filename]
identifier[frame_back] = identifier[current_frame] . identifier[f_back]
keyword[assert] identifier[frame_code_in_same_file] ( identifier[frame_back] )
keyword[while] identifier[frame_back] keyword[is] keyword[not] keyword[None] keyword[and] identifier[frame_code_in_same_file] ( identifier[frame_back] ):
keyword[if] identifier[id] ( identifier[frame_back] ) keyword[in] identifier[self] . identifier[_hijack_frame_ids] :
keyword[return] identifier[self] . identifier[_original_run_baton_query] ( identifier[baton_binary] ,[ identifier[BATON_CHMOD_RECURSIVE_FLAG] ], identifier[input_data] )
identifier[frame_back] = identifier[frame_back] . identifier[f_back]
keyword[return] identifier[self] . identifier[_original_run_baton_query] ( identifier[baton_binary] , identifier[program_arguments] , identifier[input_data] ) | def _hijacked_run_baton_query(self, baton_binary: BatonBinary, program_arguments: List[str]=None, input_data: Any=None) -> List[Dict]:
"""
Hijacked `run_baton_query` method with hijacking to add the `--recursive` flag to calls to `baton-chmod` that
originate from code called from frames with the ids in `self._hijack_frame_ids`.
:param baton_binary: see `BatonRunner.run_baton_query`
:param program_arguments: see `BatonRunner.run_baton_query`
:param input_data: see `BatonRunner.run_baton_query`
:return: see `BatonRunner.run_baton_query`
"""
if baton_binary == BatonBinary.BATON_CHMOD:
current_frame = inspect.currentframe()
def frame_code_in_same_file(frame) -> bool:
return frame_back.f_code.co_filename == current_frame.f_code.co_filename
frame_back = current_frame.f_back
assert frame_code_in_same_file(frame_back)
while frame_back is not None and frame_code_in_same_file(frame_back):
if id(frame_back) in self._hijack_frame_ids:
return self._original_run_baton_query(baton_binary, [BATON_CHMOD_RECURSIVE_FLAG], input_data) # depends on [control=['if'], data=[]]
frame_back = frame_back.f_back # depends on [control=['while'], data=[]] # depends on [control=['if'], data=['baton_binary']]
return self._original_run_baton_query(baton_binary, program_arguments, input_data) |
def vcenter_credentials_url(self, **kwargs):
        """Auto Generated Code

        Builds the XML payload for configuring a vCenter's credentials URL
        and hands it to the callback.

        :param kwargs: must contain 'id' (vcenter id) and 'url' (credentials
            url); may contain 'callback' (defaults to self._callback).
            Missing required keys raise KeyError (dict.pop with no default).
        """
        config = ET.Element("config")
        vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
        id_key = ET.SubElement(vcenter, "id")
        id_key.text = kwargs.pop('id')
        credentials = ET.SubElement(vcenter, "credentials")
        url = ET.SubElement(credentials, "url")
        url.text = kwargs.pop('url')
        # The callback receives the assembled element tree (presumably to
        # send it to the device -- see self._callback's definition).
        callback = kwargs.pop('callback', self._callback)
        return callback(config) | def function[vcenter_credentials_url, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[vcenter] assign[=] call[name[ET].SubElement, parameter[name[config], constant[vcenter]]]
variable[id_key] assign[=] call[name[ET].SubElement, parameter[name[vcenter], constant[id]]]
name[id_key].text assign[=] call[name[kwargs].pop, parameter[constant[id]]]
variable[credentials] assign[=] call[name[ET].SubElement, parameter[name[vcenter], constant[credentials]]]
variable[url] assign[=] call[name[ET].SubElement, parameter[name[credentials], constant[url]]]
name[url].text assign[=] call[name[kwargs].pop, parameter[constant[url]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[vcenter_credentials_url] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[vcenter] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[id_key] = identifier[ET] . identifier[SubElement] ( identifier[vcenter] , literal[string] )
identifier[id_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[credentials] = identifier[ET] . identifier[SubElement] ( identifier[vcenter] , literal[string] )
identifier[url] = identifier[ET] . identifier[SubElement] ( identifier[credentials] , literal[string] )
identifier[url] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def vcenter_credentials_url(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
vcenter = ET.SubElement(config, 'vcenter', xmlns='urn:brocade.com:mgmt:brocade-vswitch')
id_key = ET.SubElement(vcenter, 'id')
id_key.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, 'credentials')
url = ET.SubElement(credentials, 'url')
url.text = kwargs.pop('url')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_changes(self, name, *args):
        """Return a list of changes for the named refactoring action.
        Changes are dictionaries describing a single action to be
        taken for the refactoring to be successful.
        A change has an action and possibly a type. In the description
        below, the action is before the slash and the type after it.
        change: Change file contents
        - file: The path to the file to change
        - contents: The new contents for the file
        - Diff: A unified diff showing the changes introduced
        create/file: Create a new file
        - file: The file to create
        create/directory: Create a new directory
        - path: The directory to create
        move/file: Rename a file
        - source: The path to the source file
        - destination: The path to the destination file name
        move/directory: Rename a directory
        - source: The path to the source directory
        - destination: The path to the destination directory name
        delete/file: Delete a file
        - file: The file to delete
        delete/directory: Delete a directory
        - path: The directory to delete
        """
        # Only methods following the refactor_* naming convention may be
        # dispatched by name; reject anything else up front.
        if not name.startswith("refactor_"):
            raise ValueError("Bad refactoring name {0}".format(name))
        method = getattr(self, name)
        # refactor_notes is attached to each refactor_* method elsewhere in
        # this file; an action can mark itself unavailable (defaults to
        # available when the key is absent).
        if not method.refactor_notes.get('available', True):
            raise RuntimeError("Method not available")
        return method(*args) | def function[get_changes, parameter[self, name]]:
constant[Return a list of changes for the named refactoring action.
Changes are dictionaries describing a single action to be
taken for the refactoring to be successful.
A change has an action and possibly a type. In the description
below, the action is before the slash and the type after it.
change: Change file contents
- file: The path to the file to change
- contents: The new contents for the file
- Diff: A unified diff showing the changes introduced
create/file: Create a new file
- file: The file to create
create/directory: Create a new directory
- path: The directory to create
move/file: Rename a file
- source: The path to the source file
- destination: The path to the destination file name
move/directory: Rename a directory
- source: The path to the source directory
- destination: The path to the destination directory name
delete/file: Delete a file
- file: The file to delete
delete/directory: Delete a directory
- path: The directory to delete
]
if <ast.UnaryOp object at 0x7da1b16f94e0> begin[:]
<ast.Raise object at 0x7da1b16fb1c0>
variable[method] assign[=] call[name[getattr], parameter[name[self], name[name]]]
if <ast.UnaryOp object at 0x7da1b16f9330> begin[:]
<ast.Raise object at 0x7da1b16f8820>
return[call[name[method], parameter[<ast.Starred object at 0x7da1b16f8100>]]] | keyword[def] identifier[get_changes] ( identifier[self] , identifier[name] ,* identifier[args] ):
literal[string]
keyword[if] keyword[not] identifier[name] . identifier[startswith] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[method] = identifier[getattr] ( identifier[self] , identifier[name] )
keyword[if] keyword[not] identifier[method] . identifier[refactor_notes] . identifier[get] ( literal[string] , keyword[True] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] identifier[method] (* identifier[args] ) | def get_changes(self, name, *args):
"""Return a list of changes for the named refactoring action.
Changes are dictionaries describing a single action to be
taken for the refactoring to be successful.
A change has an action and possibly a type. In the description
below, the action is before the slash and the type after it.
change: Change file contents
- file: The path to the file to change
- contents: The new contents for the file
- Diff: A unified diff showing the changes introduced
create/file: Create a new file
- file: The file to create
create/directory: Create a new directory
- path: The directory to create
move/file: Rename a file
- source: The path to the source file
- destination: The path to the destination file name
move/directory: Rename a directory
- source: The path to the source directory
- destination: The path to the destination directory name
delete/file: Delete a file
- file: The file to delete
delete/directory: Delete a directory
- path: The directory to delete
"""
if not name.startswith('refactor_'):
raise ValueError('Bad refactoring name {0}'.format(name)) # depends on [control=['if'], data=[]]
method = getattr(self, name)
if not method.refactor_notes.get('available', True):
raise RuntimeError('Method not available') # depends on [control=['if'], data=[]]
return method(*args) |
def reset(self, configuration: dict) -> None:
        """
        Whether or not anything was stored in the database previously, purge
        that state and start a new training process from scratch.

        :param configuration: run configuration to persist via the backend
        """
        # Purge all stored state from step 0 onwards, i.e. everything
        # (presumed semantics of clean(0) -- confirm against its definition).
        self.clean(0)
        self.backend.store_config(configuration) | def function[reset, parameter[self, configuration]]:
constant[
Whenever there was anything stored in the database or not, purge previous state and start
new training process from scratch.
]
call[name[self].clean, parameter[constant[0]]]
call[name[self].backend.store_config, parameter[name[configuration]]] | keyword[def] identifier[reset] ( identifier[self] , identifier[configuration] : identifier[dict] )-> keyword[None] :
literal[string]
identifier[self] . identifier[clean] ( literal[int] )
identifier[self] . identifier[backend] . identifier[store_config] ( identifier[configuration] ) | def reset(self, configuration: dict) -> None:
"""
Whenever there was anything stored in the database or not, purge previous state and start
new training process from scratch.
"""
self.clean(0)
self.backend.store_config(configuration) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.