code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def tree():
"""Example showing tree progress view"""
#############
# Test data #
#############
# For this example, we're obviously going to be feeding fictitious data
# to ProgressTree, so here it is
# Six counters, one per leaf bar; each is bumped from 0 up to 10 below.
leaf_values = [Value(0) for i in range(6)]
# Shared keyword arguments for every BarDescriptor: a Bar capped at 10.
bd_defaults = dict(type=Bar, kwargs=dict(max_value=10))
# Nested dict mirrors the tree shown on screen; leaves are BarDescriptors.
test_d = {
"Warp Jump": {
"1) Prepare fuel": {
"Load Tanks": {
"Tank 1": BarDescriptor(value=leaf_values[0], **bd_defaults),
"Tank 2": BarDescriptor(value=leaf_values[1], **bd_defaults),
},
"Refine tylium ore": BarDescriptor(
value=leaf_values[2], **bd_defaults
),
},
"2) Calculate jump co-ordinates": {
"Resolve common name to co-ordinates": {
"Querying resolution from baseship": BarDescriptor(
value=leaf_values[3], **bd_defaults
),
},
},
"3) Perform jump": {
"Check FTL drive readiness": BarDescriptor(
value=leaf_values[4], **bd_defaults
),
"Juuuuuump!": BarDescriptor(value=leaf_values[5],
**bd_defaults)
}
}
}
# We'll use this function to bump up the leaf values
# NOTE(review): ``obj`` is unused — both helpers close over leaf_values;
# only the first not-yet-full counter is incremented per call (break).
def incr_value(obj):
for val in leaf_values:
if val.value < 10:
val.value += 1
break
# And this to check if we're to stop drawing
# Done once every leaf counter has reached its max_value of 10.
def are_we_done(obj):
return all(val.value == 10 for val in leaf_values)
###################
# The actual code #
###################
# Create blessings.Terminal instance
t = Terminal()
# Initialize a ProgressTree instance
n = ProgressTree(term=t)
# We'll use the make_room method to make sure the terminal
# is filled out with all the room we need
n.make_room(test_d)
while not are_we_done(test_d):
# Random short delay so the bars advance at an uneven, demo-like pace.
sleep(0.2 * random.random())
# After the cursor position is first saved (in the first draw call)
# this will restore the cursor back to the top so we can draw again
n.cursor.restore()
# We use our incr_value method to bump the fake numbers
incr_value(test_d)
# Actually draw out the bars
# NOTE(review): bd_defaults is passed positionally here, unlike the
# ``**bd_defaults`` expansion above — confirm BarDescriptor accepts a
# plain dict as its first argument.
n.draw(test_d, BarDescriptor(bd_defaults)) | def function[tree, parameter[]]:
constant[Example showing tree progress view]
variable[leaf_values] assign[=] <ast.ListComp object at 0x7da20c993820>
variable[bd_defaults] assign[=] call[name[dict], parameter[]]
variable[test_d] assign[=] dictionary[[<ast.Constant object at 0x7da20c9934f0>], [<ast.Dict object at 0x7da20c992e60>]]
def function[incr_value, parameter[obj]]:
for taget[name[val]] in starred[name[leaf_values]] begin[:]
if compare[name[val].value less[<] constant[10]] begin[:]
<ast.AugAssign object at 0x7da20c990730>
break
def function[are_we_done, parameter[obj]]:
return[call[name[all], parameter[<ast.GeneratorExp object at 0x7da20c993ee0>]]]
variable[t] assign[=] call[name[Terminal], parameter[]]
variable[n] assign[=] call[name[ProgressTree], parameter[]]
call[name[n].make_room, parameter[name[test_d]]]
while <ast.UnaryOp object at 0x7da18bcc8b80> begin[:]
call[name[sleep], parameter[binary_operation[constant[0.2] * call[name[random].random, parameter[]]]]]
call[name[n].cursor.restore, parameter[]]
call[name[incr_value], parameter[name[test_d]]]
call[name[n].draw, parameter[name[test_d], call[name[BarDescriptor], parameter[name[bd_defaults]]]]] | keyword[def] identifier[tree] ():
literal[string]
identifier[leaf_values] =[ identifier[Value] ( literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )]
identifier[bd_defaults] = identifier[dict] ( identifier[type] = identifier[Bar] , identifier[kwargs] = identifier[dict] ( identifier[max_value] = literal[int] ))
identifier[test_d] ={
literal[string] :{
literal[string] :{
literal[string] :{
literal[string] : identifier[BarDescriptor] ( identifier[value] = identifier[leaf_values] [ literal[int] ],** identifier[bd_defaults] ),
literal[string] : identifier[BarDescriptor] ( identifier[value] = identifier[leaf_values] [ literal[int] ],** identifier[bd_defaults] ),
},
literal[string] : identifier[BarDescriptor] (
identifier[value] = identifier[leaf_values] [ literal[int] ],** identifier[bd_defaults]
),
},
literal[string] :{
literal[string] :{
literal[string] : identifier[BarDescriptor] (
identifier[value] = identifier[leaf_values] [ literal[int] ],** identifier[bd_defaults]
),
},
},
literal[string] :{
literal[string] : identifier[BarDescriptor] (
identifier[value] = identifier[leaf_values] [ literal[int] ],** identifier[bd_defaults]
),
literal[string] : identifier[BarDescriptor] ( identifier[value] = identifier[leaf_values] [ literal[int] ],
** identifier[bd_defaults] )
}
}
}
keyword[def] identifier[incr_value] ( identifier[obj] ):
keyword[for] identifier[val] keyword[in] identifier[leaf_values] :
keyword[if] identifier[val] . identifier[value] < literal[int] :
identifier[val] . identifier[value] += literal[int]
keyword[break]
keyword[def] identifier[are_we_done] ( identifier[obj] ):
keyword[return] identifier[all] ( identifier[val] . identifier[value] == literal[int] keyword[for] identifier[val] keyword[in] identifier[leaf_values] )
identifier[t] = identifier[Terminal] ()
identifier[n] = identifier[ProgressTree] ( identifier[term] = identifier[t] )
identifier[n] . identifier[make_room] ( identifier[test_d] )
keyword[while] keyword[not] identifier[are_we_done] ( identifier[test_d] ):
identifier[sleep] ( literal[int] * identifier[random] . identifier[random] ())
identifier[n] . identifier[cursor] . identifier[restore] ()
identifier[incr_value] ( identifier[test_d] )
identifier[n] . identifier[draw] ( identifier[test_d] , identifier[BarDescriptor] ( identifier[bd_defaults] )) | def tree():
"""Example showing tree progress view"""
#############
# Test data #
#############
# For this example, we're obviously going to be feeding fictitious data
# to ProgressTree, so here it is
leaf_values = [Value(0) for i in range(6)]
bd_defaults = dict(type=Bar, kwargs=dict(max_value=10))
test_d = {'Warp Jump': {'1) Prepare fuel': {'Load Tanks': {'Tank 1': BarDescriptor(value=leaf_values[0], **bd_defaults), 'Tank 2': BarDescriptor(value=leaf_values[1], **bd_defaults)}, 'Refine tylium ore': BarDescriptor(value=leaf_values[2], **bd_defaults)}, '2) Calculate jump co-ordinates': {'Resolve common name to co-ordinates': {'Querying resolution from baseship': BarDescriptor(value=leaf_values[3], **bd_defaults)}}, '3) Perform jump': {'Check FTL drive readiness': BarDescriptor(value=leaf_values[4], **bd_defaults), 'Juuuuuump!': BarDescriptor(value=leaf_values[5], **bd_defaults)}}}
# We'll use this function to bump up the leaf values
def incr_value(obj):
for val in leaf_values:
if val.value < 10:
val.value += 1
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['val']]
# And this to check if we're to stop drawing
def are_we_done(obj):
return all((val.value == 10 for val in leaf_values))
###################
# The actual code #
###################
# Create blessings.Terminal instance
t = Terminal()
# Initialize a ProgressTree instance
n = ProgressTree(term=t)
# We'll use the make_room method to make sure the terminal
# is filled out with all the room we need
n.make_room(test_d)
while not are_we_done(test_d):
sleep(0.2 * random.random())
# After the cursor position is first saved (in the first draw call)
# this will restore the cursor back to the top so we can draw again
n.cursor.restore()
# We use our incr_value method to bump the fake numbers
incr_value(test_d)
# Actually draw out the bars
n.draw(test_d, BarDescriptor(bd_defaults)) # depends on [control=['while'], data=[]] |
def mont_pub_to_ed_pub(cls, mont_pub):
"""
Derive a Twisted Edwards public key from given Montgomery public key.
:param mont_pub: A bytes-like object encoding the public key with length
MONT_PUB_KEY_SIZE.
:returns: A bytes-like object encoding the public key with length ED_PUB_KEY_SIZE.
:raises TypeError: if ``mont_pub`` is not a ``bytes`` instance.
:raises ValueError: if ``mont_pub`` is not exactly MONT_PUB_KEY_SIZE bytes long.
"""
# Strict input validation: exact type, then exact length.
if not isinstance(mont_pub, bytes):
raise TypeError("Wrong type passed for the mont_pub parameter.")
if len(mont_pub) != cls.MONT_PUB_KEY_SIZE:
raise ValueError("Invalid value passed for the mont_pub parameter.")
# Convert to a mutable bytearray for the conversion helper, then freeze
# the result back to immutable bytes — presumably _mont_pub_to_ed_pub
# requires a writable buffer (TODO confirm against its implementation).
return bytes(cls._mont_pub_to_ed_pub(bytearray(mont_pub))) | def function[mont_pub_to_ed_pub, parameter[cls, mont_pub]]:
constant[
Derive a Twisted Edwards public key from given Montgomery public key.
:param mont_pub: A bytes-like object encoding the public key with length
MONT_PUB_KEY_SIZE.
:returns: A bytes-like object encoding the public key with length ED_PUB_KEY_SIZE.
]
if <ast.UnaryOp object at 0x7da1b1ec1270> begin[:]
<ast.Raise object at 0x7da1b1ec2500>
if compare[call[name[len], parameter[name[mont_pub]]] not_equal[!=] name[cls].MONT_PUB_KEY_SIZE] begin[:]
<ast.Raise object at 0x7da1b1ec20e0>
return[call[name[bytes], parameter[call[name[cls]._mont_pub_to_ed_pub, parameter[call[name[bytearray], parameter[name[mont_pub]]]]]]]] | keyword[def] identifier[mont_pub_to_ed_pub] ( identifier[cls] , identifier[mont_pub] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[mont_pub] , identifier[bytes] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[len] ( identifier[mont_pub] )!= identifier[cls] . identifier[MONT_PUB_KEY_SIZE] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[bytes] ( identifier[cls] . identifier[_mont_pub_to_ed_pub] ( identifier[bytearray] ( identifier[mont_pub] ))) | def mont_pub_to_ed_pub(cls, mont_pub):
"""
Derive a Twisted Edwards public key from given Montgomery public key.
:param mont_pub: A bytes-like object encoding the public key with length
MONT_PUB_KEY_SIZE.
:returns: A bytes-like object encoding the public key with length ED_PUB_KEY_SIZE.
"""
if not isinstance(mont_pub, bytes):
raise TypeError('Wrong type passed for the mont_pub parameter.') # depends on [control=['if'], data=[]]
if len(mont_pub) != cls.MONT_PUB_KEY_SIZE:
raise ValueError('Invalid value passed for the mont_pub parameter.') # depends on [control=['if'], data=[]]
return bytes(cls._mont_pub_to_ed_pub(bytearray(mont_pub))) |
def _check(peers):
'''Checks whether the input is a valid list of peers and transforms domain names into IP Addresses'''
# Must be a list of strings; any other shape is rejected outright.
if not isinstance(peers, list):
return False
for peer in peers:
if not isinstance(peer, six.string_types):
return False
if not HAS_NETADDR: # if does not have this lib installed, will simply try to load what user specified
# if the addresses are not correctly specified, will trow error when loading the actual config
return True
ip_only_peers = []
for peer in peers:
try:
ip_only_peers.append(six.text_type(IPAddress(peer))) # append the str value
except AddrFormatError:
# if not a valid IP Address
# will try to see if it is a nameserver and resolve it
if not HAS_DNSRESOLVER:
continue # without the dns resolver cannot populate the list of NTP entities based on their nameserver
# so we'll move on
dns_reply = []
try:
# try to see if it is a valid NS
dns_reply = dns.resolver.query(peer)
except dns.resolver.NoAnswer:
# no a valid DNS entry either
return False
# A name may resolve to several addresses; keep all of them.
for dns_ip in dns_reply:
ip_only_peers.append(six.text_type(dns_ip))
# NOTE(review): this rebinds only the local name — the resolved list in
# ip_only_peers is never visible to the caller; confirm whether the
# "transforms domain names" part of the docstring is actually intended
# to propagate out of this function.
peers = ip_only_peers
return True | def function[_check, parameter[peers]]:
constant[Checks whether the input is a valid list of peers and transforms domain names into IP Addresses]
if <ast.UnaryOp object at 0x7da1b20ecf40> begin[:]
return[constant[False]]
for taget[name[peer]] in starred[name[peers]] begin[:]
if <ast.UnaryOp object at 0x7da1b20ec040> begin[:]
return[constant[False]]
if <ast.UnaryOp object at 0x7da1b20ef2b0> begin[:]
return[constant[True]]
variable[ip_only_peers] assign[=] list[[]]
for taget[name[peer]] in starred[name[peers]] begin[:]
<ast.Try object at 0x7da1b20ecca0>
variable[peers] assign[=] name[ip_only_peers]
return[constant[True]] | keyword[def] identifier[_check] ( identifier[peers] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[peers] , identifier[list] ):
keyword[return] keyword[False]
keyword[for] identifier[peer] keyword[in] identifier[peers] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[peer] , identifier[six] . identifier[string_types] ):
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[HAS_NETADDR] :
keyword[return] keyword[True]
identifier[ip_only_peers] =[]
keyword[for] identifier[peer] keyword[in] identifier[peers] :
keyword[try] :
identifier[ip_only_peers] . identifier[append] ( identifier[six] . identifier[text_type] ( identifier[IPAddress] ( identifier[peer] )))
keyword[except] identifier[AddrFormatError] :
keyword[if] keyword[not] identifier[HAS_DNSRESOLVER] :
keyword[continue]
identifier[dns_reply] =[]
keyword[try] :
identifier[dns_reply] = identifier[dns] . identifier[resolver] . identifier[query] ( identifier[peer] )
keyword[except] identifier[dns] . identifier[resolver] . identifier[NoAnswer] :
keyword[return] keyword[False]
keyword[for] identifier[dns_ip] keyword[in] identifier[dns_reply] :
identifier[ip_only_peers] . identifier[append] ( identifier[six] . identifier[text_type] ( identifier[dns_ip] ))
identifier[peers] = identifier[ip_only_peers]
keyword[return] keyword[True] | def _check(peers):
"""Checks whether the input is a valid list of peers and transforms domain names into IP Addresses"""
if not isinstance(peers, list):
return False # depends on [control=['if'], data=[]]
for peer in peers:
if not isinstance(peer, six.string_types):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['peer']]
if not HAS_NETADDR: # if does not have this lib installed, will simply try to load what user specified
# if the addresses are not correctly specified, will trow error when loading the actual config
return True # depends on [control=['if'], data=[]]
ip_only_peers = []
for peer in peers:
try:
ip_only_peers.append(six.text_type(IPAddress(peer))) # append the str value # depends on [control=['try'], data=[]]
except AddrFormatError:
# if not a valid IP Address
# will try to see if it is a nameserver and resolve it
if not HAS_DNSRESOLVER:
continue # without the dns resolver cannot populate the list of NTP entities based on their nameserver # depends on [control=['if'], data=[]]
# so we'll move on
dns_reply = []
try:
# try to see if it is a valid NS
dns_reply = dns.resolver.query(peer) # depends on [control=['try'], data=[]]
except dns.resolver.NoAnswer:
# no a valid DNS entry either
return False # depends on [control=['except'], data=[]]
for dns_ip in dns_reply:
ip_only_peers.append(six.text_type(dns_ip)) # depends on [control=['for'], data=['dns_ip']] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['peer']]
peers = ip_only_peers
return True |
def _getArrays(items, attr, defaultValue):
"""Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...}
"""
# One list per requested attribute; filled in item order so all arrays
# stay aligned (equal length, same index -> same item).
arrays = dict([(key, []) for key in attr])
for item in items:
for key in attr:
arrays[key].append(getattr(item, key, defaultValue))
# Snapshot the keys into a list first so the dict values can be replaced
# (list -> numpy array) while iterating.
for key in [_ for _ in viewkeys(arrays)]:
arrays[key] = numpy.array(arrays[key])
return arrays | def function[_getArrays, parameter[items, attr, defaultValue]]:
constant[Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...}
]
variable[arrays] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da18ede4340>]]
for taget[name[item]] in starred[name[items]] begin[:]
for taget[name[key]] in starred[name[attr]] begin[:]
call[call[name[arrays]][name[key]].append, parameter[call[name[getattr], parameter[name[item], name[key], name[defaultValue]]]]]
for taget[name[key]] in starred[<ast.ListComp object at 0x7da18f09ff70>] begin[:]
call[name[arrays]][name[key]] assign[=] call[name[numpy].array, parameter[call[name[arrays]][name[key]]]]
return[name[arrays]] | keyword[def] identifier[_getArrays] ( identifier[items] , identifier[attr] , identifier[defaultValue] ):
literal[string]
identifier[arrays] = identifier[dict] ([( identifier[key] ,[]) keyword[for] identifier[key] keyword[in] identifier[attr] ])
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[for] identifier[key] keyword[in] identifier[attr] :
identifier[arrays] [ identifier[key] ]. identifier[append] ( identifier[getattr] ( identifier[item] , identifier[key] , identifier[defaultValue] ))
keyword[for] identifier[key] keyword[in] [ identifier[_] keyword[for] identifier[_] keyword[in] identifier[viewkeys] ( identifier[arrays] )]:
identifier[arrays] [ identifier[key] ]= identifier[numpy] . identifier[array] ( identifier[arrays] [ identifier[key] ])
keyword[return] identifier[arrays] | def _getArrays(items, attr, defaultValue):
"""Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...}
"""
arrays = dict([(key, []) for key in attr])
for item in items:
for key in attr:
arrays[key].append(getattr(item, key, defaultValue)) # depends on [control=['for'], data=['key']] # depends on [control=['for'], data=['item']]
for key in [_ for _ in viewkeys(arrays)]:
arrays[key] = numpy.array(arrays[key]) # depends on [control=['for'], data=['key']]
return arrays |
def cartesian(lon, lat):
"""
Converts spherical positions in (lon, lat) to cartesian coordiantes [x,y,z].
For the purposes of this library's focus on orientations, this operates in a
*north = vertical* framework. That is, positions around the equator are in the
[x,y] plane, and dipping planes occur with higher latitudes.
This is intuitive for strike and dip representations, as it maps
(strike, dip) to (lon, lat). However, we note that it is distinct from the
traditional stereonet representation, which puts the X-Y plane along the prime
meridian.
"""
# Standard spherical-to-cartesian on the unit sphere:
# x = cos(lat)cos(lon), y = cos(lat)sin(lon), z = sin(lat).
# NOTE(review): N.cos/N.sin operate in radians — callers must pass
# radians, not degrees (TODO confirm callers convert).
return N.array([
N.cos(lat)*N.cos(lon),
N.cos(lat)*N.sin(lon),
N.sin(lat)
]) | def function[cartesian, parameter[lon, lat]]:
constant[
Converts spherical positions in (lon, lat) to cartesian coordiantes [x,y,z].
For the purposes of this library's focus on orientations, this operates in a
*north = vertical* framework. That is, positions around the equator are in the
[x,y] plane, and dipping planes occur with higher latitudes.
This is intuitive for strike and dip representations, as it maps
(strike, dip) to (lon, lat). However, we note that it is distinct from the
traditional stereonet representation, which puts the X-Y plane along the prime
meridian.
]
return[call[name[N].array, parameter[list[[<ast.BinOp object at 0x7da1b190e800>, <ast.BinOp object at 0x7da1b190f8b0>, <ast.Call object at 0x7da1b190dd80>]]]]] | keyword[def] identifier[cartesian] ( identifier[lon] , identifier[lat] ):
literal[string]
keyword[return] identifier[N] . identifier[array] ([
identifier[N] . identifier[cos] ( identifier[lat] )* identifier[N] . identifier[cos] ( identifier[lon] ),
identifier[N] . identifier[cos] ( identifier[lat] )* identifier[N] . identifier[sin] ( identifier[lon] ),
identifier[N] . identifier[sin] ( identifier[lat] )
]) | def cartesian(lon, lat):
"""
Converts spherical positions in (lon, lat) to cartesian coordiantes [x,y,z].
For the purposes of this library's focus on orientations, this operates in a
*north = vertical* framework. That is, positions around the equator are in the
[x,y] plane, and dipping planes occur with higher latitudes.
This is intuitive for strike and dip representations, as it maps
(strike, dip) to (lon, lat). However, we note that it is distinct from the
traditional stereonet representation, which puts the X-Y plane along the prime
meridian.
"""
return N.array([N.cos(lat) * N.cos(lon), N.cos(lat) * N.sin(lon), N.sin(lat)]) |
def list_(narrow=None,
all_versions=False,
pre_versions=False,
source=None,
local_only=False,
exact=False):
'''
Instructs Chocolatey to pull a vague package list from the repository.
Args:
narrow (str):
Term used to narrow down results. Searches against
name/description/tag. Default is None.
all_versions (bool):
Display all available package versions in results. Default is False.
pre_versions (bool):
Display pre-release packages in results. Default is False.
source (str):
Chocolatey repository (directory, share or remote URL feed) the
package comes from. Defaults to the official Chocolatey feed if
None is passed. Default is None.
local_only (bool):
Display packages only installed locally. Default is False.
exact (bool):
Display only packages that match ``narrow`` exactly. Default is
False.
.. versionadded:: 2017.7.0
Returns:
dict: A dictionary of results.
CLI Example:
.. code-block:: bash
salt '*' chocolatey.list <narrow>
salt '*' chocolatey.list <narrow> all_versions=True
'''
# Build the ``choco list`` command line, appending one flag per option.
choc_path = _find_chocolatey(__context__, __salt__)
cmd = [choc_path, 'list']
if narrow:
cmd.append(narrow)
if salt.utils.data.is_true(all_versions):
cmd.append('--allversions')
if salt.utils.data.is_true(pre_versions):
cmd.append('--prerelease')
if source:
cmd.extend(['--source', source])
if local_only:
cmd.append('--local-only')
if exact:
cmd.append('--exact')
# This is needed to parse the output correctly
# (--limit-output makes choco print bare "name|version" lines, which is
# exactly what pkg_re below matches).
cmd.append('--limit-output')
result = __salt__['cmd.run_all'](cmd, python_shell=False)
if result['retcode'] != 0:
raise CommandExecutionError(
'Running chocolatey failed: {0}'.format(result['stdout'])
)
# Map package name -> list of versions (multiple with --allversions).
ret = {}
pkg_re = re.compile(r'(\S+)\|(\S+)')
for line in result['stdout'].split('\n'):
if line.startswith("No packages"):
return ret
for name, ver in pkg_re.findall(line):
# NOTE(review): substring test skips *any* package whose name
# contains 'chocolatey' (e.g. extensions), not just the tool
# itself — confirm that is intended.
if 'chocolatey' in name:
continue
if name not in ret:
ret[name] = []
ret[name].append(ver)
return ret | def function[list_, parameter[narrow, all_versions, pre_versions, source, local_only, exact]]:
constant[
Instructs Chocolatey to pull a vague package list from the repository.
Args:
narrow (str):
Term used to narrow down results. Searches against
name/description/tag. Default is None.
all_versions (bool):
Display all available package versions in results. Default is False.
pre_versions (bool):
Display pre-release packages in results. Default is False.
source (str):
Chocolatey repository (directory, share or remote URL feed) the
package comes from. Defaults to the official Chocolatey feed if
None is passed. Default is None.
local_only (bool):
Display packages only installed locally. Default is False.
exact (bool):
Display only packages that match ``narrow`` exactly. Default is
False.
.. versionadded:: 2017.7.0
Returns:
dict: A dictionary of results.
CLI Example:
.. code-block:: bash
salt '*' chocolatey.list <narrow>
salt '*' chocolatey.list <narrow> all_versions=True
]
variable[choc_path] assign[=] call[name[_find_chocolatey], parameter[name[__context__], name[__salt__]]]
variable[cmd] assign[=] list[[<ast.Name object at 0x7da1b20b8940>, <ast.Constant object at 0x7da1b20b9c30>]]
if name[narrow] begin[:]
call[name[cmd].append, parameter[name[narrow]]]
if call[name[salt].utils.data.is_true, parameter[name[all_versions]]] begin[:]
call[name[cmd].append, parameter[constant[--allversions]]]
if call[name[salt].utils.data.is_true, parameter[name[pre_versions]]] begin[:]
call[name[cmd].append, parameter[constant[--prerelease]]]
if name[source] begin[:]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da207f9a500>, <ast.Name object at 0x7da207f98a90>]]]]
if name[local_only] begin[:]
call[name[cmd].append, parameter[constant[--local-only]]]
if name[exact] begin[:]
call[name[cmd].append, parameter[constant[--exact]]]
call[name[cmd].append, parameter[constant[--limit-output]]]
variable[result] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
if compare[call[name[result]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da207f99b70>
variable[ret] assign[=] dictionary[[], []]
variable[pkg_re] assign[=] call[name[re].compile, parameter[constant[(\S+)\|(\S+)]]]
for taget[name[line]] in starred[call[call[name[result]][constant[stdout]].split, parameter[constant[
]]]] begin[:]
if call[name[line].startswith, parameter[constant[No packages]]] begin[:]
return[name[ret]]
for taget[tuple[[<ast.Name object at 0x7da207f98100>, <ast.Name object at 0x7da207f98ca0>]]] in starred[call[name[pkg_re].findall, parameter[name[line]]]] begin[:]
if compare[constant[chocolatey] in name[name]] begin[:]
continue
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[ret]] begin[:]
call[name[ret]][name[name]] assign[=] list[[]]
call[call[name[ret]][name[name]].append, parameter[name[ver]]]
return[name[ret]] | keyword[def] identifier[list_] ( identifier[narrow] = keyword[None] ,
identifier[all_versions] = keyword[False] ,
identifier[pre_versions] = keyword[False] ,
identifier[source] = keyword[None] ,
identifier[local_only] = keyword[False] ,
identifier[exact] = keyword[False] ):
literal[string]
identifier[choc_path] = identifier[_find_chocolatey] ( identifier[__context__] , identifier[__salt__] )
identifier[cmd] =[ identifier[choc_path] , literal[string] ]
keyword[if] identifier[narrow] :
identifier[cmd] . identifier[append] ( identifier[narrow] )
keyword[if] identifier[salt] . identifier[utils] . identifier[data] . identifier[is_true] ( identifier[all_versions] ):
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[salt] . identifier[utils] . identifier[data] . identifier[is_true] ( identifier[pre_versions] ):
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[source] :
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[source] ])
keyword[if] identifier[local_only] :
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[exact] :
identifier[cmd] . identifier[append] ( literal[string] )
identifier[cmd] . identifier[append] ( literal[string] )
identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] )
keyword[if] identifier[result] [ literal[string] ]!= literal[int] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[result] [ literal[string] ])
)
identifier[ret] ={}
identifier[pkg_re] = identifier[re] . identifier[compile] ( literal[string] )
keyword[for] identifier[line] keyword[in] identifier[result] [ literal[string] ]. identifier[split] ( literal[string] ):
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[ret]
keyword[for] identifier[name] , identifier[ver] keyword[in] identifier[pkg_re] . identifier[findall] ( identifier[line] ):
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[continue]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[ret] :
identifier[ret] [ identifier[name] ]=[]
identifier[ret] [ identifier[name] ]. identifier[append] ( identifier[ver] )
keyword[return] identifier[ret] | def list_(narrow=None, all_versions=False, pre_versions=False, source=None, local_only=False, exact=False):
"""
Instructs Chocolatey to pull a vague package list from the repository.
Args:
narrow (str):
Term used to narrow down results. Searches against
name/description/tag. Default is None.
all_versions (bool):
Display all available package versions in results. Default is False.
pre_versions (bool):
Display pre-release packages in results. Default is False.
source (str):
Chocolatey repository (directory, share or remote URL feed) the
package comes from. Defaults to the official Chocolatey feed if
None is passed. Default is None.
local_only (bool):
Display packages only installed locally. Default is False.
exact (bool):
Display only packages that match ``narrow`` exactly. Default is
False.
.. versionadded:: 2017.7.0
Returns:
dict: A dictionary of results.
CLI Example:
.. code-block:: bash
salt '*' chocolatey.list <narrow>
salt '*' chocolatey.list <narrow> all_versions=True
"""
choc_path = _find_chocolatey(__context__, __salt__)
cmd = [choc_path, 'list']
if narrow:
cmd.append(narrow) # depends on [control=['if'], data=[]]
if salt.utils.data.is_true(all_versions):
cmd.append('--allversions') # depends on [control=['if'], data=[]]
if salt.utils.data.is_true(pre_versions):
cmd.append('--prerelease') # depends on [control=['if'], data=[]]
if source:
cmd.extend(['--source', source]) # depends on [control=['if'], data=[]]
if local_only:
cmd.append('--local-only') # depends on [control=['if'], data=[]]
if exact:
cmd.append('--exact') # depends on [control=['if'], data=[]]
# This is needed to parse the output correctly
cmd.append('--limit-output')
result = __salt__['cmd.run_all'](cmd, python_shell=False)
if result['retcode'] != 0:
raise CommandExecutionError('Running chocolatey failed: {0}'.format(result['stdout'])) # depends on [control=['if'], data=[]]
ret = {}
pkg_re = re.compile('(\\S+)\\|(\\S+)')
for line in result['stdout'].split('\n'):
if line.startswith('No packages'):
return ret # depends on [control=['if'], data=[]]
for (name, ver) in pkg_re.findall(line):
if 'chocolatey' in name:
continue # depends on [control=['if'], data=[]]
if name not in ret:
ret[name] = [] # depends on [control=['if'], data=['name', 'ret']]
ret[name].append(ver) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['line']]
return ret |
def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
tcp=False, source=None, raise_on_no_answer=True, resolver=None):
"""Query nameservers to find the answer to the question.
This is a convenience function that uses the default resolver
object to make the query.
@see: L{dns.resolver.Resolver.query} for more information on the
parameters."""
# Fall back to the shared module-level resolver when none is supplied,
# then delegate the actual lookup to Resolver.query unchanged.
if resolver is None:
resolver = get_default_resolver()
return resolver.query(qname, rdtype, rdclass, tcp, source, raise_on_no_answer) | def function[query, parameter[qname, rdtype, rdclass, tcp, source, raise_on_no_answer, resolver]]:
constant[Query nameservers to find the answer to the question.
This is a convenience function that uses the default resolver
object to make the query.
@see: L{dns.resolver.Resolver.query} for more information on the
parameters.]
if compare[name[resolver] is constant[None]] begin[:]
variable[resolver] assign[=] call[name[get_default_resolver], parameter[]]
return[call[name[resolver].query, parameter[name[qname], name[rdtype], name[rdclass], name[tcp], name[source], name[raise_on_no_answer]]]] | keyword[def] identifier[query] ( identifier[qname] , identifier[rdtype] = identifier[dns] . identifier[rdatatype] . identifier[A] , identifier[rdclass] = identifier[dns] . identifier[rdataclass] . identifier[IN] ,
identifier[tcp] = keyword[False] , identifier[source] = keyword[None] , identifier[raise_on_no_answer] = keyword[True] , identifier[resolver] = keyword[None] ):
literal[string]
keyword[if] identifier[resolver] keyword[is] keyword[None] :
identifier[resolver] = identifier[get_default_resolver] ()
keyword[return] identifier[resolver] . identifier[query] ( identifier[qname] , identifier[rdtype] , identifier[rdclass] , identifier[tcp] , identifier[source] , identifier[raise_on_no_answer] ) | def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, tcp=False, source=None, raise_on_no_answer=True, resolver=None):
"""Query nameservers to find the answer to the question.
This is a convenience function that uses the default resolver
object to make the query.
@see: L{dns.resolver.Resolver.query} for more information on the
parameters."""
if resolver is None:
resolver = get_default_resolver() # depends on [control=['if'], data=['resolver']]
return resolver.query(qname, rdtype, rdclass, tcp, source, raise_on_no_answer) |
def add(self, client_id, email_address, name, access_level, password):
    """Add a person to a client.

    Password is optional; when it is not supplied, an invitation email
    will be sent to the person instead.
    """
    payload = json.dumps({
        "EmailAddress": email_address,
        "Name": name,
        "AccessLevel": access_level,
        "Password": password,
    })
    response = self._post("/clients/%s/people.json" % client_id, payload)
    return json_to_py(response)
constant[Adds a person to a client. Password is optional and if not supplied, an invitation will be emailed to the person]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da18f723370>, <ast.Constant object at 0x7da18f722a10>, <ast.Constant object at 0x7da18f720be0>, <ast.Constant object at 0x7da18f722260>], [<ast.Name object at 0x7da18f720a00>, <ast.Name object at 0x7da18f722dd0>, <ast.Name object at 0x7da18f722530>, <ast.Name object at 0x7da18f7209d0>]]
variable[response] assign[=] call[name[self]._post, parameter[binary_operation[constant[/clients/%s/people.json] <ast.Mod object at 0x7da2590d6920> name[client_id]], call[name[json].dumps, parameter[name[body]]]]]
return[call[name[json_to_py], parameter[name[response]]]] | keyword[def] identifier[add] ( identifier[self] , identifier[client_id] , identifier[email_address] , identifier[name] , identifier[access_level] , identifier[password] ):
literal[string]
identifier[body] ={
literal[string] : identifier[email_address] ,
literal[string] : identifier[name] ,
literal[string] : identifier[access_level] ,
literal[string] : identifier[password] }
identifier[response] = identifier[self] . identifier[_post] ( literal[string] %
identifier[client_id] , identifier[json] . identifier[dumps] ( identifier[body] ))
keyword[return] identifier[json_to_py] ( identifier[response] ) | def add(self, client_id, email_address, name, access_level, password):
"""Adds a person to a client. Password is optional and if not supplied, an invitation will be emailed to the person"""
body = {'EmailAddress': email_address, 'Name': name, 'AccessLevel': access_level, 'Password': password}
response = self._post('/clients/%s/people.json' % client_id, json.dumps(body))
return json_to_py(response) |
def from_string(cls, s, name=None, modules=None, active=None):
    """Instantiate a REPP from a string.

    Args:
        name (str, optional): the name of the REPP module
        modules (dict, optional): a mapping from identifiers to
            REPP modules
        active (iterable, optional): an iterable of default module
            activations
    """
    repp = cls(name=name, modules=modules, active=active)
    # Parse the definition line by line into the fresh instance.
    _parse_repp(s.splitlines(), repp, None)
    return repp
constant[
Instantiate a REPP from a string.
Args:
name (str, optional): the name of the REPP module
modules (dict, optional): a mapping from identifiers to
REPP modules
active (iterable, optional): an iterable of default module
activations
]
variable[r] assign[=] call[name[cls], parameter[]]
call[name[_parse_repp], parameter[call[name[s].splitlines, parameter[]], name[r], constant[None]]]
return[name[r]] | keyword[def] identifier[from_string] ( identifier[cls] , identifier[s] , identifier[name] = keyword[None] , identifier[modules] = keyword[None] , identifier[active] = keyword[None] ):
literal[string]
identifier[r] = identifier[cls] ( identifier[name] = identifier[name] , identifier[modules] = identifier[modules] , identifier[active] = identifier[active] )
identifier[_parse_repp] ( identifier[s] . identifier[splitlines] (), identifier[r] , keyword[None] )
keyword[return] identifier[r] | def from_string(cls, s, name=None, modules=None, active=None):
"""
Instantiate a REPP from a string.
Args:
name (str, optional): the name of the REPP module
modules (dict, optional): a mapping from identifiers to
REPP modules
active (iterable, optional): an iterable of default module
activations
"""
r = cls(name=name, modules=modules, active=active)
_parse_repp(s.splitlines(), r, None)
return r |
def shutdown(self):
    """
    Shut down the entire application.

    Closes all windows, hides the notifier, stops the background
    service and monitor, quits the event loop, and finally removes
    the lock file so a new instance can start.
    """
    logging.info("Shutting down")
    self.closeAllWindows()
    self.notifier.hide()
    self.service.shutdown()
    self.monitor.stop()
    self.quit()
    # TODO: maybe use atexit to remove the lock/pid file?
    try:
        os.remove(common.LOCK_FILE)
    except FileNotFoundError:
        # The lock file is already gone; don't let cleanup abort shutdown.
        logging.debug("Lock file already removed")
    logging.debug("All shutdown tasks complete... quitting")
constant[
Shut down the entire application.
]
call[name[logging].info, parameter[constant[Shutting down]]]
call[name[self].closeAllWindows, parameter[]]
call[name[self].notifier.hide, parameter[]]
call[name[self].service.shutdown, parameter[]]
call[name[self].monitor.stop, parameter[]]
call[name[self].quit, parameter[]]
call[name[os].remove, parameter[name[common].LOCK_FILE]]
call[name[logging].debug, parameter[constant[All shutdown tasks complete... quitting]]] | keyword[def] identifier[shutdown] ( identifier[self] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[closeAllWindows] ()
identifier[self] . identifier[notifier] . identifier[hide] ()
identifier[self] . identifier[service] . identifier[shutdown] ()
identifier[self] . identifier[monitor] . identifier[stop] ()
identifier[self] . identifier[quit] ()
identifier[os] . identifier[remove] ( identifier[common] . identifier[LOCK_FILE] )
identifier[logging] . identifier[debug] ( literal[string] ) | def shutdown(self):
"""
Shut down the entire application.
"""
logging.info('Shutting down')
self.closeAllWindows()
self.notifier.hide()
self.service.shutdown()
self.monitor.stop()
self.quit()
os.remove(common.LOCK_FILE) # TODO: maybe use atexit to remove the lock/pid file?
logging.debug('All shutdown tasks complete... quitting') |
def apply_scale(self, scale):
    """
    Scale the current path in-place.

    Parameters
    -----------
    scale : float or (3,) float
      Scale to be applied to mesh
    """
    # Build a homogeneous transform one larger than the vertex
    # dimension: scale on the diagonal, 1 in the homogeneous slot.
    dim = self.vertices.shape[1]
    transform = np.eye(dim + 1)
    transform[:dim, :dim] *= scale
    self.apply_transform(transform)
constant[
Apply a transformation matrix to the current path in- place
Parameters
-----------
scale : float or (3,) float
Scale to be applied to mesh
]
variable[dimension] assign[=] call[name[self].vertices.shape][constant[1]]
variable[matrix] assign[=] call[name[np].eye, parameter[binary_operation[name[dimension] + constant[1]]]]
<ast.AugAssign object at 0x7da2044c3c10>
call[name[self].apply_transform, parameter[name[matrix]]] | keyword[def] identifier[apply_scale] ( identifier[self] , identifier[scale] ):
literal[string]
identifier[dimension] = identifier[self] . identifier[vertices] . identifier[shape] [ literal[int] ]
identifier[matrix] = identifier[np] . identifier[eye] ( identifier[dimension] + literal[int] )
identifier[matrix] [: identifier[dimension] ,: identifier[dimension] ]*= identifier[scale]
identifier[self] . identifier[apply_transform] ( identifier[matrix] ) | def apply_scale(self, scale):
"""
Apply a transformation matrix to the current path in- place
Parameters
-----------
scale : float or (3,) float
Scale to be applied to mesh
"""
dimension = self.vertices.shape[1]
matrix = np.eye(dimension + 1)
matrix[:dimension, :dimension] *= scale
self.apply_transform(matrix) |
def profile_list(request, page=1, template_name='userena/profile_list.html',
                 paginate_by=50, extra_context=None, **kwargs):  # pragma: no cover
    """
    Returns a list of all profiles that are public.

    It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
    to ``True`` in your settings.

    :param page:
        Integer of the active page used for pagination. Defaults to the first
        page.

    :param template_name:
        String defining the name of the template that is used to render the
        list of all users. Defaults to ``userena/profile_list.html``.

    :param paginate_by:
        Integer defining the amount of displayed profiles per page. Defaults
        to 50 profiles per page.

    :param extra_context:
        Dictionary of variables that are passed on to the ``template_name``
        template.

    **Context**

    ``profile_list``
        A list of profiles.

    ``is_paginated``
        A boolean representing whether the results are paginated.

    If the result is paginated, the context also contains ``paginator``
    (an instance of ``django.core.paginator.Paginator``) and ``page_obj``
    (an instance of ``django.core.paginator.Page``).
    """
    warnings.warn("views.profile_list is deprecated. Use ProfileListView instead",
                  DeprecationWarning, stacklevel=2)
    try:
        page = int(request.GET.get('page', None))
    except (TypeError, ValueError):
        # Query string missing or not an integer: keep the `page`
        # argument that was passed in.
        pass

    if (userena_settings.USERENA_DISABLE_PROFILE_LIST
            and not request.user.is_staff):
        raise Http404

    queryset = get_profile_model().objects.get_visible_profiles(request.user)
    extra_context = extra_context or dict()
    view = ProfileListView.as_view(queryset=queryset,
                                   paginate_by=paginate_by,
                                   page=page,
                                   template_name=template_name,
                                   extra_context=extra_context,
                                   **kwargs)
    return view(request)
constant[
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
If the result is paginated. It will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
]
call[name[warnings].warn, parameter[constant[views.profile_list is deprecated. Use ProfileListView instead], name[DeprecationWarning]]]
<ast.Try object at 0x7da2047e8fd0>
if <ast.BoolOp object at 0x7da2047e9960> begin[:]
<ast.Raise object at 0x7da2047e8730>
variable[profile_model] assign[=] call[name[get_profile_model], parameter[]]
variable[queryset] assign[=] call[name[profile_model].objects.get_visible_profiles, parameter[name[request].user]]
if <ast.UnaryOp object at 0x7da2047e89a0> begin[:]
variable[extra_context] assign[=] call[name[dict], parameter[]]
return[call[call[name[ProfileListView].as_view, parameter[]], parameter[name[request]]]] | keyword[def] identifier[profile_list] ( identifier[request] , identifier[page] = literal[int] , identifier[template_name] = literal[string] ,
identifier[paginate_by] = literal[int] , identifier[extra_context] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string] , identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] )
keyword[try] :
identifier[page] = identifier[int] ( identifier[request] . identifier[GET] . identifier[get] ( literal[string] , keyword[None] ))
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
identifier[page] = identifier[page]
keyword[if] identifier[userena_settings] . identifier[USERENA_DISABLE_PROFILE_LIST] keyword[and] keyword[not] identifier[request] . identifier[user] . identifier[is_staff] :
keyword[raise] identifier[Http404]
identifier[profile_model] = identifier[get_profile_model] ()
identifier[queryset] = identifier[profile_model] . identifier[objects] . identifier[get_visible_profiles] ( identifier[request] . identifier[user] )
keyword[if] keyword[not] identifier[extra_context] : identifier[extra_context] = identifier[dict] ()
keyword[return] identifier[ProfileListView] . identifier[as_view] ( identifier[queryset] = identifier[queryset] ,
identifier[paginate_by] = identifier[paginate_by] ,
identifier[page] = identifier[page] ,
identifier[template_name] = identifier[template_name] ,
identifier[extra_context] = identifier[extra_context] ,
** identifier[kwargs] )( identifier[request] ) | def profile_list(request, page=1, template_name='userena/profile_list.html', paginate_by=50, extra_context=None, **kwargs): # pragma: no cover
"\n Returns a list of all profiles that are public.\n\n It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``\n to ``True`` in your settings.\n\n :param page:\n Integer of the active page used for pagination. Defaults to the first\n page.\n\n :param template_name:\n String defining the name of the template that is used to render the\n list of all users. Defaults to ``userena/list.html``.\n\n :param paginate_by:\n Integer defining the amount of displayed profiles per page. Defaults to\n 50 profiles per page.\n\n :param extra_context:\n Dictionary of variables that are passed on to the ``template_name``\n template.\n\n **Context**\n\n ``profile_list``\n A list of profiles.\n\n ``is_paginated``\n A boolean representing whether the results are paginated.\n\n If the result is paginated. It will also contain the following variables.\n\n ``paginator``\n An instance of ``django.core.paginator.Paginator``.\n\n ``page_obj``\n An instance of ``django.core.paginator.Page``.\n\n "
warnings.warn('views.profile_list is deprecated. Use ProfileListView instead', DeprecationWarning, stacklevel=2)
try:
page = int(request.GET.get('page', None)) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
page = page # depends on [control=['except'], data=[]]
if userena_settings.USERENA_DISABLE_PROFILE_LIST and (not request.user.is_staff):
raise Http404 # depends on [control=['if'], data=[]]
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(request.user)
if not extra_context:
extra_context = dict() # depends on [control=['if'], data=[]]
return ProfileListView.as_view(queryset=queryset, paginate_by=paginate_by, page=page, template_name=template_name, extra_context=extra_context, **kwargs)(request) |
def _init_regs_random(self):
"""Initialize register with random values.
"""
# Generate random values and make sure they are all different.
values = set()
while len(values) != len(self._arch_regs_parent):
values.add(random.randint(0, 2**self._arch_info.operand_size - 1))
values = list(values)
# Assign random values to registers.
regs = {}
for idx, reg in enumerate(self._arch_regs_parent):
regs[reg] = values[idx] & (2**self._arch_regs_size[reg] - 1)
return regs | def function[_init_regs_random, parameter[self]]:
constant[Initialize register with random values.
]
variable[values] assign[=] call[name[set], parameter[]]
while compare[call[name[len], parameter[name[values]]] not_equal[!=] call[name[len], parameter[name[self]._arch_regs_parent]]] begin[:]
call[name[values].add, parameter[call[name[random].randint, parameter[constant[0], binary_operation[binary_operation[constant[2] ** name[self]._arch_info.operand_size] - constant[1]]]]]]
variable[values] assign[=] call[name[list], parameter[name[values]]]
variable[regs] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da204346140>, <ast.Name object at 0x7da204346fe0>]]] in starred[call[name[enumerate], parameter[name[self]._arch_regs_parent]]] begin[:]
call[name[regs]][name[reg]] assign[=] binary_operation[call[name[values]][name[idx]] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[binary_operation[constant[2] ** call[name[self]._arch_regs_size][name[reg]]] - constant[1]]]
return[name[regs]] | keyword[def] identifier[_init_regs_random] ( identifier[self] ):
literal[string]
identifier[values] = identifier[set] ()
keyword[while] identifier[len] ( identifier[values] )!= identifier[len] ( identifier[self] . identifier[_arch_regs_parent] ):
identifier[values] . identifier[add] ( identifier[random] . identifier[randint] ( literal[int] , literal[int] ** identifier[self] . identifier[_arch_info] . identifier[operand_size] - literal[int] ))
identifier[values] = identifier[list] ( identifier[values] )
identifier[regs] ={}
keyword[for] identifier[idx] , identifier[reg] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_arch_regs_parent] ):
identifier[regs] [ identifier[reg] ]= identifier[values] [ identifier[idx] ]&( literal[int] ** identifier[self] . identifier[_arch_regs_size] [ identifier[reg] ]- literal[int] )
keyword[return] identifier[regs] | def _init_regs_random(self):
"""Initialize register with random values.
"""
# Generate random values and make sure they are all different.
values = set()
while len(values) != len(self._arch_regs_parent):
values.add(random.randint(0, 2 ** self._arch_info.operand_size - 1)) # depends on [control=['while'], data=[]]
values = list(values)
# Assign random values to registers.
regs = {}
for (idx, reg) in enumerate(self._arch_regs_parent):
regs[reg] = values[idx] & 2 ** self._arch_regs_size[reg] - 1 # depends on [control=['for'], data=[]]
return regs |
def setageing(self, time):
    """ Set bridge ageing time. """
    # Delegate to brctl; _runshell raises with the message on failure.
    cmd = [brctlexe, 'setageing', self.name, str(time)]
    errmsg = "Could not set ageing time in %s." % self.name
    _runshell(cmd, errmsg)
constant[ Set bridge ageing time. ]
call[name[_runshell], parameter[list[[<ast.Name object at 0x7da2041db0d0>, <ast.Constant object at 0x7da2041daa70>, <ast.Attribute object at 0x7da2041d9f90>, <ast.Call object at 0x7da2041d8b50>]], binary_operation[constant[Could not set ageing time in %s.] <ast.Mod object at 0x7da2590d6920> name[self].name]]] | keyword[def] identifier[setageing] ( identifier[self] , identifier[time] ):
literal[string]
identifier[_runshell] ([ identifier[brctlexe] , literal[string] , identifier[self] . identifier[name] , identifier[str] ( identifier[time] )],
literal[string] % identifier[self] . identifier[name] ) | def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)], 'Could not set ageing time in %s.' % self.name) |
def get_assessment_bank_assignment_session(self, proxy):
    """Gets the ``OsidSession`` associated with the assessment bank assignment service.

    arg: proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.AssessmentBankAssignmentSession) - an
            ``AssessmentBankAssignmentSession``
    raise: NullArgument - ``proxy`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented -
            ``supports_assessment_bank_assignment()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_bank_assignment()`` is ``true``.*

    """
    if self.supports_assessment_bank_assignment():
        # pylint: disable=no-member
        return sessions.AssessmentBankAssignmentSession(proxy=proxy,
                                                        runtime=self._runtime)
    raise errors.Unimplemented()
constant[Gets the ``OsidSession`` associated with the assessment bank assignment service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentBankAssignmentSession) - an
``AssessmentBankAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_assessment_bank_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_bank_assignment()`` is ``true``.*
]
if <ast.UnaryOp object at 0x7da20c795de0> begin[:]
<ast.Raise object at 0x7da20c794460>
return[call[name[sessions].AssessmentBankAssignmentSession, parameter[]]] | keyword[def] identifier[get_assessment_bank_assignment_session] ( identifier[self] , identifier[proxy] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[supports_assessment_bank_assignment] ():
keyword[raise] identifier[errors] . identifier[Unimplemented] ()
keyword[return] identifier[sessions] . identifier[AssessmentBankAssignmentSession] ( identifier[proxy] = identifier[proxy] , identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_assessment_bank_assignment_session(self, proxy):
"""Gets the ``OsidSession`` associated with the assessment bank assignment service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentBankAssignmentSession) - an
``AssessmentBankAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_assessment_bank_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_bank_assignment()`` is ``true``.*
"""
if not self.supports_assessment_bank_assignment():
raise errors.Unimplemented() # depends on [control=['if'], data=[]]
# pylint: disable=no-member
return sessions.AssessmentBankAssignmentSession(proxy=proxy, runtime=self._runtime) |
def has_class(self, classname):
    """Test if an element has a specific classname

    @type classname: str
    @param classname: Classname to test for; cannot contain spaces
    @rtype: bool
    @return: True if classname exists; false otherwise
    """
    def element_has_class():
        """Wrapper to test if element has a class"""
        # Escape the classname so names containing regex metacharacters
        # (e.g. "col-md-6", "a.b") are matched literally, not as patterns.
        pattern = re.compile(r'(\s|^){classname}(\s|$)'.format(
            classname=re.escape(classname)))
        # get_attribute returns None when the element has no class
        # attribute at all; treat that the same as an empty class list.
        classes = self.element.get_attribute('class') or ''
        return pattern.search(classes) is not None

    return self.execute_and_handle_webelement_exceptions(
        element_has_class,
        'check for element class "{}"'.format(classname)
    )
constant[Test if an element has a specific classname
@type classname: str
@param classname: Classname to test for; cannot contain spaces
@rtype: bool
@return: True if classname exists; false otherwise
]
def function[element_has_class, parameter[]]:
constant[Wrapper to test if element has a class]
variable[pattern] assign[=] call[name[re].compile, parameter[call[constant[(\s|^){classname}(\s|$)].format, parameter[]]]]
variable[classes] assign[=] call[name[self].element.get_attribute, parameter[constant[class]]]
variable[matches] assign[=] call[name[re].search, parameter[name[pattern], name[classes]]]
if compare[name[matches] is_not constant[None]] begin[:]
return[constant[True]]
return[constant[False]]
return[call[name[self].execute_and_handle_webelement_exceptions, parameter[name[element_has_class], call[constant[check for element class "{}"].format, parameter[name[classname]]]]]] | keyword[def] identifier[has_class] ( identifier[self] , identifier[classname] ):
literal[string]
keyword[def] identifier[element_has_class] ():
literal[string]
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] . identifier[format] ( identifier[classname] = identifier[classname] ))
identifier[classes] = identifier[self] . identifier[element] . identifier[get_attribute] ( literal[string] )
identifier[matches] = identifier[re] . identifier[search] ( identifier[pattern] , identifier[classes] )
keyword[if] identifier[matches] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[return] identifier[self] . identifier[execute_and_handle_webelement_exceptions] (
identifier[element_has_class] ,
literal[string] . identifier[format] ( identifier[classname] )
) | def has_class(self, classname):
"""Test if an element has a specific classname
@type classname: str
@param classname: Classname to test for; cannot contain spaces
@rtype: bool
@return: True if classname exists; false otherwise
"""
def element_has_class():
"""Wrapper to test if element has a class"""
pattern = re.compile('(\\s|^){classname}(\\s|$)'.format(classname=classname))
classes = self.element.get_attribute('class')
matches = re.search(pattern, classes)
if matches is not None:
return True # depends on [control=['if'], data=[]]
return False
return self.execute_and_handle_webelement_exceptions(element_has_class, 'check for element class "{}"'.format(classname)) |
def getUnitCost(self, CorpNum, MsgType, UserID=None):
    """
    Look up the per-message unit cost for a KakaoTalk message type.

    :param CorpNum: Popbill member's business registration number
    :param MsgType: KakaoTalk message type
    :param UserID: Popbill member user ID
    :return: unit cost as a float
    """
    if MsgType is None or MsgType == "":
        raise PopbillException(-99999999, "전송유형이 입력되지 않았습니다.")

    response = self._httpget("/KakaoTalk/UnitCost?Type=" + MsgType, CorpNum)
    return float(response.unitCost)
constant[
전송단가 확인
:param CorpNum: 팝빌회원 사업자번호
:param MsgType: 카카오톡 유형
:param UserID: 팝빌 회원아이디
:return: unitCost
]
if <ast.BoolOp object at 0x7da1b0f21600> begin[:]
<ast.Raise object at 0x7da1b0f217b0>
variable[result] assign[=] call[name[self]._httpget, parameter[binary_operation[constant[/KakaoTalk/UnitCost?Type=] + name[MsgType]], name[CorpNum]]]
return[call[name[float], parameter[name[result].unitCost]]] | keyword[def] identifier[getUnitCost] ( identifier[self] , identifier[CorpNum] , identifier[MsgType] , identifier[UserID] = keyword[None] ):
literal[string]
keyword[if] identifier[MsgType] keyword[is] keyword[None] keyword[or] identifier[MsgType] == literal[string] :
keyword[raise] identifier[PopbillException] (- literal[int] , literal[string] )
identifier[result] = identifier[self] . identifier[_httpget] ( literal[string] + identifier[MsgType] , identifier[CorpNum] )
keyword[return] identifier[float] ( identifier[result] . identifier[unitCost] ) | def getUnitCost(self, CorpNum, MsgType, UserID=None):
"""
전송단가 확인
:param CorpNum: 팝빌회원 사업자번호
:param MsgType: 카카오톡 유형
:param UserID: 팝빌 회원아이디
:return: unitCost
"""
if MsgType is None or MsgType == '':
raise PopbillException(-99999999, '전송유형이 입력되지 않았습니다.') # depends on [control=['if'], data=[]]
result = self._httpget('/KakaoTalk/UnitCost?Type=' + MsgType, CorpNum)
return float(result.unitCost) |
def name(self) -> str:
    """Return the name of this pvariable expression.

    Returns:
        Name of pvariable.

    Raises:
        ValueError: If this expression is not a pvariable.
    """
    if self.is_pvariable_expression():
        return self._pvar_to_name(self.args)
    raise ValueError('Expression is not a pvariable.')
constant[Returns the name of pvariable.
Returns:
Name of pvariable.
Raises:
ValueError: If not a pvariable expression.
]
if <ast.UnaryOp object at 0x7da1b095eb30> begin[:]
<ast.Raise object at 0x7da1b095dae0>
return[call[name[self]._pvar_to_name, parameter[name[self].args]]] | keyword[def] identifier[name] ( identifier[self] )-> identifier[str] :
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_pvariable_expression] ():
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[_pvar_to_name] ( identifier[self] . identifier[args] ) | def name(self) -> str:
"""Returns the name of pvariable.
Returns:
Name of pvariable.
Raises:
ValueError: If not a pvariable expression.
"""
if not self.is_pvariable_expression():
raise ValueError('Expression is not a pvariable.') # depends on [control=['if'], data=[]]
return self._pvar_to_name(self.args) |
def _ProcessEntries(self, fd):
    """Extract entries from the xinetd config files."""
    parser = config_file.KeyValueParser(kv_sep="{", term="}", sep=None)
    raw = utils.ReadFileBytesAsUnicode(fd)
    for entry in parser.ParseEntries(raw):
        for section, configs in iteritems(entry):
            # The parser wraps each config in a list; there is at most one.
            body = configs[0].strip() if configs else ""
            self._ParseSection(section, body)
constant[Extract entries from the xinetd config files.]
variable[p] assign[=] call[name[config_file].KeyValueParser, parameter[]]
variable[data] assign[=] call[name[utils].ReadFileBytesAsUnicode, parameter[name[fd]]]
variable[entries] assign[=] call[name[p].ParseEntries, parameter[name[data]]]
for taget[name[entry]] in starred[name[entries]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1b58d30>, <ast.Name object at 0x7da1b1b59f30>]]] in starred[call[name[iteritems], parameter[name[entry]]]] begin[:]
if name[cfg] begin[:]
variable[cfg] assign[=] call[call[name[cfg]][constant[0]].strip, parameter[]]
call[name[self]._ParseSection, parameter[name[section], name[cfg]]] | keyword[def] identifier[_ProcessEntries] ( identifier[self] , identifier[fd] ):
literal[string]
identifier[p] = identifier[config_file] . identifier[KeyValueParser] ( identifier[kv_sep] = literal[string] , identifier[term] = literal[string] , identifier[sep] = keyword[None] )
identifier[data] = identifier[utils] . identifier[ReadFileBytesAsUnicode] ( identifier[fd] )
identifier[entries] = identifier[p] . identifier[ParseEntries] ( identifier[data] )
keyword[for] identifier[entry] keyword[in] identifier[entries] :
keyword[for] identifier[section] , identifier[cfg] keyword[in] identifier[iteritems] ( identifier[entry] ):
keyword[if] identifier[cfg] :
identifier[cfg] = identifier[cfg] [ literal[int] ]. identifier[strip] ()
keyword[else] :
identifier[cfg] = literal[string]
identifier[self] . identifier[_ParseSection] ( identifier[section] , identifier[cfg] ) | def _ProcessEntries(self, fd):
"""Extract entries from the xinetd config files."""
p = config_file.KeyValueParser(kv_sep='{', term='}', sep=None)
data = utils.ReadFileBytesAsUnicode(fd)
entries = p.ParseEntries(data)
for entry in entries:
for (section, cfg) in iteritems(entry):
# The parser returns a list of configs. There will only be one.
if cfg:
cfg = cfg[0].strip() # depends on [control=['if'], data=[]]
else:
cfg = ''
self._ParseSection(section, cfg) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['entry']] |
def get_support_variables(polynomial):
    """Gets the support of a polynomial.
    """
    support = []
    # Scalars have empty support.
    if is_number_type(polynomial):
        return support
    for monomial in polynomial.expand().as_coefficients_dict():
        mon, _ = __separate_scalar_factor(monomial)
        for factor in flatten(split_commutative_parts(mon)):
            if isinstance(factor, Pow):
                # Record the base of a power, undoing any adjoint.
                base = factor.base
                support.append(base.adjoint() if is_adjoint(base) else base)
            elif is_adjoint(factor):
                support.append(factor.adjoint())
            elif isinstance(factor, Operator):
                support.append(factor)
    return support
constant[Gets the support of a polynomial.
]
variable[support] assign[=] list[[]]
if call[name[is_number_type], parameter[name[polynomial]]] begin[:]
return[name[support]]
for taget[name[monomial]] in starred[call[call[name[polynomial].expand, parameter[]].as_coefficients_dict, parameter[]]] begin[:]
<ast.Tuple object at 0x7da18f00de40> assign[=] call[name[__separate_scalar_factor], parameter[name[monomial]]]
variable[symbolic_support] assign[=] call[name[flatten], parameter[call[name[split_commutative_parts], parameter[name[mon]]]]]
for taget[name[s]] in starred[name[symbolic_support]] begin[:]
if call[name[isinstance], parameter[name[s], name[Pow]]] begin[:]
variable[base] assign[=] name[s].base
if call[name[is_adjoint], parameter[name[base]]] begin[:]
variable[base] assign[=] call[name[base].adjoint, parameter[]]
call[name[support].append, parameter[name[base]]]
return[name[support]] | keyword[def] identifier[get_support_variables] ( identifier[polynomial] ):
literal[string]
identifier[support] =[]
keyword[if] identifier[is_number_type] ( identifier[polynomial] ):
keyword[return] identifier[support]
keyword[for] identifier[monomial] keyword[in] identifier[polynomial] . identifier[expand] (). identifier[as_coefficients_dict] ():
identifier[mon] , identifier[_] = identifier[__separate_scalar_factor] ( identifier[monomial] )
identifier[symbolic_support] = identifier[flatten] ( identifier[split_commutative_parts] ( identifier[mon] ))
keyword[for] identifier[s] keyword[in] identifier[symbolic_support] :
keyword[if] identifier[isinstance] ( identifier[s] , identifier[Pow] ):
identifier[base] = identifier[s] . identifier[base]
keyword[if] identifier[is_adjoint] ( identifier[base] ):
identifier[base] = identifier[base] . identifier[adjoint] ()
identifier[support] . identifier[append] ( identifier[base] )
keyword[elif] identifier[is_adjoint] ( identifier[s] ):
identifier[support] . identifier[append] ( identifier[s] . identifier[adjoint] ())
keyword[elif] identifier[isinstance] ( identifier[s] , identifier[Operator] ):
identifier[support] . identifier[append] ( identifier[s] )
keyword[return] identifier[support] | def get_support_variables(polynomial):
"""Gets the support of a polynomial.
"""
support = []
if is_number_type(polynomial):
return support # depends on [control=['if'], data=[]]
for monomial in polynomial.expand().as_coefficients_dict():
(mon, _) = __separate_scalar_factor(monomial)
symbolic_support = flatten(split_commutative_parts(mon))
for s in symbolic_support:
if isinstance(s, Pow):
base = s.base
if is_adjoint(base):
base = base.adjoint() # depends on [control=['if'], data=[]]
support.append(base) # depends on [control=['if'], data=[]]
elif is_adjoint(s):
support.append(s.adjoint()) # depends on [control=['if'], data=[]]
elif isinstance(s, Operator):
support.append(s) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']] # depends on [control=['for'], data=['monomial']]
return support |
def _neg16(ins):
''' Negates top of the stack (16 bits in HL)
'''
output = _16bit_oper(ins.quad[2])
output.append('call __NEGHL')
output.append('push hl')
REQUIRES.add('neg16.asm')
return output | def function[_neg16, parameter[ins]]:
constant[ Negates top of the stack (16 bits in HL)
]
variable[output] assign[=] call[name[_16bit_oper], parameter[call[name[ins].quad][constant[2]]]]
call[name[output].append, parameter[constant[call __NEGHL]]]
call[name[output].append, parameter[constant[push hl]]]
call[name[REQUIRES].add, parameter[constant[neg16.asm]]]
return[name[output]] | keyword[def] identifier[_neg16] ( identifier[ins] ):
literal[string]
identifier[output] = identifier[_16bit_oper] ( identifier[ins] . identifier[quad] [ literal[int] ])
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[return] identifier[output] | def _neg16(ins):
""" Negates top of the stack (16 bits in HL)
"""
output = _16bit_oper(ins.quad[2])
output.append('call __NEGHL')
output.append('push hl')
REQUIRES.add('neg16.asm')
return output |
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
obj = self.get_object(request, unquote(object_id))
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
perms_needed = False # cheat! Only object permission is required
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_unicode(obj)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
if not self.has_change_permission(request, None):
return HttpResponseRedirect(reverse('admin:index',
current_app=self.admin_site.name))
return HttpResponseRedirect(reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.module_name),
current_app=self.admin_site.name))
object_name = force_unicode(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"object_name": object_name,
"object": obj,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": app_label,
}
context.update(extra_context or {})
return TemplateResponse(request, self.delete_confirmation_template or [
"admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_confirmation.html" % app_label,
"admin/delete_confirmation.html"
], context, current_app=self.admin_site.name) | def function[delete_view, parameter[self, request, object_id, extra_context]]:
constant[The 'delete' admin view for this model.]
variable[opts] assign[=] name[self].model._meta
variable[app_label] assign[=] name[opts].app_label
variable[obj] assign[=] call[name[self].get_object, parameter[name[request], call[name[unquote], parameter[name[object_id]]]]]
if <ast.UnaryOp object at 0x7da18f721cf0> begin[:]
<ast.Raise object at 0x7da18f7218a0>
if compare[name[obj] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f721750>
variable[using] assign[=] call[name[router].db_for_write, parameter[name[self].model]]
<ast.Tuple object at 0x7da18f721780> assign[=] call[name[get_deleted_objects], parameter[list[[<ast.Name object at 0x7da18f7223e0>]], name[opts], name[request].user, name[self].admin_site, name[using]]]
variable[perms_needed] assign[=] constant[False]
if name[request].POST begin[:]
if name[perms_needed] begin[:]
<ast.Raise object at 0x7da18f722ad0>
variable[obj_display] assign[=] call[name[force_unicode], parameter[name[obj]]]
call[name[self].log_deletion, parameter[name[request], name[obj], name[obj_display]]]
call[name[self].delete_model, parameter[name[request], name[obj]]]
call[name[self].message_user, parameter[name[request], binary_operation[call[name[_], parameter[constant[The %(name)s "%(obj)s" was deleted successfully.]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da18dc99a80>, <ast.Constant object at 0x7da18dc9bd60>], [<ast.Call object at 0x7da18dc9ac20>, <ast.Call object at 0x7da18dc9ab60>]]]]]
if <ast.UnaryOp object at 0x7da18dc99c00> begin[:]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[admin:index]]]]]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[binary_operation[constant[admin:%s_%s_changelist] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18dc9b6d0>, <ast.Attribute object at 0x7da18dc987c0>]]]]]]]]
variable[object_name] assign[=] call[name[force_unicode], parameter[name[opts].verbose_name]]
if <ast.BoolOp object at 0x7da18dc9acb0> begin[:]
variable[title] assign[=] binary_operation[call[name[_], parameter[constant[Cannot delete %(name)s]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da20e956ef0>], [<ast.Name object at 0x7da20e954400>]]]
variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da20e954eb0>, <ast.Constant object at 0x7da20e957eb0>, <ast.Constant object at 0x7da20e9548b0>, <ast.Constant object at 0x7da20e9577f0>, <ast.Constant object at 0x7da20e9547c0>, <ast.Constant object at 0x7da20e957610>, <ast.Constant object at 0x7da20e955ab0>, <ast.Constant object at 0x7da20e954490>], [<ast.Name object at 0x7da20e9557b0>, <ast.Name object at 0x7da20e957700>, <ast.Name object at 0x7da20e957340>, <ast.Name object at 0x7da20e956b00>, <ast.Name object at 0x7da20e9571f0>, <ast.Name object at 0x7da20e954910>, <ast.Name object at 0x7da20e9568c0>, <ast.Name object at 0x7da18dc9a9e0>]]
call[name[context].update, parameter[<ast.BoolOp object at 0x7da18dc9b700>]]
return[call[name[TemplateResponse], parameter[name[request], <ast.BoolOp object at 0x7da18dc99a50>, name[context]]]] | keyword[def] identifier[delete_view] ( identifier[self] , identifier[request] , identifier[object_id] , identifier[extra_context] = keyword[None] ):
literal[string]
identifier[opts] = identifier[self] . identifier[model] . identifier[_meta]
identifier[app_label] = identifier[opts] . identifier[app_label]
identifier[obj] = identifier[self] . identifier[get_object] ( identifier[request] , identifier[unquote] ( identifier[object_id] ))
keyword[if] keyword[not] identifier[self] . identifier[has_delete_permission] ( identifier[request] , identifier[obj] ):
keyword[raise] identifier[PermissionDenied]
keyword[if] identifier[obj] keyword[is] keyword[None] :
keyword[raise] identifier[Http404] ( identifier[_] ( literal[string] )%{ literal[string] : identifier[force_unicode] ( identifier[opts] . identifier[verbose_name] ), literal[string] : identifier[escape] ( identifier[object_id] )})
identifier[using] = identifier[router] . identifier[db_for_write] ( identifier[self] . identifier[model] )
( identifier[deleted_objects] , identifier[perms_needed] , identifier[protected] )= identifier[get_deleted_objects] (
[ identifier[obj] ], identifier[opts] , identifier[request] . identifier[user] , identifier[self] . identifier[admin_site] , identifier[using] )
identifier[perms_needed] = keyword[False]
keyword[if] identifier[request] . identifier[POST] :
keyword[if] identifier[perms_needed] :
keyword[raise] identifier[PermissionDenied]
identifier[obj_display] = identifier[force_unicode] ( identifier[obj] )
identifier[self] . identifier[log_deletion] ( identifier[request] , identifier[obj] , identifier[obj_display] )
identifier[self] . identifier[delete_model] ( identifier[request] , identifier[obj] )
identifier[self] . identifier[message_user] ( identifier[request] , identifier[_] ( literal[string] )%{ literal[string] : identifier[force_unicode] ( identifier[opts] . identifier[verbose_name] ), literal[string] : identifier[force_unicode] ( identifier[obj_display] )})
keyword[if] keyword[not] identifier[self] . identifier[has_change_permission] ( identifier[request] , keyword[None] ):
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] ,
identifier[current_app] = identifier[self] . identifier[admin_site] . identifier[name] ))
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] %
( identifier[opts] . identifier[app_label] , identifier[opts] . identifier[module_name] ),
identifier[current_app] = identifier[self] . identifier[admin_site] . identifier[name] ))
identifier[object_name] = identifier[force_unicode] ( identifier[opts] . identifier[verbose_name] )
keyword[if] identifier[perms_needed] keyword[or] identifier[protected] :
identifier[title] = identifier[_] ( literal[string] )%{ literal[string] : identifier[object_name] }
keyword[else] :
identifier[title] = identifier[_] ( literal[string] )
identifier[context] ={
literal[string] : identifier[title] ,
literal[string] : identifier[object_name] ,
literal[string] : identifier[obj] ,
literal[string] : identifier[deleted_objects] ,
literal[string] : identifier[perms_needed] ,
literal[string] : identifier[protected] ,
literal[string] : identifier[opts] ,
literal[string] : identifier[app_label] ,
}
identifier[context] . identifier[update] ( identifier[extra_context] keyword[or] {})
keyword[return] identifier[TemplateResponse] ( identifier[request] , identifier[self] . identifier[delete_confirmation_template] keyword[or] [
literal[string] %( identifier[app_label] , identifier[opts] . identifier[object_name] . identifier[lower] ()),
literal[string] % identifier[app_label] ,
literal[string]
], identifier[context] , identifier[current_app] = identifier[self] . identifier[admin_site] . identifier[name] ) | def delete_view(self, request, object_id, extra_context=None):
"""The 'delete' admin view for this model."""
opts = self.model._meta
app_label = opts.app_label
obj = self.get_object(request, unquote(object_id))
if not self.has_delete_permission(request, obj):
raise PermissionDenied # depends on [control=['if'], data=[]]
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)}) # depends on [control=['if'], data=[]]
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, perms_needed, protected) = get_deleted_objects([obj], opts, request.user, self.admin_site, using)
perms_needed = False # cheat! Only object permission is required
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied # depends on [control=['if'], data=[]]
obj_display = force_unicode(obj)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
if not self.has_change_permission(request, None):
return HttpResponseRedirect(reverse('admin:index', current_app=self.admin_site.name)) # depends on [control=['if'], data=[]]
return HttpResponseRedirect(reverse('admin:%s_%s_changelist' % (opts.app_label, opts.module_name), current_app=self.admin_site.name)) # depends on [control=['if'], data=[]]
object_name = force_unicode(opts.verbose_name)
if perms_needed or protected:
title = _('Cannot delete %(name)s') % {'name': object_name} # depends on [control=['if'], data=[]]
else:
title = _('Are you sure?')
context = {'title': title, 'object_name': object_name, 'object': obj, 'deleted_objects': deleted_objects, 'perms_lacking': perms_needed, 'protected': protected, 'opts': opts, 'app_label': app_label}
context.update(extra_context or {})
return TemplateResponse(request, self.delete_confirmation_template or ['admin/%s/%s/delete_confirmation.html' % (app_label, opts.object_name.lower()), 'admin/%s/delete_confirmation.html' % app_label, 'admin/delete_confirmation.html'], context, current_app=self.admin_site.name) |
def by_month(self, chamber, year=None, month=None):
"""
Return votes for a single month, defaulting to the current month.
"""
check_chamber(chamber)
now = datetime.datetime.now()
year = year or now.year
month = month or now.month
path = "{chamber}/votes/{year}/{month}.json".format(
chamber=chamber, year=year, month=month)
return self.fetch(path, parse=lambda r: r['results']) | def function[by_month, parameter[self, chamber, year, month]]:
constant[
Return votes for a single month, defaulting to the current month.
]
call[name[check_chamber], parameter[name[chamber]]]
variable[now] assign[=] call[name[datetime].datetime.now, parameter[]]
variable[year] assign[=] <ast.BoolOp object at 0x7da204621210>
variable[month] assign[=] <ast.BoolOp object at 0x7da204621b40>
variable[path] assign[=] call[constant[{chamber}/votes/{year}/{month}.json].format, parameter[]]
return[call[name[self].fetch, parameter[name[path]]]] | keyword[def] identifier[by_month] ( identifier[self] , identifier[chamber] , identifier[year] = keyword[None] , identifier[month] = keyword[None] ):
literal[string]
identifier[check_chamber] ( identifier[chamber] )
identifier[now] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[year] = identifier[year] keyword[or] identifier[now] . identifier[year]
identifier[month] = identifier[month] keyword[or] identifier[now] . identifier[month]
identifier[path] = literal[string] . identifier[format] (
identifier[chamber] = identifier[chamber] , identifier[year] = identifier[year] , identifier[month] = identifier[month] )
keyword[return] identifier[self] . identifier[fetch] ( identifier[path] , identifier[parse] = keyword[lambda] identifier[r] : identifier[r] [ literal[string] ]) | def by_month(self, chamber, year=None, month=None):
"""
Return votes for a single month, defaulting to the current month.
"""
check_chamber(chamber)
now = datetime.datetime.now()
year = year or now.year
month = month or now.month
path = '{chamber}/votes/{year}/{month}.json'.format(chamber=chamber, year=year, month=month)
return self.fetch(path, parse=lambda r: r['results']) |
def set(self, data, path=KISSmetrics.SET_PATH, resp=False):
"""Set a properties provided in `data` for identity.
:param data: key-value pairs to associate with identity
:type data: dict
:param path: endpoint path; defaults to ``KISSmetrics.SET_PATH``
:param resp: indicate whether to return response
:type resp: boolean
:returns: an HTTP response for request if `resp=True`
:rtype: `urllib3.response.HTTPResponse`
:raises: Exception if either `identity` or `key` not set
"""
self.check_id_key()
timestamp = None
response = self.client.set(person=self.identity, properties=data,
timestamp=timestamp, path=path)
if resp:
return response | def function[set, parameter[self, data, path, resp]]:
constant[Set a properties provided in `data` for identity.
:param data: key-value pairs to associate with identity
:type data: dict
:param path: endpoint path; defaults to ``KISSmetrics.SET_PATH``
:param resp: indicate whether to return response
:type resp: boolean
:returns: an HTTP response for request if `resp=True`
:rtype: `urllib3.response.HTTPResponse`
:raises: Exception if either `identity` or `key` not set
]
call[name[self].check_id_key, parameter[]]
variable[timestamp] assign[=] constant[None]
variable[response] assign[=] call[name[self].client.set, parameter[]]
if name[resp] begin[:]
return[name[response]] | keyword[def] identifier[set] ( identifier[self] , identifier[data] , identifier[path] = identifier[KISSmetrics] . identifier[SET_PATH] , identifier[resp] = keyword[False] ):
literal[string]
identifier[self] . identifier[check_id_key] ()
identifier[timestamp] = keyword[None]
identifier[response] = identifier[self] . identifier[client] . identifier[set] ( identifier[person] = identifier[self] . identifier[identity] , identifier[properties] = identifier[data] ,
identifier[timestamp] = identifier[timestamp] , identifier[path] = identifier[path] )
keyword[if] identifier[resp] :
keyword[return] identifier[response] | def set(self, data, path=KISSmetrics.SET_PATH, resp=False):
"""Set a properties provided in `data` for identity.
:param data: key-value pairs to associate with identity
:type data: dict
:param path: endpoint path; defaults to ``KISSmetrics.SET_PATH``
:param resp: indicate whether to return response
:type resp: boolean
:returns: an HTTP response for request if `resp=True`
:rtype: `urllib3.response.HTTPResponse`
:raises: Exception if either `identity` or `key` not set
"""
self.check_id_key()
timestamp = None
response = self.client.set(person=self.identity, properties=data, timestamp=timestamp, path=path)
if resp:
return response # depends on [control=['if'], data=[]] |
def create_role_config_group(self, name, display_name, role_type):
"""
Create a role config group.
@param name: The name of the new group.
@param display_name: The display name of the new group.
@param role_type: The role type of the new group.
@return: New ApiRoleConfigGroup object.
@since: API v3
"""
return role_config_groups.create_role_config_group(
self._get_resource_root(), self.name, name, display_name, role_type,
self._get_cluster_name()) | def function[create_role_config_group, parameter[self, name, display_name, role_type]]:
constant[
Create a role config group.
@param name: The name of the new group.
@param display_name: The display name of the new group.
@param role_type: The role type of the new group.
@return: New ApiRoleConfigGroup object.
@since: API v3
]
return[call[name[role_config_groups].create_role_config_group, parameter[call[name[self]._get_resource_root, parameter[]], name[self].name, name[name], name[display_name], name[role_type], call[name[self]._get_cluster_name, parameter[]]]]] | keyword[def] identifier[create_role_config_group] ( identifier[self] , identifier[name] , identifier[display_name] , identifier[role_type] ):
literal[string]
keyword[return] identifier[role_config_groups] . identifier[create_role_config_group] (
identifier[self] . identifier[_get_resource_root] (), identifier[self] . identifier[name] , identifier[name] , identifier[display_name] , identifier[role_type] ,
identifier[self] . identifier[_get_cluster_name] ()) | def create_role_config_group(self, name, display_name, role_type):
"""
Create a role config group.
@param name: The name of the new group.
@param display_name: The display name of the new group.
@param role_type: The role type of the new group.
@return: New ApiRoleConfigGroup object.
@since: API v3
"""
return role_config_groups.create_role_config_group(self._get_resource_root(), self.name, name, display_name, role_type, self._get_cluster_name()) |
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
"""Returns list of assertions related to `lu_solve` assumptions."""
assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args)
message = 'Input `rhs` must have at least 2 dimensions.'
if rhs.shape.ndims is not None:
if rhs.shape.ndims < 2:
raise ValueError(message)
elif validate_args:
assertions.append(
tf.compat.v1.assert_rank_at_least(rhs, rank=2, message=message))
message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'
if (tf.compat.dimension_value(lower_upper.shape[-1]) is not None and
tf.compat.dimension_value(rhs.shape[-2]) is not None):
if lower_upper.shape[-1] != rhs.shape[-2]:
raise ValueError(message)
elif validate_args:
assertions.append(
tf.compat.v1.assert_equal(
tf.shape(input=lower_upper)[-1],
tf.shape(input=rhs)[-2],
message=message))
return assertions | def function[_lu_solve_assertions, parameter[lower_upper, perm, rhs, validate_args]]:
constant[Returns list of assertions related to `lu_solve` assumptions.]
variable[assertions] assign[=] call[name[_lu_reconstruct_assertions], parameter[name[lower_upper], name[perm], name[validate_args]]]
variable[message] assign[=] constant[Input `rhs` must have at least 2 dimensions.]
if compare[name[rhs].shape.ndims is_not constant[None]] begin[:]
if compare[name[rhs].shape.ndims less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da2043449d0>
variable[message] assign[=] constant[`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.]
if <ast.BoolOp object at 0x7da1b05bc940> begin[:]
if compare[call[name[lower_upper].shape][<ast.UnaryOp object at 0x7da204345ed0>] not_equal[!=] call[name[rhs].shape][<ast.UnaryOp object at 0x7da2043456f0>]] begin[:]
<ast.Raise object at 0x7da204347c10>
return[name[assertions]] | keyword[def] identifier[_lu_solve_assertions] ( identifier[lower_upper] , identifier[perm] , identifier[rhs] , identifier[validate_args] ):
literal[string]
identifier[assertions] = identifier[_lu_reconstruct_assertions] ( identifier[lower_upper] , identifier[perm] , identifier[validate_args] )
identifier[message] = literal[string]
keyword[if] identifier[rhs] . identifier[shape] . identifier[ndims] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[rhs] . identifier[shape] . identifier[ndims] < literal[int] :
keyword[raise] identifier[ValueError] ( identifier[message] )
keyword[elif] identifier[validate_args] :
identifier[assertions] . identifier[append] (
identifier[tf] . identifier[compat] . identifier[v1] . identifier[assert_rank_at_least] ( identifier[rhs] , identifier[rank] = literal[int] , identifier[message] = identifier[message] ))
identifier[message] = literal[string]
keyword[if] ( identifier[tf] . identifier[compat] . identifier[dimension_value] ( identifier[lower_upper] . identifier[shape] [- literal[int] ]) keyword[is] keyword[not] keyword[None] keyword[and]
identifier[tf] . identifier[compat] . identifier[dimension_value] ( identifier[rhs] . identifier[shape] [- literal[int] ]) keyword[is] keyword[not] keyword[None] ):
keyword[if] identifier[lower_upper] . identifier[shape] [- literal[int] ]!= identifier[rhs] . identifier[shape] [- literal[int] ]:
keyword[raise] identifier[ValueError] ( identifier[message] )
keyword[elif] identifier[validate_args] :
identifier[assertions] . identifier[append] (
identifier[tf] . identifier[compat] . identifier[v1] . identifier[assert_equal] (
identifier[tf] . identifier[shape] ( identifier[input] = identifier[lower_upper] )[- literal[int] ],
identifier[tf] . identifier[shape] ( identifier[input] = identifier[rhs] )[- literal[int] ],
identifier[message] = identifier[message] ))
keyword[return] identifier[assertions] | def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
"""Returns list of assertions related to `lu_solve` assumptions."""
assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args)
message = 'Input `rhs` must have at least 2 dimensions.'
if rhs.shape.ndims is not None:
if rhs.shape.ndims < 2:
raise ValueError(message) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif validate_args:
assertions.append(tf.compat.v1.assert_rank_at_least(rhs, rank=2, message=message)) # depends on [control=['if'], data=[]]
message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'
if tf.compat.dimension_value(lower_upper.shape[-1]) is not None and tf.compat.dimension_value(rhs.shape[-2]) is not None:
if lower_upper.shape[-1] != rhs.shape[-2]:
raise ValueError(message) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif validate_args:
assertions.append(tf.compat.v1.assert_equal(tf.shape(input=lower_upper)[-1], tf.shape(input=rhs)[-2], message=message)) # depends on [control=['if'], data=[]]
return assertions |
def get_user_tenants(user, group):
"""Return the set of associated Tenants for the given user and group."""
if user.is_active and user.is_authenticated():
if user.is_superuser or is_group_manager(user, group.pk):
return Tenant.objects.filter(group=group)
else:
return Tenant.objects.filter(group=group, tenantrole__user=user).distinct()
else:
return Tenant.objects.none() | def function[get_user_tenants, parameter[user, group]]:
constant[Return the set of associated Tenants for the given user and group.]
if <ast.BoolOp object at 0x7da1b15a1210> begin[:]
if <ast.BoolOp object at 0x7da1b15a0be0> begin[:]
return[call[name[Tenant].objects.filter, parameter[]]] | keyword[def] identifier[get_user_tenants] ( identifier[user] , identifier[group] ):
literal[string]
keyword[if] identifier[user] . identifier[is_active] keyword[and] identifier[user] . identifier[is_authenticated] ():
keyword[if] identifier[user] . identifier[is_superuser] keyword[or] identifier[is_group_manager] ( identifier[user] , identifier[group] . identifier[pk] ):
keyword[return] identifier[Tenant] . identifier[objects] . identifier[filter] ( identifier[group] = identifier[group] )
keyword[else] :
keyword[return] identifier[Tenant] . identifier[objects] . identifier[filter] ( identifier[group] = identifier[group] , identifier[tenantrole__user] = identifier[user] ). identifier[distinct] ()
keyword[else] :
keyword[return] identifier[Tenant] . identifier[objects] . identifier[none] () | def get_user_tenants(user, group):
"""Return the set of associated Tenants for the given user and group."""
if user.is_active and user.is_authenticated():
if user.is_superuser or is_group_manager(user, group.pk):
return Tenant.objects.filter(group=group) # depends on [control=['if'], data=[]]
else:
return Tenant.objects.filter(group=group, tenantrole__user=user).distinct() # depends on [control=['if'], data=[]]
else:
return Tenant.objects.none() |
def read(self, n):
"""Read `n` chars from buffer"""
r = self.buf[self.offset:self.offset + n]
if isinstance(r, array):
r = r.tostring()
self.offset += n
return r | def function[read, parameter[self, n]]:
constant[Read `n` chars from buffer]
variable[r] assign[=] call[name[self].buf][<ast.Slice object at 0x7da18f00c880>]
if call[name[isinstance], parameter[name[r], name[array]]] begin[:]
variable[r] assign[=] call[name[r].tostring, parameter[]]
<ast.AugAssign object at 0x7da18f00f190>
return[name[r]] | keyword[def] identifier[read] ( identifier[self] , identifier[n] ):
literal[string]
identifier[r] = identifier[self] . identifier[buf] [ identifier[self] . identifier[offset] : identifier[self] . identifier[offset] + identifier[n] ]
keyword[if] identifier[isinstance] ( identifier[r] , identifier[array] ):
identifier[r] = identifier[r] . identifier[tostring] ()
identifier[self] . identifier[offset] += identifier[n]
keyword[return] identifier[r] | def read(self, n):
"""Read `n` chars from buffer"""
r = self.buf[self.offset:self.offset + n]
if isinstance(r, array):
r = r.tostring() # depends on [control=['if'], data=[]]
self.offset += n
return r |
def fit(self, data, weights=None, bw_estimator='scott', efficient_bw_estimation=True, update_bandwidth=True):
"""
fits the KDE to the data by estimating the bandwidths and storing the data
Parameters
----------
data: 2d-array, shape N x M
N datapoints in an M dimensional space to which the KDE is fit
weights: 1d array
N weights, one for every data point.
They will be normalized to sum up to one
fix_boundary_effects: bool
whether to reweigh points close to the bondary no fix the pdf
bw_estimator: str
allowed values are 'scott' and 'mlcv' for Scott's rule of thumb
and the maximum likelihood via cross-validation
efficient_bw_estimation: bool
if true, start bandwidth optimization from the previous value, otherwise
start from Scott's values
update_bandwidths: bool
whether to update the bandwidths at all
"""
if self.data is None:
# overwrite some values in case this is the first fit of the KDE
efficient_bw_estimation = False
update_bandwidth=True
self.data = np.asfortranarray(data)
for i,k in enumerate(self.kernels):
self.kernels[i].data = self.data[:,i]
self.weights = self._normalize_weights(weights)
if not update_bandwidth:
return
if not efficient_bw_estimation or bw_estimator == 'scott':
# inspired by the the statsmodels code
sigmas = np.std(self.data, ddof=1, axis=0)
IQRs = np.subtract.reduce(np.percentile(self.data, [75,25], axis=0))
self.bandwidths = 1.059 * np.minimum(sigmas, IQRs) * np.power(self.data.shape[0], -0.2)
# crop bandwidths for categorical parameters
self.bandwidths = np.clip(self.bandwidths , self.min_bandwidth, self.bw_clip)
if bw_estimator == 'mlcv':
# optimize bandwidths here
def opt_me(bw):
self.bandwidths=bw
self._set_kernel_bandwidths()
return(self.loo_negloglikelihood())
res = spo.minimize(opt_me, self.bandwidths, bounds=self.bw_bounds, method='SLSQP')
self.optimizer_result = res
self.bandwidths[:] = res.x
self._set_kernel_bandwidths() | def function[fit, parameter[self, data, weights, bw_estimator, efficient_bw_estimation, update_bandwidth]]:
constant[
fits the KDE to the data by estimating the bandwidths and storing the data
Parameters
----------
data: 2d-array, shape N x M
N datapoints in an M dimensional space to which the KDE is fit
weights: 1d array
N weights, one for every data point.
They will be normalized to sum up to one
fix_boundary_effects: bool
whether to reweigh points close to the bondary no fix the pdf
bw_estimator: str
allowed values are 'scott' and 'mlcv' for Scott's rule of thumb
and the maximum likelihood via cross-validation
efficient_bw_estimation: bool
if true, start bandwidth optimization from the previous value, otherwise
start from Scott's values
update_bandwidths: bool
whether to update the bandwidths at all
]
if compare[name[self].data is constant[None]] begin[:]
variable[efficient_bw_estimation] assign[=] constant[False]
variable[update_bandwidth] assign[=] constant[True]
name[self].data assign[=] call[name[np].asfortranarray, parameter[name[data]]]
for taget[tuple[[<ast.Name object at 0x7da1b17126b0>, <ast.Name object at 0x7da1b1713130>]]] in starred[call[name[enumerate], parameter[name[self].kernels]]] begin[:]
call[name[self].kernels][name[i]].data assign[=] call[name[self].data][tuple[[<ast.Slice object at 0x7da1b1710e80>, <ast.Name object at 0x7da1b1710940>]]]
name[self].weights assign[=] call[name[self]._normalize_weights, parameter[name[weights]]]
if <ast.UnaryOp object at 0x7da1b1713520> begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b17121a0> begin[:]
variable[sigmas] assign[=] call[name[np].std, parameter[name[self].data]]
variable[IQRs] assign[=] call[name[np].subtract.reduce, parameter[call[name[np].percentile, parameter[name[self].data, list[[<ast.Constant object at 0x7da1b1710130>, <ast.Constant object at 0x7da1b1713b80>]]]]]]
name[self].bandwidths assign[=] binary_operation[binary_operation[constant[1.059] * call[name[np].minimum, parameter[name[sigmas], name[IQRs]]]] * call[name[np].power, parameter[call[name[self].data.shape][constant[0]], <ast.UnaryOp object at 0x7da1b17130a0>]]]
name[self].bandwidths assign[=] call[name[np].clip, parameter[name[self].bandwidths, name[self].min_bandwidth, name[self].bw_clip]]
if compare[name[bw_estimator] equal[==] constant[mlcv]] begin[:]
def function[opt_me, parameter[bw]]:
name[self].bandwidths assign[=] name[bw]
call[name[self]._set_kernel_bandwidths, parameter[]]
return[call[name[self].loo_negloglikelihood, parameter[]]]
variable[res] assign[=] call[name[spo].minimize, parameter[name[opt_me], name[self].bandwidths]]
name[self].optimizer_result assign[=] name[res]
call[name[self].bandwidths][<ast.Slice object at 0x7da1b1711990>] assign[=] name[res].x
call[name[self]._set_kernel_bandwidths, parameter[]] | keyword[def] identifier[fit] ( identifier[self] , identifier[data] , identifier[weights] = keyword[None] , identifier[bw_estimator] = literal[string] , identifier[efficient_bw_estimation] = keyword[True] , identifier[update_bandwidth] = keyword[True] ):
literal[string]
keyword[if] identifier[self] . identifier[data] keyword[is] keyword[None] :
identifier[efficient_bw_estimation] = keyword[False]
identifier[update_bandwidth] = keyword[True]
identifier[self] . identifier[data] = identifier[np] . identifier[asfortranarray] ( identifier[data] )
keyword[for] identifier[i] , identifier[k] keyword[in] identifier[enumerate] ( identifier[self] . identifier[kernels] ):
identifier[self] . identifier[kernels] [ identifier[i] ]. identifier[data] = identifier[self] . identifier[data] [:, identifier[i] ]
identifier[self] . identifier[weights] = identifier[self] . identifier[_normalize_weights] ( identifier[weights] )
keyword[if] keyword[not] identifier[update_bandwidth] :
keyword[return]
keyword[if] keyword[not] identifier[efficient_bw_estimation] keyword[or] identifier[bw_estimator] == literal[string] :
identifier[sigmas] = identifier[np] . identifier[std] ( identifier[self] . identifier[data] , identifier[ddof] = literal[int] , identifier[axis] = literal[int] )
identifier[IQRs] = identifier[np] . identifier[subtract] . identifier[reduce] ( identifier[np] . identifier[percentile] ( identifier[self] . identifier[data] ,[ literal[int] , literal[int] ], identifier[axis] = literal[int] ))
identifier[self] . identifier[bandwidths] = literal[int] * identifier[np] . identifier[minimum] ( identifier[sigmas] , identifier[IQRs] )* identifier[np] . identifier[power] ( identifier[self] . identifier[data] . identifier[shape] [ literal[int] ],- literal[int] )
identifier[self] . identifier[bandwidths] = identifier[np] . identifier[clip] ( identifier[self] . identifier[bandwidths] , identifier[self] . identifier[min_bandwidth] , identifier[self] . identifier[bw_clip] )
keyword[if] identifier[bw_estimator] == literal[string] :
keyword[def] identifier[opt_me] ( identifier[bw] ):
identifier[self] . identifier[bandwidths] = identifier[bw]
identifier[self] . identifier[_set_kernel_bandwidths] ()
keyword[return] ( identifier[self] . identifier[loo_negloglikelihood] ())
identifier[res] = identifier[spo] . identifier[minimize] ( identifier[opt_me] , identifier[self] . identifier[bandwidths] , identifier[bounds] = identifier[self] . identifier[bw_bounds] , identifier[method] = literal[string] )
identifier[self] . identifier[optimizer_result] = identifier[res]
identifier[self] . identifier[bandwidths] [:]= identifier[res] . identifier[x]
identifier[self] . identifier[_set_kernel_bandwidths] () | def fit(self, data, weights=None, bw_estimator='scott', efficient_bw_estimation=True, update_bandwidth=True):
"""
fits the KDE to the data by estimating the bandwidths and storing the data
Parameters
----------
data: 2d-array, shape N x M
N datapoints in an M dimensional space to which the KDE is fit
weights: 1d array
N weights, one for every data point.
They will be normalized to sum up to one
fix_boundary_effects: bool
whether to reweigh points close to the bondary no fix the pdf
bw_estimator: str
allowed values are 'scott' and 'mlcv' for Scott's rule of thumb
and the maximum likelihood via cross-validation
efficient_bw_estimation: bool
if true, start bandwidth optimization from the previous value, otherwise
start from Scott's values
update_bandwidths: bool
whether to update the bandwidths at all
"""
if self.data is None: # overwrite some values in case this is the first fit of the KDE
efficient_bw_estimation = False
update_bandwidth = True # depends on [control=['if'], data=[]]
self.data = np.asfortranarray(data)
for (i, k) in enumerate(self.kernels):
self.kernels[i].data = self.data[:, i] # depends on [control=['for'], data=[]]
self.weights = self._normalize_weights(weights)
if not update_bandwidth:
return # depends on [control=['if'], data=[]]
if not efficient_bw_estimation or bw_estimator == 'scott': # inspired by the the statsmodels code
sigmas = np.std(self.data, ddof=1, axis=0)
IQRs = np.subtract.reduce(np.percentile(self.data, [75, 25], axis=0))
self.bandwidths = 1.059 * np.minimum(sigmas, IQRs) * np.power(self.data.shape[0], -0.2) # crop bandwidths for categorical parameters
self.bandwidths = np.clip(self.bandwidths, self.min_bandwidth, self.bw_clip) # depends on [control=['if'], data=[]]
if bw_estimator == 'mlcv': # optimize bandwidths here
def opt_me(bw):
self.bandwidths = bw
self._set_kernel_bandwidths()
return self.loo_negloglikelihood()
res = spo.minimize(opt_me, self.bandwidths, bounds=self.bw_bounds, method='SLSQP')
self.optimizer_result = res
self.bandwidths[:] = res.x # depends on [control=['if'], data=[]]
self._set_kernel_bandwidths() |
def _set_values_on_model(self, model, values, fields=None):
"""
Updates the values with the specified values.
:param Model model: The sqlalchemy model instance
:param dict values: The dictionary of attributes and
the values to set.
:param list fields: A list of strings indicating
the valid fields. Defaults to self.fields.
:return: The model with the updated
:rtype: Model
"""
fields = fields or self.fields
for name, val in six.iteritems(values):
if name not in fields:
continue
setattr(model, name, val)
return model | def function[_set_values_on_model, parameter[self, model, values, fields]]:
constant[
Updates the values with the specified values.
:param Model model: The sqlalchemy model instance
:param dict values: The dictionary of attributes and
the values to set.
:param list fields: A list of strings indicating
the valid fields. Defaults to self.fields.
:return: The model with the updated
:rtype: Model
]
variable[fields] assign[=] <ast.BoolOp object at 0x7da18f58c340>
for taget[tuple[[<ast.Name object at 0x7da18f58e6e0>, <ast.Name object at 0x7da18f58c4c0>]]] in starred[call[name[six].iteritems, parameter[name[values]]]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[fields]] begin[:]
continue
call[name[setattr], parameter[name[model], name[name], name[val]]]
return[name[model]] | keyword[def] identifier[_set_values_on_model] ( identifier[self] , identifier[model] , identifier[values] , identifier[fields] = keyword[None] ):
literal[string]
identifier[fields] = identifier[fields] keyword[or] identifier[self] . identifier[fields]
keyword[for] identifier[name] , identifier[val] keyword[in] identifier[six] . identifier[iteritems] ( identifier[values] ):
keyword[if] identifier[name] keyword[not] keyword[in] identifier[fields] :
keyword[continue]
identifier[setattr] ( identifier[model] , identifier[name] , identifier[val] )
keyword[return] identifier[model] | def _set_values_on_model(self, model, values, fields=None):
"""
Updates the values with the specified values.
:param Model model: The sqlalchemy model instance
:param dict values: The dictionary of attributes and
the values to set.
:param list fields: A list of strings indicating
the valid fields. Defaults to self.fields.
:return: The model with the updated
:rtype: Model
"""
fields = fields or self.fields
for (name, val) in six.iteritems(values):
if name not in fields:
continue # depends on [control=['if'], data=[]]
setattr(model, name, val) # depends on [control=['for'], data=[]]
return model |
def gradients_X_X2(self, dL_dK, X, X2):
    """Derivative of the covariance matrix with respect to X"""
    # _comp_grads returns all gradient components; the X/X2 derivatives
    # occupy the tail of that sequence.
    all_grads = self._comp_grads(dL_dK, X, X2)
    return all_grads[3:]
constant[Derivative of the covariance matrix with respect to X]
return[call[call[name[self]._comp_grads, parameter[name[dL_dK], name[X], name[X2]]]][<ast.Slice object at 0x7da1b1bbd9c0>]] | keyword[def] identifier[gradients_X_X2] ( identifier[self] , identifier[dL_dK] , identifier[X] , identifier[X2] ):
literal[string]
keyword[return] identifier[self] . identifier[_comp_grads] ( identifier[dL_dK] , identifier[X] , identifier[X2] )[ literal[int] :] | def gradients_X_X2(self, dL_dK, X, X2):
"""Derivative of the covariance matrix with respect to X"""
return self._comp_grads(dL_dK, X, X2)[3:] |
def load_file_to_list(self):
    """Load the file at self.fullname and return its lines as a list.

    Each entry keeps its trailing newline.  If the file cannot be
    opened, an empty list is returned instead of raising.
    """
    lines = []
    try:
        with open(self.fullname, 'r') as handle:
            lines.extend(handle)
    except IOError:
        # Missing/unreadable file: fall through and return what we have.
        pass
    return lines
constant[ load a file to a list ]
variable[lst] assign[=] list[[]]
<ast.Try object at 0x7da18bcc87f0> | keyword[def] identifier[load_file_to_list] ( identifier[self] ):
literal[string]
identifier[lst] =[]
keyword[try] :
keyword[with] identifier[open] ( identifier[self] . identifier[fullname] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
identifier[lst] . identifier[append] ( identifier[line] )
keyword[return] identifier[lst]
keyword[except] identifier[IOError] :
keyword[return] identifier[lst] | def load_file_to_list(self):
""" load a file to a list """
lst = []
try:
with open(self.fullname, 'r') as f:
for line in f:
lst.append(line) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
return lst # depends on [control=['try'], data=[]]
except IOError:
return lst # depends on [control=['except'], data=[]] |
def addSubQueue(self, priority, matcher, name = None, maxdefault = None, maxtotal = None, defaultQueueClass = FifoQueue):
    '''
    add a sub queue to current queue, with a priority and a matcher

    :param priority: priority of this queue. Larger is higher, 0 is lowest.

    :param matcher: an event matcher to catch events. Every event match the criteria will be stored in this queue.

    :param name: a unique name to identify the sub-queue. If none, the queue is anonymous. It can be any hashable value.

    :param maxdefault: max length for default queue.

    :param maxtotal: max length for sub-queue total, including sub-queues of sub-queue
    '''
    # Refuse to register two sub-queues under the same name.
    if name is not None and name in self.queueindex:
        raise IndexError("Duplicated sub-queue name '" + str(name) + "'")
    # Route matching events into a dedicated subtree of the match tree.
    subtree = self.tree.subtree(matcher, True)
    # Reuse the priority bucket if it already exists, otherwise create one.
    priority_queue = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
    created = CBQueue(subtree, priority_queue, maxdefault, maxtotal, defaultQueueClass)
    priority_queue.addSubQueue(created)
    # Index entry is shared by both the name lookup and the queue lookup,
    # so updating one updates the other.
    index_entry = [priority, created, name]
    if name is not None:
        self.queueindex[name] = index_entry
    self.queueindex[created] = index_entry
    return created
constant[
add a sub queue to current queue, with a priority and a matcher
:param priority: priority of this queue. Larger is higher, 0 is lowest.
:param matcher: an event matcher to catch events. Every event match the criteria will be stored in this queue.
:param name: a unique name to identify the sub-queue. If none, the queue is anonymous. It can be any hashable value.
:param maxdefault: max length for default queue.
:param maxtotal: max length for sub-queue total, including sub-queues of sub-queue
]
if <ast.BoolOp object at 0x7da2047eba30> begin[:]
<ast.Raise object at 0x7da2047eb3d0>
variable[subtree] assign[=] call[name[self].tree.subtree, parameter[name[matcher], constant[True]]]
variable[newPriority] assign[=] call[name[self].queues.setdefault, parameter[name[priority], call[name[CBQueue].MultiQueue, parameter[name[self], name[priority]]]]]
variable[newQueue] assign[=] call[name[CBQueue], parameter[name[subtree], name[newPriority], name[maxdefault], name[maxtotal], name[defaultQueueClass]]]
call[name[newPriority].addSubQueue, parameter[name[newQueue]]]
variable[qi] assign[=] list[[<ast.Name object at 0x7da2047ebe20>, <ast.Name object at 0x7da2047eae60>, <ast.Name object at 0x7da2047e81f0>]]
if compare[name[name] is_not constant[None]] begin[:]
call[name[self].queueindex][name[name]] assign[=] name[qi]
call[name[self].queueindex][name[newQueue]] assign[=] name[qi]
return[name[newQueue]] | keyword[def] identifier[addSubQueue] ( identifier[self] , identifier[priority] , identifier[matcher] , identifier[name] = keyword[None] , identifier[maxdefault] = keyword[None] , identifier[maxtotal] = keyword[None] , identifier[defaultQueueClass] = identifier[FifoQueue] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[name] keyword[in] identifier[self] . identifier[queueindex] :
keyword[raise] identifier[IndexError] ( literal[string] + identifier[str] ( identifier[name] )+ literal[string] )
identifier[subtree] = identifier[self] . identifier[tree] . identifier[subtree] ( identifier[matcher] , keyword[True] )
identifier[newPriority] = identifier[self] . identifier[queues] . identifier[setdefault] ( identifier[priority] , identifier[CBQueue] . identifier[MultiQueue] ( identifier[self] , identifier[priority] ))
identifier[newQueue] = identifier[CBQueue] ( identifier[subtree] , identifier[newPriority] , identifier[maxdefault] , identifier[maxtotal] , identifier[defaultQueueClass] )
identifier[newPriority] . identifier[addSubQueue] ( identifier[newQueue] )
identifier[qi] =[ identifier[priority] , identifier[newQueue] , identifier[name] ]
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[queueindex] [ identifier[name] ]= identifier[qi]
identifier[self] . identifier[queueindex] [ identifier[newQueue] ]= identifier[qi]
keyword[return] identifier[newQueue] | def addSubQueue(self, priority, matcher, name=None, maxdefault=None, maxtotal=None, defaultQueueClass=FifoQueue):
"""
add a sub queue to current queue, with a priority and a matcher
:param priority: priority of this queue. Larger is higher, 0 is lowest.
:param matcher: an event matcher to catch events. Every event match the criteria will be stored in this queue.
:param name: a unique name to identify the sub-queue. If none, the queue is anonymous. It can be any hashable value.
:param maxdefault: max length for default queue.
:param maxtotal: max length for sub-queue total, including sub-queues of sub-queue
"""
if name is not None and name in self.queueindex:
raise IndexError("Duplicated sub-queue name '" + str(name) + "'") # depends on [control=['if'], data=[]]
subtree = self.tree.subtree(matcher, True)
newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
newQueue = CBQueue(subtree, newPriority, maxdefault, maxtotal, defaultQueueClass)
newPriority.addSubQueue(newQueue)
qi = [priority, newQueue, name]
if name is not None:
self.queueindex[name] = qi # depends on [control=['if'], data=['name']]
self.queueindex[newQueue] = qi
return newQueue |
def unique(iterable):
    """
    Yield the first occurrence of each hashable item in `iterable`,
    preserving the order in which items are first seen.
    """
    seen = set()
    remember = seen.add
    for item in iterable:
        if item in seen:
            continue
        # First time we meet this item: emit it and record it.
        yield item
        remember(item)
constant[
Returns an iterator that yields the first occurence of a hashable item in
`iterable`.
]
variable[seen] assign[=] call[name[set], parameter[]]
for taget[name[obj]] in starred[name[iterable]] begin[:]
if compare[name[obj] <ast.NotIn object at 0x7da2590d7190> name[seen]] begin[:]
<ast.Yield object at 0x7da1b16036a0>
call[name[seen].add, parameter[name[obj]]] | keyword[def] identifier[unique] ( identifier[iterable] ):
literal[string]
identifier[seen] = identifier[set] ()
keyword[for] identifier[obj] keyword[in] identifier[iterable] :
keyword[if] identifier[obj] keyword[not] keyword[in] identifier[seen] :
keyword[yield] identifier[obj]
identifier[seen] . identifier[add] ( identifier[obj] ) | def unique(iterable):
"""
Returns an iterator that yields the first occurence of a hashable item in
`iterable`.
"""
seen = set()
for obj in iterable:
if obj not in seen:
yield obj
seen.add(obj) # depends on [control=['if'], data=['obj', 'seen']] # depends on [control=['for'], data=['obj']] |
def build_common_all_meta_df(common_meta_dfs, fields_to_remove, remove_all_metadata_fields):
    """
    concatenate the entries in common_meta_dfs, removing columns selectively (fields_to_remove) or entirely (
    remove_all_metadata_fields=True; in this case, effectively just merges all the indexes in common_meta_dfs).

    Returns 2 dataframes (in a tuple): the first has duplicates removed, the second does not.

    Args:
        common_meta_dfs: collection of pandas DataFrames containing the metadata in the "common" direction of the
            concatenation operation
        fields_to_remove: columns to be removed (if present) from the common_meta_dfs
        remove_all_metadata_fields: boolean indicating that all metadata fields should be removed from the
            common_meta_dfs; overrides fields_to_remove if present

    Returns:
        tuple containing
            all_meta_df: pandas dataframe that is the concatenation of the dataframes in common_meta_dfs,
                with duplicate (index, row) pairs dropped
            all_meta_df_with_dups: the same concatenation with duplicates retained
    """
    if remove_all_metadata_fields:
        # Keep only the indexes: one empty-column frame per input.
        trimmed_common_meta_dfs = [pd.DataFrame(index=df.index) for df in common_meta_dfs]
    else:
        # Restrict every frame to the columns they all share so the
        # later concat/duplicate-detection compares like with like.
        shared_column_headers = sorted(set.intersection(*[set(df.columns) for df in common_meta_dfs]))
        logger.debug("shared_column_headers: {}".format(shared_column_headers))
        trimmed_common_meta_dfs = [df[shared_column_headers] for df in common_meta_dfs]
        # Remove any column headers that will prevent dfs from being identical
        # NOTE(review): df[shared_column_headers] may be a view of the caller's
        # frame; confirm the inplace drop below cannot mutate the inputs.
        for df in trimmed_common_meta_dfs:
            df.drop(fields_to_remove, axis=1, errors="ignore", inplace=True)
    # Concatenate all dfs and then remove duplicate rows
    all_meta_df_with_dups = pd.concat(trimmed_common_meta_dfs, axis=0)
    logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
    logger.debug("all_meta_df_with_dups.columns: {}".format(all_meta_df_with_dups.columns))
    logger.debug("all_meta_df_with_dups.index: {}".format(all_meta_df_with_dups.index))
    # If all metadata dfs were empty, df will be empty
    if all_meta_df_with_dups.empty:
        # Simply return unique ids
        all_meta_df = pd.DataFrame(index=all_meta_df_with_dups.index.unique())
    else:
        # Temporarily copy the index into a real column so drop_duplicates
        # considers the id as well as the values; drop the helper column
        # from both frames afterwards.
        all_meta_df_with_dups["concat_column_for_index"] = all_meta_df_with_dups.index
        all_meta_df = all_meta_df_with_dups.copy(deep=True).drop_duplicates()
        all_meta_df.drop("concat_column_for_index", axis=1, inplace=True)
        all_meta_df_with_dups.drop("concat_column_for_index", axis=1, inplace=True)
    logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
    logger.debug("all_meta_df.shape: {}".format(all_meta_df.shape))
    return (all_meta_df, all_meta_df_with_dups)
constant[
concatenate the entries in common_meta_dfs, removing columns selectively (fields_to_remove) or entirely (
remove_all_metadata_fields=True; in this case, effectively just merges all the indexes in common_meta_dfs).
Returns 2 dataframes (in a tuple): the first has duplicates removed, the second does not.
Args:
common_meta_dfs: collection of pandas DataFrames containing the metadata in the "common" direction of the
concatenation operation
fields_to_remove: columns to be removed (if present) from the common_meta_dfs
remove_all_metadata_fields: boolean indicating that all metadata fields should be removed from the
common_meta_dfs; overrides fields_to_remove if present
Returns:
tuple containing
all_meta_df: pandas dataframe that is the concatenation of the dataframes in common_meta_dfs,
all_meta_df_with_dups:
]
if name[remove_all_metadata_fields] begin[:]
variable[trimmed_common_meta_dfs] assign[=] <ast.ListComp object at 0x7da18f720d90>
variable[all_meta_df_with_dups] assign[=] call[name[pd].concat, parameter[name[trimmed_common_meta_dfs]]]
call[name[logger].debug, parameter[call[constant[all_meta_df_with_dups.shape: {}].format, parameter[name[all_meta_df_with_dups].shape]]]]
call[name[logger].debug, parameter[call[constant[all_meta_df_with_dups.columns: {}].format, parameter[name[all_meta_df_with_dups].columns]]]]
call[name[logger].debug, parameter[call[constant[all_meta_df_with_dups.index: {}].format, parameter[name[all_meta_df_with_dups].index]]]]
if name[all_meta_df_with_dups].empty begin[:]
variable[all_meta_df] assign[=] call[name[pd].DataFrame, parameter[]]
call[name[logger].debug, parameter[call[constant[all_meta_df_with_dups.shape: {}].format, parameter[name[all_meta_df_with_dups].shape]]]]
call[name[logger].debug, parameter[call[constant[all_meta_df.shape: {}].format, parameter[name[all_meta_df].shape]]]]
return[tuple[[<ast.Name object at 0x7da20c993be0>, <ast.Name object at 0x7da20c9905e0>]]] | keyword[def] identifier[build_common_all_meta_df] ( identifier[common_meta_dfs] , identifier[fields_to_remove] , identifier[remove_all_metadata_fields] ):
literal[string]
keyword[if] identifier[remove_all_metadata_fields] :
identifier[trimmed_common_meta_dfs] =[ identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[df] . identifier[index] ) keyword[for] identifier[df] keyword[in] identifier[common_meta_dfs] ]
keyword[else] :
identifier[shared_column_headers] = identifier[sorted] ( identifier[set] . identifier[intersection] (*[ identifier[set] ( identifier[df] . identifier[columns] ) keyword[for] identifier[df] keyword[in] identifier[common_meta_dfs] ]))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[shared_column_headers] ))
identifier[trimmed_common_meta_dfs] =[ identifier[df] [ identifier[shared_column_headers] ] keyword[for] identifier[df] keyword[in] identifier[common_meta_dfs] ]
keyword[for] identifier[df] keyword[in] identifier[trimmed_common_meta_dfs] :
identifier[df] . identifier[drop] ( identifier[fields_to_remove] , identifier[axis] = literal[int] , identifier[errors] = literal[string] , identifier[inplace] = keyword[True] )
identifier[all_meta_df_with_dups] = identifier[pd] . identifier[concat] ( identifier[trimmed_common_meta_dfs] , identifier[axis] = literal[int] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[all_meta_df_with_dups] . identifier[shape] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[all_meta_df_with_dups] . identifier[columns] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[all_meta_df_with_dups] . identifier[index] ))
keyword[if] identifier[all_meta_df_with_dups] . identifier[empty] :
identifier[all_meta_df] = identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[all_meta_df_with_dups] . identifier[index] . identifier[unique] ())
keyword[else] :
identifier[all_meta_df_with_dups] [ literal[string] ]= identifier[all_meta_df_with_dups] . identifier[index]
identifier[all_meta_df] = identifier[all_meta_df_with_dups] . identifier[copy] ( identifier[deep] = keyword[True] ). identifier[drop_duplicates] ()
identifier[all_meta_df] . identifier[drop] ( literal[string] , identifier[axis] = literal[int] , identifier[inplace] = keyword[True] )
identifier[all_meta_df_with_dups] . identifier[drop] ( literal[string] , identifier[axis] = literal[int] , identifier[inplace] = keyword[True] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[all_meta_df_with_dups] . identifier[shape] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[all_meta_df] . identifier[shape] ))
keyword[return] ( identifier[all_meta_df] , identifier[all_meta_df_with_dups] ) | def build_common_all_meta_df(common_meta_dfs, fields_to_remove, remove_all_metadata_fields):
"""
concatenate the entries in common_meta_dfs, removing columns selectively (fields_to_remove) or entirely (
remove_all_metadata_fields=True; in this case, effectively just merges all the indexes in common_meta_dfs).
Returns 2 dataframes (in a tuple): the first has duplicates removed, the second does not.
Args:
common_meta_dfs: collection of pandas DataFrames containing the metadata in the "common" direction of the
concatenation operation
fields_to_remove: columns to be removed (if present) from the common_meta_dfs
remove_all_metadata_fields: boolean indicating that all metadata fields should be removed from the
common_meta_dfs; overrides fields_to_remove if present
Returns:
tuple containing
all_meta_df: pandas dataframe that is the concatenation of the dataframes in common_meta_dfs,
all_meta_df_with_dups:
"""
if remove_all_metadata_fields:
trimmed_common_meta_dfs = [pd.DataFrame(index=df.index) for df in common_meta_dfs] # depends on [control=['if'], data=[]]
else:
shared_column_headers = sorted(set.intersection(*[set(df.columns) for df in common_meta_dfs]))
logger.debug('shared_column_headers: {}'.format(shared_column_headers))
trimmed_common_meta_dfs = [df[shared_column_headers] for df in common_meta_dfs]
# Remove any column headers that will prevent dfs from being identical
for df in trimmed_common_meta_dfs:
df.drop(fields_to_remove, axis=1, errors='ignore', inplace=True) # depends on [control=['for'], data=['df']]
# Concatenate all dfs and then remove duplicate rows
all_meta_df_with_dups = pd.concat(trimmed_common_meta_dfs, axis=0)
logger.debug('all_meta_df_with_dups.shape: {}'.format(all_meta_df_with_dups.shape))
logger.debug('all_meta_df_with_dups.columns: {}'.format(all_meta_df_with_dups.columns))
logger.debug('all_meta_df_with_dups.index: {}'.format(all_meta_df_with_dups.index))
# If all metadata dfs were empty, df will be empty
if all_meta_df_with_dups.empty:
# Simply return unique ids
all_meta_df = pd.DataFrame(index=all_meta_df_with_dups.index.unique()) # depends on [control=['if'], data=[]]
else:
all_meta_df_with_dups['concat_column_for_index'] = all_meta_df_with_dups.index
all_meta_df = all_meta_df_with_dups.copy(deep=True).drop_duplicates()
all_meta_df.drop('concat_column_for_index', axis=1, inplace=True)
all_meta_df_with_dups.drop('concat_column_for_index', axis=1, inplace=True)
logger.debug('all_meta_df_with_dups.shape: {}'.format(all_meta_df_with_dups.shape))
logger.debug('all_meta_df.shape: {}'.format(all_meta_df.shape))
return (all_meta_df, all_meta_df_with_dups) |
def compute_upper_bound(self):
    """
    Compute the new upper boundary (nub) starting from the root nodes.

    If a path exists between a root node and another entry in
    `self.upper` we can ignore the root node because it has been
    specialized by one of its successors.

    :return: set containing the non-root upper entries plus every root
        that none of them specializes.
    """
    # The domain does not change while we scan, so fetch it once instead
    # of once per (root, upper) pair as the previous version did.
    domain = self.get_domain()
    non_root_upper = self.upper - self.roots
    nub = set()
    for root in self.roots - self.upper:
        # Keep the root only if no non-root upper entry is reachable
        # from it (i.e. nothing has specialized it).
        if not any(has_path(domain, root, up) for up in non_root_upper):
            nub.add(root)
    return nub | non_root_upper
constant[
We have to compute the new upper boundary (nub) starting from the
root nodes. If a path exists between a root node and another entry in
`self.upper` we can ignore the root node because it has been
specialized by one of its successors.
]
variable[nub] assign[=] call[name[set], parameter[]]
for taget[name[root]] in starred[binary_operation[name[self].roots - name[self].upper]] begin[:]
variable[found] assign[=] constant[False]
for taget[name[up]] in starred[binary_operation[name[self].upper - name[self].roots]] begin[:]
variable[domain] assign[=] call[name[self].get_domain, parameter[]]
if call[name[has_path], parameter[name[domain], name[root], name[up]]] begin[:]
variable[found] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da204345600> begin[:]
call[name[nub].add, parameter[name[root]]]
return[binary_operation[name[nub] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[self].upper - name[self].roots]]] | keyword[def] identifier[compute_upper_bound] ( identifier[self] ):
literal[string]
identifier[nub] = identifier[set] ()
keyword[for] identifier[root] keyword[in] identifier[self] . identifier[roots] - identifier[self] . identifier[upper] :
identifier[found] = keyword[False]
keyword[for] identifier[up] keyword[in] identifier[self] . identifier[upper] - identifier[self] . identifier[roots] :
identifier[domain] = identifier[self] . identifier[get_domain] ()
keyword[if] identifier[has_path] ( identifier[domain] , identifier[root] , identifier[up] ):
identifier[found] = keyword[True]
keyword[break]
keyword[if] keyword[not] identifier[found] :
identifier[nub] . identifier[add] ( identifier[root] )
keyword[return] identifier[nub] |( identifier[self] . identifier[upper] - identifier[self] . identifier[roots] ) | def compute_upper_bound(self):
"""
We have to compute the new upper boundary (nub) starting from the
root nodes. If a path exists between a root node and another entry in
`self.upper` we can ignore the root node because it has been
specialized by one of its successors.
"""
nub = set()
for root in self.roots - self.upper:
found = False
for up in self.upper - self.roots:
domain = self.get_domain()
if has_path(domain, root, up):
found = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['up']]
if not found:
nub.add(root) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['root']]
return nub | self.upper - self.roots |
def _match_tag_regex(self, event_tag, search_tag):
'''
Check if the event_tag matches the search check.
Uses regular expression search to check.
Return True (matches) or False (no match)
'''
return self.cache_regex.get(search_tag).search(event_tag) is not None | def function[_match_tag_regex, parameter[self, event_tag, search_tag]]:
constant[
Check if the event_tag matches the search check.
Uses regular expression search to check.
Return True (matches) or False (no match)
]
return[compare[call[call[name[self].cache_regex.get, parameter[name[search_tag]]].search, parameter[name[event_tag]]] is_not constant[None]]] | keyword[def] identifier[_match_tag_regex] ( identifier[self] , identifier[event_tag] , identifier[search_tag] ):
literal[string]
keyword[return] identifier[self] . identifier[cache_regex] . identifier[get] ( identifier[search_tag] ). identifier[search] ( identifier[event_tag] ) keyword[is] keyword[not] keyword[None] | def _match_tag_regex(self, event_tag, search_tag):
"""
Check if the event_tag matches the search check.
Uses regular expression search to check.
Return True (matches) or False (no match)
"""
return self.cache_regex.get(search_tag).search(event_tag) is not None |
def new_status(self, new_status):
    """
    Sets the new_status of this BuildSetStatusChangedEvent.

    :param new_status: The new_status of this BuildSetStatusChangedEvent.
    :type: str
    :raises ValueError: if new_status is not one of the allowed values
    """
    allowed_values = ["NEW", "DONE", "REJECTED"]
    # Guard clause: accept only members of the closed status set.
    if new_status in allowed_values:
        self._new_status = new_status
        return
    raise ValueError(
        "Invalid value for `new_status` ({0}), must be one of {1}"
        .format(new_status, allowed_values)
    )
constant[
Sets the new_status of this BuildSetStatusChangedEvent.
:param new_status: The new_status of this BuildSetStatusChangedEvent.
:type: str
]
variable[allowed_values] assign[=] list[[<ast.Constant object at 0x7da1b2347a90>, <ast.Constant object at 0x7da1b2345690>, <ast.Constant object at 0x7da1b2345ab0>]]
if compare[name[new_status] <ast.NotIn object at 0x7da2590d7190> name[allowed_values]] begin[:]
<ast.Raise object at 0x7da1b2344340>
name[self]._new_status assign[=] name[new_status] | keyword[def] identifier[new_status] ( identifier[self] , identifier[new_status] ):
literal[string]
identifier[allowed_values] =[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[new_status] keyword[not] keyword[in] identifier[allowed_values] :
keyword[raise] identifier[ValueError] (
literal[string]
. identifier[format] ( identifier[new_status] , identifier[allowed_values] )
)
identifier[self] . identifier[_new_status] = identifier[new_status] | def new_status(self, new_status):
"""
Sets the new_status of this BuildSetStatusChangedEvent.
:param new_status: The new_status of this BuildSetStatusChangedEvent.
:type: str
"""
allowed_values = ['NEW', 'DONE', 'REJECTED']
if new_status not in allowed_values:
raise ValueError('Invalid value for `new_status` ({0}), must be one of {1}'.format(new_status, allowed_values)) # depends on [control=['if'], data=['new_status', 'allowed_values']]
self._new_status = new_status |
    def decrypt_pillar(self, pillar):
        '''
        Decrypt the specified pillar dictionary items, if configured to do so.

        Keys listed in the ``decrypt_pillar`` config option are located in
        ``pillar`` (using ``decrypt_pillar_delimiter`` as the nesting
        separator) and decrypted in place via the configured renderer.

        :param pillar: pillar data dict; decrypted values are written back
            into this dict in place.
        :returns: list of error-message strings (empty when nothing failed).
        '''
        errors = []
        if self.opts.get('decrypt_pillar'):
            decrypt_pillar = self.opts['decrypt_pillar']
            # The option may be expressed as a list of single-key dicts;
            # normalize that form into a plain {key: renderer} dict.
            if not isinstance(decrypt_pillar, dict):
                decrypt_pillar = \
                    salt.utils.data.repack_dictlist(self.opts['decrypt_pillar'])
            if not decrypt_pillar:
                errors.append('decrypt_pillar config option is malformed')
            for key, rend in six.iteritems(decrypt_pillar):
                ptr = salt.utils.data.traverse_dict(
                    pillar,
                    key,
                    default=None,
                    delimiter=self.opts['decrypt_pillar_delimiter'])
                if ptr is None:
                    log.debug('Pillar key %s not present', key)
                    continue
                # Hashability is used as a proxy for immutability: a hashable
                # value cannot be decrypted in place, so the decrypted result
                # must later be written back into the parent container.
                try:
                    hash(ptr)
                    immutable = True
                except TypeError:
                    immutable = False
                try:
                    # Renderer falls back to decrypt_pillar_default when the
                    # key has no renderer of its own configured.
                    ret = salt.utils.crypt.decrypt(
                        ptr,
                        rend or self.opts['decrypt_pillar_default'],
                        renderers=self.rend,
                        opts=self.opts,
                        valid_rend=self.opts['decrypt_pillar_renderers'])
                    if immutable:
                        # Since the key pointed to an immutable type, we need
                        # to replace it in the pillar dict. First we will find
                        # the parent, and then we will replace the child key
                        # with the return data from the renderer.
                        parent, _, child = key.rpartition(
                            self.opts['decrypt_pillar_delimiter'])
                        if not parent:
                            # key is a top-level key, so the pointer to the
                            # parent is the pillar dict itself.
                            ptr = pillar
                        else:
                            ptr = salt.utils.data.traverse_dict(
                                pillar,
                                parent,
                                default=None,
                                delimiter=self.opts['decrypt_pillar_delimiter'])
                        if ptr is not None:
                            ptr[child] = ret
                except Exception as exc:
                    # Collect (and log) failures per key instead of aborting,
                    # so one bad key does not block the rest of the pillar.
                    msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format(
                        key, exc
                    )
                    errors.append(msg)
                    log.error(msg, exc_info=True)
        return errors | def function[decrypt_pillar, parameter[self, pillar]]:
constant[
Decrypt the specified pillar dictionary items, if configured to do so
]
variable[errors] assign[=] list[[]]
if call[name[self].opts.get, parameter[constant[decrypt_pillar]]] begin[:]
variable[decrypt_pillar] assign[=] call[name[self].opts][constant[decrypt_pillar]]
if <ast.UnaryOp object at 0x7da204346a40> begin[:]
variable[decrypt_pillar] assign[=] call[name[salt].utils.data.repack_dictlist, parameter[call[name[self].opts][constant[decrypt_pillar]]]]
if <ast.UnaryOp object at 0x7da2043451b0> begin[:]
call[name[errors].append, parameter[constant[decrypt_pillar config option is malformed]]]
for taget[tuple[[<ast.Name object at 0x7da204345000>, <ast.Name object at 0x7da204345330>]]] in starred[call[name[six].iteritems, parameter[name[decrypt_pillar]]]] begin[:]
variable[ptr] assign[=] call[name[salt].utils.data.traverse_dict, parameter[name[pillar], name[key]]]
if compare[name[ptr] is constant[None]] begin[:]
call[name[log].debug, parameter[constant[Pillar key %s not present], name[key]]]
continue
<ast.Try object at 0x7da204346320>
<ast.Try object at 0x7da204344460>
return[name[errors]] | keyword[def] identifier[decrypt_pillar] ( identifier[self] , identifier[pillar] ):
literal[string]
identifier[errors] =[]
keyword[if] identifier[self] . identifier[opts] . identifier[get] ( literal[string] ):
identifier[decrypt_pillar] = identifier[self] . identifier[opts] [ literal[string] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[decrypt_pillar] , identifier[dict] ):
identifier[decrypt_pillar] = identifier[salt] . identifier[utils] . identifier[data] . identifier[repack_dictlist] ( identifier[self] . identifier[opts] [ literal[string] ])
keyword[if] keyword[not] identifier[decrypt_pillar] :
identifier[errors] . identifier[append] ( literal[string] )
keyword[for] identifier[key] , identifier[rend] keyword[in] identifier[six] . identifier[iteritems] ( identifier[decrypt_pillar] ):
identifier[ptr] = identifier[salt] . identifier[utils] . identifier[data] . identifier[traverse_dict] (
identifier[pillar] ,
identifier[key] ,
identifier[default] = keyword[None] ,
identifier[delimiter] = identifier[self] . identifier[opts] [ literal[string] ])
keyword[if] identifier[ptr] keyword[is] keyword[None] :
identifier[log] . identifier[debug] ( literal[string] , identifier[key] )
keyword[continue]
keyword[try] :
identifier[hash] ( identifier[ptr] )
identifier[immutable] = keyword[True]
keyword[except] identifier[TypeError] :
identifier[immutable] = keyword[False]
keyword[try] :
identifier[ret] = identifier[salt] . identifier[utils] . identifier[crypt] . identifier[decrypt] (
identifier[ptr] ,
identifier[rend] keyword[or] identifier[self] . identifier[opts] [ literal[string] ],
identifier[renderers] = identifier[self] . identifier[rend] ,
identifier[opts] = identifier[self] . identifier[opts] ,
identifier[valid_rend] = identifier[self] . identifier[opts] [ literal[string] ])
keyword[if] identifier[immutable] :
identifier[parent] , identifier[_] , identifier[child] = identifier[key] . identifier[rpartition] (
identifier[self] . identifier[opts] [ literal[string] ])
keyword[if] keyword[not] identifier[parent] :
identifier[ptr] = identifier[pillar]
keyword[else] :
identifier[ptr] = identifier[salt] . identifier[utils] . identifier[data] . identifier[traverse_dict] (
identifier[pillar] ,
identifier[parent] ,
identifier[default] = keyword[None] ,
identifier[delimiter] = identifier[self] . identifier[opts] [ literal[string] ])
keyword[if] identifier[ptr] keyword[is] keyword[not] keyword[None] :
identifier[ptr] [ identifier[child] ]= identifier[ret]
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[msg] = literal[string] . identifier[format] (
identifier[key] , identifier[exc]
)
identifier[errors] . identifier[append] ( identifier[msg] )
identifier[log] . identifier[error] ( identifier[msg] , identifier[exc_info] = keyword[True] )
keyword[return] identifier[errors] | def decrypt_pillar(self, pillar):
"""
Decrypt the specified pillar dictionary items, if configured to do so
"""
errors = []
if self.opts.get('decrypt_pillar'):
decrypt_pillar = self.opts['decrypt_pillar']
if not isinstance(decrypt_pillar, dict):
decrypt_pillar = salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) # depends on [control=['if'], data=[]]
if not decrypt_pillar:
errors.append('decrypt_pillar config option is malformed') # depends on [control=['if'], data=[]]
for (key, rend) in six.iteritems(decrypt_pillar):
ptr = salt.utils.data.traverse_dict(pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter'])
if ptr is None:
log.debug('Pillar key %s not present', key)
continue # depends on [control=['if'], data=[]]
try:
hash(ptr)
immutable = True # depends on [control=['try'], data=[]]
except TypeError:
immutable = False # depends on [control=['except'], data=[]]
try:
ret = salt.utils.crypt.decrypt(ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers'])
if immutable:
# Since the key pointed to an immutable type, we need
# to replace it in the pillar dict. First we will find
# the parent, and then we will replace the child key
# with the return data from the renderer.
(parent, _, child) = key.rpartition(self.opts['decrypt_pillar_delimiter'])
if not parent:
# key is a top-level key, so the pointer to the
# parent is the pillar dict itself.
ptr = pillar # depends on [control=['if'], data=[]]
else:
ptr = salt.utils.data.traverse_dict(pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter'])
if ptr is not None:
ptr[child] = ret # depends on [control=['if'], data=['ptr']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as exc:
msg = "Failed to decrypt pillar key '{0}': {1}".format(key, exc)
errors.append(msg)
log.error(msg, exc_info=True) # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return errors |
def get_timeseries_list(points, timestamp):
    """Convert a mapping of `GaugePoint`s into a list of `TimeSeries`.
    Get a :class:`opencensus.metrics.export.time_series.TimeSeries` for each
    measurement in `points`. Each series contains a single
    :class:`opencensus.metrics.export.point.Point` that represents the last
    recorded value of the measurement.
    :type points: dict(label values, :class:`GaugePoint`)
    :param points: Mapping of label values to the measurement to convert;
        the key becomes the label value set of the resulting series.
    :type timestamp: :class:`datetime.datetime`
    :param timestamp: Recording time to report, usually the current time.
    :rtype: list(:class:`opencensus.metrics.export.time_series.TimeSeries`)
    :return: A list of one `TimeSeries` for each point in `points`.
    """
    ts_list = []
    for lv, gp in points.items():
        # One single-point series per measurement: the point carries the
        # gauge's current value stamped with the supplied timestamp.
        point = point_module.Point(gp.to_point_value(), timestamp)
        ts_list.append(time_series.TimeSeries(lv, [point], timestamp))
    return ts_list | def function[get_timeseries_list, parameter[points, timestamp]]:
constant[Convert a list of `GaugePoint`s into a list of `TimeSeries`.
Get a :class:`opencensus.metrics.export.time_series.TimeSeries` for each
measurement in `points`. Each series contains a single
:class:`opencensus.metrics.export.point.Point` that represents the last
recorded value of the measurement.
:type points: list(:class:`GaugePoint`)
:param points: The list of measurements to convert.
:type timestamp: :class:`datetime.datetime`
:param timestamp: Recording time to report, usually the current time.
:rtype: list(:class:`opencensus.metrics.export.time_series.TimeSeries`)
:return: A list of one `TimeSeries` for each point in `points`.
]
variable[ts_list] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1849ba0>, <ast.Name object at 0x7da1b184bc10>]]] in starred[call[name[points].items, parameter[]]] begin[:]
variable[point] assign[=] call[name[point_module].Point, parameter[call[name[gp].to_point_value, parameter[]], name[timestamp]]]
call[name[ts_list].append, parameter[call[name[time_series].TimeSeries, parameter[name[lv], list[[<ast.Name object at 0x7da2046222c0>]], name[timestamp]]]]]
return[name[ts_list]] | keyword[def] identifier[get_timeseries_list] ( identifier[points] , identifier[timestamp] ):
literal[string]
identifier[ts_list] =[]
keyword[for] identifier[lv] , identifier[gp] keyword[in] identifier[points] . identifier[items] ():
identifier[point] = identifier[point_module] . identifier[Point] ( identifier[gp] . identifier[to_point_value] (), identifier[timestamp] )
identifier[ts_list] . identifier[append] ( identifier[time_series] . identifier[TimeSeries] ( identifier[lv] ,[ identifier[point] ], identifier[timestamp] ))
keyword[return] identifier[ts_list] | def get_timeseries_list(points, timestamp):
"""Convert a list of `GaugePoint`s into a list of `TimeSeries`.
Get a :class:`opencensus.metrics.export.time_series.TimeSeries` for each
measurement in `points`. Each series contains a single
:class:`opencensus.metrics.export.point.Point` that represents the last
recorded value of the measurement.
:type points: list(:class:`GaugePoint`)
:param points: The list of measurements to convert.
:type timestamp: :class:`datetime.datetime`
:param timestamp: Recording time to report, usually the current time.
:rtype: list(:class:`opencensus.metrics.export.time_series.TimeSeries`)
:return: A list of one `TimeSeries` for each point in `points`.
"""
ts_list = []
for (lv, gp) in points.items():
point = point_module.Point(gp.to_point_value(), timestamp)
ts_list.append(time_series.TimeSeries(lv, [point], timestamp)) # depends on [control=['for'], data=[]]
return ts_list |
    def corrArray(self, inputArray):
        """Evaluate every spline in ``self.splines`` on ``inputArray`` and
        return their element-wise mean.

        NaNs produced by a spline are replaced with zero (via
        ``numpy.nan_to_num``) before averaging.

        :param inputArray: numpy array of positions at which to evaluate
            the splines.
        :returns: numpy array of the averaged spline values, one entry per
            element of ``inputArray``.
        """
        outputArray = numpy.vstack([numpy.nan_to_num(currSpline(inputArray))
                                    for currSpline in self.splines
                                    ]).mean(axis=0)
        return outputArray | def function[corrArray, parameter[self, inputArray]]:
constant[#TODO: docstring
:param inputArray: #TODO: docstring
:returns: #TODO docstring
]
variable[outputArray] assign[=] call[call[name[numpy].vstack, parameter[<ast.ListComp object at 0x7da20e9b2680>]].mean, parameter[]]
return[name[outputArray]] | keyword[def] identifier[corrArray] ( identifier[self] , identifier[inputArray] ):
literal[string]
identifier[outputArray] = identifier[numpy] . identifier[vstack] ([ identifier[numpy] . identifier[nan_to_num] ( identifier[currSpline] ( identifier[inputArray] ))
keyword[for] identifier[currSpline] keyword[in] identifier[self] . identifier[splines]
]). identifier[mean] ( identifier[axis] = literal[int] )
keyword[return] identifier[outputArray] | def corrArray(self, inputArray):
"""#TODO: docstring
:param inputArray: #TODO: docstring
:returns: #TODO docstring
"""
outputArray = numpy.vstack([numpy.nan_to_num(currSpline(inputArray)) for currSpline in self.splines]).mean(axis=0)
return outputArray |
def logpowspec(frames, NFFT, norm=1):
    """Compute the log power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
    :param frames: the array of frames. Each row is a frame.
    :param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
    :param norm: If norm=1, the log power spectrum is normalised so that the max value (across all frames) is 0.
    :returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the log power spectrum of the corresponding frame.
    """
    ps = powspec(frames, NFFT);
    # Floor the power spectrum so the log below never sees zero
    # (log10(0) would yield -inf).
    ps[ps <= 1e-30] = 1e-30
    # Convert power to decibels.
    lps = 10 * numpy.log10(ps)
    if norm:
        # Shift so the global maximum (across all frames) sits at 0 dB.
        return lps - numpy.max(lps)
    else:
        return lps | def function[logpowspec, parameter[frames, NFFT, norm]]:
constant[Compute the log power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:param norm: If norm=1, the log power spectrum is normalised so that the max value (across all frames) is 0.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the log power spectrum of the corresponding frame.
]
variable[ps] assign[=] call[name[powspec], parameter[name[frames], name[NFFT]]]
call[name[ps]][compare[name[ps] less_or_equal[<=] constant[1e-30]]] assign[=] constant[1e-30]
variable[lps] assign[=] binary_operation[constant[10] * call[name[numpy].log10, parameter[name[ps]]]]
if name[norm] begin[:]
return[binary_operation[name[lps] - call[name[numpy].max, parameter[name[lps]]]]] | keyword[def] identifier[logpowspec] ( identifier[frames] , identifier[NFFT] , identifier[norm] = literal[int] ):
literal[string]
identifier[ps] = identifier[powspec] ( identifier[frames] , identifier[NFFT] );
identifier[ps] [ identifier[ps] <= literal[int] ]= literal[int]
identifier[lps] = literal[int] * identifier[numpy] . identifier[log10] ( identifier[ps] )
keyword[if] identifier[norm] :
keyword[return] identifier[lps] - identifier[numpy] . identifier[max] ( identifier[lps] )
keyword[else] :
keyword[return] identifier[lps] | def logpowspec(frames, NFFT, norm=1):
"""Compute the log power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:param norm: If norm=1, the log power spectrum is normalised so that the max value (across all frames) is 0.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the log power spectrum of the corresponding frame.
"""
ps = powspec(frames, NFFT)
ps[ps <= 1e-30] = 1e-30
lps = 10 * numpy.log10(ps)
if norm:
return lps - numpy.max(lps) # depends on [control=['if'], data=[]]
else:
return lps |
    def local_to_global(self, index):
        """ Calculate local index from global index.

        :param index: input index; an int, a slice, or a tuple mixing both
            (one entry per masked dimension).
        :return: local index for data, same shape as ``index``; may be
            ``None`` when an int entry maps to no local position.
        :raises IndexError: if ``index`` has the wrong type or length for
            the mask.
        """
        if (type(index) is int) or (type(index) is slice):
            # A bare int/slice only makes sense for a one-dimensional mask.
            if len(self.__mask) > 1:
                raise IndexError('check length of parameter index')
            # 1D array
            if type(index) is int:
                return self.int_local_to_global(index)
            elif type(index) is slice:
                return self.slice_local_to_global(index)
            else:
                raise IndexError('check data type of index to be integer or slice')
        elif type(index) is tuple:
            local_index = []
            for k, item in enumerate(index):
                if k < len(self.__mask):
                    # Dimensions covered by the mask are translated;
                    # anything beyond the mask is passed through unchanged.
                    if type(item) is slice:
                        temp_index = self.slice_local_to_global(item, k)
                    elif type(item) in [int, np.int64, np.int32]:
                        temp_index = self.int_local_to_global(item, k)
                    # NOTE(review): if ``item`` is neither a slice nor one of
                    # the listed int types, ``temp_index`` is unbound (or
                    # stale from the previous iteration) here — confirm
                    # callers can never pass other types.
                    if temp_index is None:
                        return temp_index
                else:
                    temp_index = item
                local_index.append(temp_index)
            return tuple(local_index)
        else:
            raise IndexError('check index for correct length and type') | def function[local_to_global, parameter[self, index]]:
constant[ Calculate local index from global index
:param index: input index
:return: local index for data
]
if <ast.BoolOp object at 0x7da2041d8370> begin[:]
if compare[call[name[len], parameter[name[self].__mask]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da2041daf80>
if compare[call[name[type], parameter[name[index]]] is name[int]] begin[:]
return[call[name[self].int_local_to_global, parameter[name[index]]]] | keyword[def] identifier[local_to_global] ( identifier[self] , identifier[index] ):
literal[string]
keyword[if] ( identifier[type] ( identifier[index] ) keyword[is] identifier[int] ) keyword[or] ( identifier[type] ( identifier[index] ) keyword[is] identifier[slice] ):
keyword[if] identifier[len] ( identifier[self] . identifier[__mask] )> literal[int] :
keyword[raise] identifier[IndexError] ( literal[string] )
keyword[if] identifier[type] ( identifier[index] ) keyword[is] identifier[int] :
keyword[return] identifier[self] . identifier[int_local_to_global] ( identifier[index] )
keyword[elif] identifier[type] ( identifier[index] ) keyword[is] identifier[slice] :
keyword[return] identifier[self] . identifier[slice_local_to_global] ( identifier[index] )
keyword[else] :
keyword[raise] identifier[IndexError] ( literal[string] )
keyword[elif] identifier[type] ( identifier[index] ) keyword[is] identifier[tuple] :
identifier[local_index] =[]
keyword[for] identifier[k] , identifier[item] keyword[in] identifier[enumerate] ( identifier[index] ):
keyword[if] identifier[k] < identifier[len] ( identifier[self] . identifier[__mask] ):
keyword[if] identifier[type] ( identifier[item] ) keyword[is] identifier[slice] :
identifier[temp_index] = identifier[self] . identifier[slice_local_to_global] ( identifier[item] , identifier[k] )
keyword[elif] identifier[type] ( identifier[item] ) keyword[in] [ identifier[int] , identifier[np] . identifier[int64] , identifier[np] . identifier[int32] ]:
identifier[temp_index] = identifier[self] . identifier[int_local_to_global] ( identifier[item] , identifier[k] )
keyword[if] identifier[temp_index] keyword[is] keyword[None] :
keyword[return] identifier[temp_index]
keyword[else] :
identifier[temp_index] = identifier[item]
identifier[local_index] . identifier[append] ( identifier[temp_index] )
keyword[return] identifier[tuple] ( identifier[local_index] )
keyword[else] :
keyword[raise] identifier[IndexError] ( literal[string] ) | def local_to_global(self, index):
""" Calculate local index from global index
:param index: input index
:return: local index for data
"""
if type(index) is int or type(index) is slice:
if len(self.__mask) > 1:
raise IndexError('check length of parameter index') # depends on [control=['if'], data=[]]
# 1D array
if type(index) is int:
return self.int_local_to_global(index) # depends on [control=['if'], data=[]]
elif type(index) is slice:
return self.slice_local_to_global(index) # depends on [control=['if'], data=[]]
else:
raise IndexError('check data type of index to be integer or slice') # depends on [control=['if'], data=[]]
elif type(index) is tuple:
local_index = []
for (k, item) in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_local_to_global(item, k) # depends on [control=['if'], data=[]]
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_local_to_global(item, k) # depends on [control=['if'], data=[]]
if temp_index is None:
return temp_index # depends on [control=['if'], data=['temp_index']] # depends on [control=['if'], data=['k']]
else:
temp_index = item
local_index.append(temp_index) # depends on [control=['for'], data=[]]
return tuple(local_index) # depends on [control=['if'], data=['tuple']]
else:
raise IndexError('check index for correct length and type') |
    def from_cli(cls, ifo, args, maxlen):
        """Initialize a StrainBuffer object (data reader) for a particular
        detector.

        :param ifo: detector name; keys every per-detector mapping held on
            ``args`` (channels, flags, frame types, ...).
        :param args: parsed command-line namespace carrying the strain
            options that are forwarded to ``cls`` below.
        :param maxlen: analysis buffer length; the raw buffer is kept at
            twice this size (``max_buffer=maxlen * 2``).
        :returns: a new instance of ``cls`` configured for this detector.
        """
        # Optional detector-state channel: only used when both the channel
        # and its analyze flags are configured for this detector.
        state_channel = analyze_flags = None
        if args.state_channel and ifo in args.state_channel \
           and args.analyze_flags and ifo in args.analyze_flags:
            state_channel = ':'.join([ifo, args.state_channel[ifo]])
            analyze_flags = args.analyze_flags[ifo].split(',')
        # Optional data-quality channel, same pairing requirement as above.
        dq_channel = dq_flags = None
        if args.data_quality_channel and ifo in args.data_quality_channel \
           and args.data_quality_flags and ifo in args.data_quality_flags:
            dq_channel = ':'.join([ifo, args.data_quality_channel[ifo]])
            dq_flags = args.data_quality_flags[ifo].split(',')
        # Resolve frame files from a frame-type query when given, otherwise
        # fall back to the explicitly supplied per-detector frame source.
        if args.frame_type:
            frame_src = pycbc.frame.frame_paths(args.frame_type[ifo],
                                                args.start_time,
                                                args.end_time)
        else:
            frame_src = [args.frame_src[ifo]]
        strain_channel = ':'.join([ifo, args.channel_name[ifo]])
        return cls(frame_src, strain_channel,
                   args.start_time, max_buffer=maxlen * 2,
                   state_channel=state_channel,
                   data_quality_channel=dq_channel,
                   sample_rate=args.sample_rate,
                   low_frequency_cutoff=args.low_frequency_cutoff,
                   highpass_frequency=args.highpass_frequency,
                   highpass_reduction=args.highpass_reduction,
                   highpass_bandwidth=args.highpass_bandwidth,
                   psd_samples=args.psd_samples,
                   trim_padding=args.trim_padding,
                   psd_segment_length=args.psd_segment_length,
                   psd_inverse_length=args.psd_inverse_length,
                   autogating_threshold=args.autogating_threshold,
                   autogating_cluster=args.autogating_cluster,
                   autogating_window=args.autogating_window,
                   autogating_pad=args.autogating_pad,
                   psd_abort_difference=args.psd_abort_difference,
                   psd_recalculate_difference=args.psd_recalculate_difference,
                   force_update_cache=args.force_update_cache,
                   increment_update_cache=args.increment_update_cache[ifo],
                   analyze_flags=analyze_flags,
                   data_quality_flags=dq_flags,
                   dq_padding=args.data_quality_padding) | def function[from_cli, parameter[cls, ifo, args, maxlen]]:
constant[Initialize a StrainBuffer object (data reader) for a particular
detector.
]
variable[state_channel] assign[=] constant[None]
if <ast.BoolOp object at 0x7da20c9910f0> begin[:]
variable[state_channel] assign[=] call[constant[:].join, parameter[list[[<ast.Name object at 0x7da1b1e73df0>, <ast.Subscript object at 0x7da1b1e73f70>]]]]
variable[analyze_flags] assign[=] call[call[name[args].analyze_flags][name[ifo]].split, parameter[constant[,]]]
variable[dq_channel] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b1e73d30> begin[:]
variable[dq_channel] assign[=] call[constant[:].join, parameter[list[[<ast.Name object at 0x7da1b1e72fe0>, <ast.Subscript object at 0x7da1b1e71480>]]]]
variable[dq_flags] assign[=] call[call[name[args].data_quality_flags][name[ifo]].split, parameter[constant[,]]]
if name[args].frame_type begin[:]
variable[frame_src] assign[=] call[name[pycbc].frame.frame_paths, parameter[call[name[args].frame_type][name[ifo]], name[args].start_time, name[args].end_time]]
variable[strain_channel] assign[=] call[constant[:].join, parameter[list[[<ast.Name object at 0x7da20c992590>, <ast.Subscript object at 0x7da20c993730>]]]]
return[call[name[cls], parameter[name[frame_src], name[strain_channel], name[args].start_time]]] | keyword[def] identifier[from_cli] ( identifier[cls] , identifier[ifo] , identifier[args] , identifier[maxlen] ):
literal[string]
identifier[state_channel] = identifier[analyze_flags] = keyword[None]
keyword[if] identifier[args] . identifier[state_channel] keyword[and] identifier[ifo] keyword[in] identifier[args] . identifier[state_channel] keyword[and] identifier[args] . identifier[analyze_flags] keyword[and] identifier[ifo] keyword[in] identifier[args] . identifier[analyze_flags] :
identifier[state_channel] = literal[string] . identifier[join] ([ identifier[ifo] , identifier[args] . identifier[state_channel] [ identifier[ifo] ]])
identifier[analyze_flags] = identifier[args] . identifier[analyze_flags] [ identifier[ifo] ]. identifier[split] ( literal[string] )
identifier[dq_channel] = identifier[dq_flags] = keyword[None]
keyword[if] identifier[args] . identifier[data_quality_channel] keyword[and] identifier[ifo] keyword[in] identifier[args] . identifier[data_quality_channel] keyword[and] identifier[args] . identifier[data_quality_flags] keyword[and] identifier[ifo] keyword[in] identifier[args] . identifier[data_quality_flags] :
identifier[dq_channel] = literal[string] . identifier[join] ([ identifier[ifo] , identifier[args] . identifier[data_quality_channel] [ identifier[ifo] ]])
identifier[dq_flags] = identifier[args] . identifier[data_quality_flags] [ identifier[ifo] ]. identifier[split] ( literal[string] )
keyword[if] identifier[args] . identifier[frame_type] :
identifier[frame_src] = identifier[pycbc] . identifier[frame] . identifier[frame_paths] ( identifier[args] . identifier[frame_type] [ identifier[ifo] ],
identifier[args] . identifier[start_time] ,
identifier[args] . identifier[end_time] )
keyword[else] :
identifier[frame_src] =[ identifier[args] . identifier[frame_src] [ identifier[ifo] ]]
identifier[strain_channel] = literal[string] . identifier[join] ([ identifier[ifo] , identifier[args] . identifier[channel_name] [ identifier[ifo] ]])
keyword[return] identifier[cls] ( identifier[frame_src] , identifier[strain_channel] ,
identifier[args] . identifier[start_time] , identifier[max_buffer] = identifier[maxlen] * literal[int] ,
identifier[state_channel] = identifier[state_channel] ,
identifier[data_quality_channel] = identifier[dq_channel] ,
identifier[sample_rate] = identifier[args] . identifier[sample_rate] ,
identifier[low_frequency_cutoff] = identifier[args] . identifier[low_frequency_cutoff] ,
identifier[highpass_frequency] = identifier[args] . identifier[highpass_frequency] ,
identifier[highpass_reduction] = identifier[args] . identifier[highpass_reduction] ,
identifier[highpass_bandwidth] = identifier[args] . identifier[highpass_bandwidth] ,
identifier[psd_samples] = identifier[args] . identifier[psd_samples] ,
identifier[trim_padding] = identifier[args] . identifier[trim_padding] ,
identifier[psd_segment_length] = identifier[args] . identifier[psd_segment_length] ,
identifier[psd_inverse_length] = identifier[args] . identifier[psd_inverse_length] ,
identifier[autogating_threshold] = identifier[args] . identifier[autogating_threshold] ,
identifier[autogating_cluster] = identifier[args] . identifier[autogating_cluster] ,
identifier[autogating_window] = identifier[args] . identifier[autogating_window] ,
identifier[autogating_pad] = identifier[args] . identifier[autogating_pad] ,
identifier[psd_abort_difference] = identifier[args] . identifier[psd_abort_difference] ,
identifier[psd_recalculate_difference] = identifier[args] . identifier[psd_recalculate_difference] ,
identifier[force_update_cache] = identifier[args] . identifier[force_update_cache] ,
identifier[increment_update_cache] = identifier[args] . identifier[increment_update_cache] [ identifier[ifo] ],
identifier[analyze_flags] = identifier[analyze_flags] ,
identifier[data_quality_flags] = identifier[dq_flags] ,
identifier[dq_padding] = identifier[args] . identifier[data_quality_padding] ) | def from_cli(cls, ifo, args, maxlen):
"""Initialize a StrainBuffer object (data reader) for a particular
detector.
"""
state_channel = analyze_flags = None
if args.state_channel and ifo in args.state_channel and args.analyze_flags and (ifo in args.analyze_flags):
state_channel = ':'.join([ifo, args.state_channel[ifo]])
analyze_flags = args.analyze_flags[ifo].split(',') # depends on [control=['if'], data=[]]
dq_channel = dq_flags = None
if args.data_quality_channel and ifo in args.data_quality_channel and args.data_quality_flags and (ifo in args.data_quality_flags):
dq_channel = ':'.join([ifo, args.data_quality_channel[ifo]])
dq_flags = args.data_quality_flags[ifo].split(',') # depends on [control=['if'], data=[]]
if args.frame_type:
frame_src = pycbc.frame.frame_paths(args.frame_type[ifo], args.start_time, args.end_time) # depends on [control=['if'], data=[]]
else:
frame_src = [args.frame_src[ifo]]
strain_channel = ':'.join([ifo, args.channel_name[ifo]])
return cls(frame_src, strain_channel, args.start_time, max_buffer=maxlen * 2, state_channel=state_channel, data_quality_channel=dq_channel, sample_rate=args.sample_rate, low_frequency_cutoff=args.low_frequency_cutoff, highpass_frequency=args.highpass_frequency, highpass_reduction=args.highpass_reduction, highpass_bandwidth=args.highpass_bandwidth, psd_samples=args.psd_samples, trim_padding=args.trim_padding, psd_segment_length=args.psd_segment_length, psd_inverse_length=args.psd_inverse_length, autogating_threshold=args.autogating_threshold, autogating_cluster=args.autogating_cluster, autogating_window=args.autogating_window, autogating_pad=args.autogating_pad, psd_abort_difference=args.psd_abort_difference, psd_recalculate_difference=args.psd_recalculate_difference, force_update_cache=args.force_update_cache, increment_update_cache=args.increment_update_cache[ifo], analyze_flags=analyze_flags, data_quality_flags=dq_flags, dq_padding=args.data_quality_padding) |
    def update_rel_to(self, klass):
        """
        If we have a string for a model, see if we know about it yet,
        if so use it directly otherwise take the lazy approach.
        This check is needed because this is called before
        the main M2M field contribute to class is called.

        :param klass: the model class this field is being contributed to;
            its app registry is used to resolve the string reference.
        """
        if isinstance(self.remote_field.to, basestring):
            relation = self.remote_field.to
            try:
                app_label, model_name = relation.split(".")
            except ValueError:
                # If we can't split, assume a model in current app
                app_label = klass._meta.app_label
                model_name = relation
            model = None
            try:
                model = klass._meta.apps.get_registered_model(app_label, model_name)
            # For django < 1.6
            except AttributeError:
                model = models.get_model(
                    app_label, model_name,
                    seed_cache=False, only_installed=False)
            except LookupError:
                # Model not registered yet; leave the lazy string reference
                # in place for Django to resolve later.
                pass
            if model:
                self.remote_field.model = model | def function[update_rel_to, parameter[self, klass]]:
constant[
If we have a string for a model, see if we know about it yet,
if so use it directly otherwise take the lazy approach.
This check is needed because this is called before
the main M2M field contribute to class is called.
]
if call[name[isinstance], parameter[name[self].remote_field.to, name[basestring]]] begin[:]
variable[relation] assign[=] name[self].remote_field.to
<ast.Try object at 0x7da1b0b19120>
variable[model] assign[=] constant[None]
<ast.Try object at 0x7da1b0b18850>
if name[model] begin[:]
name[self].remote_field.model assign[=] name[model] | keyword[def] identifier[update_rel_to] ( identifier[self] , identifier[klass] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[remote_field] . identifier[to] , identifier[basestring] ):
identifier[relation] = identifier[self] . identifier[remote_field] . identifier[to]
keyword[try] :
identifier[app_label] , identifier[model_name] = identifier[relation] . identifier[split] ( literal[string] )
keyword[except] identifier[ValueError] :
identifier[app_label] = identifier[klass] . identifier[_meta] . identifier[app_label]
identifier[model_name] = identifier[relation]
identifier[model] = keyword[None]
keyword[try] :
identifier[model] = identifier[klass] . identifier[_meta] . identifier[apps] . identifier[get_registered_model] ( identifier[app_label] , identifier[model_name] )
keyword[except] identifier[AttributeError] :
identifier[model] = identifier[models] . identifier[get_model] (
identifier[app_label] , identifier[model_name] ,
identifier[seed_cache] = keyword[False] , identifier[only_installed] = keyword[False] )
keyword[except] identifier[LookupError] :
keyword[pass]
keyword[if] identifier[model] :
identifier[self] . identifier[remote_field] . identifier[model] = identifier[model] | def update_rel_to(self, klass):
"""
If we have a string for a model, see if we know about it yet,
if so use it directly otherwise take the lazy approach.
This check is needed because this is called before
the main M2M field contribute to class is called.
"""
if isinstance(self.remote_field.to, basestring):
relation = self.remote_field.to
try:
(app_label, model_name) = relation.split('.') # depends on [control=['try'], data=[]]
except ValueError:
# If we can't split, assume a model in current app
app_label = klass._meta.app_label
model_name = relation # depends on [control=['except'], data=[]]
model = None
try:
model = klass._meta.apps.get_registered_model(app_label, model_name) # depends on [control=['try'], data=[]]
# For django < 1.6
except AttributeError:
model = models.get_model(app_label, model_name, seed_cache=False, only_installed=False) # depends on [control=['except'], data=[]]
except LookupError:
pass # depends on [control=['except'], data=[]]
if model:
self.remote_field.model = model # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def lchisqprob(chisq, df):
    """
    Return the (1-tailed) probability value associated with the provided
    chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.

    Computes the upper-tail probability P(X >= chisq) for a chi-square
    variate with ``df`` degrees of freedom via the classic series
    expansion; odd ``df`` seeds the series from the normal-tail helper
    ``zprob`` (defined elsewhere in this module).

    Usage:   lchisqprob(chisq,df)

    Parameters
    ----------
    chisq : float
        Observed chi-square statistic.
    df : int
        Degrees of freedom (>= 1).

    Returns
    -------
    float
        Upper-tail probability; 1.0 when ``chisq`` <= 0 or ``df`` < 1.
    """
    # Threshold above which the series must be evaluated in log space.
    # (The original defined BIG twice — once here and once shadowed
    # inside ex(); consolidated to a single closure constant.)
    BIG = 20.0

    def ex(x):
        # exp(x) clamped to 0.0 for very negative x, avoiding pointless
        # underflow / range errors in the tiny series terms.
        if x < -BIG:
            return 0.0
        return math.exp(x)

    if chisq <= 0 or df < 1:
        return 1.0
    a = 0.5 * chisq
    even = df % 2 == 0
    if df > 1:
        y = ex(-a)
    if even:
        s = y
    else:
        # Odd df: seed the series with twice the normal tail probability.
        s = 2.0 * zprob(-math.sqrt(chisq))
    if df > 2:
        # Number of series terms is (df - 1) / 2 (integer or half-integer).
        chisq = 0.5 * (df - 1.0)
        z = 1.0 if even else 0.5
        if a > BIG:
            # Large argument: accumulate terms in log space so the
            # intermediate magnitudes stay representable.
            e = 0.0 if even else math.log(math.sqrt(math.pi))
            c = math.log(a)
            while z <= chisq:
                e = math.log(z) + e
                s = s + ex(c * z - a - e)
                z = z + 1.0
            return s
        else:
            # Moderate argument: accumulate the term ratio directly.
            e = 1.0 if even else 1.0 / math.sqrt(math.pi) / math.sqrt(a)
            c = 0.0
            while z <= chisq:
                e = e * (a / float(z))
                c = c + e
                z = z + 1.0
            return c * y + s
    else:
        return s
constant[
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
]
variable[BIG] assign[=] constant[20.0]
def function[ex, parameter[x]]:
variable[BIG] assign[=] constant[20.0]
if compare[name[x] less[<] <ast.UnaryOp object at 0x7da1b0f38580>] begin[:]
return[constant[0.0]]
if <ast.BoolOp object at 0x7da1b0f384c0> begin[:]
return[constant[1.0]]
variable[a] assign[=] binary_operation[constant[0.5] * name[chisq]]
if compare[binary_operation[name[df] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:]
variable[even] assign[=] constant[1]
if compare[name[df] greater[>] constant[1]] begin[:]
variable[y] assign[=] call[name[ex], parameter[<ast.UnaryOp object at 0x7da1b0d43c10>]]
if name[even] begin[:]
variable[s] assign[=] name[y]
if compare[name[df] greater[>] constant[2]] begin[:]
variable[chisq] assign[=] binary_operation[constant[0.5] * binary_operation[name[df] - constant[1.0]]]
if name[even] begin[:]
variable[z] assign[=] constant[1.0]
if compare[name[a] greater[>] name[BIG]] begin[:]
if name[even] begin[:]
variable[e] assign[=] constant[0.0]
variable[c] assign[=] call[name[math].log, parameter[name[a]]]
while compare[name[z] less_or_equal[<=] name[chisq]] begin[:]
variable[e] assign[=] binary_operation[call[name[math].log, parameter[name[z]]] + name[e]]
variable[s] assign[=] binary_operation[name[s] + call[name[ex], parameter[binary_operation[binary_operation[binary_operation[name[c] * name[z]] - name[a]] - name[e]]]]]
variable[z] assign[=] binary_operation[name[z] + constant[1.0]]
return[name[s]] | keyword[def] identifier[lchisqprob] ( identifier[chisq] , identifier[df] ):
literal[string]
identifier[BIG] = literal[int]
keyword[def] identifier[ex] ( identifier[x] ):
identifier[BIG] = literal[int]
keyword[if] identifier[x] <- identifier[BIG] :
keyword[return] literal[int]
keyword[else] :
keyword[return] identifier[math] . identifier[exp] ( identifier[x] )
keyword[if] identifier[chisq] <= literal[int] keyword[or] identifier[df] < literal[int] :
keyword[return] literal[int]
identifier[a] = literal[int] * identifier[chisq]
keyword[if] identifier[df] % literal[int] == literal[int] :
identifier[even] = literal[int]
keyword[else] :
identifier[even] = literal[int]
keyword[if] identifier[df] > literal[int] :
identifier[y] = identifier[ex] (- identifier[a] )
keyword[if] identifier[even] :
identifier[s] = identifier[y]
keyword[else] :
identifier[s] = literal[int] * identifier[zprob] (- identifier[math] . identifier[sqrt] ( identifier[chisq] ))
keyword[if] ( identifier[df] > literal[int] ):
identifier[chisq] = literal[int] *( identifier[df] - literal[int] )
keyword[if] identifier[even] :
identifier[z] = literal[int]
keyword[else] :
identifier[z] = literal[int]
keyword[if] identifier[a] > identifier[BIG] :
keyword[if] identifier[even] :
identifier[e] = literal[int]
keyword[else] :
identifier[e] = identifier[math] . identifier[log] ( identifier[math] . identifier[sqrt] ( identifier[math] . identifier[pi] ))
identifier[c] = identifier[math] . identifier[log] ( identifier[a] )
keyword[while] ( identifier[z] <= identifier[chisq] ):
identifier[e] = identifier[math] . identifier[log] ( identifier[z] )+ identifier[e]
identifier[s] = identifier[s] + identifier[ex] ( identifier[c] * identifier[z] - identifier[a] - identifier[e] )
identifier[z] = identifier[z] + literal[int]
keyword[return] identifier[s]
keyword[else] :
keyword[if] identifier[even] :
identifier[e] = literal[int]
keyword[else] :
identifier[e] = literal[int] / identifier[math] . identifier[sqrt] ( identifier[math] . identifier[pi] )/ identifier[math] . identifier[sqrt] ( identifier[a] )
identifier[c] = literal[int]
keyword[while] ( identifier[z] <= identifier[chisq] ):
identifier[e] = identifier[e] *( identifier[a] / identifier[float] ( identifier[z] ))
identifier[c] = identifier[c] + identifier[e]
identifier[z] = identifier[z] + literal[int]
keyword[return] ( identifier[c] * identifier[y] + identifier[s] )
keyword[else] :
keyword[return] identifier[s] | def lchisqprob(chisq, df):
"""
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
"""
BIG = 20.0
def ex(x):
BIG = 20.0
if x < -BIG:
return 0.0 # depends on [control=['if'], data=[]]
else:
return math.exp(x)
if chisq <= 0 or df < 1:
return 1.0 # depends on [control=['if'], data=[]]
a = 0.5 * chisq
if df % 2 == 0:
even = 1 # depends on [control=['if'], data=[]]
else:
even = 0
if df > 1:
y = ex(-a) # depends on [control=['if'], data=[]]
if even:
s = y # depends on [control=['if'], data=[]]
else:
s = 2.0 * zprob(-math.sqrt(chisq))
if df > 2:
chisq = 0.5 * (df - 1.0)
if even:
z = 1.0 # depends on [control=['if'], data=[]]
else:
z = 0.5
if a > BIG:
if even:
e = 0.0 # depends on [control=['if'], data=[]]
else:
e = math.log(math.sqrt(math.pi))
c = math.log(a)
while z <= chisq:
e = math.log(z) + e
s = s + ex(c * z - a - e)
z = z + 1.0 # depends on [control=['while'], data=['z']]
return s # depends on [control=['if'], data=['a']]
else:
if even:
e = 1.0 # depends on [control=['if'], data=[]]
else:
e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
c = 0.0
while z <= chisq:
e = e * (a / float(z))
c = c + e
z = z + 1.0 # depends on [control=['while'], data=['z']]
return c * y + s # depends on [control=['if'], data=['df']]
else:
return s |
def editions(self):
    """ Collect every edition attached to this text's parent.

    :return: List of editions
    :rtype: [CtsTextMetadata]
    """
    found = []
    # Walk the parent's children, keeping only edition-typed entries;
    # the URN keys themselves are not needed.
    for urn, child in self.parent.children.items():
        if isinstance(child, CtsEditionMetadata):
            found.append(child)
    return found
] | def function[editions, parameter[self]]:
constant[ Get all editions of the texts
:return: List of editions
:rtype: [CtsTextMetadata]
]
return[<ast.ListComp object at 0x7da2054a7d60>] | keyword[def] identifier[editions] ( identifier[self] ):
literal[string]
keyword[return] [
identifier[item]
keyword[for] identifier[urn] , identifier[item] keyword[in] identifier[self] . identifier[parent] . identifier[children] . identifier[items] ()
keyword[if] identifier[isinstance] ( identifier[item] , identifier[CtsEditionMetadata] )
] | def editions(self):
""" Get all editions of the texts
:return: List of editions
:rtype: [CtsTextMetadata]
"""
return [item for (urn, item) in self.parent.children.items() if isinstance(item, CtsEditionMetadata)] |
def getReferenceAnalysesService(self, service_uid):
    """ Return the analyses linked to this reference sample whose
    service UID matches ``service_uid``. """
    return [
        analysis
        for analysis in self.objectValues('ReferenceAnalysis')
        if analysis.getServiceUID() == service_uid
    ]
return analyses | def function[getReferenceAnalysesService, parameter[self, service_uid]]:
constant[ return all analyses linked to this reference sample for a service ]
variable[analyses] assign[=] list[[]]
for taget[name[analysis]] in starred[call[name[self].objectValues, parameter[constant[ReferenceAnalysis]]]] begin[:]
if compare[call[name[analysis].getServiceUID, parameter[]] equal[==] name[service_uid]] begin[:]
call[name[analyses].append, parameter[name[analysis]]]
return[name[analyses]] | keyword[def] identifier[getReferenceAnalysesService] ( identifier[self] , identifier[service_uid] ):
literal[string]
identifier[analyses] =[]
keyword[for] identifier[analysis] keyword[in] identifier[self] . identifier[objectValues] ( literal[string] ):
keyword[if] identifier[analysis] . identifier[getServiceUID] ()== identifier[service_uid] :
identifier[analyses] . identifier[append] ( identifier[analysis] )
keyword[return] identifier[analyses] | def getReferenceAnalysesService(self, service_uid):
""" return all analyses linked to this reference sample for a service """
analyses = []
for analysis in self.objectValues('ReferenceAnalysis'):
if analysis.getServiceUID() == service_uid:
analyses.append(analysis) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['analysis']]
return analyses |
def query_remote_ref(self, remote, ref):
    """Query remote repo about given ref.

    :return: ``('tag', sha)`` if ref is a tag in remote
             ``('branch', sha)`` if ref is branch (aka "head") in remote
             ``(None, ref)`` if ref does not exist in remote. This happens
             notably if ref if a commit sha (they can't be queried)
    """
    output = self.log_call(['git', 'ls-remote', remote, ref],
                           cwd=self.cwd,
                           callwith=subprocess.check_output).strip()
    # Full ref names we are prepared to recognize for this short ref.
    branch_ref = 'refs/heads/' + ref
    tag_ref = 'refs/tags/' + ref
    for line in output.splitlines():
        sha, fullref = line.split()
        if fullref == branch_ref:
            return 'branch', sha
        if fullref == tag_ref:
            return 'tag', sha
        if ref == 'HEAD' and fullref == ref:
            return 'HEAD', sha
    # Unknown to the remote (e.g. a raw commit sha): echo it back.
    return None, ref
constant[Query remote repo about given ref.
:return: ``('tag', sha)`` if ref is a tag in remote
``('branch', sha)`` if ref is branch (aka "head") in remote
``(None, ref)`` if ref does not exist in remote. This happens
notably if ref if a commit sha (they can't be queried)
]
variable[out] assign[=] call[call[name[self].log_call, parameter[list[[<ast.Constant object at 0x7da1b0247c10>, <ast.Constant object at 0x7da1b02463b0>, <ast.Name object at 0x7da1b0246b90>, <ast.Name object at 0x7da1b0247070>]]]].strip, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0246c50>, <ast.Name object at 0x7da1b0247f10>]]] in starred[<ast.GeneratorExp object at 0x7da1b0245030>] begin[:]
if compare[name[fullref] equal[==] binary_operation[constant[refs/heads/] + name[ref]]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b02e4160>, <ast.Name object at 0x7da1b02e41c0>]]]
return[tuple[[<ast.Constant object at 0x7da1b02e6050>, <ast.Name object at 0x7da1b02e5ff0>]]] | keyword[def] identifier[query_remote_ref] ( identifier[self] , identifier[remote] , identifier[ref] ):
literal[string]
identifier[out] = identifier[self] . identifier[log_call] ([ literal[string] , literal[string] , identifier[remote] , identifier[ref] ],
identifier[cwd] = identifier[self] . identifier[cwd] ,
identifier[callwith] = identifier[subprocess] . identifier[check_output] ). identifier[strip] ()
keyword[for] identifier[sha] , identifier[fullref] keyword[in] ( identifier[l] . identifier[split] () keyword[for] identifier[l] keyword[in] identifier[out] . identifier[splitlines] ()):
keyword[if] identifier[fullref] == literal[string] + identifier[ref] :
keyword[return] literal[string] , identifier[sha]
keyword[elif] identifier[fullref] == literal[string] + identifier[ref] :
keyword[return] literal[string] , identifier[sha]
keyword[elif] identifier[fullref] == identifier[ref] keyword[and] identifier[ref] == literal[string] :
keyword[return] literal[string] , identifier[sha]
keyword[return] keyword[None] , identifier[ref] | def query_remote_ref(self, remote, ref):
"""Query remote repo about given ref.
:return: ``('tag', sha)`` if ref is a tag in remote
``('branch', sha)`` if ref is branch (aka "head") in remote
``(None, ref)`` if ref does not exist in remote. This happens
notably if ref if a commit sha (they can't be queried)
"""
out = self.log_call(['git', 'ls-remote', remote, ref], cwd=self.cwd, callwith=subprocess.check_output).strip()
for (sha, fullref) in (l.split() for l in out.splitlines()):
if fullref == 'refs/heads/' + ref:
return ('branch', sha) # depends on [control=['if'], data=[]]
elif fullref == 'refs/tags/' + ref:
return ('tag', sha) # depends on [control=['if'], data=[]]
elif fullref == ref and ref == 'HEAD':
return ('HEAD', sha) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (None, ref) |
def _optional_no_translator_flag(env):
    """ Return '--no-translator' flag if we run *msginit(1)* in non-interactive
    mode (i.e. when POAUTOINIT is set in the environment)."""
    import SCons.Util
    # Missing POAUTOINIT means interactive mode: emit an empty CLVar.
    autoinit = env['POAUTOINIT'] if 'POAUTOINIT' in env else False
    flag = '--no-translator' if autoinit else ''
    return [SCons.Util.CLVar(flag)]
constant[ Return '--no-translator' flag if we run *msginit(1)* in non-interactive
mode.]
import module[SCons.Util]
if compare[constant[POAUTOINIT] in name[env]] begin[:]
variable[autoinit] assign[=] call[name[env]][constant[POAUTOINIT]]
if name[autoinit] begin[:]
return[list[[<ast.Call object at 0x7da18f58e440>]]] | keyword[def] identifier[_optional_no_translator_flag] ( identifier[env] ):
literal[string]
keyword[import] identifier[SCons] . identifier[Util]
keyword[if] literal[string] keyword[in] identifier[env] :
identifier[autoinit] = identifier[env] [ literal[string] ]
keyword[else] :
identifier[autoinit] = keyword[False]
keyword[if] identifier[autoinit] :
keyword[return] [ identifier[SCons] . identifier[Util] . identifier[CLVar] ( literal[string] )]
keyword[else] :
keyword[return] [ identifier[SCons] . identifier[Util] . identifier[CLVar] ( literal[string] )] | def _optional_no_translator_flag(env):
""" Return '--no-translator' flag if we run *msginit(1)* in non-interactive
mode."""
import SCons.Util
if 'POAUTOINIT' in env:
autoinit = env['POAUTOINIT'] # depends on [control=['if'], data=['env']]
else:
autoinit = False
if autoinit:
return [SCons.Util.CLVar('--no-translator')] # depends on [control=['if'], data=[]]
else:
return [SCons.Util.CLVar('')] |
def makeConnection(self, protocol):
    """
    Called when the connection has been established.

    Invoked once an HTTP 200 response has been received, with the
    protocol that decodes the individual Twitter stream elements. That
    protocol calls the consumer for every Twitter entry received.

    The protocol, stored in L{protocol}, carries a deferred that fires
    when the connection closes; firing it moves this object to the
    C{'disconnected'} state (unless we were already stopped).

    @param protocol: The Twitter stream protocol.
    @type protocol: L{TwitterStream}
    """
    self._errorState = None

    def onConnectionClosed(result):
        self.protocol = None
        if self._state == 'stopped':
            # Already stopped deliberately: stay put, no transition.
            return
        reason = result if isinstance(result, failure.Failure) else None
        self._toState('disconnected', reason)

    self.protocol = protocol
    protocol.deferred.addBoth(onConnectionClosed)
constant[
Called when the connection has been established.
This method is called when an HTTP 200 response has been received,
with the protocol that decodes the individual Twitter stream elements.
That protocol will call the consumer for all Twitter entries received.
The protocol, stored in L{protocol}, has a deferred that fires when
the connection is closed, causing a transition to the
C{'disconnected'} state.
@param protocol: The Twitter stream protocol.
@type protocol: L{TwitterStream}
]
name[self]._errorState assign[=] constant[None]
def function[cb, parameter[result]]:
name[self].protocol assign[=] constant[None]
if compare[name[self]._state equal[==] constant[stopped]] begin[:]
pass
name[self].protocol assign[=] name[protocol]
variable[d] assign[=] name[protocol].deferred
call[name[d].addBoth, parameter[name[cb]]] | keyword[def] identifier[makeConnection] ( identifier[self] , identifier[protocol] ):
literal[string]
identifier[self] . identifier[_errorState] = keyword[None]
keyword[def] identifier[cb] ( identifier[result] ):
identifier[self] . identifier[protocol] = keyword[None]
keyword[if] identifier[self] . identifier[_state] == literal[string] :
keyword[pass]
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[result] , identifier[failure] . identifier[Failure] ):
identifier[reason] = identifier[result]
keyword[else] :
identifier[reason] = keyword[None]
identifier[self] . identifier[_toState] ( literal[string] , identifier[reason] )
identifier[self] . identifier[protocol] = identifier[protocol]
identifier[d] = identifier[protocol] . identifier[deferred]
identifier[d] . identifier[addBoth] ( identifier[cb] ) | def makeConnection(self, protocol):
"""
Called when the connection has been established.
This method is called when an HTTP 200 response has been received,
with the protocol that decodes the individual Twitter stream elements.
That protocol will call the consumer for all Twitter entries received.
The protocol, stored in L{protocol}, has a deferred that fires when
the connection is closed, causing a transition to the
C{'disconnected'} state.
@param protocol: The Twitter stream protocol.
@type protocol: L{TwitterStream}
"""
self._errorState = None
def cb(result):
self.protocol = None
if self._state == 'stopped':
# Don't transition to any other state. We are stopped.
pass # depends on [control=['if'], data=[]]
else:
if isinstance(result, failure.Failure):
reason = result # depends on [control=['if'], data=[]]
else:
reason = None
self._toState('disconnected', reason)
self.protocol = protocol
d = protocol.deferred
d.addBoth(cb) |
def truncate(self, before=None, after=None, axis=None, copy=True):
    """
    Truncate a Series or DataFrame before and after some index value.

    This is a useful shorthand for boolean indexing based on index
    values above or below certain thresholds.

    Parameters
    ----------
    before : date, string, int
        Truncate all rows before this index value.
    after : date, string, int
        Truncate all rows after this index value.
    axis : {0 or 'index', 1 or 'columns'}, optional
        Axis to truncate. Truncates the index (rows) by default.
    copy : boolean, default is True,
        Return a copy of the truncated section.

    Returns
    -------
    type of caller
        The truncated Series or DataFrame.

    Raises
    ------
    ValueError
        If the index along the truncation axis is not sorted, or if
        ``before`` is greater than ``after``.

    See Also
    --------
    DataFrame.loc : Select a subset of a DataFrame by label.
    DataFrame.iloc : Select a subset of a DataFrame by position.

    Notes
    -----
    If the index being truncated contains only datetime values,
    `before` and `after` may be specified as strings instead of
    Timestamps.

    Examples
    --------
    >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
    ...                    'B': ['f', 'g', 'h', 'i', 'j'],
    ...                    'C': ['k', 'l', 'm', 'n', 'o']},
    ...                   index=[1, 2, 3, 4, 5])
    >>> df
       A  B  C
    1  a  f  k
    2  b  g  l
    3  c  h  m
    4  d  i  n
    5  e  j  o

    >>> df.truncate(before=2, after=4)
       A  B  C
    2  b  g  l
    3  c  h  m
    4  d  i  n

    The columns of a DataFrame can be truncated.

    >>> df.truncate(before="A", after="B", axis="columns")
       A  B
    1  a  f
    2  b  g
    3  c  h
    4  d  i
    5  e  j

    For Series, only rows can be truncated.

    >>> df['A'].truncate(before=2, after=4)
    2    b
    3    c
    4    d
    Name: A, dtype: object

    The index values in ``truncate`` can be datetimes or string
    dates.

    >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
    >>> df = pd.DataFrame(index=dates, data={'A': 1})
    >>> df.tail()
                         A
    2016-01-31 23:59:56  1
    2016-01-31 23:59:57  1
    2016-01-31 23:59:58  1
    2016-01-31 23:59:59  1
    2016-02-01 00:00:00  1

    >>> df.truncate(before=pd.Timestamp('2016-01-05'),
    ...             after=pd.Timestamp('2016-01-10')).tail()
                         A
    2016-01-09 23:59:56  1
    2016-01-09 23:59:57  1
    2016-01-09 23:59:58  1
    2016-01-09 23:59:59  1
    2016-01-10 00:00:00  1

    Because the index is a DatetimeIndex containing only dates, we can
    specify `before` and `after` as strings. They will be coerced to
    Timestamps before truncation.

    >>> df.truncate('2016-01-05', '2016-01-10').tail()
                         A
    2016-01-09 23:59:56  1
    2016-01-09 23:59:57  1
    2016-01-09 23:59:58  1
    2016-01-09 23:59:59  1
    2016-01-10 00:00:00  1

    Note that ``truncate`` assumes a 0 value for any unspecified time
    component (midnight). This differs from partial string slicing, which
    returns any partially matching dates.

    >>> df.loc['2016-01-05':'2016-01-10', :].tail()
                         A
    2016-01-10 23:59:55  1
    2016-01-10 23:59:56  1
    2016-01-10 23:59:57  1
    2016-01-10 23:59:58  1
    2016-01-10 23:59:59  1
    """
    # Default to the object's "stat" axis (the row index) when the
    # caller did not pick one, then normalize aliases to an integer.
    if axis is None:
        axis = self._stat_axis_number
    axis = self._get_axis_number(axis)
    ax = self._get_axis(axis)
    # GH 17935
    # Check that index is sorted (either direction); label slicing
    # below is only well-defined on a monotonic index.
    if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
        raise ValueError("truncate requires a sorted index")
    # if we have a date index, convert to dates, otherwise
    # treat like a slice
    if ax.is_all_dates:
        from pandas.core.tools.datetimes import to_datetime
        before = to_datetime(before)
        after = to_datetime(after)
    if before is not None and after is not None:
        if before > after:
            raise ValueError('Truncate: %s must be after %s' %
                             (after, before))
    # Build a full slicer over every axis and restrict only the axis
    # being truncated, then select by label.
    slicer = [slice(None, None)] * self._AXIS_LEN
    slicer[axis] = slice(before, after)
    result = self.loc[tuple(slicer)]
    if isinstance(ax, MultiIndex):
        # NOTE(review): for a MultiIndex the axis is replaced with
        # MultiIndex.truncate's result — presumably so level metadata
        # matches the sliced range; confirm against MultiIndex.truncate.
        setattr(result, self._get_axis_name(axis),
                ax.truncate(before, after))
    if copy:
        result = result.copy()
    return result
constant[
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, string, int
Truncate all rows before this index value.
after : date, string, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : boolean, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
]
if compare[name[axis] is constant[None]] begin[:]
variable[axis] assign[=] name[self]._stat_axis_number
variable[axis] assign[=] call[name[self]._get_axis_number, parameter[name[axis]]]
variable[ax] assign[=] call[name[self]._get_axis, parameter[name[axis]]]
if <ast.BoolOp object at 0x7da1b20293f0> begin[:]
<ast.Raise object at 0x7da1b2028640>
if name[ax].is_all_dates begin[:]
from relative_module[pandas.core.tools.datetimes] import module[to_datetime]
variable[before] assign[=] call[name[to_datetime], parameter[name[before]]]
variable[after] assign[=] call[name[to_datetime], parameter[name[after]]]
if <ast.BoolOp object at 0x7da1b2029030> begin[:]
if compare[name[before] greater[>] name[after]] begin[:]
<ast.Raise object at 0x7da1b202a320>
variable[slicer] assign[=] binary_operation[list[[<ast.Call object at 0x7da1b202bb20>]] * name[self]._AXIS_LEN]
call[name[slicer]][name[axis]] assign[=] call[name[slice], parameter[name[before], name[after]]]
variable[result] assign[=] call[name[self].loc][call[name[tuple], parameter[name[slicer]]]]
if call[name[isinstance], parameter[name[ax], name[MultiIndex]]] begin[:]
call[name[setattr], parameter[name[result], call[name[self]._get_axis_name, parameter[name[axis]]], call[name[ax].truncate, parameter[name[before], name[after]]]]]
if name[copy] begin[:]
variable[result] assign[=] call[name[result].copy, parameter[]]
return[name[result]] | keyword[def] identifier[truncate] ( identifier[self] , identifier[before] = keyword[None] , identifier[after] = keyword[None] , identifier[axis] = keyword[None] , identifier[copy] = keyword[True] ):
literal[string]
keyword[if] identifier[axis] keyword[is] keyword[None] :
identifier[axis] = identifier[self] . identifier[_stat_axis_number]
identifier[axis] = identifier[self] . identifier[_get_axis_number] ( identifier[axis] )
identifier[ax] = identifier[self] . identifier[_get_axis] ( identifier[axis] )
keyword[if] keyword[not] identifier[ax] . identifier[is_monotonic_increasing] keyword[and] keyword[not] identifier[ax] . identifier[is_monotonic_decreasing] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[ax] . identifier[is_all_dates] :
keyword[from] identifier[pandas] . identifier[core] . identifier[tools] . identifier[datetimes] keyword[import] identifier[to_datetime]
identifier[before] = identifier[to_datetime] ( identifier[before] )
identifier[after] = identifier[to_datetime] ( identifier[after] )
keyword[if] identifier[before] keyword[is] keyword[not] keyword[None] keyword[and] identifier[after] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[before] > identifier[after] :
keyword[raise] identifier[ValueError] ( literal[string] %
( identifier[after] , identifier[before] ))
identifier[slicer] =[ identifier[slice] ( keyword[None] , keyword[None] )]* identifier[self] . identifier[_AXIS_LEN]
identifier[slicer] [ identifier[axis] ]= identifier[slice] ( identifier[before] , identifier[after] )
identifier[result] = identifier[self] . identifier[loc] [ identifier[tuple] ( identifier[slicer] )]
keyword[if] identifier[isinstance] ( identifier[ax] , identifier[MultiIndex] ):
identifier[setattr] ( identifier[result] , identifier[self] . identifier[_get_axis_name] ( identifier[axis] ),
identifier[ax] . identifier[truncate] ( identifier[before] , identifier[after] ))
keyword[if] identifier[copy] :
identifier[result] = identifier[result] . identifier[copy] ()
keyword[return] identifier[result] | def truncate(self, before=None, after=None, axis=None, copy=True):
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, string, int
Truncate all rows before this index value.
after : date, string, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : boolean, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number # depends on [control=['if'], data=['axis']]
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and (not ax.is_monotonic_decreasing):
raise ValueError('truncate requires a sorted index') # depends on [control=['if'], data=[]]
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after) # depends on [control=['if'], data=[]]
if before is not None and after is not None:
if before > after:
raise ValueError('Truncate: %s must be after %s' % (after, before)) # depends on [control=['if'], data=['before', 'after']] # depends on [control=['if'], data=[]]
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) # depends on [control=['if'], data=[]]
if copy:
result = result.copy() # depends on [control=['if'], data=[]]
return result |
def scroll_mouse(self, mouse_x: int):
    """
    Nudge the horizontal scrollbar when the ROI selection drags the
    mouse past either edge of the visible view rectangle.

    :param mouse_x: current mouse x coordinate (same units as
        ``view_rect()``)
    :return: None
    """
    bar = self.horizontalScrollBar()
    if mouse_x - self.view_rect().x() > self.view_rect().width():
        # Past the right edge: scroll right a small fixed step.
        bar.setValue(bar.value() + 5)
    elif mouse_x < self.view_rect().x():
        # Before the left edge: scroll left a small fixed step.
        bar.setValue(bar.value() - 5)
constant[
Scrolls the mouse if ROI Selection reaches corner of view
:param mouse_x:
:return:
]
variable[scrollbar] assign[=] call[name[self].horizontalScrollBar, parameter[]]
if compare[binary_operation[name[mouse_x] - call[call[name[self].view_rect, parameter[]].x, parameter[]]] greater[>] call[call[name[self].view_rect, parameter[]].width, parameter[]]] begin[:]
call[name[scrollbar].setValue, parameter[binary_operation[call[name[scrollbar].value, parameter[]] + constant[5]]]] | keyword[def] identifier[scroll_mouse] ( identifier[self] , identifier[mouse_x] : identifier[int] ):
literal[string]
identifier[scrollbar] = identifier[self] . identifier[horizontalScrollBar] ()
keyword[if] identifier[mouse_x] - identifier[self] . identifier[view_rect] (). identifier[x] ()> identifier[self] . identifier[view_rect] (). identifier[width] ():
identifier[scrollbar] . identifier[setValue] ( identifier[scrollbar] . identifier[value] ()+ literal[int] )
keyword[elif] identifier[mouse_x] < identifier[self] . identifier[view_rect] (). identifier[x] ():
identifier[scrollbar] . identifier[setValue] ( identifier[scrollbar] . identifier[value] ()- literal[int] ) | def scroll_mouse(self, mouse_x: int):
"""
Scrolls the mouse if ROI Selection reaches corner of view
:param mouse_x:
:return:
"""
scrollbar = self.horizontalScrollBar()
if mouse_x - self.view_rect().x() > self.view_rect().width():
scrollbar.setValue(scrollbar.value() + 5) # depends on [control=['if'], data=[]]
elif mouse_x < self.view_rect().x():
scrollbar.setValue(scrollbar.value() - 5) # depends on [control=['if'], data=[]] |
def init(project_name):
    """Creates a new project"""
    if VALID_PROJECT_NAME.match(project_name):
        # Project skeleton first, then the empty asset directories,
        # then the generated configuration module.
        check_path(project_name, functools.partial(shutil.copytree, skeleton_path("plugin")))
        for directory in ("static", "templates"):
            check_path(directory, os.mkdir)
        check_path("config.py", functools.partial(config_maker, project_name))
    else:
        print("Invalid project name. It may only contain letters, numbers and underscores.", file=sys.stderr)
constant[Creates a new project]
if <ast.UnaryOp object at 0x7da1b0b10250> begin[:]
call[name[print], parameter[constant[Invalid project name. It may only contain letters, numbers and underscores.]]]
return[None]
call[name[check_path], parameter[name[project_name], call[name[functools].partial, parameter[name[shutil].copytree, call[name[skeleton_path], parameter[constant[plugin]]]]]]]
call[name[check_path], parameter[constant[static], name[os].mkdir]]
call[name[check_path], parameter[constant[templates], name[os].mkdir]]
call[name[check_path], parameter[constant[config.py], call[name[functools].partial, parameter[name[config_maker], name[project_name]]]]] | keyword[def] identifier[init] ( identifier[project_name] ):
literal[string]
keyword[if] keyword[not] identifier[VALID_PROJECT_NAME] . identifier[match] ( identifier[project_name] ):
identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[return]
identifier[check_path] ( identifier[project_name] , identifier[functools] . identifier[partial] ( identifier[shutil] . identifier[copytree] , identifier[skeleton_path] ( literal[string] )))
identifier[check_path] ( literal[string] , identifier[os] . identifier[mkdir] )
identifier[check_path] ( literal[string] , identifier[os] . identifier[mkdir] )
identifier[check_path] ( literal[string] , identifier[functools] . identifier[partial] ( identifier[config_maker] , identifier[project_name] )) | def init(project_name):
"""Creates a new project"""
if not VALID_PROJECT_NAME.match(project_name):
print('Invalid project name. It may only contain letters, numbers and underscores.', file=sys.stderr)
return # depends on [control=['if'], data=[]]
check_path(project_name, functools.partial(shutil.copytree, skeleton_path('plugin')))
check_path('static', os.mkdir)
check_path('templates', os.mkdir)
check_path('config.py', functools.partial(config_maker, project_name)) |
def create_page_from_template(template_file, output_path):
    """ Copy the correct html template file to the output directory """
    destination_dir = os.path.dirname(output_path)
    mkdir_p(destination_dir)
    template_source = os.path.join(livvkit.resource_dir, template_file)
    shutil.copy(template_source, output_path)
constant[ Copy the correct html template file to the output directory ]
call[name[mkdir_p], parameter[call[name[os].path.dirname, parameter[name[output_path]]]]]
call[name[shutil].copy, parameter[call[name[os].path.join, parameter[name[livvkit].resource_dir, name[template_file]]], name[output_path]]] | keyword[def] identifier[create_page_from_template] ( identifier[template_file] , identifier[output_path] ):
literal[string]
identifier[mkdir_p] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[output_path] ))
identifier[shutil] . identifier[copy] ( identifier[os] . identifier[path] . identifier[join] ( identifier[livvkit] . identifier[resource_dir] , identifier[template_file] ), identifier[output_path] ) | def create_page_from_template(template_file, output_path):
""" Copy the correct html template file to the output directory """
mkdir_p(os.path.dirname(output_path))
shutil.copy(os.path.join(livvkit.resource_dir, template_file), output_path) |
def binC(y, bins):
    """
    Bin categorical/qualitative data
    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array
        (k,1), unique values associated with each bin
    Return
    ------
    b : array
        (n,q), bin membership, values between 0 and k-1
    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(1)
    >>> x = np.random.randint(2, 8, (10, 3))
    >>> bins = list(range(2, 8))
    >>> x
    array([[7, 5, 6],
           [2, 3, 5],
           [7, 2, 2],
           [3, 6, 7],
           [6, 3, 4],
           [6, 7, 4],
           [6, 5, 6],
           [4, 6, 7],
           [4, 6, 3],
           [3, 2, 7]])
    >>> y = mc.classifiers.binC(x, bins)
    >>> y
    array([[5, 3, 4],
           [0, 1, 3],
           [5, 0, 0],
           [1, 4, 5],
           [4, 1, 2],
           [4, 5, 2],
           [4, 3, 4],
           [2, 4, 5],
           [2, 4, 1],
           [1, 0, 5]])
    """
    if np.ndim(y) == 1:
        k = 1
        n = np.shape(y)[0]
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    # 'bin_value' instead of 'bin' so the Python builtin is not shadowed.
    for i, bin_value in enumerate(bins):
        b[np.nonzero(y == bin_value)] = i
    # check for non-binned items and warn if needed; unmatched values keep
    # the default label 0, so the warning is the only signal to the caller
    vals = set(y.flatten())
    for val in vals:
        if val not in bins:
            Warn('value not in bin: {}'.format(val), UserWarning)
            Warn('bins: {}'.format(bins), UserWarning)
    return b
constant[
Bin categorical/qualitative data
Parameters
----------
y : array
(n,q), categorical values
bins : array
(k,1), unique values associated with each bin
Return
------
b : array
(n,q), bin membership, values between 0 and k-1
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> np.random.seed(1)
>>> x = np.random.randint(2, 8, (10, 3))
>>> bins = list(range(2, 8))
>>> x
array([[7, 5, 6],
[2, 3, 5],
[7, 2, 2],
[3, 6, 7],
[6, 3, 4],
[6, 7, 4],
[6, 5, 6],
[4, 6, 7],
[4, 6, 3],
[3, 2, 7]])
>>> y = mc.classifiers.binC(x, bins)
>>> y
array([[5, 3, 4],
[0, 1, 3],
[5, 0, 0],
[1, 4, 5],
[4, 1, 2],
[4, 5, 2],
[4, 3, 4],
[2, 4, 5],
[2, 4, 1],
[1, 0, 5]])
]
if compare[call[name[np].ndim, parameter[name[y]]] equal[==] constant[1]] begin[:]
variable[k] assign[=] constant[1]
variable[n] assign[=] call[call[name[np].shape, parameter[name[y]]]][constant[0]]
variable[b] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b12f2c20>, <ast.Name object at 0x7da1b12f0f70>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b11a4160>, <ast.Name object at 0x7da1b11a59c0>]]] in starred[call[name[enumerate], parameter[name[bins]]]] begin[:]
call[name[b]][call[name[np].nonzero, parameter[compare[name[y] equal[==] name[bin]]]]] assign[=] name[i]
variable[vals] assign[=] call[name[set], parameter[call[name[y].flatten, parameter[]]]]
for taget[name[val]] in starred[name[vals]] begin[:]
if compare[name[val] <ast.NotIn object at 0x7da2590d7190> name[bins]] begin[:]
call[name[Warn], parameter[call[constant[value not in bin: {}].format, parameter[name[val]]], name[UserWarning]]]
call[name[Warn], parameter[call[constant[bins: {}].format, parameter[name[bins]]], name[UserWarning]]]
return[name[b]] | keyword[def] identifier[binC] ( identifier[y] , identifier[bins] ):
literal[string]
keyword[if] identifier[np] . identifier[ndim] ( identifier[y] )== literal[int] :
identifier[k] = literal[int]
identifier[n] = identifier[np] . identifier[shape] ( identifier[y] )[ literal[int] ]
keyword[else] :
identifier[n] , identifier[k] = identifier[np] . identifier[shape] ( identifier[y] )
identifier[b] = identifier[np] . identifier[zeros] (( identifier[n] , identifier[k] ), identifier[dtype] = literal[string] )
keyword[for] identifier[i] , identifier[bin] keyword[in] identifier[enumerate] ( identifier[bins] ):
identifier[b] [ identifier[np] . identifier[nonzero] ( identifier[y] == identifier[bin] )]= identifier[i]
identifier[vals] = identifier[set] ( identifier[y] . identifier[flatten] ())
keyword[for] identifier[val] keyword[in] identifier[vals] :
keyword[if] identifier[val] keyword[not] keyword[in] identifier[bins] :
identifier[Warn] ( literal[string] . identifier[format] ( identifier[val] ), identifier[UserWarning] )
identifier[Warn] ( literal[string] . identifier[format] ( identifier[bins] ), identifier[UserWarning] )
keyword[return] identifier[b] | def binC(y, bins):
"""
Bin categorical/qualitative data
Parameters
----------
y : array
(n,q), categorical values
bins : array
(k,1), unique values associated with each bin
Return
------
b : array
(n,q), bin membership, values between 0 and k-1
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> np.random.seed(1)
>>> x = np.random.randint(2, 8, (10, 3))
>>> bins = list(range(2, 8))
>>> x
array([[7, 5, 6],
[2, 3, 5],
[7, 2, 2],
[3, 6, 7],
[6, 3, 4],
[6, 7, 4],
[6, 5, 6],
[4, 6, 7],
[4, 6, 3],
[3, 2, 7]])
>>> y = mc.classifiers.binC(x, bins)
>>> y
array([[5, 3, 4],
[0, 1, 3],
[5, 0, 0],
[1, 4, 5],
[4, 1, 2],
[4, 5, 2],
[4, 3, 4],
[2, 4, 5],
[2, 4, 1],
[1, 0, 5]])
"""
if np.ndim(y) == 1:
k = 1
n = np.shape(y)[0] # depends on [control=['if'], data=[]]
else:
(n, k) = np.shape(y)
b = np.zeros((n, k), dtype='int')
for (i, bin) in enumerate(bins):
b[np.nonzero(y == bin)] = i # depends on [control=['for'], data=[]]
# check for non-binned items and warn if needed
vals = set(y.flatten())
for val in vals:
if val not in bins:
Warn('value not in bin: {}'.format(val), UserWarning)
Warn('bins: {}'.format(bins), UserWarning) # depends on [control=['if'], data=['val', 'bins']] # depends on [control=['for'], data=['val']]
return b |
def dosundec(sundata):
    """
    returns the declination for a given set of suncompass data
    Parameters
    __________
    sundata : dictionary with these keys:
        date: time string with the format 'yyyy:mm:dd:hr:min'
        delta_u: time to SUBTRACT from local time for Universal time
        lat: latitude of location (negative for south)
        lon: longitude of location (negative for west)
        shadow_angle: shadow angle of the desired direction with respect to the sun.
    Returns
    ________
    sunaz : the declination of the desired direction wrt true north.
    """
    # Parse the 'yyyy:mm:dd:hr:min' timestamp.
    timedate = sundata["date"]
    timedate = timedate.split(":")
    year = int(timedate[0])
    mon = int(timedate[1])
    day = int(timedate[2])
    hours = float(timedate[3])
    # 'minutes' rather than 'min' so the builtin is not shadowed.
    minutes = float(timedate[4])
    du = int(sundata["delta_u"])
    # Convert local time to Universal Time, rolling the date when the
    # correction crosses midnight.
    hrs = hours - du
    if hrs > 24:
        day += 1
        hrs = hrs - 24
    if hrs < 0:
        day = day - 1
        hrs = hrs + 24
    julian_day = julian(mon, day, year)
    # Fraction of the day elapsed in Universal Time.
    utd = old_div((hrs + old_div(minutes, 60.)), 24.)
    greenwich_hour_angle, delta = gha(julian_day, utd)
    # Local hour angle = Greenwich hour angle plus east longitude.
    H = greenwich_hour_angle + float(sundata["lon"])
    if H > 360:
        H = H - 360
    lat = float(sundata["lat"])
    if H > 90 and H < 270:
        lat = -lat
    # now do spherical trig to get azimuth to sun
    lat = np.radians(lat)
    delta = np.radians(delta)
    H = np.radians(H)
    ctheta = np.sin(lat) * np.sin(delta) + np.cos(lat) * \
        np.cos(delta) * np.cos(H)
    theta = np.arccos(ctheta)
    beta = np.cos(delta) * np.sin(H) / np.sin(theta)
    #
    # check which beta
    #
    beta = np.degrees(np.arcsin(beta))
    if delta < lat:
        beta = 180 - beta
    sunaz = 180 - beta
    sunaz = (sunaz + float(sundata["shadow_angle"])) % 360.  # mod 360
    return sunaz
constant[
returns the declination for a given set of suncompass data
Parameters
__________
sundata : dictionary with these keys:
date: time string with the format 'yyyy:mm:dd:hr:min'
delta_u: time to SUBTRACT from local time for Universal time
lat: latitude of location (negative for south)
lon: longitude of location (negative for west)
shadow_angle: shadow angle of the desired direction with respect to the sun.
Returns
________
sunaz : the declination of the desired direction wrt true north.
]
variable[iday] assign[=] constant[0]
variable[timedate] assign[=] call[name[sundata]][constant[date]]
variable[timedate] assign[=] call[name[timedate].split, parameter[constant[:]]]
variable[year] assign[=] call[name[int], parameter[call[name[timedate]][constant[0]]]]
variable[mon] assign[=] call[name[int], parameter[call[name[timedate]][constant[1]]]]
variable[day] assign[=] call[name[int], parameter[call[name[timedate]][constant[2]]]]
variable[hours] assign[=] call[name[float], parameter[call[name[timedate]][constant[3]]]]
variable[min] assign[=] call[name[float], parameter[call[name[timedate]][constant[4]]]]
variable[du] assign[=] call[name[int], parameter[call[name[sundata]][constant[delta_u]]]]
variable[hrs] assign[=] binary_operation[name[hours] - name[du]]
if compare[name[hrs] greater[>] constant[24]] begin[:]
<ast.AugAssign object at 0x7da2044c2da0>
variable[hrs] assign[=] binary_operation[name[hrs] - constant[24]]
if compare[name[hrs] less[<] constant[0]] begin[:]
variable[day] assign[=] binary_operation[name[day] - constant[1]]
variable[hrs] assign[=] binary_operation[name[hrs] + constant[24]]
variable[julian_day] assign[=] call[name[julian], parameter[name[mon], name[day], name[year]]]
variable[utd] assign[=] call[name[old_div], parameter[binary_operation[name[hrs] + call[name[old_div], parameter[name[min], constant[60.0]]]], constant[24.0]]]
<ast.Tuple object at 0x7da2044c1e40> assign[=] call[name[gha], parameter[name[julian_day], name[utd]]]
variable[H] assign[=] binary_operation[name[greenwich_hour_angle] + call[name[float], parameter[call[name[sundata]][constant[lon]]]]]
if compare[name[H] greater[>] constant[360]] begin[:]
variable[H] assign[=] binary_operation[name[H] - constant[360]]
variable[lat] assign[=] call[name[float], parameter[call[name[sundata]][constant[lat]]]]
if <ast.BoolOp object at 0x7da2044c3100> begin[:]
variable[lat] assign[=] <ast.UnaryOp object at 0x7da2044c3130>
variable[lat] assign[=] call[name[np].radians, parameter[name[lat]]]
variable[delta] assign[=] call[name[np].radians, parameter[name[delta]]]
variable[H] assign[=] call[name[np].radians, parameter[name[H]]]
variable[ctheta] assign[=] binary_operation[binary_operation[call[name[np].sin, parameter[name[lat]]] * call[name[np].sin, parameter[name[delta]]]] + binary_operation[binary_operation[call[name[np].cos, parameter[name[lat]]] * call[name[np].cos, parameter[name[delta]]]] * call[name[np].cos, parameter[name[H]]]]]
variable[theta] assign[=] call[name[np].arccos, parameter[name[ctheta]]]
variable[beta] assign[=] binary_operation[binary_operation[call[name[np].cos, parameter[name[delta]]] * call[name[np].sin, parameter[name[H]]]] / call[name[np].sin, parameter[name[theta]]]]
variable[beta] assign[=] call[name[np].degrees, parameter[call[name[np].arcsin, parameter[name[beta]]]]]
if compare[name[delta] less[<] name[lat]] begin[:]
variable[beta] assign[=] binary_operation[constant[180] - name[beta]]
variable[sunaz] assign[=] binary_operation[constant[180] - name[beta]]
variable[sunaz] assign[=] binary_operation[binary_operation[name[sunaz] + call[name[float], parameter[call[name[sundata]][constant[shadow_angle]]]]] <ast.Mod object at 0x7da2590d6920> constant[360.0]]
return[name[sunaz]] | keyword[def] identifier[dosundec] ( identifier[sundata] ):
literal[string]
identifier[iday] = literal[int]
identifier[timedate] = identifier[sundata] [ literal[string] ]
identifier[timedate] = identifier[timedate] . identifier[split] ( literal[string] )
identifier[year] = identifier[int] ( identifier[timedate] [ literal[int] ])
identifier[mon] = identifier[int] ( identifier[timedate] [ literal[int] ])
identifier[day] = identifier[int] ( identifier[timedate] [ literal[int] ])
identifier[hours] = identifier[float] ( identifier[timedate] [ literal[int] ])
identifier[min] = identifier[float] ( identifier[timedate] [ literal[int] ])
identifier[du] = identifier[int] ( identifier[sundata] [ literal[string] ])
identifier[hrs] = identifier[hours] - identifier[du]
keyword[if] identifier[hrs] > literal[int] :
identifier[day] += literal[int]
identifier[hrs] = identifier[hrs] - literal[int]
keyword[if] identifier[hrs] < literal[int] :
identifier[day] = identifier[day] - literal[int]
identifier[hrs] = identifier[hrs] + literal[int]
identifier[julian_day] = identifier[julian] ( identifier[mon] , identifier[day] , identifier[year] )
identifier[utd] = identifier[old_div] (( identifier[hrs] + identifier[old_div] ( identifier[min] , literal[int] )), literal[int] )
identifier[greenwich_hour_angle] , identifier[delta] = identifier[gha] ( identifier[julian_day] , identifier[utd] )
identifier[H] = identifier[greenwich_hour_angle] + identifier[float] ( identifier[sundata] [ literal[string] ])
keyword[if] identifier[H] > literal[int] :
identifier[H] = identifier[H] - literal[int]
identifier[lat] = identifier[float] ( identifier[sundata] [ literal[string] ])
keyword[if] identifier[H] > literal[int] keyword[and] identifier[H] < literal[int] :
identifier[lat] =- identifier[lat]
identifier[lat] = identifier[np] . identifier[radians] ( identifier[lat] )
identifier[delta] = identifier[np] . identifier[radians] ( identifier[delta] )
identifier[H] = identifier[np] . identifier[radians] ( identifier[H] )
identifier[ctheta] = identifier[np] . identifier[sin] ( identifier[lat] )* identifier[np] . identifier[sin] ( identifier[delta] )+ identifier[np] . identifier[cos] ( identifier[lat] )* identifier[np] . identifier[cos] ( identifier[delta] )* identifier[np] . identifier[cos] ( identifier[H] )
identifier[theta] = identifier[np] . identifier[arccos] ( identifier[ctheta] )
identifier[beta] = identifier[np] . identifier[cos] ( identifier[delta] )* identifier[np] . identifier[sin] ( identifier[H] )/ identifier[np] . identifier[sin] ( identifier[theta] )
identifier[beta] = identifier[np] . identifier[degrees] ( identifier[np] . identifier[arcsin] ( identifier[beta] ))
keyword[if] identifier[delta] < identifier[lat] :
identifier[beta] = literal[int] - identifier[beta]
identifier[sunaz] = literal[int] - identifier[beta]
identifier[sunaz] =( identifier[sunaz] + identifier[float] ( identifier[sundata] [ literal[string] ]))% literal[int]
keyword[return] identifier[sunaz] | def dosundec(sundata):
"""
returns the declination for a given set of suncompass data
Parameters
__________
sundata : dictionary with these keys:
date: time string with the format 'yyyy:mm:dd:hr:min'
delta_u: time to SUBTRACT from local time for Universal time
lat: latitude of location (negative for south)
lon: longitude of location (negative for west)
shadow_angle: shadow angle of the desired direction with respect to the sun.
Returns
________
sunaz : the declination of the desired direction wrt true north.
"""
iday = 0
timedate = sundata['date']
timedate = timedate.split(':')
year = int(timedate[0])
mon = int(timedate[1])
day = int(timedate[2])
hours = float(timedate[3])
min = float(timedate[4])
du = int(sundata['delta_u'])
hrs = hours - du
if hrs > 24:
day += 1
hrs = hrs - 24 # depends on [control=['if'], data=['hrs']]
if hrs < 0:
day = day - 1
hrs = hrs + 24 # depends on [control=['if'], data=['hrs']]
julian_day = julian(mon, day, year)
utd = old_div(hrs + old_div(min, 60.0), 24.0)
(greenwich_hour_angle, delta) = gha(julian_day, utd)
H = greenwich_hour_angle + float(sundata['lon'])
if H > 360:
H = H - 360 # depends on [control=['if'], data=['H']]
lat = float(sundata['lat'])
if H > 90 and H < 270:
lat = -lat # depends on [control=['if'], data=[]]
# now do spherical trig to get azimuth to sun
lat = np.radians(lat)
delta = np.radians(delta)
H = np.radians(H)
ctheta = np.sin(lat) * np.sin(delta) + np.cos(lat) * np.cos(delta) * np.cos(H)
theta = np.arccos(ctheta)
beta = np.cos(delta) * np.sin(H) / np.sin(theta)
#
# check which beta
#
beta = np.degrees(np.arcsin(beta))
if delta < lat:
beta = 180 - beta # depends on [control=['if'], data=[]]
sunaz = 180 - beta
sunaz = (sunaz + float(sundata['shadow_angle'])) % 360.0 # mod 360
return sunaz |
def dry_run(command, dry_run):
    """Executes a shell command unless the dry run option is set"""
    if dry_run:
        log.info('Dry run of %s, skipping' % command)
        return True
    # http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen
    executable, *arguments = command.split(' ')
    return local[executable](arguments)
constant[Executes a shell command unless the dry run option is set]
if <ast.UnaryOp object at 0x7da1b0692ce0> begin[:]
variable[cmd_parts] assign[=] call[name[command].split, parameter[constant[ ]]]
return[call[call[name[local]][call[name[cmd_parts]][constant[0]]], parameter[call[name[cmd_parts]][<ast.Slice object at 0x7da1b06937f0>]]]]
return[constant[True]] | keyword[def] identifier[dry_run] ( identifier[command] , identifier[dry_run] ):
literal[string]
keyword[if] keyword[not] identifier[dry_run] :
identifier[cmd_parts] = identifier[command] . identifier[split] ( literal[string] )
keyword[return] identifier[local] [ identifier[cmd_parts] [ literal[int] ]]( identifier[cmd_parts] [ literal[int] :])
keyword[else] :
identifier[log] . identifier[info] ( literal[string] % identifier[command] )
keyword[return] keyword[True] | def dry_run(command, dry_run):
"""Executes a shell command unless the dry run option is set"""
if not dry_run:
cmd_parts = command.split(' ')
# http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen
return local[cmd_parts[0]](cmd_parts[1:]) # depends on [control=['if'], data=[]]
else:
log.info('Dry run of %s, skipping' % command)
return True |
def _wait_fd(conn, read=True):
    '''Wait for an event on file descriptor ``fd``.
    :param conn: file descriptor
    :param read: wait for a read event if ``True``, otherwise a wait
    for write event.
    This function must be invoked from a coroutine with parent, therefore
    invoking it from the main greenlet will raise an exception.
    '''
    current = getcurrent()
    parent = current.parent
    assert parent, '"_wait_fd" must be called by greenlet with a parent'
    try:
        # Accept either an object exposing fileno() (socket, file, pipe)
        # or a raw integer file descriptor.
        fileno = conn.fileno()
    except AttributeError:
        fileno = conn
    future = Future()
    # When the event on fd occurs switch back to the current greenlet
    # (presumably _done_wait_fd removes the reader/writer and resolves the
    # future -- confirm against its definition).
    if read:
        future._loop.add_reader(fileno, _done_wait_fd, fileno, future, read)
    else:
        future._loop.add_writer(fileno, _done_wait_fd, fileno, future, read)
    # switch back to parent greenlet; the parent receives the future and
    # resumes this greenlet once the fd event has fired
    parent.switch(future)
    # Back on the child greenlet. Raise error if there is one
    future.result()
constant[Wait for an event on file descriptor ``fd``.
:param conn: file descriptor
:param read: wait for a read event if ``True``, otherwise a wait
for write event.
This function must be invoked from a coroutine with parent, therefore
invoking it from the main greenlet will raise an exception.
]
variable[current] assign[=] call[name[getcurrent], parameter[]]
variable[parent] assign[=] name[current].parent
assert[name[parent]]
<ast.Try object at 0x7da2044c31f0>
variable[future] assign[=] call[name[Future], parameter[]]
if name[read] begin[:]
call[name[future]._loop.add_reader, parameter[name[fileno], name[_done_wait_fd], name[fileno], name[future], name[read]]]
call[name[parent].switch, parameter[name[future]]]
call[name[future].result, parameter[]] | keyword[def] identifier[_wait_fd] ( identifier[conn] , identifier[read] = keyword[True] ):
literal[string]
identifier[current] = identifier[getcurrent] ()
identifier[parent] = identifier[current] . identifier[parent]
keyword[assert] identifier[parent] , literal[string]
keyword[try] :
identifier[fileno] = identifier[conn] . identifier[fileno] ()
keyword[except] identifier[AttributeError] :
identifier[fileno] = identifier[conn]
identifier[future] = identifier[Future] ()
keyword[if] identifier[read] :
identifier[future] . identifier[_loop] . identifier[add_reader] ( identifier[fileno] , identifier[_done_wait_fd] , identifier[fileno] , identifier[future] , identifier[read] )
keyword[else] :
identifier[future] . identifier[_loop] . identifier[add_writer] ( identifier[fileno] , identifier[_done_wait_fd] , identifier[fileno] , identifier[future] , identifier[read] )
identifier[parent] . identifier[switch] ( identifier[future] )
identifier[future] . identifier[result] () | def _wait_fd(conn, read=True):
"""Wait for an event on file descriptor ``fd``.
:param conn: file descriptor
:param read: wait for a read event if ``True``, otherwise a wait
for write event.
This function must be invoked from a coroutine with parent, therefore
invoking it from the main greenlet will raise an exception.
"""
current = getcurrent()
parent = current.parent
assert parent, '"_wait_fd" must be called by greenlet with a parent'
try:
fileno = conn.fileno() # depends on [control=['try'], data=[]]
except AttributeError:
fileno = conn # depends on [control=['except'], data=[]]
future = Future()
# When the event on fd occurs switch back to the current greenlet
if read:
future._loop.add_reader(fileno, _done_wait_fd, fileno, future, read) # depends on [control=['if'], data=[]]
else:
future._loop.add_writer(fileno, _done_wait_fd, fileno, future, read)
# switch back to parent greenlet
parent.switch(future)
# Back on the child greenlet. Raise error if there is one
future.result() |
def tf_step(self, time, variables, source_variables, **kwargs):
    """
    Creates the TensorFlow operations for performing an optimization step.
    Args:
        time: Time tensor.
        variables: List of variables to optimize.
        source_variables: List of source variables to synchronize with.
        **kwargs: Additional arguments, not used.
    Returns:
        List of delta tensors corresponding to the updates for each optimized variable.
    """
    # Every source/target variable pair must have matching shapes.
    assert all(util.shape(source) == util.shape(target) for source, target in zip(source_variables, variables))
    # Records the time step of the most recent sync. Initializing to
    # -sync_frequency guarantees the very first call triggers a sync
    # (time - last_sync >= sync_frequency holds at time 0).
    last_sync = tf.get_variable(
        name='last-sync',
        shape=(),
        dtype=tf.int64,
        initializer=tf.constant_initializer(value=(-self.sync_frequency), dtype=tf.int64),
        trainable=False
    )
    def sync():
        # Move each target variable a fraction (update_weight) of the way
        # towards its source variable.
        deltas = list()
        for source_variable, target_variable in zip(source_variables, variables):
            delta = self.update_weight * (source_variable - target_variable)
            deltas.append(delta)
        applied = self.apply_step(variables=variables, deltas=deltas)
        last_sync_updated = last_sync.assign(value=time)
        with tf.control_dependencies(control_inputs=(applied, last_sync_updated)):
            # Trivial operation to enforce control dependency: the returned
            # deltas depend on both the applied update and the sync bookkeeping.
            return [delta + 0.0 for delta in deltas]
    def no_sync():
        # Not time to sync yet: return zero deltas of matching shapes so
        # both tf.cond branches produce structurally identical outputs.
        deltas = list()
        for variable in variables:
            delta = tf.zeros(shape=util.shape(variable))
            deltas.append(delta)
        return deltas
    do_sync = (time - last_sync >= self.sync_frequency)
    return tf.cond(pred=do_sync, true_fn=sync, false_fn=no_sync)
constant[
Creates the TensorFlow operations for performing an optimization step.
Args:
time: Time tensor.
variables: List of variables to optimize.
source_variables: List of source variables to synchronize with.
**kwargs: Additional arguments, not used.
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
]
assert[call[name[all], parameter[<ast.GeneratorExp object at 0x7da207f03fa0>]]]
variable[last_sync] assign[=] call[name[tf].get_variable, parameter[]]
def function[sync, parameter[]]:
variable[deltas] assign[=] call[name[list], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da207f02110>, <ast.Name object at 0x7da207f00760>]]] in starred[call[name[zip], parameter[name[source_variables], name[variables]]]] begin[:]
variable[delta] assign[=] binary_operation[name[self].update_weight * binary_operation[name[source_variable] - name[target_variable]]]
call[name[deltas].append, parameter[name[delta]]]
variable[applied] assign[=] call[name[self].apply_step, parameter[]]
variable[last_sync_updated] assign[=] call[name[last_sync].assign, parameter[]]
with call[name[tf].control_dependencies, parameter[]] begin[:]
return[<ast.ListComp object at 0x7da207f033a0>]
def function[no_sync, parameter[]]:
variable[deltas] assign[=] call[name[list], parameter[]]
for taget[name[variable]] in starred[name[variables]] begin[:]
variable[delta] assign[=] call[name[tf].zeros, parameter[]]
call[name[deltas].append, parameter[name[delta]]]
return[name[deltas]]
variable[do_sync] assign[=] compare[binary_operation[name[time] - name[last_sync]] greater_or_equal[>=] name[self].sync_frequency]
return[call[name[tf].cond, parameter[]]] | keyword[def] identifier[tf_step] ( identifier[self] , identifier[time] , identifier[variables] , identifier[source_variables] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[all] ( identifier[util] . identifier[shape] ( identifier[source] )== identifier[util] . identifier[shape] ( identifier[target] ) keyword[for] identifier[source] , identifier[target] keyword[in] identifier[zip] ( identifier[source_variables] , identifier[variables] ))
identifier[last_sync] = identifier[tf] . identifier[get_variable] (
identifier[name] = literal[string] ,
identifier[shape] =(),
identifier[dtype] = identifier[tf] . identifier[int64] ,
identifier[initializer] = identifier[tf] . identifier[constant_initializer] ( identifier[value] =(- identifier[self] . identifier[sync_frequency] ), identifier[dtype] = identifier[tf] . identifier[int64] ),
identifier[trainable] = keyword[False]
)
keyword[def] identifier[sync] ():
identifier[deltas] = identifier[list] ()
keyword[for] identifier[source_variable] , identifier[target_variable] keyword[in] identifier[zip] ( identifier[source_variables] , identifier[variables] ):
identifier[delta] = identifier[self] . identifier[update_weight] *( identifier[source_variable] - identifier[target_variable] )
identifier[deltas] . identifier[append] ( identifier[delta] )
identifier[applied] = identifier[self] . identifier[apply_step] ( identifier[variables] = identifier[variables] , identifier[deltas] = identifier[deltas] )
identifier[last_sync_updated] = identifier[last_sync] . identifier[assign] ( identifier[value] = identifier[time] )
keyword[with] identifier[tf] . identifier[control_dependencies] ( identifier[control_inputs] =( identifier[applied] , identifier[last_sync_updated] )):
keyword[return] [ identifier[delta] + literal[int] keyword[for] identifier[delta] keyword[in] identifier[deltas] ]
keyword[def] identifier[no_sync] ():
identifier[deltas] = identifier[list] ()
keyword[for] identifier[variable] keyword[in] identifier[variables] :
identifier[delta] = identifier[tf] . identifier[zeros] ( identifier[shape] = identifier[util] . identifier[shape] ( identifier[variable] ))
identifier[deltas] . identifier[append] ( identifier[delta] )
keyword[return] identifier[deltas]
identifier[do_sync] =( identifier[time] - identifier[last_sync] >= identifier[self] . identifier[sync_frequency] )
keyword[return] identifier[tf] . identifier[cond] ( identifier[pred] = identifier[do_sync] , identifier[true_fn] = identifier[sync] , identifier[false_fn] = identifier[no_sync] ) | def tf_step(self, time, variables, source_variables, **kwargs):
"""
Creates the TensorFlow operations for performing an optimization step.
Args:
time: Time tensor.
variables: List of variables to optimize.
source_variables: List of source variables to synchronize with.
**kwargs: Additional arguments, not used.
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
"""
assert all((util.shape(source) == util.shape(target) for (source, target) in zip(source_variables, variables)))
last_sync = tf.get_variable(name='last-sync', shape=(), dtype=tf.int64, initializer=tf.constant_initializer(value=-self.sync_frequency, dtype=tf.int64), trainable=False)
def sync():
deltas = list()
for (source_variable, target_variable) in zip(source_variables, variables):
delta = self.update_weight * (source_variable - target_variable)
deltas.append(delta) # depends on [control=['for'], data=[]]
applied = self.apply_step(variables=variables, deltas=deltas)
last_sync_updated = last_sync.assign(value=time)
with tf.control_dependencies(control_inputs=(applied, last_sync_updated)):
# Trivial operation to enforce control dependency
return [delta + 0.0 for delta in deltas] # depends on [control=['with'], data=[]]
def no_sync():
deltas = list()
for variable in variables:
delta = tf.zeros(shape=util.shape(variable))
deltas.append(delta) # depends on [control=['for'], data=['variable']]
return deltas
do_sync = time - last_sync >= self.sync_frequency
return tf.cond(pred=do_sync, true_fn=sync, false_fn=no_sync) |
def download_as_json(url):
    """
    Download the data at the URL and load it as JSON
    """
    try:
        payload = request(url=url)
        return Response('application/json', payload).read()
    except HTTPError as err:
        raise ResponseException('application/json', err)
constant[
Download the data at the URL and load it as JSON
]
<ast.Try object at 0x7da20c9935b0> | keyword[def] identifier[download_as_json] ( identifier[url] ):
literal[string]
keyword[try] :
keyword[return] identifier[Response] ( literal[string] , identifier[request] ( identifier[url] = identifier[url] )). identifier[read] ()
keyword[except] identifier[HTTPError] keyword[as] identifier[err] :
keyword[raise] identifier[ResponseException] ( literal[string] , identifier[err] ) | def download_as_json(url):
"""
Download the data at the URL and load it as JSON
"""
try:
return Response('application/json', request(url=url)).read() # depends on [control=['try'], data=[]]
except HTTPError as err:
raise ResponseException('application/json', err) # depends on [control=['except'], data=['err']] |
def triangular_coord(coord):
    """
    Convert a 2D coordinate into a triangle-based coordinate system for a
    prettier phase diagram.

    Args:
        coord: coordinate (or array of coordinates) used in the convex
            hull computation.

    Returns:
        coordinates in a triangular-based coordinate system.
    """
    # Basis vectors of the triangular lattice: x-axis and a 60-degree axis.
    basis = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
    return np.dot(np.array(coord), basis).T
constant[
Convert a 2D coordinate into a triangle-based coordinate system for a
prettier phase diagram.
Args:
coordinate: coordinate used in the convex hull computation.
Returns:
coordinates in a triangular-based coordinate system.
]
variable[unitvec] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da207f9a950>, <ast.List object at 0x7da207f9a8f0>]]]]
variable[result] assign[=] call[name[np].dot, parameter[call[name[np].array, parameter[name[coord]]], name[unitvec]]]
return[call[name[result].transpose, parameter[]]] | keyword[def] identifier[triangular_coord] ( identifier[coord] ):
literal[string]
identifier[unitvec] = identifier[np] . identifier[array] ([[ literal[int] , literal[int] ],[ literal[int] , identifier[math] . identifier[sqrt] ( literal[int] )/ literal[int] ]])
identifier[result] = identifier[np] . identifier[dot] ( identifier[np] . identifier[array] ( identifier[coord] ), identifier[unitvec] )
keyword[return] identifier[result] . identifier[transpose] () | def triangular_coord(coord):
"""
Convert a 2D coordinate into a triangle-based coordinate system for a
prettier phase diagram.
Args:
coordinate: coordinate used in the convex hull computation.
Returns:
coordinates in a triangular-based coordinate system.
"""
unitvec = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
result = np.dot(np.array(coord), unitvec)
return result.transpose() |
def verify_checksum(message, previous_csum=0):
    """Verify checksum for incoming message.

    :param message: incoming message
    :param previous_csum: accumulated checksum value
    :return: True if the message's type carries no checksum at all, or if
        the checksum computed over the message args matches the checksum
        attached to the message; False otherwise
    """
    # Message types outside CHECKSUM_MSG_TYPES carry no checksum and are
    # trivially considered valid.
    if message.message_type not in CHECKSUM_MSG_TYPES:
        return True
    # message.checksum is a (checksum_type, checksum_value) pair.
    csum = compute_checksum(
        message.checksum[0],
        message.args,
        previous_csum,
    )
    return csum == message.checksum[1]
return True | def function[verify_checksum, parameter[message, previous_csum]]:
constant[Verify checksum for incoming message.
:param message: incoming message
:param previous_csum: accumulated checksum value
:return return True if message checksum type is None
or checksum is correct
]
if compare[name[message].message_type in name[CHECKSUM_MSG_TYPES]] begin[:]
variable[csum] assign[=] call[name[compute_checksum], parameter[call[name[message].checksum][constant[0]], name[message].args, name[previous_csum]]]
if compare[name[csum] equal[==] call[name[message].checksum][constant[1]]] begin[:]
return[constant[True]] | keyword[def] identifier[verify_checksum] ( identifier[message] , identifier[previous_csum] = literal[int] ):
literal[string]
keyword[if] identifier[message] . identifier[message_type] keyword[in] identifier[CHECKSUM_MSG_TYPES] :
identifier[csum] = identifier[compute_checksum] (
identifier[message] . identifier[checksum] [ literal[int] ],
identifier[message] . identifier[args] ,
identifier[previous_csum] ,
)
keyword[if] identifier[csum] == identifier[message] . identifier[checksum] [ literal[int] ]:
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True] | def verify_checksum(message, previous_csum=0):
"""Verify checksum for incoming message.
:param message: incoming message
:param previous_csum: accumulated checksum value
:return return True if message checksum type is None
or checksum is correct
"""
if message.message_type in CHECKSUM_MSG_TYPES:
csum = compute_checksum(message.checksum[0], message.args, previous_csum)
if csum == message.checksum[1]:
return True # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['if'], data=[]]
else:
return True |
def requests(self):
"""
Retrieve any pending contact requests.
Returns:
:class:`SkypeRequest` list: collection of requests
"""
requests = []
for json in self.skype.conn("GET", "{0}/users/{1}/invites"
.format(SkypeConnection.API_CONTACTS, self.skype.userId),
auth=SkypeConnection.Auth.SkypeToken).json().get("invite_list", []):
for invite in json.get("invites", []):
# Copy user identifier to each invite message.
invite["userId"] = SkypeUtils.noPrefix(json.get("mri"))
requests.append(SkypeRequest.fromRaw(self.skype, invite))
return requests | def function[requests, parameter[self]]:
constant[
Retrieve any pending contact requests.
Returns:
:class:`SkypeRequest` list: collection of requests
]
variable[requests] assign[=] list[[]]
for taget[name[json]] in starred[call[call[call[name[self].skype.conn, parameter[constant[GET], call[constant[{0}/users/{1}/invites].format, parameter[name[SkypeConnection].API_CONTACTS, name[self].skype.userId]]]].json, parameter[]].get, parameter[constant[invite_list], list[[]]]]] begin[:]
for taget[name[invite]] in starred[call[name[json].get, parameter[constant[invites], list[[]]]]] begin[:]
call[name[invite]][constant[userId]] assign[=] call[name[SkypeUtils].noPrefix, parameter[call[name[json].get, parameter[constant[mri]]]]]
call[name[requests].append, parameter[call[name[SkypeRequest].fromRaw, parameter[name[self].skype, name[invite]]]]]
return[name[requests]] | keyword[def] identifier[requests] ( identifier[self] ):
literal[string]
identifier[requests] =[]
keyword[for] identifier[json] keyword[in] identifier[self] . identifier[skype] . identifier[conn] ( literal[string] , literal[string]
. identifier[format] ( identifier[SkypeConnection] . identifier[API_CONTACTS] , identifier[self] . identifier[skype] . identifier[userId] ),
identifier[auth] = identifier[SkypeConnection] . identifier[Auth] . identifier[SkypeToken] ). identifier[json] (). identifier[get] ( literal[string] ,[]):
keyword[for] identifier[invite] keyword[in] identifier[json] . identifier[get] ( literal[string] ,[]):
identifier[invite] [ literal[string] ]= identifier[SkypeUtils] . identifier[noPrefix] ( identifier[json] . identifier[get] ( literal[string] ))
identifier[requests] . identifier[append] ( identifier[SkypeRequest] . identifier[fromRaw] ( identifier[self] . identifier[skype] , identifier[invite] ))
keyword[return] identifier[requests] | def requests(self):
"""
Retrieve any pending contact requests.
Returns:
:class:`SkypeRequest` list: collection of requests
"""
requests = []
for json in self.skype.conn('GET', '{0}/users/{1}/invites'.format(SkypeConnection.API_CONTACTS, self.skype.userId), auth=SkypeConnection.Auth.SkypeToken).json().get('invite_list', []):
for invite in json.get('invites', []):
# Copy user identifier to each invite message.
invite['userId'] = SkypeUtils.noPrefix(json.get('mri'))
requests.append(SkypeRequest.fromRaw(self.skype, invite)) # depends on [control=['for'], data=['invite']] # depends on [control=['for'], data=['json']]
return requests |
def set_meta(self, meta, name=None, index=None):
    """Add metadata columns as pd.Series, list or value (int/float/str)

    Parameters
    ----------
    meta: pd.Series, list, int, float or str
        column to be added to metadata
        (by `['model', 'scenario']` index if possible)
    name: str, optional
        meta column name (defaults to meta pd.Series.name);
        either a meta.name or the name kwarg must be defined
    index: pyam.IamDataFrame, pd.DataFrame or pd.MultiIndex, optional
        index to be used for setting meta column (`['model', 'scenario']`)

    Raises
    ------
    ValueError
        if no column name can be determined, if the name collides with a
        `data` column, if the (coerced) index is not a unique
        ``pd.MultiIndex``, or if `meta` refers to model/scenario
        combinations that do not exist in `self.meta`.
    """
    # check that name is valid and doesn't conflict with data columns
    if (name or (hasattr(meta, 'name') and meta.name)) in [None, False]:
        raise ValueError('Must pass a name or use a named pd.Series')
    name = name or meta.name

    if name in self.data.columns:
        raise ValueError('`{}` already exists in `data`!'.format(name))

    # check if meta has a valid index and use it for further workflow
    # (an explicit pd.Series index covering META_IDX overrides the
    # `index` argument)
    if hasattr(meta, 'index') and hasattr(meta.index, 'names') \
            and set(META_IDX).issubset(meta.index.names):
        index = meta.index

    # if no valid index is provided, add meta as new column `name` and exit
    # (scalar values are broadcast to every row by pandas)
    if index is None:
        self.meta[name] = list(meta) if islistable(meta) else meta
        return  # EXIT FUNCTION

    # use meta.index if index arg is an IamDataFrame
    if isinstance(index, IamDataFrame):
        index = index.meta.index
    # turn dataframe to index if index arg is a DataFrame
    if isinstance(index, pd.DataFrame):
        index = index.set_index(META_IDX).index
    if not isinstance(index, pd.MultiIndex):
        raise ValueError('index cannot be coerced to pd.MultiIndex')

    # raise error if index is not unique
    if index.duplicated().any():
        raise ValueError("non-unique ['model', 'scenario'] index!")

    # create pd.Series from meta, index and name if provided
    meta = pd.Series(data=meta, index=index, name=name)

    # reduce index dimensions to model-scenario only
    # (drops any extra index levels such as region or year)
    meta = (
        meta
        .reset_index()
        .reindex(columns=META_IDX + [name])
        .set_index(META_IDX)
    )

    # check if trying to add model-scenario index not existing in self
    diff = meta.index.difference(self.meta.index)
    if not diff.empty:
        error = "adding metadata for non-existing scenarios '{}'!"
        raise ValueError(error.format(diff))

    self._new_meta_column(name)
    # keep existing values for scenarios not covered by the new data
    self.meta[name] = meta[name].combine_first(self.meta[name])
constant[Add metadata columns as pd.Series, list or value (int/float/str)
Parameters
----------
meta: pd.Series, list, int, float or str
column to be added to metadata
(by `['model', 'scenario']` index if possible)
name: str, optional
meta column name (defaults to meta pd.Series.name);
either a meta.name or the name kwarg must be defined
index: pyam.IamDataFrame, pd.DataFrame or pd.MultiIndex, optional
index to be used for setting meta column (`['model', 'scenario']`)
]
if compare[<ast.BoolOp object at 0x7da18dc06dd0> in list[[<ast.Constant object at 0x7da18dc05780>, <ast.Constant object at 0x7da18dc06740>]]] begin[:]
<ast.Raise object at 0x7da18dc072b0>
variable[name] assign[=] <ast.BoolOp object at 0x7da18dc04df0>
if compare[name[name] in name[self].data.columns] begin[:]
<ast.Raise object at 0x7da18dc05960>
if <ast.BoolOp object at 0x7da18dc071f0> begin[:]
variable[index] assign[=] name[meta].index
if compare[name[index] is constant[None]] begin[:]
call[name[self].meta][name[name]] assign[=] <ast.IfExp object at 0x7da18dc07ac0>
return[None]
if call[name[isinstance], parameter[name[index], name[IamDataFrame]]] begin[:]
variable[index] assign[=] name[index].meta.index
if call[name[isinstance], parameter[name[index], name[pd].DataFrame]] begin[:]
variable[index] assign[=] call[name[index].set_index, parameter[name[META_IDX]]].index
if <ast.UnaryOp object at 0x7da18dc04310> begin[:]
<ast.Raise object at 0x7da18dc045b0>
if call[call[name[index].duplicated, parameter[]].any, parameter[]] begin[:]
<ast.Raise object at 0x7da18dc064d0>
variable[meta] assign[=] call[name[pd].Series, parameter[]]
variable[meta] assign[=] call[call[call[name[meta].reset_index, parameter[]].reindex, parameter[]].set_index, parameter[name[META_IDX]]]
variable[diff] assign[=] call[name[meta].index.difference, parameter[name[self].meta.index]]
if <ast.UnaryOp object at 0x7da1b0f056c0> begin[:]
variable[error] assign[=] constant[adding metadata for non-existing scenarios '{}'!]
<ast.Raise object at 0x7da1b0f04fd0>
call[name[self]._new_meta_column, parameter[name[name]]]
call[name[self].meta][name[name]] assign[=] call[call[name[meta]][name[name]].combine_first, parameter[call[name[self].meta][name[name]]]] | keyword[def] identifier[set_meta] ( identifier[self] , identifier[meta] , identifier[name] = keyword[None] , identifier[index] = keyword[None] ):
literal[string]
keyword[if] ( identifier[name] keyword[or] ( identifier[hasattr] ( identifier[meta] , literal[string] ) keyword[and] identifier[meta] . identifier[name] )) keyword[in] [ keyword[None] , keyword[False] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[name] = identifier[name] keyword[or] identifier[meta] . identifier[name]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[data] . identifier[columns] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[if] identifier[hasattr] ( identifier[meta] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[meta] . identifier[index] , literal[string] ) keyword[and] identifier[set] ( identifier[META_IDX] ). identifier[issubset] ( identifier[meta] . identifier[index] . identifier[names] ):
identifier[index] = identifier[meta] . identifier[index]
keyword[if] identifier[index] keyword[is] keyword[None] :
identifier[self] . identifier[meta] [ identifier[name] ]= identifier[list] ( identifier[meta] ) keyword[if] identifier[islistable] ( identifier[meta] ) keyword[else] identifier[meta]
keyword[return]
keyword[if] identifier[isinstance] ( identifier[index] , identifier[IamDataFrame] ):
identifier[index] = identifier[index] . identifier[meta] . identifier[index]
keyword[if] identifier[isinstance] ( identifier[index] , identifier[pd] . identifier[DataFrame] ):
identifier[index] = identifier[index] . identifier[set_index] ( identifier[META_IDX] ). identifier[index]
keyword[if] keyword[not] identifier[isinstance] ( identifier[index] , identifier[pd] . identifier[MultiIndex] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[index] . identifier[duplicated] (). identifier[any] ():
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[meta] = identifier[pd] . identifier[Series] ( identifier[data] = identifier[meta] , identifier[index] = identifier[index] , identifier[name] = identifier[name] )
identifier[meta] =(
identifier[meta]
. identifier[reset_index] ()
. identifier[reindex] ( identifier[columns] = identifier[META_IDX] +[ identifier[name] ])
. identifier[set_index] ( identifier[META_IDX] )
)
identifier[diff] = identifier[meta] . identifier[index] . identifier[difference] ( identifier[self] . identifier[meta] . identifier[index] )
keyword[if] keyword[not] identifier[diff] . identifier[empty] :
identifier[error] = literal[string]
keyword[raise] identifier[ValueError] ( identifier[error] . identifier[format] ( identifier[diff] ))
identifier[self] . identifier[_new_meta_column] ( identifier[name] )
identifier[self] . identifier[meta] [ identifier[name] ]= identifier[meta] [ identifier[name] ]. identifier[combine_first] ( identifier[self] . identifier[meta] [ identifier[name] ]) | def set_meta(self, meta, name=None, index=None):
"""Add metadata columns as pd.Series, list or value (int/float/str)
Parameters
----------
meta: pd.Series, list, int, float or str
column to be added to metadata
(by `['model', 'scenario']` index if possible)
name: str, optional
meta column name (defaults to meta pd.Series.name);
either a meta.name or the name kwarg must be defined
index: pyam.IamDataFrame, pd.DataFrame or pd.MultiIndex, optional
index to be used for setting meta column (`['model', 'scenario']`)
"""
# check that name is valid and doesn't conflict with data columns
if (name or (hasattr(meta, 'name') and meta.name)) in [None, False]:
raise ValueError('Must pass a name or use a named pd.Series') # depends on [control=['if'], data=[]]
name = name or meta.name
if name in self.data.columns:
raise ValueError('`{}` already exists in `data`!'.format(name)) # depends on [control=['if'], data=['name']]
# check if meta has a valid index and use it for further workflow
if hasattr(meta, 'index') and hasattr(meta.index, 'names') and set(META_IDX).issubset(meta.index.names):
index = meta.index # depends on [control=['if'], data=[]]
# if no valid index is provided, add meta as new column `name` and exit
if index is None:
self.meta[name] = list(meta) if islistable(meta) else meta
return # EXIT FUNCTION # depends on [control=['if'], data=[]]
# use meta.index if index arg is an IamDataFrame
if isinstance(index, IamDataFrame):
index = index.meta.index # depends on [control=['if'], data=[]]
# turn dataframe to index if index arg is a DataFrame
if isinstance(index, pd.DataFrame):
index = index.set_index(META_IDX).index # depends on [control=['if'], data=[]]
if not isinstance(index, pd.MultiIndex):
raise ValueError('index cannot be coerced to pd.MultiIndex') # depends on [control=['if'], data=[]]
# raise error if index is not unique
if index.duplicated().any():
raise ValueError("non-unique ['model', 'scenario'] index!") # depends on [control=['if'], data=[]]
# create pd.Series from meta, index and name if provided
meta = pd.Series(data=meta, index=index, name=name)
# reduce index dimensions to model-scenario only
meta = meta.reset_index().reindex(columns=META_IDX + [name]).set_index(META_IDX)
# check if trying to add model-scenario index not existing in self
diff = meta.index.difference(self.meta.index)
if not diff.empty:
error = "adding metadata for non-existing scenarios '{}'!"
raise ValueError(error.format(diff)) # depends on [control=['if'], data=[]]
self._new_meta_column(name)
self.meta[name] = meta[name].combine_first(self.meta[name]) |
def set(self, quote_id, customer_data, store_view=None):
    """
    Add customer information into a shopping cart

    :param quote_id: Shopping cart ID (quote ID)
    :param customer_data: dict of customer details, for example::

        {
            'firstname': 'testFirstname',
            'lastname': 'testLastName',
            'email': 'testEmail',
            'website_id': '0',
            'store_id': '0',
            'mode': 'guest'
        }

    :param store_view: Store view ID or code
    :return: boolean, True if information added
    """
    result = self.call('cart_customer.set',
                       [quote_id, customer_data, store_view])
    return bool(result)
constant[
Add customer information into a shopping cart
:param quote_id: Shopping cart ID (quote ID)
:param customer_data, dict of customer details, example
{
'firstname': 'testFirstname',
'lastname': 'testLastName',
'email': 'testEmail',
'website_id': '0',
'store_id': '0',
'mode': 'guest'
}
:param store_view: Store view ID or code
:return: boolean, True if information added
]
return[call[name[bool], parameter[call[name[self].call, parameter[constant[cart_customer.set], list[[<ast.Name object at 0x7da1b04d3d90>, <ast.Name object at 0x7da1b04d0ca0>, <ast.Name object at 0x7da1b04d2b60>]]]]]]] | keyword[def] identifier[set] ( identifier[self] , identifier[quote_id] , identifier[customer_data] , identifier[store_view] = keyword[None] ):
literal[string]
keyword[return] identifier[bool] (
identifier[self] . identifier[call] ( literal[string] ,
[ identifier[quote_id] , identifier[customer_data] , identifier[store_view] ])
) | def set(self, quote_id, customer_data, store_view=None):
"""
Add customer information into a shopping cart
:param quote_id: Shopping cart ID (quote ID)
:param customer_data, dict of customer details, example
{
'firstname': 'testFirstname',
'lastname': 'testLastName',
'email': 'testEmail',
'website_id': '0',
'store_id': '0',
'mode': 'guest'
}
:param store_view: Store view ID or code
:return: boolean, True if information added
"""
return bool(self.call('cart_customer.set', [quote_id, customer_data, store_view])) |
def LoadCHM(self, archiveName):
    '''Loads a CHM archive.
    This function will also call GetArchiveInfo to obtain information
    such as the index file name and the topics file. It returns 1 on
    success, and 0 if it fails.
    '''
    # Only one archive is tracked per instance: close any previously
    # opened archive before opening the new one.
    if self.filename is not None:
        self.CloseCHM()
    self.file = chmlib.chm_open(archiveName)
    # chm_open is treated as failed when it returns None; in that case
    # self.file is left as None and self.filename is not updated.
    if self.file is None:
        return 0
    self.filename = archiveName
    # Populate archive metadata (index/topics attributes) from the
    # freshly opened file.
    self.GetArchiveInfo()
    return 1
constant[Loads a CHM archive.
This function will also call GetArchiveInfo to obtain information
such as the index file name and the topics file. It returns 1 on
success, and 0 if it fails.
]
if compare[name[self].filename is_not constant[None]] begin[:]
call[name[self].CloseCHM, parameter[]]
name[self].file assign[=] call[name[chmlib].chm_open, parameter[name[archiveName]]]
if compare[name[self].file is constant[None]] begin[:]
return[constant[0]]
name[self].filename assign[=] name[archiveName]
call[name[self].GetArchiveInfo, parameter[]]
return[constant[1]] | keyword[def] identifier[LoadCHM] ( identifier[self] , identifier[archiveName] ):
literal[string]
keyword[if] identifier[self] . identifier[filename] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[CloseCHM] ()
identifier[self] . identifier[file] = identifier[chmlib] . identifier[chm_open] ( identifier[archiveName] )
keyword[if] identifier[self] . identifier[file] keyword[is] keyword[None] :
keyword[return] literal[int]
identifier[self] . identifier[filename] = identifier[archiveName]
identifier[self] . identifier[GetArchiveInfo] ()
keyword[return] literal[int] | def LoadCHM(self, archiveName):
"""Loads a CHM archive.
This function will also call GetArchiveInfo to obtain information
such as the index file name and the topics file. It returns 1 on
success, and 0 if it fails.
"""
if self.filename is not None:
self.CloseCHM() # depends on [control=['if'], data=[]]
self.file = chmlib.chm_open(archiveName)
if self.file is None:
return 0 # depends on [control=['if'], data=[]]
self.filename = archiveName
self.GetArchiveInfo()
return 1 |
def is_delimiter(line):
    """True if a line consists only of a single punctuation character."""
    if not line:
        return False
    ch = line[0]
    return ch in punctuation and set(line) == {ch}
constant[ True if a line consists only of a single punctuation character.]
return[<ast.BoolOp object at 0x7da18f723ac0>] | keyword[def] identifier[is_delimiter] ( identifier[line] ):
literal[string]
keyword[return] identifier[bool] ( identifier[line] ) keyword[and] identifier[line] [ literal[int] ] keyword[in] identifier[punctuation] keyword[and] identifier[line] [ literal[int] ]* identifier[len] ( identifier[line] )== identifier[line] | def is_delimiter(line):
""" True if a line consists only of a single punctuation character."""
return bool(line) and line[0] in punctuation and (line[0] * len(line) == line) |
def fmt_margin(text, margin=None, margin_left=None, margin_right=None, margin_char=' '):
    """
    Surround given text with given margin characters.

    :param text: the text to pad
    :param margin: default width applied to any side not set explicitly
    :param margin_left: width of the left margin (overrides ``margin``)
    :param margin_right: width of the right margin (overrides ``margin``)
    :param margin_char: padding character; only the first character of
        ``str(margin_char)`` is used
    :return: the padded text
    """
    if margin_left is None:
        margin_left = margin
    if margin_right is None:
        margin_right = margin
    # Fast path: nothing to pad, and margin_char is never inspected.
    if margin_left is None and margin_right is None:
        return text
    # Compute the pad character once instead of once per side.
    pad = str(margin_char)[0]
    if margin_left is not None:
        text = '{}{}'.format(pad * int(margin_left), text)
    if margin_right is not None:
        text = '{}{}'.format(text, pad * int(margin_right))
    return text
constant[
Surround given text with given margin characters.
]
if compare[name[margin_left] is constant[None]] begin[:]
variable[margin_left] assign[=] name[margin]
if compare[name[margin_right] is constant[None]] begin[:]
variable[margin_right] assign[=] name[margin]
if compare[name[margin_left] is_not constant[None]] begin[:]
variable[text] assign[=] call[constant[{}{}].format, parameter[binary_operation[call[call[name[str], parameter[name[margin_char]]]][constant[0]] * call[name[int], parameter[name[margin_left]]]], name[text]]]
if compare[name[margin_right] is_not constant[None]] begin[:]
variable[text] assign[=] call[constant[{}{}].format, parameter[name[text], binary_operation[call[call[name[str], parameter[name[margin_char]]]][constant[0]] * call[name[int], parameter[name[margin_right]]]]]]
return[name[text]] | keyword[def] identifier[fmt_margin] ( identifier[text] , identifier[margin] = keyword[None] , identifier[margin_left] = keyword[None] , identifier[margin_right] = keyword[None] , identifier[margin_char] = literal[string] ):
literal[string]
keyword[if] identifier[margin_left] keyword[is] keyword[None] :
identifier[margin_left] = identifier[margin]
keyword[if] identifier[margin_right] keyword[is] keyword[None] :
identifier[margin_right] = identifier[margin]
keyword[if] identifier[margin_left] keyword[is] keyword[not] keyword[None] :
identifier[text] = literal[string] . identifier[format] ( identifier[str] ( identifier[margin_char] )[ literal[int] ]* identifier[int] ( identifier[margin_left] ), identifier[text] )
keyword[if] identifier[margin_right] keyword[is] keyword[not] keyword[None] :
identifier[text] = literal[string] . identifier[format] ( identifier[text] , identifier[str] ( identifier[margin_char] )[ literal[int] ]* identifier[int] ( identifier[margin_right] ))
keyword[return] identifier[text] | def fmt_margin(text, margin=None, margin_left=None, margin_right=None, margin_char=' '):
"""
Surround given text with given margin characters.
"""
if margin_left is None:
margin_left = margin # depends on [control=['if'], data=['margin_left']]
if margin_right is None:
margin_right = margin # depends on [control=['if'], data=['margin_right']]
if margin_left is not None:
text = '{}{}'.format(str(margin_char)[0] * int(margin_left), text) # depends on [control=['if'], data=['margin_left']]
if margin_right is not None:
text = '{}{}'.format(text, str(margin_char)[0] * int(margin_right)) # depends on [control=['if'], data=['margin_right']]
return text |
def get_function(function_name, composite_function_expression=None):
    """
    Returns the function "name", which must be among the known functions or a composite function.

    :param function_name: the name of the function (use 'composite' if the function is a composite function)
    :param composite_function_expression: composite function specification such as
    ((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
    :return: the an instance of the requested class
    """
    # A composite expression takes precedence over a plain name lookup
    if composite_function_expression is not None:
        return _parse_function_expression(composite_function_expression)

    # Simple function: instantiate it from the registry
    if function_name in _known_functions:
        return _known_functions[function_name]()

    # Maybe this is a template
    # NOTE: import here to avoid circular import
    from astromodels.functions.template_model import TemplateModel, MissingDataFile

    try:
        return TemplateModel(function_name)
    except MissingDataFile:
        raise UnknownFunction("Function %s is not known. Known functions are: %s" %
                              (function_name, ",".join(_known_functions.keys())))
constant[
Returns the function "name", which must be among the known functions or a composite function.
:param function_name: the name of the function (use 'composite' if the function is a composite function)
:param composite_function_expression: composite function specification such as
((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
:return: the an instance of the requested class
]
if compare[name[composite_function_expression] is_not constant[None]] begin[:]
return[call[name[_parse_function_expression], parameter[name[composite_function_expression]]]] | keyword[def] identifier[get_function] ( identifier[function_name] , identifier[composite_function_expression] = keyword[None] ):
literal[string]
keyword[if] identifier[composite_function_expression] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[_parse_function_expression] ( identifier[composite_function_expression] )
keyword[else] :
keyword[if] identifier[function_name] keyword[in] identifier[_known_functions] :
keyword[return] identifier[_known_functions] [ identifier[function_name] ]()
keyword[else] :
keyword[from] identifier[astromodels] . identifier[functions] . identifier[template_model] keyword[import] identifier[TemplateModel] , identifier[MissingDataFile]
keyword[try] :
identifier[instance] = identifier[TemplateModel] ( identifier[function_name] )
keyword[except] identifier[MissingDataFile] :
keyword[raise] identifier[UnknownFunction] ( literal[string] %
( identifier[function_name] , literal[string] . identifier[join] ( identifier[_known_functions] . identifier[keys] ())))
keyword[else] :
keyword[return] identifier[instance] | def get_function(function_name, composite_function_expression=None):
"""
Returns the function "name", which must be among the known functions or a composite function.
:param function_name: the name of the function (use 'composite' if the function is a composite function)
:param composite_function_expression: composite function specification such as
((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
:return: the an instance of the requested class
"""
# Check whether this is a composite function or a simple function
if composite_function_expression is not None:
# Composite function
return _parse_function_expression(composite_function_expression) # depends on [control=['if'], data=['composite_function_expression']]
elif function_name in _known_functions:
return _known_functions[function_name]() # depends on [control=['if'], data=['function_name', '_known_functions']]
else:
# Maybe this is a template
# NOTE: import here to avoid circular import
from astromodels.functions.template_model import TemplateModel, MissingDataFile
try:
instance = TemplateModel(function_name) # depends on [control=['try'], data=[]]
except MissingDataFile:
raise UnknownFunction('Function %s is not known. Known functions are: %s' % (function_name, ','.join(_known_functions.keys()))) # depends on [control=['except'], data=[]]
else:
return instance |
def check(self, query):
    """
    Check that *query* would not return more datapoints than allowed.

    :param query: the query to inspect
    :return: Ok(True) if the query is acceptable, Err(message) otherwise
    """
    # Only select and delete queries deal with time durations;
    # all others are not affected by this rule.
    if query.get_type() not in {Keyword.SELECT, Keyword.DELETE}:
        return Ok(True)

    num_datapoints = query.get_datapoints()
    if num_datapoints > self.max_datapoints:
        hint = ("Expecting {} datapoints from that query, which is above the threshold! "
                "Set a date range (e.g. where time > now() - 24h), "
                "increase grouping (e.g. group by time(24h) "
                "or limit the number of datapoints (e.g. limit 100)")
        return Err(hint.format(num_datapoints))
    return Ok(True)
constant[
:param query:
]
if compare[call[name[query].get_type, parameter[]] <ast.NotIn object at 0x7da2590d7190> <ast.Set object at 0x7da1afe67220>] begin[:]
return[call[name[Ok], parameter[constant[True]]]]
variable[datapoints] assign[=] call[name[query].get_datapoints, parameter[]]
if compare[name[datapoints] less_or_equal[<=] name[self].max_datapoints] begin[:]
return[call[name[Ok], parameter[constant[True]]]]
return[call[name[Err], parameter[call[constant[Expecting {} datapoints from that query, which is above the threshold! Set a date range (e.g. where time > now() - 24h), increase grouping (e.g. group by time(24h) or limit the number of datapoints (e.g. limit 100)].format, parameter[name[datapoints]]]]]] | keyword[def] identifier[check] ( identifier[self] , identifier[query] ):
literal[string]
keyword[if] identifier[query] . identifier[get_type] () keyword[not] keyword[in] { identifier[Keyword] . identifier[SELECT] , identifier[Keyword] . identifier[DELETE] }:
keyword[return] identifier[Ok] ( keyword[True] )
identifier[datapoints] = identifier[query] . identifier[get_datapoints] ()
keyword[if] identifier[datapoints] <= identifier[self] . identifier[max_datapoints] :
keyword[return] identifier[Ok] ( keyword[True] )
keyword[return] identifier[Err] (( literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[datapoints] )) | def check(self, query):
"""
:param query:
"""
if query.get_type() not in {Keyword.SELECT, Keyword.DELETE}:
# Only select and delete queries deal with time durations
# All others are not affected by this rule. Bailing out.
return Ok(True) # depends on [control=['if'], data=[]]
datapoints = query.get_datapoints()
if datapoints <= self.max_datapoints:
return Ok(True) # depends on [control=['if'], data=[]]
return Err('Expecting {} datapoints from that query, which is above the threshold! Set a date range (e.g. where time > now() - 24h), increase grouping (e.g. group by time(24h) or limit the number of datapoints (e.g. limit 100)'.format(datapoints)) |
def get_fixed_param_names(self) -> List[str]:
    """Return the names of the network's fixed parameters.

    :return: Names of argument/auxiliary entries that also appear among
        the symbol's declared arguments.
    """
    known_params = set(self.args) | set(self.auxs)
    declared = set(self.sym.list_arguments())
    return list(known_params & declared)
constant[
Get the fixed params of the network.
:return: List of strings, names of the layers
]
variable[args] assign[=] binary_operation[call[name[set], parameter[call[name[self].args.keys, parameter[]]]] <ast.BitOr object at 0x7da2590d6aa0> call[name[set], parameter[call[name[self].auxs.keys, parameter[]]]]]
return[call[name[list], parameter[binary_operation[name[args] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[call[name[self].sym.list_arguments, parameter[]]]]]]]] | keyword[def] identifier[get_fixed_param_names] ( identifier[self] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[args] = identifier[set] ( identifier[self] . identifier[args] . identifier[keys] ())| identifier[set] ( identifier[self] . identifier[auxs] . identifier[keys] ())
keyword[return] identifier[list] ( identifier[args] & identifier[set] ( identifier[self] . identifier[sym] . identifier[list_arguments] ())) | def get_fixed_param_names(self) -> List[str]:
"""
Get the fixed params of the network.
:return: List of strings, names of the layers
"""
args = set(self.args.keys()) | set(self.auxs.keys())
return list(args & set(self.sym.list_arguments())) |
def _serialize_call(self, format_, call):
    """Return serialized version of the Call using the record's FORMAT'"""
    # Unparsed calls carry their raw text verbatim; pass it through.
    if isinstance(call, record.UnparsedCall):
        return call.unparsed_data
    pieces = []
    for key in format_:
        field_info = self.header.get_format_field_info(key)
        pieces.append(format_value(field_info, call.data.get(key), "FORMAT"))
    return ":".join(pieces)
return ":".join(result) | def function[_serialize_call, parameter[self, format_, call]]:
constant[Return serialized version of the Call using the record's FORMAT']
if call[name[isinstance], parameter[name[call], name[record].UnparsedCall]] begin[:]
return[name[call].unparsed_data] | keyword[def] identifier[_serialize_call] ( identifier[self] , identifier[format_] , identifier[call] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[call] , identifier[record] . identifier[UnparsedCall] ):
keyword[return] identifier[call] . identifier[unparsed_data]
keyword[else] :
identifier[result] =[
identifier[format_value] ( identifier[self] . identifier[header] . identifier[get_format_field_info] ( identifier[key] ), identifier[call] . identifier[data] . identifier[get] ( identifier[key] ), literal[string] )
keyword[for] identifier[key] keyword[in] identifier[format_]
]
keyword[return] literal[string] . identifier[join] ( identifier[result] ) | def _serialize_call(self, format_, call):
"""Return serialized version of the Call using the record's FORMAT'"""
if isinstance(call, record.UnparsedCall):
return call.unparsed_data # depends on [control=['if'], data=[]]
else:
result = [format_value(self.header.get_format_field_info(key), call.data.get(key), 'FORMAT') for key in format_]
return ':'.join(result) |
def decrypt(self):
    """Decrypt decrypts the secret and returns the plaintext.

    Calling decrypt() may incur side effects such as a call to a remote
    service for decryption.

    :return: the decrypted plaintext; empty bytes when no crypter is set.
    :raises ValueError: if the underlying crypter fails; the original
        exception is chained as ``__cause__``.
    """
    if not self._crypter:
        # No crypter configured: nothing to decrypt.
        return b''
    try:
        plaintext = self._crypter.decrypt(self._ciphertext, **self._decrypt_params)
        return plaintext
    except Exception as e:
        # BUG FIX: the previous code passed a ValueError *instance* as the
        # first argument of six.reraise() (which expects an exception
        # class), so six called the instance and crashed with TypeError
        # instead of raising the intended ValueError. Native exception
        # chaining raises ValueError and preserves the original traceback.
        raise ValueError(
            'Invalid ciphertext "%s", error: %s' % (self._ciphertext, e)
        ) from e
constant[Decrypt decrypts the secret and returns the plaintext.
Calling decrypt() may incur side effects such as a call to a remote service for decryption.
]
if <ast.UnaryOp object at 0x7da1b054b040> begin[:]
return[constant[b'']]
<ast.Try object at 0x7da1b054b4f0> | keyword[def] identifier[decrypt] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_crypter] :
keyword[return] literal[string]
keyword[try] :
identifier[plaintext] = identifier[self] . identifier[_crypter] . identifier[decrypt] ( identifier[self] . identifier[_ciphertext] ,** identifier[self] . identifier[_decrypt_params] )
keyword[return] identifier[plaintext]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[exc_info] = identifier[sys] . identifier[exc_info] ()
identifier[six] . identifier[reraise] (
identifier[ValueError] ( literal[string] %( identifier[self] . identifier[_ciphertext] , identifier[e] )),
keyword[None] ,
identifier[exc_info] [ literal[int] ]
) | def decrypt(self):
"""Decrypt decrypts the secret and returns the plaintext.
Calling decrypt() may incur side effects such as a call to a remote service for decryption.
"""
if not self._crypter:
return b'' # depends on [control=['if'], data=[]]
try:
plaintext = self._crypter.decrypt(self._ciphertext, **self._decrypt_params)
return plaintext # depends on [control=['try'], data=[]]
except Exception as e:
exc_info = sys.exc_info()
six.reraise(ValueError('Invalid ciphertext "%s", error: %s' % (self._ciphertext, e)), None, exc_info[2]) # depends on [control=['except'], data=['e']] |
def makeSoftwareVersion(store, version, systemVersion):
    """
    Return the SoftwareVersion object from store corresponding to the
    version object, creating it if it doesn't already exist.
    """
    attributes = dict(
        systemVersion=systemVersion,
        package=unicode(version.package),
        version=unicode(version.short()),
        major=version.major,
        minor=version.minor,
        micro=version.micro,
    )
    return store.findOrCreate(SoftwareVersion, **attributes)
constant[
Return the SoftwareVersion object from store corresponding to the
version object, creating it if it doesn't already exist.
]
return[call[name[store].findOrCreate, parameter[name[SoftwareVersion]]]] | keyword[def] identifier[makeSoftwareVersion] ( identifier[store] , identifier[version] , identifier[systemVersion] ):
literal[string]
keyword[return] identifier[store] . identifier[findOrCreate] ( identifier[SoftwareVersion] ,
identifier[systemVersion] = identifier[systemVersion] ,
identifier[package] = identifier[unicode] ( identifier[version] . identifier[package] ),
identifier[version] = identifier[unicode] ( identifier[version] . identifier[short] ()),
identifier[major] = identifier[version] . identifier[major] ,
identifier[minor] = identifier[version] . identifier[minor] ,
identifier[micro] = identifier[version] . identifier[micro] ) | def makeSoftwareVersion(store, version, systemVersion):
"""
Return the SoftwareVersion object from store corresponding to the
version object, creating it if it doesn't already exist.
"""
return store.findOrCreate(SoftwareVersion, systemVersion=systemVersion, package=unicode(version.package), version=unicode(version.short()), major=version.major, minor=version.minor, micro=version.micro) |
def _get_children_path_interval(cls, path):
""":returns: An interval of all possible children paths for a node."""
return (path + cls.alphabet[0] * cls.steplen,
path + cls.alphabet[-1] * cls.steplen) | def function[_get_children_path_interval, parameter[cls, path]]:
constant[:returns: An interval of all possible children paths for a node.]
return[tuple[[<ast.BinOp object at 0x7da1b20aab60>, <ast.BinOp object at 0x7da18bc72f80>]]] | keyword[def] identifier[_get_children_path_interval] ( identifier[cls] , identifier[path] ):
literal[string]
keyword[return] ( identifier[path] + identifier[cls] . identifier[alphabet] [ literal[int] ]* identifier[cls] . identifier[steplen] ,
identifier[path] + identifier[cls] . identifier[alphabet] [- literal[int] ]* identifier[cls] . identifier[steplen] ) | def _get_children_path_interval(cls, path):
""":returns: An interval of all possible children paths for a node."""
return (path + cls.alphabet[0] * cls.steplen, path + cls.alphabet[-1] * cls.steplen) |
def _iterslice(self, slice):
    """Yield records from a slice index."""
    # NOTE(review): the parameter shadows the builtin `slice`; callers may
    # pass by position or by this keyword name, so it is left unchanged.
    # Expand the slice into the concrete indices it selects.
    indices = range(*slice.indices(len(self._records)))
    if self.is_attached():
        rows = self._enum_attached_rows(indices)
        # presumably _enum_attached_rows yields rows in ascending index
        # order regardless of step sign, so a negative step must be
        # materialized and reversed here — TODO confirm.
        if slice.step is not None and slice.step < 0:
            rows = reversed(list(rows))
    else:
        # Detached: pair each selected index with the stored row directly.
        rows = zip(indices, self._records[slice])
    fields = self.fields
    for i, row in rows:
        yield Record._make(fields, row, self, i)
constant[Yield records from a slice index.]
variable[indices] assign[=] call[name[range], parameter[<ast.Starred object at 0x7da1b06ca770>]]
if call[name[self].is_attached, parameter[]] begin[:]
variable[rows] assign[=] call[name[self]._enum_attached_rows, parameter[name[indices]]]
if <ast.BoolOp object at 0x7da1b06cb280> begin[:]
variable[rows] assign[=] call[name[reversed], parameter[call[name[list], parameter[name[rows]]]]]
variable[fields] assign[=] name[self].fields
for taget[tuple[[<ast.Name object at 0x7da1b06cbc10>, <ast.Name object at 0x7da1b06cace0>]]] in starred[name[rows]] begin[:]
<ast.Yield object at 0x7da1b06c8af0> | keyword[def] identifier[_iterslice] ( identifier[self] , identifier[slice] ):
literal[string]
identifier[indices] = identifier[range] (* identifier[slice] . identifier[indices] ( identifier[len] ( identifier[self] . identifier[_records] )))
keyword[if] identifier[self] . identifier[is_attached] ():
identifier[rows] = identifier[self] . identifier[_enum_attached_rows] ( identifier[indices] )
keyword[if] identifier[slice] . identifier[step] keyword[is] keyword[not] keyword[None] keyword[and] identifier[slice] . identifier[step] < literal[int] :
identifier[rows] = identifier[reversed] ( identifier[list] ( identifier[rows] ))
keyword[else] :
identifier[rows] = identifier[zip] ( identifier[indices] , identifier[self] . identifier[_records] [ identifier[slice] ])
identifier[fields] = identifier[self] . identifier[fields]
keyword[for] identifier[i] , identifier[row] keyword[in] identifier[rows] :
keyword[yield] identifier[Record] . identifier[_make] ( identifier[fields] , identifier[row] , identifier[self] , identifier[i] ) | def _iterslice(self, slice):
"""Yield records from a slice index."""
indices = range(*slice.indices(len(self._records)))
if self.is_attached():
rows = self._enum_attached_rows(indices)
if slice.step is not None and slice.step < 0:
rows = reversed(list(rows)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
rows = zip(indices, self._records[slice])
fields = self.fields
for (i, row) in rows:
yield Record._make(fields, row, self, i) # depends on [control=['for'], data=[]] |
def check_tx(self, raw_transaction):
    """Validate the transaction before entry into
    the mempool.
    Args:
        raw_tx: a raw string (in bytes) transaction.
    """
    self.abort_if_abci_chain_is_not_synced()
    logger.debug('check_tx: %s', raw_transaction)
    transaction = decode_transaction(raw_transaction)
    is_valid = self.bigchaindb.is_valid_transaction(transaction)
    if is_valid:
        logger.debug('check_tx: VALID')
        result_code = CodeTypeOk
    else:
        logger.debug('check_tx: INVALID')
        result_code = CodeTypeError
    return ResponseCheckTx(code=result_code)
constant[Validate the transaction before entry into
the mempool.
Args:
raw_tx: a raw string (in bytes) transaction.
]
call[name[self].abort_if_abci_chain_is_not_synced, parameter[]]
call[name[logger].debug, parameter[constant[check_tx: %s], name[raw_transaction]]]
variable[transaction] assign[=] call[name[decode_transaction], parameter[name[raw_transaction]]]
if call[name[self].bigchaindb.is_valid_transaction, parameter[name[transaction]]] begin[:]
call[name[logger].debug, parameter[constant[check_tx: VALID]]]
return[call[name[ResponseCheckTx], parameter[]]] | keyword[def] identifier[check_tx] ( identifier[self] , identifier[raw_transaction] ):
literal[string]
identifier[self] . identifier[abort_if_abci_chain_is_not_synced] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[raw_transaction] )
identifier[transaction] = identifier[decode_transaction] ( identifier[raw_transaction] )
keyword[if] identifier[self] . identifier[bigchaindb] . identifier[is_valid_transaction] ( identifier[transaction] ):
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[ResponseCheckTx] ( identifier[code] = identifier[CodeTypeOk] )
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[ResponseCheckTx] ( identifier[code] = identifier[CodeTypeError] ) | def check_tx(self, raw_transaction):
"""Validate the transaction before entry into
the mempool.
Args:
raw_tx: a raw string (in bytes) transaction.
"""
self.abort_if_abci_chain_is_not_synced()
logger.debug('check_tx: %s', raw_transaction)
transaction = decode_transaction(raw_transaction)
if self.bigchaindb.is_valid_transaction(transaction):
logger.debug('check_tx: VALID')
return ResponseCheckTx(code=CodeTypeOk) # depends on [control=['if'], data=[]]
else:
logger.debug('check_tx: INVALID')
return ResponseCheckTx(code=CodeTypeError) |
def p_lpartselect_lpointer(self, p):
    # The string below is the PLY grammar production for this rule; PLY
    # reads it from __doc__, so it is behavior and must not be edited.
    'lpartselect : pointer LBRACKET expression COLON expression RBRACKET'
    # Build an lvalue part-select node: pointer[msb:lsb].
    p[0] = Partselect(p[1], p[3], p[5], lineno=p.lineno(1))
    # Propagate the line number of the first symbol to the result symbol.
    p.set_lineno(0, p.lineno(1))
constant[lpartselect : pointer LBRACKET expression COLON expression RBRACKET]
call[name[p]][constant[0]] assign[=] call[name[Partselect], parameter[call[name[p]][constant[1]], call[name[p]][constant[3]], call[name[p]][constant[5]]]]
call[name[p].set_lineno, parameter[constant[0], call[name[p].lineno, parameter[constant[1]]]]] | keyword[def] identifier[p_lpartselect_lpointer] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[Partselect] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))
identifier[p] . identifier[set_lineno] ( literal[int] , identifier[p] . identifier[lineno] ( literal[int] )) | def p_lpartselect_lpointer(self, p):
"""lpartselect : pointer LBRACKET expression COLON expression RBRACKET"""
p[0] = Partselect(p[1], p[3], p[5], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
async def InstanceTypes(self, constraints):
    '''
    constraints : typing.Sequence[~CloudInstanceTypesConstraint]
    Returns -> typing.Sequence[~InstanceTypesResult]
    '''
    # Build the RPC request envelope for the Cloud facade, version 1.
    params = {'constraints': constraints}
    request = {
        'type': 'Cloud',
        'request': 'InstanceTypes',
        'version': 1,
        'params': params,
    }
    return await self.rpc(request)
return reply | <ast.AsyncFunctionDef object at 0x7da2041db1c0> | keyword[async] keyword[def] identifier[InstanceTypes] ( identifier[self] , identifier[constraints] ):
literal[string]
identifier[_params] = identifier[dict] ()
identifier[msg] = identifier[dict] ( identifier[type] = literal[string] ,
identifier[request] = literal[string] ,
identifier[version] = literal[int] ,
identifier[params] = identifier[_params] )
identifier[_params] [ literal[string] ]= identifier[constraints]
identifier[reply] = keyword[await] identifier[self] . identifier[rpc] ( identifier[msg] )
keyword[return] identifier[reply] | async def InstanceTypes(self, constraints):
"""
constraints : typing.Sequence[~CloudInstanceTypesConstraint]
Returns -> typing.Sequence[~InstanceTypesResult]
"""
# map input types to rpc msg
_params = dict()
msg = dict(type='Cloud', request='InstanceTypes', version=1, params=_params)
_params['constraints'] = constraints
reply = await self.rpc(msg)
return reply |
def moments_block(X, Y, remove_mean=False, modify_data=False,
                  sparse_mode='auto', sparse_tol=0.0,
                  column_selection=None, diag_only=False):
    r""" Computes the first two unnormalized moments of X and Y
    Computes
    .. math:
        s_x &=& \sum_t x_t
        s_y &=& \sum_t y_t
        C_XX &=& X^\top X
        C_XY &=& X^\top Y
        C_YX &=& Y^\top X
        C_YY &=& Y^\top Y
    while exploiting zero or constant columns in the data matrix.
    Parameters
    ----------
    X : ndarray (T, M)
        Data matrix
    Y : ndarray (T, N)
        Second data matrix
    remove_mean : bool
        True: remove column mean from the data, False: don't remove mean.
    modify_data : bool
        If remove_mean=True, the mean will be removed in the data matrix X,
        without creating an independent copy. This option is faster but might
        lead to surprises because your input array is changed.
    sparse_mode : str
        one of:
            * 'dense' : always use dense mode
            * 'sparse' : always use sparse mode if possible
            * 'auto' : automatic
    sparse_tol: float
        Threshold for considering column to be zero in order to save computing
        effort when the data is sparse or almost sparse.
        If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
        is not given) of the covariance matrix will be set to zero. If Y is
        given and max(abs(Y[:, i])) < sparse_tol, then column i of the
        covariance matrix will be set to zero.
    column_selection: ndarray(k, dtype=int) or None
        Indices of those columns that are to be computed. If None, all columns are computed.
    diag_only: bool
        If True, the computation is restricted to the diagonal entries (autocorrelations) only.
    Returns
    -------
    w : float
        statistical weight of this estimation
    s : [ndarray (M), ndarray (M)]
        list of two elements with s[0]=sx and s[1]=sy
    C : [[ndarray(M,M), ndarray(M,N)], [ndarray(N,M),ndarray(N,N)]]
        list of two lists with two elements.
        C[0,0] = Cxx, C[0,1] = Cxy, C[1,0] = Cyx, C[1,1] = Cyy
    """
    # diag_only is only implemented for dense mode.
    # FIX: compare string values with ==/!= — the previous `is`/`is not`
    # relied on CPython string interning and raises SyntaxWarning on 3.8+.
    if diag_only and sparse_mode != 'dense':
        if sparse_mode == 'sparse':
            import warnings
            warnings.warn('Computing diagonal entries only is not implemented for sparse mode. Switching to dense mode.')
        sparse_mode = 'dense'
    # sparsify: split off (near-)constant columns; mask_* is None in dense mode
    X0, mask_X, xconst = _sparsify(X, sparse_mode=sparse_mode, sparse_tol=sparse_tol)
    Y0, mask_Y, yconst = _sparsify(Y, sparse_mode=sparse_mode, sparse_tol=sparse_tol)
    is_sparse = mask_X is not None and mask_Y is not None
    # copy / convert: copy when we must not mutate the caller's data in place
    copy = is_sparse or (remove_mean and not modify_data)
    X0, xconst = _copy_convert(X0, const=xconst, copy=copy)
    Y0, yconst = _copy_convert(Y0, const=yconst, copy=copy)
    # sum / center: column sums and (optionally) mean-removed sums
    w, sx, sx_centered, sy, sy_centered = _sum(X0, xmask=mask_X, xconst=xconst, Y=Y0, ymask=mask_Y, yconst=yconst,
                                               symmetric=False, remove_mean=remove_mean)
    if remove_mean:
        _center(X0, w, sx, mask=mask_X, const=xconst, inplace=True)  # fast in-place centering
        _center(Y0, w, sy, mask=mask_Y, const=yconst, inplace=True)  # fast in-place centering
    if column_selection is not None:
        if is_sparse:
            # Re-sparsify the selected columns so the second factor of each
            # product only carries the requested columns.
            Xk = X[:, column_selection]
            mask_Xk = mask_X[column_selection] if mask_X is not None else mask_X
            X0k = Xk[:, mask_Xk]
            xksum = sx_centered[column_selection]
            xkconst = Xk[0, ~mask_Xk]
            X0k, xkconst = _copy_convert(X0k, const=xkconst, remove_mean=remove_mean,
                                         copy=True)
            Yk = Y[:, column_selection]
            mask_Yk = mask_Y[column_selection] if mask_Y is not None else mask_Y
            Y0k = Yk[:, mask_Yk]
            yksum = sy_centered[column_selection]
            ykconst = Yk[0, ~mask_Yk]
            Y0k, ykconst = _copy_convert(Y0k, const=ykconst, remove_mean=remove_mean,
                                         copy=True)
            Cxx = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_Xk,
                      xsum=sx_centered, xconst=xconst, ysum=xksum, yconst=xkconst)
            Cxy = _M2(X0, Y0k, mask_X=mask_X, mask_Y=mask_Yk,
                      xsum=sx_centered, xconst=xconst, ysum=yksum, yconst=ykconst)
            Cyx = _M2(Y0, X0k, mask_X=mask_Y, mask_Y=mask_Xk,
                      xsum=sy_centered, xconst=yconst, ysum=xksum, yconst=xkconst)
            Cyy = _M2(Y0, Y0k, mask_X=mask_Y, mask_Y=mask_Yk,
                      xsum=sy_centered, xconst=yconst, ysum=yksum, yconst=ykconst)
        else:
            X0k = X0[:, column_selection]
            Y0k = Y0[:, column_selection]
            Cxx = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_X,
                      xsum=sx_centered, xconst=xconst,
                      ysum=sx_centered[column_selection], yconst=xconst)
            Cxy = _M2(X0, Y0k, mask_X=mask_X, mask_Y=mask_Y,
                      xsum=sx_centered, xconst=xconst,
                      ysum=sy_centered[column_selection], yconst=yconst)
            Cyx = _M2(Y0, X0k, mask_X=mask_Y, mask_Y=mask_X,
                      xsum=sy_centered, xconst=yconst,
                      ysum=sx_centered[column_selection], yconst=xconst)
            Cyy = _M2(Y0, Y0k, mask_X=mask_Y, mask_Y=mask_Y,
                      xsum=sy_centered, xconst=yconst,
                      ysum=sy_centered[column_selection], yconst=yconst)
    else:
        Cxx = _M2(X0, X0, mask_X=mask_X, mask_Y=mask_X,
                  xsum=sx_centered, xconst=xconst, ysum=sx_centered, yconst=xconst,
                  diag_only=diag_only)
        Cxy = _M2(X0, Y0, mask_X=mask_X, mask_Y=mask_Y,
                  xsum=sx_centered, xconst=xconst, ysum=sy_centered, yconst=yconst,
                  diag_only=diag_only)
        # Full-column case: Cyx is simply the transpose of Cxy.
        Cyx = Cxy.T
        Cyy = _M2(Y0, Y0, mask_X=mask_Y, mask_Y=mask_Y,
                  xsum=sy_centered, xconst=yconst, ysum=sy_centered, yconst=yconst,
                  diag_only=diag_only)
    return w, (sx, sy), ((Cxx, Cxy), (Cyx, Cyy))
constant[ Computes the first two unnormalized moments of X and Y
Computes
.. math:
s_x &=& \sum_t x_t
s_y &=& \sum_t y_t
C_XX &=& X^ op X
C_XY &=& X^ op Y
C_YX &=& Y^ op X
C_YY &=& Y^ op Y
while exploiting zero or constant columns in the data matrix.
Parameters
----------
X : ndarray (T, M)
Data matrix
Y : ndarray (T, N)
Second data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
column_selection: ndarray(k, dtype=int) or None
Indices of those columns that are to be computed. If None, all columns are computed.
diag_only: bool
If True, the computation is restricted to the diagonal entries (autocorrelations) only.
Returns
-------
w : float
statistical weight of this estimation
s : [ndarray (M), ndarray (M)]
list of two elements with s[0]=sx and s[1]=sy
C : [[ndarray(M,M), ndarray(M,N)], [ndarray(N,M),ndarray(N,N)]]
list of two lists with two elements.
C[0,0] = Cxx, C[0,1] = Cxy, C[1,0] = Cyx, C[1,1] = Cyy
]
if <ast.BoolOp object at 0x7da1b071f7c0> begin[:]
if compare[name[sparse_mode] is constant[sparse]] begin[:]
import module[warnings]
call[name[warnings].warn, parameter[constant[Computing diagonal entries only is not implemented for sparse mode. Switching to dense mode.]]]
variable[sparse_mode] assign[=] constant[dense]
<ast.Tuple object at 0x7da1b071f3a0> assign[=] call[name[_sparsify], parameter[name[X]]]
<ast.Tuple object at 0x7da1b071f160> assign[=] call[name[_sparsify], parameter[name[Y]]]
variable[is_sparse] assign[=] <ast.BoolOp object at 0x7da1b071eef0>
variable[copy] assign[=] <ast.BoolOp object at 0x7da1b071ed40>
<ast.Tuple object at 0x7da1b071ebf0> assign[=] call[name[_copy_convert], parameter[name[X0]]]
<ast.Tuple object at 0x7da1b071e9e0> assign[=] call[name[_copy_convert], parameter[name[Y0]]]
<ast.Tuple object at 0x7da1b071e7d0> assign[=] call[name[_sum], parameter[name[X0]]]
if name[remove_mean] begin[:]
call[name[_center], parameter[name[X0], name[w], name[sx]]]
call[name[_center], parameter[name[Y0], name[w], name[sy]]]
if compare[name[column_selection] is_not constant[None]] begin[:]
if name[is_sparse] begin[:]
variable[Xk] assign[=] call[name[X]][tuple[[<ast.Slice object at 0x7da1b071dc00>, <ast.Name object at 0x7da1b071dbd0>]]]
variable[mask_Xk] assign[=] <ast.IfExp object at 0x7da1b071db40>
variable[X0k] assign[=] call[name[Xk]][tuple[[<ast.Slice object at 0x7da1b071d8d0>, <ast.Name object at 0x7da1b071d8a0>]]]
variable[xksum] assign[=] call[name[sx_centered]][name[column_selection]]
variable[xkconst] assign[=] call[name[Xk]][tuple[[<ast.Constant object at 0x7da1b071d690>, <ast.UnaryOp object at 0x7da1b071d660>]]]
<ast.Tuple object at 0x7da1b071d5d0> assign[=] call[name[_copy_convert], parameter[name[X0k]]]
variable[Yk] assign[=] call[name[Y]][tuple[[<ast.Slice object at 0x7da1b071d2a0>, <ast.Name object at 0x7da1b071d270>]]]
variable[mask_Yk] assign[=] <ast.IfExp object at 0x7da1b071d1e0>
variable[Y0k] assign[=] call[name[Yk]][tuple[[<ast.Slice object at 0x7da1b071cf70>, <ast.Name object at 0x7da1b071cf40>]]]
variable[yksum] assign[=] call[name[sy_centered]][name[column_selection]]
variable[ykconst] assign[=] call[name[Yk]][tuple[[<ast.Constant object at 0x7da1b071cd30>, <ast.UnaryOp object at 0x7da1b071cd00>]]]
<ast.Tuple object at 0x7da1b071cc70> assign[=] call[name[_copy_convert], parameter[name[Y0k]]]
variable[Cxx] assign[=] call[name[_M2], parameter[name[X0], name[X0k]]]
variable[Cxy] assign[=] call[name[_M2], parameter[name[X0], name[Y0k]]]
variable[Cyx] assign[=] call[name[_M2], parameter[name[Y0], name[X0k]]]
variable[Cyy] assign[=] call[name[_M2], parameter[name[Y0], name[Y0k]]]
return[tuple[[<ast.Name object at 0x7da1b0771930>, <ast.Tuple object at 0x7da1b0771960>, <ast.Tuple object at 0x7da1b07719f0>]]] | keyword[def] identifier[moments_block] ( identifier[X] , identifier[Y] , identifier[remove_mean] = keyword[False] , identifier[modify_data] = keyword[False] ,
identifier[sparse_mode] = literal[string] , identifier[sparse_tol] = literal[int] ,
identifier[column_selection] = keyword[None] , identifier[diag_only] = keyword[False] ):
literal[string]
keyword[if] identifier[diag_only] keyword[and] identifier[sparse_mode] keyword[is] keyword[not] literal[string] :
keyword[if] identifier[sparse_mode] keyword[is] literal[string] :
keyword[import] identifier[warnings]
identifier[warnings] . identifier[warn] ( literal[string] )
identifier[sparse_mode] = literal[string]
identifier[X0] , identifier[mask_X] , identifier[xconst] = identifier[_sparsify] ( identifier[X] , identifier[sparse_mode] = identifier[sparse_mode] , identifier[sparse_tol] = identifier[sparse_tol] )
identifier[Y0] , identifier[mask_Y] , identifier[yconst] = identifier[_sparsify] ( identifier[Y] , identifier[sparse_mode] = identifier[sparse_mode] , identifier[sparse_tol] = identifier[sparse_tol] )
identifier[is_sparse] = identifier[mask_X] keyword[is] keyword[not] keyword[None] keyword[and] identifier[mask_Y] keyword[is] keyword[not] keyword[None]
identifier[copy] = identifier[is_sparse] keyword[or] ( identifier[remove_mean] keyword[and] keyword[not] identifier[modify_data] )
identifier[X0] , identifier[xconst] = identifier[_copy_convert] ( identifier[X0] , identifier[const] = identifier[xconst] , identifier[copy] = identifier[copy] )
identifier[Y0] , identifier[yconst] = identifier[_copy_convert] ( identifier[Y0] , identifier[const] = identifier[yconst] , identifier[copy] = identifier[copy] )
identifier[w] , identifier[sx] , identifier[sx_centered] , identifier[sy] , identifier[sy_centered] = identifier[_sum] ( identifier[X0] , identifier[xmask] = identifier[mask_X] , identifier[xconst] = identifier[xconst] , identifier[Y] = identifier[Y0] , identifier[ymask] = identifier[mask_Y] , identifier[yconst] = identifier[yconst] ,
identifier[symmetric] = keyword[False] , identifier[remove_mean] = identifier[remove_mean] )
keyword[if] identifier[remove_mean] :
identifier[_center] ( identifier[X0] , identifier[w] , identifier[sx] , identifier[mask] = identifier[mask_X] , identifier[const] = identifier[xconst] , identifier[inplace] = keyword[True] )
identifier[_center] ( identifier[Y0] , identifier[w] , identifier[sy] , identifier[mask] = identifier[mask_Y] , identifier[const] = identifier[yconst] , identifier[inplace] = keyword[True] )
keyword[if] identifier[column_selection] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[is_sparse] :
identifier[Xk] = identifier[X] [:, identifier[column_selection] ]
identifier[mask_Xk] = identifier[mask_X] [ identifier[column_selection] ] keyword[if] identifier[mask_X] keyword[is] keyword[not] keyword[None] keyword[else] identifier[mask_X]
identifier[X0k] = identifier[Xk] [:, identifier[mask_Xk] ]
identifier[xksum] = identifier[sx_centered] [ identifier[column_selection] ]
identifier[xkconst] = identifier[Xk] [ literal[int] ,~ identifier[mask_Xk] ]
identifier[X0k] , identifier[xkconst] = identifier[_copy_convert] ( identifier[X0k] , identifier[const] = identifier[xkconst] , identifier[remove_mean] = identifier[remove_mean] ,
identifier[copy] = keyword[True] )
identifier[Yk] = identifier[Y] [:, identifier[column_selection] ]
identifier[mask_Yk] = identifier[mask_Y] [ identifier[column_selection] ] keyword[if] identifier[mask_Y] keyword[is] keyword[not] keyword[None] keyword[else] identifier[mask_Y]
identifier[Y0k] = identifier[Yk] [:, identifier[mask_Yk] ]
identifier[yksum] = identifier[sy_centered] [ identifier[column_selection] ]
identifier[ykconst] = identifier[Yk] [ literal[int] ,~ identifier[mask_Yk] ]
identifier[Y0k] , identifier[ykconst] = identifier[_copy_convert] ( identifier[Y0k] , identifier[const] = identifier[ykconst] , identifier[remove_mean] = identifier[remove_mean] ,
identifier[copy] = keyword[True] )
identifier[Cxx] = identifier[_M2] ( identifier[X0] , identifier[X0k] , identifier[mask_X] = identifier[mask_X] , identifier[mask_Y] = identifier[mask_Xk] ,
identifier[xsum] = identifier[sx_centered] , identifier[xconst] = identifier[xconst] , identifier[ysum] = identifier[xksum] , identifier[yconst] = identifier[xkconst] )
identifier[Cxy] = identifier[_M2] ( identifier[X0] , identifier[Y0k] , identifier[mask_X] = identifier[mask_X] , identifier[mask_Y] = identifier[mask_Yk] ,
identifier[xsum] = identifier[sx_centered] , identifier[xconst] = identifier[xconst] , identifier[ysum] = identifier[yksum] , identifier[yconst] = identifier[ykconst] )
identifier[Cyx] = identifier[_M2] ( identifier[Y0] , identifier[X0k] , identifier[mask_X] = identifier[mask_Y] , identifier[mask_Y] = identifier[mask_Xk] ,
identifier[xsum] = identifier[sy_centered] , identifier[xconst] = identifier[yconst] , identifier[ysum] = identifier[xksum] , identifier[yconst] = identifier[xkconst] )
identifier[Cyy] = identifier[_M2] ( identifier[Y0] , identifier[Y0k] , identifier[mask_X] = identifier[mask_Y] , identifier[mask_Y] = identifier[mask_Yk] ,
identifier[xsum] = identifier[sy_centered] , identifier[xconst] = identifier[yconst] , identifier[ysum] = identifier[yksum] , identifier[yconst] = identifier[ykconst] )
keyword[else] :
identifier[X0k] = identifier[X0] [:, identifier[column_selection] ]
identifier[Y0k] = identifier[Y0] [:, identifier[column_selection] ]
identifier[Cxx] = identifier[_M2] ( identifier[X0] , identifier[X0k] , identifier[mask_X] = identifier[mask_X] , identifier[mask_Y] = identifier[mask_X] ,
identifier[xsum] = identifier[sx_centered] , identifier[xconst] = identifier[xconst] ,
identifier[ysum] = identifier[sx_centered] [ identifier[column_selection] ], identifier[yconst] = identifier[xconst] )
identifier[Cxy] = identifier[_M2] ( identifier[X0] , identifier[Y0k] , identifier[mask_X] = identifier[mask_X] , identifier[mask_Y] = identifier[mask_Y] ,
identifier[xsum] = identifier[sx_centered] , identifier[xconst] = identifier[xconst] ,
identifier[ysum] = identifier[sy_centered] [ identifier[column_selection] ], identifier[yconst] = identifier[yconst] )
identifier[Cyx] = identifier[_M2] ( identifier[Y0] , identifier[X0k] , identifier[mask_X] = identifier[mask_Y] , identifier[mask_Y] = identifier[mask_X] ,
identifier[xsum] = identifier[sy_centered] , identifier[xconst] = identifier[yconst] ,
identifier[ysum] = identifier[sx_centered] [ identifier[column_selection] ], identifier[yconst] = identifier[xconst] )
identifier[Cyy] = identifier[_M2] ( identifier[Y0] , identifier[Y0k] , identifier[mask_X] = identifier[mask_Y] , identifier[mask_Y] = identifier[mask_Y] ,
identifier[xsum] = identifier[sy_centered] , identifier[xconst] = identifier[yconst] ,
identifier[ysum] = identifier[sy_centered] [ identifier[column_selection] ], identifier[yconst] = identifier[yconst] )
keyword[else] :
identifier[Cxx] = identifier[_M2] ( identifier[X0] , identifier[X0] , identifier[mask_X] = identifier[mask_X] , identifier[mask_Y] = identifier[mask_X] ,
identifier[xsum] = identifier[sx_centered] , identifier[xconst] = identifier[xconst] , identifier[ysum] = identifier[sx_centered] , identifier[yconst] = identifier[xconst] ,
identifier[diag_only] = identifier[diag_only] )
identifier[Cxy] = identifier[_M2] ( identifier[X0] , identifier[Y0] , identifier[mask_X] = identifier[mask_X] , identifier[mask_Y] = identifier[mask_Y] ,
identifier[xsum] = identifier[sx_centered] , identifier[xconst] = identifier[xconst] , identifier[ysum] = identifier[sy_centered] , identifier[yconst] = identifier[yconst] ,
identifier[diag_only] = identifier[diag_only] )
identifier[Cyx] = identifier[Cxy] . identifier[T]
identifier[Cyy] = identifier[_M2] ( identifier[Y0] , identifier[Y0] , identifier[mask_X] = identifier[mask_Y] , identifier[mask_Y] = identifier[mask_Y] ,
identifier[xsum] = identifier[sy_centered] , identifier[xconst] = identifier[yconst] , identifier[ysum] = identifier[sy_centered] , identifier[yconst] = identifier[yconst] ,
identifier[diag_only] = identifier[diag_only] )
keyword[return] identifier[w] ,( identifier[sx] , identifier[sy] ),(( identifier[Cxx] , identifier[Cxy] ),( identifier[Cyx] , identifier[Cyy] )) | def moments_block(X, Y, remove_mean=False, modify_data=False, sparse_mode='auto', sparse_tol=0.0, column_selection=None, diag_only=False):
""" Computes the first two unnormalized moments of X and Y
Computes
.. math:
s_x &=& \\sum_t x_t
s_y &=& \\sum_t y_t
C_XX &=& X^ op X
C_XY &=& X^ op Y
C_YX &=& Y^ op X
C_YY &=& Y^ op Y
while exploiting zero or constant columns in the data matrix.
Parameters
----------
X : ndarray (T, M)
Data matrix
Y : ndarray (T, N)
Second data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
column_selection: ndarray(k, dtype=int) or None
Indices of those columns that are to be computed. If None, all columns are computed.
diag_only: bool
If True, the computation is restricted to the diagonal entries (autocorrelations) only.
Returns
-------
w : float
statistical weight of this estimation
s : [ndarray (M), ndarray (M)]
list of two elements with s[0]=sx and s[1]=sy
C : [[ndarray(M,M), ndarray(M,N)], [ndarray(N,M),ndarray(N,N)]]
list of two lists with two elements.
C[0,0] = Cxx, C[0,1] = Cxy, C[1,0] = Cyx, C[1,1] = Cyy
"""
# diag_only is only implemented for dense mode
if diag_only and sparse_mode is not 'dense':
if sparse_mode is 'sparse':
import warnings
warnings.warn('Computing diagonal entries only is not implemented for sparse mode. Switching to dense mode.') # depends on [control=['if'], data=[]]
sparse_mode = 'dense' # depends on [control=['if'], data=[]]
# sparsify
(X0, mask_X, xconst) = _sparsify(X, sparse_mode=sparse_mode, sparse_tol=sparse_tol)
(Y0, mask_Y, yconst) = _sparsify(Y, sparse_mode=sparse_mode, sparse_tol=sparse_tol)
is_sparse = mask_X is not None and mask_Y is not None
# copy / convert
copy = is_sparse or (remove_mean and (not modify_data))
(X0, xconst) = _copy_convert(X0, const=xconst, copy=copy)
(Y0, yconst) = _copy_convert(Y0, const=yconst, copy=copy)
# sum / center
(w, sx, sx_centered, sy, sy_centered) = _sum(X0, xmask=mask_X, xconst=xconst, Y=Y0, ymask=mask_Y, yconst=yconst, symmetric=False, remove_mean=remove_mean)
if remove_mean:
_center(X0, w, sx, mask=mask_X, const=xconst, inplace=True) # fast in-place centering
_center(Y0, w, sy, mask=mask_Y, const=yconst, inplace=True) # fast in-place centering # depends on [control=['if'], data=[]]
if column_selection is not None:
if is_sparse:
Xk = X[:, column_selection]
mask_Xk = mask_X[column_selection] if mask_X is not None else mask_X
X0k = Xk[:, mask_Xk]
xksum = sx_centered[column_selection]
xkconst = Xk[0, ~mask_Xk]
(X0k, xkconst) = _copy_convert(X0k, const=xkconst, remove_mean=remove_mean, copy=True)
Yk = Y[:, column_selection]
mask_Yk = mask_Y[column_selection] if mask_Y is not None else mask_Y
Y0k = Yk[:, mask_Yk]
yksum = sy_centered[column_selection]
ykconst = Yk[0, ~mask_Yk]
(Y0k, ykconst) = _copy_convert(Y0k, const=ykconst, remove_mean=remove_mean, copy=True)
Cxx = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_Xk, xsum=sx_centered, xconst=xconst, ysum=xksum, yconst=xkconst)
Cxy = _M2(X0, Y0k, mask_X=mask_X, mask_Y=mask_Yk, xsum=sx_centered, xconst=xconst, ysum=yksum, yconst=ykconst)
Cyx = _M2(Y0, X0k, mask_X=mask_Y, mask_Y=mask_Xk, xsum=sy_centered, xconst=yconst, ysum=xksum, yconst=xkconst)
Cyy = _M2(Y0, Y0k, mask_X=mask_Y, mask_Y=mask_Yk, xsum=sy_centered, xconst=yconst, ysum=yksum, yconst=ykconst) # depends on [control=['if'], data=[]]
else:
X0k = X0[:, column_selection]
Y0k = Y0[:, column_selection]
Cxx = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_X, xsum=sx_centered, xconst=xconst, ysum=sx_centered[column_selection], yconst=xconst)
Cxy = _M2(X0, Y0k, mask_X=mask_X, mask_Y=mask_Y, xsum=sx_centered, xconst=xconst, ysum=sy_centered[column_selection], yconst=yconst)
Cyx = _M2(Y0, X0k, mask_X=mask_Y, mask_Y=mask_X, xsum=sy_centered, xconst=yconst, ysum=sx_centered[column_selection], yconst=xconst)
Cyy = _M2(Y0, Y0k, mask_X=mask_Y, mask_Y=mask_Y, xsum=sy_centered, xconst=yconst, ysum=sy_centered[column_selection], yconst=yconst) # depends on [control=['if'], data=['column_selection']]
else:
Cxx = _M2(X0, X0, mask_X=mask_X, mask_Y=mask_X, xsum=sx_centered, xconst=xconst, ysum=sx_centered, yconst=xconst, diag_only=diag_only)
Cxy = _M2(X0, Y0, mask_X=mask_X, mask_Y=mask_Y, xsum=sx_centered, xconst=xconst, ysum=sy_centered, yconst=yconst, diag_only=diag_only)
Cyx = Cxy.T
Cyy = _M2(Y0, Y0, mask_X=mask_Y, mask_Y=mask_Y, xsum=sy_centered, xconst=yconst, ysum=sy_centered, yconst=yconst, diag_only=diag_only)
return (w, (sx, sy), ((Cxx, Cxy), (Cyx, Cyy))) |
def _set_client_interface(self, v, load=False):
    """
    Setter method for client_interface, mapped from YANG variable /cluster/client/client_interface (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_client_interface is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_client_interface() directly.
    """
    # NOTE(review): this setter appears to be auto-generated YANG binding
    # code (YANGDynClass / tailf extensions); prefer regenerating from the
    # YANG model over hand-editing — confirm before changing by hand.
    # Unwrap values that carry their own type-conversion helper.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Validate/coerce v into the generated container class; a type
      # mismatch raises TypeError/ValueError, converted below into a
      # structured ValueError describing the expected YANG type.
      t = YANGDynClass(v,base=client_interface.client_interface, is_container='container', presence=False, yang_name="client-interface", rest_name="client-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Interface', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-suppress-show-conf-path': None, u'cli-suppress-show-match': None}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """client_interface must be of a type compatible with container""",
        'defined-type': "container",
        'generated-type': """YANGDynClass(base=client_interface.client_interface, is_container='container', presence=False, yang_name="client-interface", rest_name="client-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Interface', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-suppress-show-conf-path': None, u'cli-suppress-show-match': None}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True)""",
      })
    # Store the validated value on the name-mangled private attribute.
    self.__client_interface = t
    # Propagate the change via _set() when the class provides that hook.
    if hasattr(self, '_set'):
      self._set() | def function[_set_client_interface, parameter[self, v, load]]:
constant[
Setter method for client_interface, mapped from YANG variable /cluster/client/client_interface (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_client_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_client_interface() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da207f9a800>
name[self].__client_interface assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_client_interface] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[client_interface] . identifier[client_interface] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__client_interface] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_client_interface(self, v, load=False):
"""
Setter method for client_interface, mapped from YANG variable /cluster/client/client_interface (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_client_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_client_interface() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=client_interface.client_interface, is_container='container', presence=False, yang_name='client-interface', rest_name='client-interface', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Interface', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-suppress-show-conf-path': None, u'cli-suppress-show-match': None}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'client_interface must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=client_interface.client_interface, is_container=\'container\', presence=False, yang_name="client-interface", rest_name="client-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Client Interface\', u\'cli-sequence-commands\': None, u\'cli-incomplete-command\': None, u\'cli-suppress-show-conf-path\': None, u\'cli-suppress-show-match\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-mct\', defining_module=\'brocade-mct\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__client_interface = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def render_robots_meta_tag(context):
    """
    Return the ``<meta name="robots">`` tag for the current request.

    For hosts listed in ``settings.META_TAGGER_ROBOTS_DOMAIN_WHITELIST`` the
    values are resolved in order from:
      1. the context ``object`` (e.g. DetailViews) via ``get_robots_*()``,
      2. the ``meta_tagger`` context dict,
      3. the CMS page extension, defaulting to ``True`` (index / follow).

    Hosts not on the whitelist (e.g. staging domains) are always rendered
    as ``noindex, nofollow``.
    """
    request = context['request']
    robots_indexing = None
    robots_following = None
    # Prevent indexing any unwanted domains (e.g. staging).
    # Use the already-extracted `request` (the original inconsistently read
    # `context.request` here).
    if request.get_host() in settings.META_TAGGER_ROBOTS_DOMAIN_WHITELIST:
        # Try to get the values from the context object (e.g. DetailViews).
        if context.get('object'):
            try:
                robots_indexing = context['object'].get_robots_indexing()
                robots_following = context['object'].get_robots_following()
            except AttributeError:
                pass
        elif context.get('meta_tagger'):
            robots_indexing = context['meta_tagger'].get('robots_indexing', robots_indexing)
            robots_following = context['meta_tagger'].get('robots_following', robots_following)
        # Fall back to the robots values of the cms page. This fallback is
        # deliberately inside the whitelist branch: the original ran it for
        # every host, which made non-whitelisted domains default to
        # "index, follow" and defeated the whitelist.
        if robots_indexing is None:
            try:
                robots_indexing = request.current_page.metatagpageextension.robots_indexing
            except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist):
                robots_indexing = True
        if robots_following is None:
            try:
                robots_following = request.current_page.metatagpageextension.robots_following
            except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist):
                robots_following = True
    # Non-whitelisted hosts keep None (falsy) -> "noindex, nofollow".
    return mark_safe('<meta name="robots" content="{robots_indexing}, {robots_following}">'.format(
        robots_indexing='index' if robots_indexing else 'noindex',
        robots_following='follow' if robots_following else 'nofollow'
    ))
constant[
Returns the robots meta tag.
]
variable[request] assign[=] call[name[context]][constant[request]]
variable[robots_indexing] assign[=] constant[None]
variable[robots_following] assign[=] constant[None]
if compare[call[name[context].request.get_host, parameter[]] in name[settings].META_TAGGER_ROBOTS_DOMAIN_WHITELIST] begin[:]
if call[name[context].get, parameter[constant[object]]] begin[:]
<ast.Try object at 0x7da18fe90eb0>
if compare[name[robots_indexing] is constant[None]] begin[:]
<ast.Try object at 0x7da18fe93dc0>
if compare[name[robots_following] is constant[None]] begin[:]
<ast.Try object at 0x7da18fe93700>
return[call[name[mark_safe], parameter[call[constant[<meta name="robots" content="{robots_indexing}, {robots_following}">].format, parameter[]]]]] | keyword[def] identifier[render_robots_meta_tag] ( identifier[context] ):
literal[string]
identifier[request] = identifier[context] [ literal[string] ]
identifier[robots_indexing] = keyword[None]
identifier[robots_following] = keyword[None]
keyword[if] identifier[context] . identifier[request] . identifier[get_host] () keyword[in] identifier[settings] . identifier[META_TAGGER_ROBOTS_DOMAIN_WHITELIST] :
keyword[if] identifier[context] . identifier[get] ( literal[string] ):
keyword[try] :
identifier[robots_indexing] = identifier[context] [ literal[string] ]. identifier[get_robots_indexing] ()
identifier[robots_following] = identifier[context] [ literal[string] ]. identifier[get_robots_following] ()
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[elif] identifier[context] . identifier[get] ( literal[string] ):
identifier[robots_indexing] = identifier[context] [ literal[string] ]. identifier[get] ( literal[string] , identifier[robots_indexing] )
identifier[robots_following] = identifier[context] [ literal[string] ]. identifier[get] ( literal[string] , identifier[robots_following] )
keyword[if] identifier[robots_indexing] keyword[is] keyword[None] :
keyword[try] :
identifier[robots_indexing] = identifier[request] . identifier[current_page] . identifier[metatagpageextension] . identifier[robots_indexing]
keyword[except] ( identifier[AttributeError] , identifier[NoReverseMatch] , identifier[MetaTagPageExtension] . identifier[DoesNotExist] ):
identifier[robots_indexing] = keyword[True]
keyword[if] identifier[robots_following] keyword[is] keyword[None] :
keyword[try] :
identifier[robots_following] = identifier[request] . identifier[current_page] . identifier[metatagpageextension] . identifier[robots_following]
keyword[except] ( identifier[AttributeError] , identifier[NoReverseMatch] , identifier[MetaTagPageExtension] . identifier[DoesNotExist] ):
identifier[robots_following] = keyword[True]
keyword[return] identifier[mark_safe] ( literal[string] . identifier[format] (
identifier[robots_indexing] = literal[string] keyword[if] identifier[robots_indexing] keyword[else] literal[string] ,
identifier[robots_following] = literal[string] keyword[if] identifier[robots_following] keyword[else] literal[string]
)) | def render_robots_meta_tag(context):
"""
Returns the robots meta tag.
"""
request = context['request']
robots_indexing = None
robots_following = None
# Prevent indexing any unwanted domains (e.g. staging).
if context.request.get_host() in settings.META_TAGGER_ROBOTS_DOMAIN_WHITELIST:
# Try to get the title from the context object (e.g. DetailViews).
if context.get('object'):
try:
robots_indexing = context['object'].get_robots_indexing()
robots_following = context['object'].get_robots_following() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif context.get('meta_tagger'):
robots_indexing = context['meta_tagger'].get('robots_indexing', robots_indexing)
robots_following = context['meta_tagger'].get('robots_following', robots_following) # depends on [control=['if'], data=[]]
# Try fetching the robots values of the cms page.
if robots_indexing is None:
try:
robots_indexing = request.current_page.metatagpageextension.robots_indexing # depends on [control=['try'], data=[]]
except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist):
robots_indexing = True # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['robots_indexing']]
if robots_following is None:
try:
robots_following = request.current_page.metatagpageextension.robots_following # depends on [control=['try'], data=[]]
except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist):
robots_following = True # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['robots_following']] # depends on [control=['if'], data=[]]
return mark_safe('<meta name="robots" content="{robots_indexing}, {robots_following}">'.format(robots_indexing='index' if robots_indexing else 'noindex', robots_following='follow' if robots_following else 'nofollow')) |
def nltk_download(name, ignore_errors=True):
    r"""Like nltk.download, but be quiet about it, and get a room (separate python process)
    Does some simple whitespace normalization on `name`, but doesn't yet do fuzzy matching
    Caches the normalized names of packages already attempted, so they aren't re-tried
    >>> nltk_download('nonexistent dataset name', ignore_errors=True)
    False
    >>> nltk_download('WordNet', ignore_errors=True)
    True
    >>> nltk_download('wordnet', ignore_errors=True)
    True
    """
    # Normalize whitespace/punctuation so 'WordNet' and 'word net' cache alike.
    name = re.sub(r"[-\s=+']+", '_', name.lower())
    # Lazily create the attempt cache in case no external init ran yet.
    if not hasattr(nltk_download, 'done'):
        nltk_download.done = {}
    if name in nltk_download.done:
        return nltk_download.done[name]
    # universal_newlines=True makes communicate() return str rather than
    # bytes on Python 3, so the str regex below doesn't raise TypeError.
    # NOTE(review): stderr is not captured; errors printed there are
    # invisible to the check below — confirm whether that is intended.
    proc = subprocess.Popen(["python", "-c", "import nltk; nltk.download('{}')".format(name)],
                            stdout=subprocess.PIPE, universal_newlines=True)
    msgs = [s for s in proc.communicate() if s is not None]
    # re.search + MULTILINE finds an error line anywhere in the output;
    # the original anchored re.match only ever saw the very first line.
    if any(re.search(r'^\[nltk_data\]\s+Error', msg, flags=re.IGNORECASE | re.MULTILINE) for msg in msgs):
        nltk_download.done[name] = False
        if ignore_errors:
            return nltk_download.done[name]
        raise ValueError('Unable to download the requested NLTK dataset: {}'.format('\n'.join(msgs)))
    nltk_download.done[name] = True
    return nltk_download.done[name]
constant[Like nltk.download, but be quiet about it, and get a room (separate python process)
Does some simple whitespace normalization on `name`, but doesn't yet do fuzzy matching
Caches the normalized names of packages already attempted, so they aren't re-tried
>>> nltk_download('nonexistent dataset name', ignore_errors=True)
False
>>> nltk_download('WordNet', ignore_errors=True)
True
>>> nltk_download('wordnet', ignore_errors=True)
True
]
variable[name] assign[=] call[name[re].sub, parameter[constant[[-\s=+']+], constant[_], call[name[name].lower, parameter[]]]]
if compare[name[name] in name[nltk_download].done] begin[:]
return[call[name[nltk_download].done][name[name]]]
variable[proc] assign[=] call[name[subprocess].Popen, parameter[list[[<ast.Constant object at 0x7da2054a69b0>, <ast.Constant object at 0x7da2054a4d60>, <ast.Call object at 0x7da2054a54e0>]]]]
variable[msgs] assign[=] <ast.ListComp object at 0x7da2054a4ac0>
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da2054a5f00>]] begin[:]
call[name[nltk_download].done][name[name]] assign[=] constant[False]
if name[ignore_errors] begin[:]
return[call[name[nltk_download].done][name[name]]]
<ast.Raise object at 0x7da2054a5e10>
call[name[nltk_download].done][name[name]] assign[=] constant[True]
return[call[name[nltk_download].done][name[name]]] | keyword[def] identifier[nltk_download] ( identifier[name] , identifier[ignore_errors] = keyword[True] ):
literal[string]
identifier[name] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[name] . identifier[lower] ())
keyword[if] identifier[name] keyword[in] identifier[nltk_download] . identifier[done] :
keyword[return] identifier[nltk_download] . identifier[done] [ identifier[name] ]
identifier[proc] = identifier[subprocess] . identifier[Popen] ([ literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[name] )], identifier[stdout] = identifier[subprocess] . identifier[PIPE] )
identifier[msgs] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[proc] . identifier[communicate] () keyword[if] identifier[s] keyword[is] keyword[not] keyword[None] ]
keyword[if] identifier[any] ( identifier[re] . identifier[match] ( literal[string] , identifier[msg] , identifier[flags] = identifier[re] . identifier[IGNORECASE] ) keyword[for] identifier[msg] keyword[in] identifier[msgs] ):
identifier[nltk_download] . identifier[done] [ identifier[name] ]= keyword[False]
keyword[if] identifier[ignore_errors] :
keyword[return] identifier[nltk_download] . identifier[done] [ identifier[name] ]
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[msgs] )))
identifier[nltk_download] . identifier[done] [ identifier[name] ]= keyword[True]
keyword[return] identifier[nltk_download] . identifier[done] [ identifier[name] ] | def nltk_download(name, ignore_errors=True):
"""Like nltk.download, but be quiet about it, and get a room (separate python process)
Does some simple whitespace normalization on `name`, but doesn't yet do fuzzy matching
Caches the normalized names of packages already attempted, so they aren't re-tried
>>> nltk_download('nonexistent dataset name', ignore_errors=True)
False
>>> nltk_download('WordNet', ignore_errors=True)
True
>>> nltk_download('wordnet', ignore_errors=True)
True
"""
name = re.sub("[-\\s=+']+", '_', name.lower())
if name in nltk_download.done:
return nltk_download.done[name] # depends on [control=['if'], data=['name']]
proc = subprocess.Popen(['python', '-c', "import nltk; nltk.download('{}')".format(name)], stdout=subprocess.PIPE)
msgs = [s for s in proc.communicate() if s is not None]
if any((re.match('^\\[nltk_data\\]\\s+Error', msg, flags=re.IGNORECASE) for msg in msgs)):
nltk_download.done[name] = False
if ignore_errors:
return nltk_download.done[name] # depends on [control=['if'], data=[]]
raise ValueError('Unable to download the requested NLTK dataset: {}'.format('\n'.join(msgs))) # depends on [control=['if'], data=[]]
nltk_download.done[name] = True
return nltk_download.done[name] |
def CreateApproval(self,
                   reason=None,
                   notified_users=None,
                   email_cc_addresses=None):
    """Create a new approval for the current user to access this hunt.

    Args:
      reason: Mandatory justification string for the approval request.
      notified_users: Non-empty list of usernames asked to grant approval.
      email_cc_addresses: Optional list of extra e-mail addresses to CC.

    Returns:
      A HuntApproval wrapping the newly created approval.

    Raises:
      ValueError: If reason or notified_users is empty.
    """
    if not reason:
      raise ValueError("reason can't be empty")
    if not notified_users:
      raise ValueError("notified_users list can't be empty.")
    approval_msg = user_pb2.ApiHuntApproval(
        reason=reason,
        notified_users=notified_users,
        email_cc_addresses=email_cc_addresses or [])
    request_args = user_pb2.ApiCreateHuntApprovalArgs(
        hunt_id=self.hunt_id, approval=approval_msg)
    response = self._context.SendRequest("CreateHuntApproval", request_args)
    return HuntApproval(
        data=response, username=self._context.username, context=self._context)
constant[Create a new approval for the current user to access this hunt.]
if <ast.UnaryOp object at 0x7da1b1cc1ae0> begin[:]
<ast.Raise object at 0x7da1b1cc3d90>
if <ast.UnaryOp object at 0x7da1b1cc2590> begin[:]
<ast.Raise object at 0x7da1b1cc3fa0>
variable[approval] assign[=] call[name[user_pb2].ApiHuntApproval, parameter[]]
variable[args] assign[=] call[name[user_pb2].ApiCreateHuntApprovalArgs, parameter[]]
variable[data] assign[=] call[name[self]._context.SendRequest, parameter[constant[CreateHuntApproval], name[args]]]
return[call[name[HuntApproval], parameter[]]] | keyword[def] identifier[CreateApproval] ( identifier[self] ,
identifier[reason] = keyword[None] ,
identifier[notified_users] = keyword[None] ,
identifier[email_cc_addresses] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[reason] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[notified_users] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[approval] = identifier[user_pb2] . identifier[ApiHuntApproval] (
identifier[reason] = identifier[reason] ,
identifier[notified_users] = identifier[notified_users] ,
identifier[email_cc_addresses] = identifier[email_cc_addresses] keyword[or] [])
identifier[args] = identifier[user_pb2] . identifier[ApiCreateHuntApprovalArgs] (
identifier[hunt_id] = identifier[self] . identifier[hunt_id] , identifier[approval] = identifier[approval] )
identifier[data] = identifier[self] . identifier[_context] . identifier[SendRequest] ( literal[string] , identifier[args] )
keyword[return] identifier[HuntApproval] (
identifier[data] = identifier[data] , identifier[username] = identifier[self] . identifier[_context] . identifier[username] , identifier[context] = identifier[self] . identifier[_context] ) | def CreateApproval(self, reason=None, notified_users=None, email_cc_addresses=None):
"""Create a new approval for the current user to access this hunt."""
if not reason:
raise ValueError("reason can't be empty") # depends on [control=['if'], data=[]]
if not notified_users:
raise ValueError("notified_users list can't be empty.") # depends on [control=['if'], data=[]]
approval = user_pb2.ApiHuntApproval(reason=reason, notified_users=notified_users, email_cc_addresses=email_cc_addresses or [])
args = user_pb2.ApiCreateHuntApprovalArgs(hunt_id=self.hunt_id, approval=approval)
data = self._context.SendRequest('CreateHuntApproval', args)
return HuntApproval(data=data, username=self._context.username, context=self._context) |
def calculate_similarity(container1=None,
                         container2=None,
                         comparison=None,
                         metric=None):
    '''Calculate the similarity of two containers by file content.

    The default metric is the information coefficient:
    2.0 * len(intersect) / (total package1 + total package2)

    Parameters
    ==========
    container1: the first container
    container2: the second container; must be defined when no comparison
        result is supplied
    metric: a function taking total1, total2, and intersect counts;
        defaults to information_coefficient
    comparison: a precomputed comparison result object for the tree. If
        provided, the comparison step is skipped.
    '''
    if metric is None:
        metric = information_coefficient
    # Only run the (potentially expensive) container comparison when the
    # caller did not supply a precomputed result. `is None` (not `== None`)
    # is the correct identity test here.
    if comparison is None:
        comparison = compare_containers(container1=container1,
                                        container2=container2)
    return metric(total1=comparison['total1'],
                  total2=comparison['total2'],
                  intersect=comparison['intersect'])
constant[calculate_similarity will calculate similarity of two containers
by files content, default will calculate
2.0*len(intersect) / total package1 + total package2
Parameters
==========
container1: container 1
container2: container 2 must be defined or
metric a function to take a total1, total2, and intersect count
(we can make this more general if / when more are added)
valid are currently files.txt or folders.txt
comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it.
]
if compare[name[metric] is constant[None]] begin[:]
variable[metric] assign[=] name[information_coefficient]
if compare[name[comparison] equal[==] constant[None]] begin[:]
variable[comparison] assign[=] call[name[compare_containers], parameter[]]
return[call[name[metric], parameter[]]] | keyword[def] identifier[calculate_similarity] ( identifier[container1] = keyword[None] ,
identifier[container2] = keyword[None] ,
identifier[comparison] = keyword[None] ,
identifier[metric] = keyword[None] ):
literal[string]
keyword[if] identifier[metric] keyword[is] keyword[None] :
identifier[metric] = identifier[information_coefficient]
keyword[if] identifier[comparison] == keyword[None] :
identifier[comparison] = identifier[compare_containers] ( identifier[container1] = identifier[container1] ,
identifier[container2] = identifier[container2] )
keyword[return] identifier[metric] ( identifier[total1] = identifier[comparison] [ literal[string] ],
identifier[total2] = identifier[comparison] [ literal[string] ],
identifier[intersect] = identifier[comparison] [ literal[string] ]) | def calculate_similarity(container1=None, container2=None, comparison=None, metric=None):
"""calculate_similarity will calculate similarity of two containers
by files content, default will calculate
2.0*len(intersect) / total package1 + total package2
Parameters
==========
container1: container 1
container2: container 2 must be defined or
metric a function to take a total1, total2, and intersect count
(we can make this more general if / when more are added)
valid are currently files.txt or folders.txt
comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it.
"""
if metric is None:
metric = information_coefficient # depends on [control=['if'], data=['metric']]
if comparison == None:
comparison = compare_containers(container1=container1, container2=container2) # depends on [control=['if'], data=['comparison']]
return metric(total1=comparison['total1'], total2=comparison['total2'], intersect=comparison['intersect']) |
def rs_find_error_evaluator(synd, err_loc, nsym):
    '''Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma.'''
    # Omega(x) = [ Synd(x) * Error_loc(x) ] mod x^(nsym+1)
    product = gf_poly_mul(synd, err_loc)
    # Dividing by x^(nsym+1) keeps only the remainder, i.e. truncates the
    # product polynomial to the required length.
    modulus = [1] + [0] * (nsym + 1)
    _quotient, omega = gf_poly_div(product, modulus)
    return omega
constant[Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma.]
<ast.Tuple object at 0x7da18f00dc90> assign[=] call[name[gf_poly_div], parameter[call[name[gf_poly_mul], parameter[name[synd], name[err_loc]]], binary_operation[list[[<ast.Constant object at 0x7da18f00f9d0>]] + binary_operation[list[[<ast.Constant object at 0x7da18f00f430>]] * binary_operation[name[nsym] + constant[1]]]]]]
return[name[remainder]] | keyword[def] identifier[rs_find_error_evaluator] ( identifier[synd] , identifier[err_loc] , identifier[nsym] ):
literal[string]
identifier[_] , identifier[remainder] = identifier[gf_poly_div] ( identifier[gf_poly_mul] ( identifier[synd] , identifier[err_loc] ),([ literal[int] ]+[ literal[int] ]*( identifier[nsym] + literal[int] )))
keyword[return] identifier[remainder] | def rs_find_error_evaluator(synd, err_loc, nsym):
"""Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma."""
# Omega(x) = [ Synd(x) * Error_loc(x) ] mod x^(n-k+1)
(_, remainder) = gf_poly_div(gf_poly_mul(synd, err_loc), [1] + [0] * (nsym + 1)) # first multiply syndromes * errata_locator, then do a polynomial division to truncate the polynomial to the required length
# Faster way that is equivalent
#remainder = gf_poly_mul(synd, err_loc) # first multiply the syndromes with the errata locator polynomial
#remainder = remainder[len(remainder)-(nsym+1):] # then divide by a polynomial of the length we want, which is equivalent to slicing the list (which represents the polynomial)
return remainder |
def scatter(self, method, args=None, nowait=False, timeout=None, **kwargs):
    """Broadcast method to all agents.

    If ``nowait`` is False, returns a generator to iterate over the results.

    :param method: name of the remote method to invoke.
    :param args: payload dict forwarded to the method (an empty dict
        when not given).
    :keyword limit: Limit number of reads from the queue.
        Unlimited by default.
    :keyword timeout: the timeout (in float seconds) waiting for replies.
        Default is :attr:`default_timeout`.

    **Examples**

    ``scatter`` is a generator (if nowait is False)::

        >>> res = scatter()
        >>> res.next() # one event consumed, or timed out.

        >>> res = scatter(limit=2):
        >>> for i in res: # two events consumed or timeout
        >>>     pass

    See :meth:`call_or_cast` for a full list of supported
    arguments.
    """
    # Fix: ``args`` previously defaulted to a shared mutable ``{}``;
    # default to None and normalize here so calls can never leak state
    # through the default object.
    if args is None:
        args = {}
    timeout = timeout if timeout is not None else self.default_timeout
    r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
                          nowait=nowait, timeout=timeout, **kwargs)
    # Fire-and-forget when nowait is True (implicitly returns None).
    if not nowait:
        return r.gather(timeout=timeout, **kwargs)
constant[Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
]
variable[timeout] assign[=] <ast.IfExp object at 0x7da2054a6cb0>
variable[r] assign[=] call[name[self].call_or_cast, parameter[name[method], name[args]]]
if <ast.UnaryOp object at 0x7da2054a6aa0> begin[:]
return[call[name[r].gather, parameter[]]] | keyword[def] identifier[scatter] ( identifier[self] , identifier[method] , identifier[args] ={}, identifier[nowait] = keyword[False] , identifier[timeout] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[timeout] = identifier[timeout] keyword[if] identifier[timeout] keyword[is] keyword[not] keyword[None] keyword[else] identifier[self] . identifier[default_timeout]
identifier[r] = identifier[self] . identifier[call_or_cast] ( identifier[method] , identifier[args] , identifier[type] = identifier[ACTOR_TYPE] . identifier[SCATTER] ,
identifier[nowait] = identifier[nowait] , identifier[timeout] = identifier[timeout] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[nowait] :
keyword[return] identifier[r] . identifier[gather] ( identifier[timeout] = identifier[timeout] ,** identifier[kwargs] ) | def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER, nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs) # depends on [control=['if'], data=[]] |
def subscribe(self, method, *params):
    """Start a remote subscription that streams events/data back to us.

    ``method`` must be a dotted name ending in ``subscribe`` (for example
    ``server.peers.subscribe``); ``params`` are the positional arguments
    for the remote call, when it takes any.

    Returns a tuple ``(Future, asyncio.Queue)``: the future resolves with
    the initial call's result, and the queue receives each additional
    response as it happens.
    """
    # Guard against obvious misuse: subscription methods are dotted
    # names that end with 'subscribe'.
    assert '.' in method
    assert method.endswith('subscribe')
    return self._send_request(method, params, is_subscribe=True)
constant[
Perform a remote command which will stream events/data to us.
Expects a method name, which look like:
server.peers.subscribe
.. and sometimes take arguments, all of which are positional.
Returns a tuple: (Future, asyncio.Queue).
The future will have the result of the initial
call, and the queue will receive additional
responses as they happen.
]
assert[compare[constant[.] in name[method]]]
assert[call[name[method].endswith, parameter[constant[subscribe]]]]
return[call[name[self]._send_request, parameter[name[method], name[params]]]] | keyword[def] identifier[subscribe] ( identifier[self] , identifier[method] ,* identifier[params] ):
literal[string]
keyword[assert] literal[string] keyword[in] identifier[method]
keyword[assert] identifier[method] . identifier[endswith] ( literal[string] )
keyword[return] identifier[self] . identifier[_send_request] ( identifier[method] , identifier[params] , identifier[is_subscribe] = keyword[True] ) | def subscribe(self, method, *params):
"""
Perform a remote command which will stream events/data to us.
Expects a method name, which look like:
server.peers.subscribe
.. and sometimes take arguments, all of which are positional.
Returns a tuple: (Future, asyncio.Queue).
The future will have the result of the initial
call, and the queue will receive additional
responses as they happen.
"""
assert '.' in method
assert method.endswith('subscribe')
return self._send_request(method, params, is_subscribe=True) |
def find_module(self, fullname, path=None):
    """
    Return self when fullname starts with root_name and the
    target module is one vendored through this importer.

    :param fullname: dotted name of the module being imported.
    :param path: ignored; present to satisfy the finder protocol.
    :return: ``self`` (acting as the loader) when this importer should
        handle the import, otherwise ``None`` so other finders may try.
    """
    # partition() yields ('', sep, target) only when fullname begins
    # with "<root_name>." -- any non-empty head means it is not ours.
    # The unused middle element is discarded (was a dead local).
    root, _, target = fullname.partition(self.root_name + '.')
    if root:
        return None
    # Only claim modules explicitly vendored through this importer.
    if not any(map(target.startswith, self.vendored_names)):
        return None
    return self
constant[
Return self when fullname starts with root_name and the
target module is one vendored through this importer.
]
<ast.Tuple object at 0x7da1b1baac50> assign[=] call[name[fullname].partition, parameter[binary_operation[name[self].root_name + constant[.]]]]
if name[root] begin[:]
return[None]
if <ast.UnaryOp object at 0x7da1b1bab910> begin[:]
return[None]
return[name[self]] | keyword[def] identifier[find_module] ( identifier[self] , identifier[fullname] , identifier[path] = keyword[None] ):
literal[string]
identifier[root] , identifier[base] , identifier[target] = identifier[fullname] . identifier[partition] ( identifier[self] . identifier[root_name] + literal[string] )
keyword[if] identifier[root] :
keyword[return]
keyword[if] keyword[not] identifier[any] ( identifier[map] ( identifier[target] . identifier[startswith] , identifier[self] . identifier[vendored_names] )):
keyword[return]
keyword[return] identifier[self] | def find_module(self, fullname, path=None):
"""
Return self when fullname starts with root_name and the
target module is one vendored through this importer.
"""
(root, base, target) = fullname.partition(self.root_name + '.')
if root:
return # depends on [control=['if'], data=[]]
if not any(map(target.startswith, self.vendored_names)):
return # depends on [control=['if'], data=[]]
return self |
def formfield(self, **kwargs):
    """Return the form field for this model field, rendered hidden.

    The slug is populated automatically, so the field is exposed as a
    non-required ``CharField`` whose widget is a ``HiddenInput``.
    """
    # Caller-supplied kwargs override the defaults (later keys win).
    options = {'form_class': forms.CharField, 'required': False, **kwargs}
    field = super().formfield(**options)
    # Auto-populated slug: hide it so users never edit it directly.
    field.widget = forms.HiddenInput()
    return field
constant[Gets the form field associated with this field.
Because this is a slug field which is automatically
populated, it should be hidden from the form.
]
variable[defaults] assign[=] dictionary[[<ast.Constant object at 0x7da2054a68f0>, <ast.Constant object at 0x7da2054a7520>], [<ast.Attribute object at 0x7da2054a7df0>, <ast.Constant object at 0x7da2054a4520>]]
call[name[defaults].update, parameter[name[kwargs]]]
variable[form_field] assign[=] call[call[name[super], parameter[]].formfield, parameter[]]
name[form_field].widget assign[=] call[name[forms].HiddenInput, parameter[]]
return[name[form_field]] | keyword[def] identifier[formfield] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[defaults] ={
literal[string] : identifier[forms] . identifier[CharField] ,
literal[string] : keyword[False]
}
identifier[defaults] . identifier[update] ( identifier[kwargs] )
identifier[form_field] = identifier[super] (). identifier[formfield] (** identifier[defaults] )
identifier[form_field] . identifier[widget] = identifier[forms] . identifier[HiddenInput] ()
keyword[return] identifier[form_field] | def formfield(self, **kwargs):
"""Gets the form field associated with this field.
Because this is a slug field which is automatically
populated, it should be hidden from the form.
"""
defaults = {'form_class': forms.CharField, 'required': False}
defaults.update(kwargs)
form_field = super().formfield(**defaults)
form_field.widget = forms.HiddenInput()
return form_field |
def endpoints(self):
    """Return the Endpoints API client, creating it on first use.

    Returns:
        Endpoints: lazily constructed client, cached for reuse.
    """
    # Reuse the cached client when one has already been built.
    if self.__endpoints:
        return self.__endpoints
    self.__endpoints = Endpoints(self.__connection)
    return self.__endpoints
constant[
Gets the Endpoints API client.
Returns:
Endpoints:
]
if <ast.UnaryOp object at 0x7da20c76f670> begin[:]
name[self].__endpoints assign[=] call[name[Endpoints], parameter[name[self].__connection]]
return[name[self].__endpoints] | keyword[def] identifier[endpoints] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__endpoints] :
identifier[self] . identifier[__endpoints] = identifier[Endpoints] ( identifier[self] . identifier[__connection] )
keyword[return] identifier[self] . identifier[__endpoints] | def endpoints(self):
"""
Gets the Endpoints API client.
Returns:
Endpoints:
"""
if not self.__endpoints:
self.__endpoints = Endpoints(self.__connection) # depends on [control=['if'], data=[]]
return self.__endpoints |
def is_child_of_log(self, id_, log_id):
    """Return ``true`` if ``id_`` is a direct child of the log ``log_id``.

    arg: id (osid.id.Id): an ``Id``
    arg: log_id (osid.id.Id): the ``Id`` of a log
    return: (boolean) - ``true`` if this ``id`` is a child of
            ``log_id,`` ``false`` otherwise
    raise: NotFound - ``log_id`` is not found
    raise: NullArgument - ``id`` or ``log_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.
    """
    # Prefer the catalog session when available; otherwise fall back
    # to the raw hierarchy session (note the argument roles swap:
    # the hierarchy call takes the parent as id_ and the child as child_id).
    if self._catalog_session is None:
        return self._hierarchy_session.is_child(id_=log_id, child_id=id_)
    return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=log_id)
constant[Tests if an ``Id`` is a direct child of a log.
arg: id (osid.id.Id): an ``Id``
arg: log_id (osid.id.Id): the ``Id`` of a log
return: (boolean) - ``true`` if this ``id`` is a child of
``log_id,`` ``false`` otherwise
raise: NotFound - ``log_id`` is not found
raise: NullArgument - ``id`` or ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.is_child_of_catalog, parameter[]]]
return[call[name[self]._hierarchy_session.is_child, parameter[]]] | keyword[def] identifier[is_child_of_log] ( identifier[self] , identifier[id_] , identifier[log_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[is_child_of_catalog] ( identifier[id_] = identifier[id_] , identifier[catalog_id] = identifier[log_id] )
keyword[return] identifier[self] . identifier[_hierarchy_session] . identifier[is_child] ( identifier[id_] = identifier[log_id] , identifier[child_id] = identifier[id_] ) | def is_child_of_log(self, id_, log_id):
"""Tests if an ``Id`` is a direct child of a log.
arg: id (osid.id.Id): an ``Id``
arg: log_id (osid.id.Id): the ``Id`` of a log
return: (boolean) - ``true`` if this ``id`` is a child of
``log_id,`` ``false`` otherwise
raise: NotFound - ``log_id`` is not found
raise: NullArgument - ``id`` or ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_child_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=log_id) # depends on [control=['if'], data=[]]
return self._hierarchy_session.is_child(id_=log_id, child_id=id_) |
def get_private(self):
    """ Derive the private key from the brain key and the current
    sequence number.
    """
    # Seed string is "<brainkey> <sequence>"; keep the exact format,
    # it determines the derived key.
    seed = "%s %d" % (self.brainkey, self.sequence)
    # SHA-512 of the seed, then SHA-256 of that digest, yields the
    # raw private-key bytes.
    inner = hashlib.sha512(_bytes(seed)).digest()
    raw = hashlib.sha256(inner).digest()
    return PrivateKey(hexlify(raw).decode("ascii"), prefix=self.prefix)
constant[ Derive private key from the brain key and the current sequence
number
]
variable[encoded] assign[=] binary_operation[constant[%s %d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0060280>, <ast.Attribute object at 0x7da1b00637c0>]]]
variable[a] assign[=] call[name[_bytes], parameter[name[encoded]]]
variable[s] assign[=] call[call[name[hashlib].sha256, parameter[call[call[name[hashlib].sha512, parameter[name[a]]].digest, parameter[]]]].digest, parameter[]]
return[call[name[PrivateKey], parameter[call[call[name[hexlify], parameter[name[s]]].decode, parameter[constant[ascii]]]]]] | keyword[def] identifier[get_private] ( identifier[self] ):
literal[string]
identifier[encoded] = literal[string] %( identifier[self] . identifier[brainkey] , identifier[self] . identifier[sequence] )
identifier[a] = identifier[_bytes] ( identifier[encoded] )
identifier[s] = identifier[hashlib] . identifier[sha256] ( identifier[hashlib] . identifier[sha512] ( identifier[a] ). identifier[digest] ()). identifier[digest] ()
keyword[return] identifier[PrivateKey] ( identifier[hexlify] ( identifier[s] ). identifier[decode] ( literal[string] ), identifier[prefix] = identifier[self] . identifier[prefix] ) | def get_private(self):
""" Derive private key from the brain key and the current sequence
number
"""
encoded = '%s %d' % (self.brainkey, self.sequence)
a = _bytes(encoded)
s = hashlib.sha256(hashlib.sha512(a).digest()).digest()
return PrivateKey(hexlify(s).decode('ascii'), prefix=self.prefix) |
def count(args):
    """
    %prog count *.gz
    Count reads based on FASTQC results. FASTQC needs to be run on all the input
    data given before running this command.
    """
    from jcvi.utils.table import loadtable, write_csv

    p = OptionParser(count.__doc__)
    p.add_option("--dir",
                 help="Sub-directory where FASTQC was run [default: %default]")
    p.add_option("--human", default=False, action="store_true",
                 help="Human friendly numbers [default: %default]")
    p.set_table()
    p.set_outfile()
    opts, args = p.parse_args(args)

    if not args:
        sys.exit(not p.print_help())

    header = "Filename|Total Sequences|Sequence length|Total Bases".split("|")
    rows = []
    for fname in args:
        # FASTQC writes its report into <basename>_fastqc/fastqc_data.txt
        report_dir = fname.replace(".gz", "").rsplit(".", 1)[0] + "_fastqc"
        if opts.dir:
            report_dir = op.join(opts.dir, report_dir)
        summaryfile = op.join(report_dir, "fastqc_data.txt")
        fqcdata = FastQCdata(summaryfile, human=opts.human)
        rows.append([fqcdata[col] for col in header])

    # Human-readable table to stderr, machine-readable CSV to the outfile.
    print(loadtable(header, rows), file=sys.stderr)
    write_csv(header, rows, sep=opts.sep,
              filename=opts.outfile, align=opts.align)
constant[
%prog count *.gz
Count reads based on FASTQC results. FASTQC needs to be run on all the input
data given before running this command.
]
from relative_module[jcvi.utils.table] import module[loadtable], module[write_csv]
variable[p] assign[=] call[name[OptionParser], parameter[name[count].__doc__]]
call[name[p].add_option, parameter[constant[--dir]]]
call[name[p].add_option, parameter[constant[--human]]]
call[name[p].set_table, parameter[]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da1b084d870> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] less[<] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b084de70>]]
variable[filenames] assign[=] name[args]
variable[subdir] assign[=] name[opts].dir
variable[header] assign[=] call[constant[Filename|Total Sequences|Sequence length|Total Bases].split, parameter[constant[|]]]
variable[rows] assign[=] list[[]]
variable[human] assign[=] name[opts].human
for taget[name[f]] in starred[name[filenames]] begin[:]
variable[folder] assign[=] binary_operation[call[call[call[name[f].replace, parameter[constant[.gz], constant[]]].rsplit, parameter[constant[.], constant[1]]]][constant[0]] + constant[_fastqc]]
if name[subdir] begin[:]
variable[folder] assign[=] call[name[op].join, parameter[name[subdir], name[folder]]]
variable[summaryfile] assign[=] call[name[op].join, parameter[name[folder], constant[fastqc_data.txt]]]
variable[fqcdata] assign[=] call[name[FastQCdata], parameter[name[summaryfile]]]
variable[row] assign[=] <ast.ListComp object at 0x7da1b076d090>
call[name[rows].append, parameter[name[row]]]
call[name[print], parameter[call[name[loadtable], parameter[name[header], name[rows]]]]]
call[name[write_csv], parameter[name[header], name[rows]]] | keyword[def] identifier[count] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[utils] . identifier[table] keyword[import] identifier[loadtable] , identifier[write_csv]
identifier[p] = identifier[OptionParser] ( identifier[count] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[set_table] ()
identifier[p] . identifier[set_outfile] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[filenames] = identifier[args]
identifier[subdir] = identifier[opts] . identifier[dir]
identifier[header] = literal[string] . identifier[split] ( literal[string] )
identifier[rows] =[]
identifier[human] = identifier[opts] . identifier[human]
keyword[for] identifier[f] keyword[in] identifier[filenames] :
identifier[folder] = identifier[f] . identifier[replace] ( literal[string] , literal[string] ). identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]+ literal[string]
keyword[if] identifier[subdir] :
identifier[folder] = identifier[op] . identifier[join] ( identifier[subdir] , identifier[folder] )
identifier[summaryfile] = identifier[op] . identifier[join] ( identifier[folder] , literal[string] )
identifier[fqcdata] = identifier[FastQCdata] ( identifier[summaryfile] , identifier[human] = identifier[human] )
identifier[row] =[ identifier[fqcdata] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[header] ]
identifier[rows] . identifier[append] ( identifier[row] )
identifier[print] ( identifier[loadtable] ( identifier[header] , identifier[rows] ), identifier[file] = identifier[sys] . identifier[stderr] )
identifier[write_csv] ( identifier[header] , identifier[rows] , identifier[sep] = identifier[opts] . identifier[sep] ,
identifier[filename] = identifier[opts] . identifier[outfile] , identifier[align] = identifier[opts] . identifier[align] ) | def count(args):
"""
%prog count *.gz
Count reads based on FASTQC results. FASTQC needs to be run on all the input
data given before running this command.
"""
from jcvi.utils.table import loadtable, write_csv
p = OptionParser(count.__doc__)
p.add_option('--dir', help='Sub-directory where FASTQC was run [default: %default]')
p.add_option('--human', default=False, action='store_true', help='Human friendly numbers [default: %default]')
p.set_table()
p.set_outfile()
(opts, args) = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
filenames = args
subdir = opts.dir
header = 'Filename|Total Sequences|Sequence length|Total Bases'.split('|')
rows = []
human = opts.human
for f in filenames:
folder = f.replace('.gz', '').rsplit('.', 1)[0] + '_fastqc'
if subdir:
folder = op.join(subdir, folder) # depends on [control=['if'], data=[]]
summaryfile = op.join(folder, 'fastqc_data.txt')
fqcdata = FastQCdata(summaryfile, human=human)
row = [fqcdata[x] for x in header]
rows.append(row) # depends on [control=['for'], data=['f']]
print(loadtable(header, rows), file=sys.stderr)
write_csv(header, rows, sep=opts.sep, filename=opts.outfile, align=opts.align) |
def get_spellcheck_config():
    """
    Create TinyMCE spellchecker config based on Django settings
    :return: spellchecker parameters for TinyMCE
    :rtype: dict
    """
    config = {}
    if not mce_settings.USE_SPELLCHECKER:
        return config

    from enchant import list_languages
    available = list_languages()
    if settings.DEBUG:
        logger.info('Enchant languages: {0}'.format(available))

    entries = []
    for code, label in settings.LANGUAGES:
        code = convert_language_code(code)
        if code not in available:
            # Fall back to the bare two-letter language code.
            code = code[:2]
        if code not in available:
            logger.warning('Missing {0} spellchecker dictionary!'.format(code))
            continue
        # The first usable language becomes the default spellchecker language.
        config.setdefault('spellchecker_language', code)
        entries.append('{0}={1}'.format(label, code))
    config['spellchecker_languages'] = ','.join(entries)
    return config
constant[
Create TinyMCE spellchecker config based on Django settings
:return: spellchecker parameters for TinyMCE
:rtype: dict
]
variable[config] assign[=] dictionary[[], []]
if name[mce_settings].USE_SPELLCHECKER begin[:]
from relative_module[enchant] import module[list_languages]
variable[enchant_languages] assign[=] call[name[list_languages], parameter[]]
if name[settings].DEBUG begin[:]
call[name[logger].info, parameter[call[constant[Enchant languages: {0}].format, parameter[name[enchant_languages]]]]]
variable[lang_names] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b06410f0>, <ast.Name object at 0x7da1b0641180>]]] in starred[name[settings].LANGUAGES] begin[:]
variable[lang] assign[=] call[name[convert_language_code], parameter[name[lang]]]
if compare[name[lang] <ast.NotIn object at 0x7da2590d7190> name[enchant_languages]] begin[:]
variable[lang] assign[=] call[name[lang]][<ast.Slice object at 0x7da1b0643f10>]
if compare[name[lang] <ast.NotIn object at 0x7da2590d7190> name[enchant_languages]] begin[:]
call[name[logger].warning, parameter[call[constant[Missing {0} spellchecker dictionary!].format, parameter[name[lang]]]]]
continue
if compare[call[name[config].get, parameter[constant[spellchecker_language]]] is constant[None]] begin[:]
call[name[config]][constant[spellchecker_language]] assign[=] name[lang]
call[name[lang_names].append, parameter[call[constant[{0}={1}].format, parameter[name[name], name[lang]]]]]
call[name[config]][constant[spellchecker_languages]] assign[=] call[constant[,].join, parameter[name[lang_names]]]
return[name[config]] | keyword[def] identifier[get_spellcheck_config] ():
literal[string]
identifier[config] ={}
keyword[if] identifier[mce_settings] . identifier[USE_SPELLCHECKER] :
keyword[from] identifier[enchant] keyword[import] identifier[list_languages]
identifier[enchant_languages] = identifier[list_languages] ()
keyword[if] identifier[settings] . identifier[DEBUG] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[enchant_languages] ))
identifier[lang_names] =[]
keyword[for] identifier[lang] , identifier[name] keyword[in] identifier[settings] . identifier[LANGUAGES] :
identifier[lang] = identifier[convert_language_code] ( identifier[lang] )
keyword[if] identifier[lang] keyword[not] keyword[in] identifier[enchant_languages] :
identifier[lang] = identifier[lang] [: literal[int] ]
keyword[if] identifier[lang] keyword[not] keyword[in] identifier[enchant_languages] :
identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[lang] ))
keyword[continue]
keyword[if] identifier[config] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[config] [ literal[string] ]= identifier[lang]
identifier[lang_names] . identifier[append] ( literal[string] . identifier[format] ( identifier[name] , identifier[lang] ))
identifier[config] [ literal[string] ]= literal[string] . identifier[join] ( identifier[lang_names] )
keyword[return] identifier[config] | def get_spellcheck_config():
"""
Create TinyMCE spellchecker config based on Django settings
:return: spellchecker parameters for TinyMCE
:rtype: dict
"""
config = {}
if mce_settings.USE_SPELLCHECKER:
from enchant import list_languages
enchant_languages = list_languages()
if settings.DEBUG:
logger.info('Enchant languages: {0}'.format(enchant_languages)) # depends on [control=['if'], data=[]]
lang_names = []
for (lang, name) in settings.LANGUAGES:
lang = convert_language_code(lang)
if lang not in enchant_languages:
lang = lang[:2] # depends on [control=['if'], data=['lang']]
if lang not in enchant_languages:
logger.warning('Missing {0} spellchecker dictionary!'.format(lang))
continue # depends on [control=['if'], data=['lang']]
if config.get('spellchecker_language') is None:
config['spellchecker_language'] = lang # depends on [control=['if'], data=[]]
lang_names.append('{0}={1}'.format(name, lang)) # depends on [control=['for'], data=[]]
config['spellchecker_languages'] = ','.join(lang_names) # depends on [control=['if'], data=[]]
return config |
def on_close(self, stats, previous_stats):
    """Print the extended JSON report to reporter's output.

    :param dict stats: Metrics for the current pylint run
    :param dict previous_stats: Metrics for the previous pylint run
    """
    payload = {
        'messages': self._messages,
        'stats': stats,
        'previous': previous_stats,
    }
    # JSONSetEncoder handles non-JSON-native values (e.g. sets).
    print(json.dumps(payload, cls=JSONSetEncoder, indent=4), file=self.out)
constant[Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run
]
variable[reports] assign[=] dictionary[[<ast.Constant object at 0x7da18f09f280>, <ast.Constant object at 0x7da18f09c070>, <ast.Constant object at 0x7da18f09e350>], [<ast.Attribute object at 0x7da18f09cb20>, <ast.Name object at 0x7da18f09e5c0>, <ast.Name object at 0x7da18f09cb50>]]
call[name[print], parameter[call[name[json].dumps, parameter[name[reports]]]]] | keyword[def] identifier[on_close] ( identifier[self] , identifier[stats] , identifier[previous_stats] ):
literal[string]
identifier[reports] ={
literal[string] : identifier[self] . identifier[_messages] ,
literal[string] : identifier[stats] ,
literal[string] : identifier[previous_stats] ,
}
identifier[print] ( identifier[json] . identifier[dumps] ( identifier[reports] , identifier[cls] = identifier[JSONSetEncoder] , identifier[indent] = literal[int] ), identifier[file] = identifier[self] . identifier[out] ) | def on_close(self, stats, previous_stats):
"""Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run
"""
reports = {'messages': self._messages, 'stats': stats, 'previous': previous_stats}
print(json.dumps(reports, cls=JSONSetEncoder, indent=4), file=self.out) |
def central_vertices(self):
    """Return indices of vertices whose eccentricity (largest distance
    to any other vertex) is the smallest non-zero value."""
    # Eccentricity of each vertex: its maximum distance to any other.
    eccentricity = self.distances.max(0)
    # Smallest positive eccentricity; zero entries are excluded.
    best = eccentricity[eccentricity > 0].min()
    return (eccentricity == best).nonzero()[0]
constant[Vertices that have the lowest maximum distance to any other vertex]
variable[max_distances] assign[=] call[name[self].distances.max, parameter[constant[0]]]
variable[max_distances_min] assign[=] call[call[name[max_distances]][compare[name[max_distances] greater[>] constant[0]]].min, parameter[]]
return[call[call[compare[name[max_distances] equal[==] name[max_distances_min]].nonzero, parameter[]]][constant[0]]] | keyword[def] identifier[central_vertices] ( identifier[self] ):
literal[string]
identifier[max_distances] = identifier[self] . identifier[distances] . identifier[max] ( literal[int] )
identifier[max_distances_min] = identifier[max_distances] [ identifier[max_distances] > literal[int] ]. identifier[min] ()
keyword[return] ( identifier[max_distances] == identifier[max_distances_min] ). identifier[nonzero] ()[ literal[int] ] | def central_vertices(self):
"""Vertices that have the lowest maximum distance to any other vertex"""
max_distances = self.distances.max(0)
max_distances_min = max_distances[max_distances > 0].min()
return (max_distances == max_distances_min).nonzero()[0] |
def generate_dot(self, name, urls=None, env=None,
                 graph_attrs=None, node_attrs=None, edge_attrs=None):
    # type: (unicode, Dict, BuildEnvironment, Dict, Dict, Dict) -> unicode
    """Generate a graphviz dot graph from the classes that were passed in
    to __init__.

    *name* is the name of the graph.

    *urls* is a dictionary mapping class names to HTTP URLs.

    *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
    key/value pairs to pass on as graphviz properties.
    """
    # ``None`` defaults replace the original mutable ``{}`` defaults;
    # behaviour is identical, but a shared default dict can no longer
    # leak state between calls if it is ever mutated.
    urls = urls or {}
    graph_attrs = graph_attrs or {}
    node_attrs = node_attrs or {}
    edge_attrs = edge_attrs or {}

    # Start from the class-level defaults, then layer the per-call and
    # (when available) per-environment overrides on top.
    g_attrs = self.default_graph_attrs.copy()
    n_attrs = self.default_node_attrs.copy()
    e_attrs = self.default_edge_attrs.copy()
    c_attrs = self.default_cluster_attrs.copy()
    g_attrs.update(graph_attrs)
    n_attrs.update(node_attrs)
    e_attrs.update(edge_attrs)
    if env:
        g_attrs.update(env.config.inheritance_graph_attrs)
        n_attrs.update(env.config.inheritance_node_attrs)
        e_attrs.update(env.config.inheritance_edge_attrs)
        c_attrs.update(env.config.inheritance_cluster_attrs)

    res = []  # type: List[unicode]
    res.append('digraph %s {\n' % name)
    res.append(self._format_graph_attrs(g_attrs))

    subgraphs = defaultdict(list)  # subgraph_name => list of node names
    # The loop variable is ``cls_name`` rather than ``name`` so it does
    # not shadow the graph-name parameter above.
    for cls_name, fullname, bases, tooltip in sorted(self.class_info):
        subgraph_name = ".".join(fullname.split(".")[:-1])
        subgraphs[subgraph_name].append(cls_name)

        # Write the node
        this_node_attrs = n_attrs.copy()
        if fullname in urls:
            this_node_attrs['URL'] = '"%s"' % urls[fullname]
            this_node_attrs['target'] = '"_top"'
        if tooltip:
            this_node_attrs['tooltip'] = tooltip
        res.append('  "%s" [%s];\n' %
                   (cls_name, self._format_node_attrs(this_node_attrs)))

        # Write the edges (one per base class, pointing base -> subclass)
        for base_name in bases:
            res.append('    "%s" -> "%s" [%s];\n' %
                       (base_name, cls_name,
                        self._format_node_attrs(e_attrs)))

    if self.cluster_modules:
        # Group the nodes of each module into a labelled cluster.
        for subgraph_name in subgraphs:
            res.append('subgraph cluster_%s {\n'
                       % subgraph_name.replace('.', '_'))
            res.append('    label="%s";\n' % subgraph_name)
            res.append('    graph[' + self._format_node_attrs(c_attrs) +
                       "];\n")
            res.append('    ' + "; ".join(subgraphs[subgraph_name]) + "\n")
            res.append('}\n')

    res.append('}\n')
    return ''.join(res)
constant[Generate a graphviz dot graph from the classes that were passed in
to __init__.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
]
variable[g_attrs] assign[=] call[name[self].default_graph_attrs.copy, parameter[]]
variable[n_attrs] assign[=] call[name[self].default_node_attrs.copy, parameter[]]
variable[e_attrs] assign[=] call[name[self].default_edge_attrs.copy, parameter[]]
variable[c_attrs] assign[=] call[name[self].default_cluster_attrs.copy, parameter[]]
call[name[g_attrs].update, parameter[name[graph_attrs]]]
call[name[n_attrs].update, parameter[name[node_attrs]]]
call[name[e_attrs].update, parameter[name[edge_attrs]]]
if name[env] begin[:]
call[name[g_attrs].update, parameter[name[env].config.inheritance_graph_attrs]]
call[name[n_attrs].update, parameter[name[env].config.inheritance_node_attrs]]
call[name[e_attrs].update, parameter[name[env].config.inheritance_edge_attrs]]
call[name[c_attrs].update, parameter[name[env].config.inheritance_cluster_attrs]]
variable[res] assign[=] list[[]]
call[name[res].append, parameter[binary_operation[constant[digraph %s {
] <ast.Mod object at 0x7da2590d6920> name[name]]]]
call[name[res].append, parameter[call[name[self]._format_graph_attrs, parameter[name[g_attrs]]]]]
variable[subgraphs] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da20e9570d0>, <ast.Name object at 0x7da20e954c70>, <ast.Name object at 0x7da20e956380>, <ast.Name object at 0x7da20e956740>]]] in starred[call[name[sorted], parameter[name[self].class_info]]] begin[:]
variable[subgraph_name] assign[=] call[constant[.].join, parameter[call[call[name[fullname].split, parameter[constant[.]]]][<ast.Slice object at 0x7da20e9552a0>]]]
call[call[name[subgraphs]][name[subgraph_name]].append, parameter[name[name]]]
variable[this_node_attrs] assign[=] call[name[n_attrs].copy, parameter[]]
if compare[name[fullname] in name[urls]] begin[:]
call[name[this_node_attrs]][constant[URL]] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> call[name[urls]][name[fullname]]]
call[name[this_node_attrs]][constant[target]] assign[=] constant["_top"]
if name[tooltip] begin[:]
call[name[this_node_attrs]][constant[tooltip]] assign[=] name[tooltip]
call[name[res].append, parameter[binary_operation[constant[ "%s" [%s];
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204344eb0>, <ast.Call object at 0x7da204344340>]]]]]
for taget[name[base_name]] in starred[name[bases]] begin[:]
call[name[res].append, parameter[binary_operation[constant[ "%s" -> "%s" [%s];
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204345c00>, <ast.Name object at 0x7da204347520>, <ast.Call object at 0x7da204346ce0>]]]]]
if name[self].cluster_modules begin[:]
for taget[name[subgraph_name]] in starred[name[subgraphs]] begin[:]
call[name[res].append, parameter[binary_operation[constant[subgraph cluster_%s {
] <ast.Mod object at 0x7da2590d6920> call[name[subgraph_name].replace, parameter[constant[.], constant[_]]]]]]
call[name[res].append, parameter[binary_operation[constant[ label="%s";
] <ast.Mod object at 0x7da2590d6920> name[subgraph_name]]]]
call[name[res].append, parameter[binary_operation[binary_operation[constant[ graph[] + call[name[self]._format_node_attrs, parameter[name[c_attrs]]]] + constant[];
]]]]
call[name[res].append, parameter[binary_operation[binary_operation[constant[ ] + call[constant[; ].join, parameter[call[name[subgraphs]][name[subgraph_name]]]]] + constant[
]]]]
call[name[res].append, parameter[constant[}
]]]
call[name[res].append, parameter[constant[}
]]]
return[call[constant[].join, parameter[name[res]]]] | keyword[def] identifier[generate_dot] ( identifier[self] , identifier[name] , identifier[urls] ={}, identifier[env] = keyword[None] ,
identifier[graph_attrs] ={}, identifier[node_attrs] ={}, identifier[edge_attrs] ={}):
literal[string]
identifier[g_attrs] = identifier[self] . identifier[default_graph_attrs] . identifier[copy] ()
identifier[n_attrs] = identifier[self] . identifier[default_node_attrs] . identifier[copy] ()
identifier[e_attrs] = identifier[self] . identifier[default_edge_attrs] . identifier[copy] ()
identifier[c_attrs] = identifier[self] . identifier[default_cluster_attrs] . identifier[copy] ()
identifier[g_attrs] . identifier[update] ( identifier[graph_attrs] )
identifier[n_attrs] . identifier[update] ( identifier[node_attrs] )
identifier[e_attrs] . identifier[update] ( identifier[edge_attrs] )
keyword[if] identifier[env] :
identifier[g_attrs] . identifier[update] ( identifier[env] . identifier[config] . identifier[inheritance_graph_attrs] )
identifier[n_attrs] . identifier[update] ( identifier[env] . identifier[config] . identifier[inheritance_node_attrs] )
identifier[e_attrs] . identifier[update] ( identifier[env] . identifier[config] . identifier[inheritance_edge_attrs] )
identifier[c_attrs] . identifier[update] ( identifier[env] . identifier[config] . identifier[inheritance_cluster_attrs] )
identifier[res] =[]
identifier[res] . identifier[append] ( literal[string] % identifier[name] )
identifier[res] . identifier[append] ( identifier[self] . identifier[_format_graph_attrs] ( identifier[g_attrs] ))
identifier[subgraphs] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[name] , identifier[fullname] , identifier[bases] , identifier[tooltip] keyword[in] identifier[sorted] ( identifier[self] . identifier[class_info] ):
identifier[subgraph_name] = literal[string] . identifier[join] ( identifier[fullname] . identifier[split] ( literal[string] )[:- literal[int] ])
identifier[subgraphs] [ identifier[subgraph_name] ]. identifier[append] ( identifier[name] )
identifier[this_node_attrs] = identifier[n_attrs] . identifier[copy] ()
keyword[if] identifier[fullname] keyword[in] identifier[urls] :
identifier[this_node_attrs] [ literal[string] ]= literal[string] % identifier[urls] [ identifier[fullname] ]
identifier[this_node_attrs] [ literal[string] ]= literal[string]
keyword[if] identifier[tooltip] :
identifier[this_node_attrs] [ literal[string] ]= identifier[tooltip]
identifier[res] . identifier[append] ( literal[string] %
( identifier[name] , identifier[self] . identifier[_format_node_attrs] ( identifier[this_node_attrs] )))
keyword[for] identifier[base_name] keyword[in] identifier[bases] :
identifier[res] . identifier[append] ( literal[string] %
( identifier[base_name] , identifier[name] ,
identifier[self] . identifier[_format_node_attrs] ( identifier[e_attrs] )))
keyword[if] identifier[self] . identifier[cluster_modules] :
keyword[for] identifier[subgraph_name] keyword[in] identifier[subgraphs] :
identifier[res] . identifier[append] ( literal[string]
% identifier[subgraph_name] . identifier[replace] ( literal[string] , literal[string] ))
identifier[res] . identifier[append] ( literal[string] % identifier[subgraph_name] )
identifier[res] . identifier[append] ( literal[string] + identifier[self] . identifier[_format_node_attrs] ( identifier[c_attrs] )+
literal[string] )
identifier[res] . identifier[append] ( literal[string] + literal[string] . identifier[join] ( identifier[subgraphs] [ identifier[subgraph_name] ])+ literal[string] )
identifier[res] . identifier[append] ( literal[string] )
identifier[res] . identifier[append] ( literal[string] )
keyword[return] literal[string] . identifier[join] ( identifier[res] ) | def generate_dot(self, name, urls={}, env=None, graph_attrs={}, node_attrs={}, edge_attrs={}):
# type: (unicode, Dict, BuildEnvironment, Dict, Dict, Dict) -> unicode
'Generate a graphviz dot graph from the classes that were passed in\n to __init__.\n\n *name* is the name of the graph.\n\n *urls* is a dictionary mapping class names to HTTP URLs.\n\n *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing\n key/value pairs to pass on as graphviz properties.\n '
g_attrs = self.default_graph_attrs.copy()
n_attrs = self.default_node_attrs.copy()
e_attrs = self.default_edge_attrs.copy()
c_attrs = self.default_cluster_attrs.copy()
g_attrs.update(graph_attrs)
n_attrs.update(node_attrs)
e_attrs.update(edge_attrs)
if env:
g_attrs.update(env.config.inheritance_graph_attrs)
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
c_attrs.update(env.config.inheritance_cluster_attrs) # depends on [control=['if'], data=[]]
res = [] # type: List[unicode]
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
subgraphs = defaultdict(list) # subgraph_name => list of node names
for (name, fullname, bases, tooltip) in sorted(self.class_info):
subgraph_name = '.'.join(fullname.split('.')[:-1])
subgraphs[subgraph_name].append(name)
# Write the node
this_node_attrs = n_attrs.copy()
if fullname in urls:
this_node_attrs['URL'] = '"%s"' % urls[fullname]
this_node_attrs['target'] = '"_top"' # depends on [control=['if'], data=['fullname', 'urls']]
if tooltip:
this_node_attrs['tooltip'] = tooltip # depends on [control=['if'], data=[]]
res.append(' "%s" [%s];\n' % (name, self._format_node_attrs(this_node_attrs)))
# Write the edges
for base_name in bases:
res.append(' "%s" -> "%s" [%s];\n' % (base_name, name, self._format_node_attrs(e_attrs))) # depends on [control=['for'], data=['base_name']] # depends on [control=['for'], data=[]]
if self.cluster_modules:
for subgraph_name in subgraphs:
res.append('subgraph cluster_%s {\n' % subgraph_name.replace('.', '_'))
res.append(' label="%s";\n' % subgraph_name)
res.append(' graph[' + self._format_node_attrs(c_attrs) + '];\n')
res.append(' ' + '; '.join(subgraphs[subgraph_name]) + '\n')
res.append('}\n') # depends on [control=['for'], data=['subgraph_name']] # depends on [control=['if'], data=[]]
res.append('}\n')
return ''.join(res) |
def run(self, *args):
    """Merge unique identities using a matching algorithm."""
    parsed = self.parser.parse_args(args)
    # Hand the parsed command-line options straight to unify() and
    # return its exit code unchanged.
    return self.unify(
        parsed.matching,
        parsed.sources,
        parsed.fast_matching,
        parsed.no_strict,
        parsed.interactive,
        parsed.recovery,
    )
constant[Merge unique identities using a matching algorithm.]
variable[params] assign[=] call[name[self].parser.parse_args, parameter[name[args]]]
variable[code] assign[=] call[name[self].unify, parameter[name[params].matching, name[params].sources, name[params].fast_matching, name[params].no_strict, name[params].interactive, name[params].recovery]]
return[name[code]] | keyword[def] identifier[run] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[params] = identifier[self] . identifier[parser] . identifier[parse_args] ( identifier[args] )
identifier[code] = identifier[self] . identifier[unify] ( identifier[params] . identifier[matching] , identifier[params] . identifier[sources] ,
identifier[params] . identifier[fast_matching] , identifier[params] . identifier[no_strict] ,
identifier[params] . identifier[interactive] , identifier[params] . identifier[recovery] )
keyword[return] identifier[code] | def run(self, *args):
"""Merge unique identities using a matching algorithm."""
params = self.parser.parse_args(args)
code = self.unify(params.matching, params.sources, params.fast_matching, params.no_strict, params.interactive, params.recovery)
return code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.