code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def send_video_note(self, chat_id, data, duration=None, length=None, reply_to_message_id=None, reply_markup=None,
                    disable_notification=None, timeout=None):
    """
    Send a video note (mp4 video) to a chat via the Telegram Bot API.

    :param chat_id: Integer : unique identifier for the message recipient — User or GroupChat id
    :param data: InputFile or String : video note to send; a file_id String may be passed
        to resend a video that is already stored on the Telegram server
    :param duration: Integer : duration of the sent video in seconds
    :param length: Integer : video width and height; can't be None and should be in range of (0, 640)
    :param reply_to_message_id: id of the message this one replies to, if any
    :param reply_markup: additional interface options forwarded to the API helper
    :param disable_notification: send silently when truthy
    :param timeout: request timeout forwarded to the API helper
    :return: the sent message, deserialized into a types.Message
    """
    raw_response = apihelper.send_video_note(self.token, chat_id, data, duration, length,
                                             reply_to_message_id, reply_markup,
                                             disable_notification, timeout)
    return types.Message.de_json(raw_response)
def dsa_sign(private_key, data, hash_algorithm):
    """
    Produce a DSA signature over *data*.

    :param private_key:
        The PrivateKey object used to generate the signature
    :param data:
        A byte string of the data the signature is for
    :param hash_algorithm:
        A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512"
    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library
    :return:
        A byte string of the signature
    """
    # Reject keys of any other algorithm before reaching the crypto backend.
    key_algorithm = private_key.algorithm
    if key_algorithm != 'dsa':
        raise ValueError('The key specified is not a DSA private key')
    return _sign(private_key, data, hash_algorithm)
async def reset_all(self):
    """
    Delete every stored state record from the backing database table.
    """
    async with self.connection() as conn:
        # Build the bulk-delete query and execute it on this connection.
        delete_query = r.table(self._table).delete()
        await delete_query.run(conn)
def list_service_profiles(self, retrieve_all=True, **_params):
    """Return all Neutron service flavor profiles known to the server."""
    profiles_path = self.service_profiles_path
    return self.list('service_profiles', profiles_path, retrieve_all, **_params)
def has_gpg_key(fingerprint):
    """Return True when exactly one public key in the keyring matches *fingerprint*.

    Only the last 8 characters of the fingerprint (the short key id,
    upper-cased) are used for matching against the key listing.
    """
    short_id = fingerprint[-8:] if len(fingerprint) > 8 else fingerprint
    short_id = short_id.upper()
    cmd = flatten([gnupg_bin(), gnupg_home(), "--list-public-keys"])
    listing = stderr_output(cmd).split('\n')
    matches = [line for line in listing if short_id in line]
    return len(matches) == 1
def cwtmorlet(points, width):
    """Complex Morlet wavelet, shaped for use with scipy.signal.cwt.

    Parameters: points: int
                    Number of points in `vector`.
                width: scalar
                    Width parameter of wavelet.
                    Equals (sample rate / fundamental frequency of wavelet)
    Returns:    `vector`: complex-valued ndarray of shape (points,)
    """
    omega = 5.0  # conventional center frequency for the Morlet wavelet
    scaling = points / (2.0 * omega * width)
    return wavelets.morlet(points, omega, scaling, complete=True)
def _cleanPictures(self):
"""
Delete unused images
"""
# Project have been deleted
if not os.path.exists(self.path):
return
try:
pictures = set(os.listdir(self.pictures_directory))
for drawing in self._drawings.values():
try:
pictures.remove(drawing.ressource_filename)
except KeyError:
pass
for pict in pictures:
os.remove(os.path.join(self.pictures_directory, pict))
except OSError as e:
log.warning(str(e)) | def function[_cleanPictures, parameter[self]]:
constant[
Delete unused images
]
if <ast.UnaryOp object at 0x7da1b17d6a40> begin[:]
return[None]
<ast.Try object at 0x7da1b17d43d0> | keyword[def] identifier[_cleanPictures] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[path] ):
keyword[return]
keyword[try] :
identifier[pictures] = identifier[set] ( identifier[os] . identifier[listdir] ( identifier[self] . identifier[pictures_directory] ))
keyword[for] identifier[drawing] keyword[in] identifier[self] . identifier[_drawings] . identifier[values] ():
keyword[try] :
identifier[pictures] . identifier[remove] ( identifier[drawing] . identifier[ressource_filename] )
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[for] identifier[pict] keyword[in] identifier[pictures] :
identifier[os] . identifier[remove] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[pictures_directory] , identifier[pict] ))
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[log] . identifier[warning] ( identifier[str] ( identifier[e] )) | def _cleanPictures(self):
"""
Delete unused images
"""
# Project have been deleted
if not os.path.exists(self.path):
return # depends on [control=['if'], data=[]]
try:
pictures = set(os.listdir(self.pictures_directory))
for drawing in self._drawings.values():
try:
pictures.remove(drawing.ressource_filename) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['drawing']]
for pict in pictures:
os.remove(os.path.join(self.pictures_directory, pict)) # depends on [control=['for'], data=['pict']] # depends on [control=['try'], data=[]]
except OSError as e:
log.warning(str(e)) # depends on [control=['except'], data=['e']] |
def hlog(x, b=500, r=_display_max, d=_l_mmax):
    """
    Base 10 hyperlog transform.

    Parameters
    ----------
    x : num | num iterable
        values to be transformed.
    b : num
        Parameter controling the location of the shift
        from linear to log transformation.
    r : num (default = 10**4)
        maximal transformed value.
    d : num (default = log10(2**18))
        log10 of maximal possible measured value.
        hlog_inv(r) = 10**d

    Returns
    -------
    Transformed value(s); an empty container is returned unchanged.
    """
    hlog_fun = _make_hlog_numeric(b, r, d)
    if hasattr(x, '__len__') and not len(x):
        # Empty container: nothing to transform.
        return x
    return hlog_fun(x)
def display(self):
    """
    Print a human-readable summary of this Layer to standard output.
    """
    if self.displayWidth == 0:
        return
    print("=============================")
    print("Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)" % (
        self.name, self.kind, self.size, self.active, self.frozen))
    if self.type == 'Output':
        displayArray('Target    ', self.target, self.displayWidth)
    displayArray('Activation', self.activation, self.displayWidth)
    if self.type != 'Input' and self._verbosity > 1:
        displayArray('Error     ', self.error, self.displayWidth)
    if self._verbosity > 4 and self.type != 'Input':
        # Dump the internal vectors only at high verbosity.
        for label, vector in (('weight', self.weight),
                              ('dweight', self.dweight),
                              ('delta', self.delta),
                              ('netinput', self.netinput),
                              ('wed', self.wed)):
            print("    ", end=" ")
            displayArray(label, vector)
def init(fname=None, format=None):
    """Factory that creates and returns a new canvas.T object.

    Both parameters, <fname> and <format>, are optional. <fname> is either
    the output file name or a file object; when omitted or None, output is
    sent to standard output. <format> must be one of "ps", "pdf", "svg",
    "x11" or "png"; when omitted it is guessed from the suffix of <fname>,
    defaulting to "ps".
    """
    fname = fname or theme.output_file
    format = format or theme.output_format
    if format == None:
        if not isinstance(fname, str):
            format = "ps"
        else:
            # Guess the format from the file-name suffix; fall back to "ps".
            for pattern, guessed in (("pdf$", "pdf"), ("png$", "png"), ("svg$", "svg")):
                if re.search(pattern, fname):
                    format = guessed
                    break
            else:
                format = "ps"
    if format == "ps":
        return pscanvas.T(fname)
    if format == "png":
        return pngcanvas.T(fname)
    if format == "x11":
        return x11canvas.T(fname)
    if format == "svg":
        return svgcanvas.T(fname)
    if format == "pdf-uncompressed":
        return pdfcanvas.T(fname, False)
    # Any other value (including "pdf") gets the default PDF canvas.
    return pdfcanvas.T(fname, theme.compress_output)
def main():
    """
    NAME
    eqarea.py
    DESCRIPTION
    makes equal area projections from declination/inclination data
    INPUT FORMAT
    takes dec/inc as first two columns in space delimited file
    SYNTAX
    eqarea.py [options]
    OPTIONS
    -f FILE, specify file on command line
    -sav save figure and quit
    -fmt [svg,jpg,png,pdf] set figure format [default is svg]
    -s SIZE specify symbol size - default is 20
    -Lsym SHAPE COLOR specify shape and color for lower hemisphere
    -Usym SHAPE COLOR specify shape and color for upper hemisphere
    shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond,
    'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross
    colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite
    """
    # Defaults: svg output; lower hemisphere plotted as red circles,
    # upper hemisphere as white circles.
    title = ""
    files, fmt = {}, 'svg'
    sym = {'lower': ['o', 'r'], 'upper': ['o', 'w']}
    plot = 0  # 0 = show interactively, 1 = save figure and quit (-sav)
    if '-h' in sys.argv: # check if help is needed
        print(main.__doc__)
        sys.exit() # graceful quit
    if '-sav' in sys.argv:
        plot = 1
    if '-fmt' in sys.argv:
        ind = sys.argv.index('-fmt')
        fmt = sys.argv[ind + 1]
    if '-s' in sys.argv:
        ind = sys.argv.index('-s')
        sym['size'] = int(sys.argv[ind + 1])
    else:
        sym['size'] = 20
    # -Lsym / -Usym each take two values: marker shape then color.
    if '-Lsym' in sys.argv:
        ind = sys.argv.index('-Lsym')
        sym['lower'][0] = sys.argv[ind + 1]
        sym['lower'][1] = sys.argv[ind + 2]
    if '-Usym' in sys.argv:
        ind = sys.argv.index('-Usym')
        sym['upper'][0] = sys.argv[ind + 1]
        sym['upper'][1] = sys.argv[ind + 2]
    if '-f' in sys.argv: # ask for filename
        ind = sys.argv.index('-f')
        fname = sys.argv[ind + 1]
    else:
        # -f is mandatory; show usage and exit.
        print(main.__doc__)
        print(' \n -f option required')
        sys.exit() # graceful quit
    # Load dec/inc pairs (whitespace-delimited columns) from the input file.
    DI = numpy.loadtxt(fname)
    EQ = {'eq': 1}  # plot-name -> figure-number map handed to pmagplotlib
    pmagplotlib.plot_init(EQ['eq'], 5, 5)
    pmagplotlib.plot_eq_sym(EQ['eq'], DI, 'Equal Area Plot', sym) # make plot
    if plot == 0:
        pmagplotlib.draw_figs(EQ) # make it visible
    # Map each plot name to its output file name for save_plots.
    for key in list(EQ.keys()):
        files[key] = key + '.' + fmt
    if pmagplotlib.isServer:
        # Server mode: add styled borders/titles, then save unconditionally.
        black = '#000000'
        purple = '#800080'
        titles = {}
        titles['eq'] = 'Equal Area Plot'
        EQ = pmagplotlib.add_borders(EQ, titles, black, purple)
        pmagplotlib.save_plots(EQ, files)
    elif plot == 1:
        # -sav mode: derive the output name from the input file's basename.
        fname = os.path.split(fname)[1].split('.')[0]
        files['eq'] = fname + '_eq.' + fmt
        pmagplotlib.save_plots(EQ, files)
    else:
        # Interactive mode: ask before saving.
        ans = input(" S[a]ve to save plot, [q]uit without saving: ")
        if ans == "a":
            pmagplotlib.save_plots(EQ, files) | def function[main, parameter[]]:
constant[
NAME
eqarea.py
DESCRIPTION
makes equal area projections from declination/inclination data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
eqarea.py [options]
OPTIONS
-f FILE, specify file on command line
-sav save figure and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-s SIZE specify symbol size - default is 20
-Lsym SHAPE COLOR specify shape and color for lower hemisphere
-Usym SHAPE COLOR specify shape and color for upper hemisphere
shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond,
'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross
colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite
]
variable[title] assign[=] constant[]
<ast.Tuple object at 0x7da1b0499f90> assign[=] tuple[[<ast.Dict object at 0x7da1b0499f30>, <ast.Constant object at 0x7da1b0499ea0>]]
variable[sym] assign[=] dictionary[[<ast.Constant object at 0x7da1b049a050>, <ast.Constant object at 0x7da1b049a110>], [<ast.List object at 0x7da1b049a320>, <ast.List object at 0x7da1b049a860>]]
variable[plot] assign[=] constant[0]
if compare[constant[-h] in name[sys].argv] begin[:]
call[name[print], parameter[name[main].__doc__]]
call[name[sys].exit, parameter[]]
if compare[constant[-sav] in name[sys].argv] begin[:]
variable[plot] assign[=] constant[1]
if compare[constant[-fmt] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-fmt]]]
variable[fmt] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[1]]]
if compare[constant[-s] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-s]]]
call[name[sym]][constant[size]] assign[=] call[name[int], parameter[call[name[sys].argv][binary_operation[name[ind] + constant[1]]]]]
if compare[constant[-Lsym] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-Lsym]]]
call[call[name[sym]][constant[lower]]][constant[0]] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[1]]]
call[call[name[sym]][constant[lower]]][constant[1]] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[2]]]
if compare[constant[-Usym] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-Usym]]]
call[call[name[sym]][constant[upper]]][constant[0]] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[1]]]
call[call[name[sym]][constant[upper]]][constant[1]] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[2]]]
if compare[constant[-f] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-f]]]
variable[fname] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[1]]]
variable[DI] assign[=] call[name[numpy].loadtxt, parameter[name[fname]]]
variable[EQ] assign[=] dictionary[[<ast.Constant object at 0x7da1b049b640>], [<ast.Constant object at 0x7da1b0498460>]]
call[name[pmagplotlib].plot_init, parameter[call[name[EQ]][constant[eq]], constant[5], constant[5]]]
call[name[pmagplotlib].plot_eq_sym, parameter[call[name[EQ]][constant[eq]], name[DI], constant[Equal Area Plot], name[sym]]]
if compare[name[plot] equal[==] constant[0]] begin[:]
call[name[pmagplotlib].draw_figs, parameter[name[EQ]]]
for taget[name[key]] in starred[call[name[list], parameter[call[name[EQ].keys, parameter[]]]]] begin[:]
call[name[files]][name[key]] assign[=] binary_operation[binary_operation[name[key] + constant[.]] + name[fmt]]
if name[pmagplotlib].isServer begin[:]
variable[black] assign[=] constant[#000000]
variable[purple] assign[=] constant[#800080]
variable[titles] assign[=] dictionary[[], []]
call[name[titles]][constant[eq]] assign[=] constant[Equal Area Plot]
variable[EQ] assign[=] call[name[pmagplotlib].add_borders, parameter[name[EQ], name[titles], name[black], name[purple]]]
call[name[pmagplotlib].save_plots, parameter[name[EQ], name[files]]] | keyword[def] identifier[main] ():
literal[string]
identifier[title] = literal[string]
identifier[files] , identifier[fmt] ={}, literal[string]
identifier[sym] ={ literal[string] :[ literal[string] , literal[string] ], literal[string] :[ literal[string] , literal[string] ]}
identifier[plot] = literal[int]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[print] ( identifier[main] . identifier[__doc__] )
identifier[sys] . identifier[exit] ()
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[plot] = literal[int]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[fmt] = identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[sym] [ literal[string] ]= identifier[int] ( identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ])
keyword[else] :
identifier[sym] [ literal[string] ]= literal[int]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[sym] [ literal[string] ][ literal[int] ]= identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
identifier[sym] [ literal[string] ][ literal[int] ]= identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[sym] [ literal[string] ][ literal[int] ]= identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
identifier[sym] [ literal[string] ][ literal[int] ]= identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[fname] = identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[else] :
identifier[print] ( identifier[main] . identifier[__doc__] )
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ()
identifier[DI] = identifier[numpy] . identifier[loadtxt] ( identifier[fname] )
identifier[EQ] ={ literal[string] : literal[int] }
identifier[pmagplotlib] . identifier[plot_init] ( identifier[EQ] [ literal[string] ], literal[int] , literal[int] )
identifier[pmagplotlib] . identifier[plot_eq_sym] ( identifier[EQ] [ literal[string] ], identifier[DI] , literal[string] , identifier[sym] )
keyword[if] identifier[plot] == literal[int] :
identifier[pmagplotlib] . identifier[draw_figs] ( identifier[EQ] )
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[EQ] . identifier[keys] ()):
identifier[files] [ identifier[key] ]= identifier[key] + literal[string] + identifier[fmt]
keyword[if] identifier[pmagplotlib] . identifier[isServer] :
identifier[black] = literal[string]
identifier[purple] = literal[string]
identifier[titles] ={}
identifier[titles] [ literal[string] ]= literal[string]
identifier[EQ] = identifier[pmagplotlib] . identifier[add_borders] ( identifier[EQ] , identifier[titles] , identifier[black] , identifier[purple] )
identifier[pmagplotlib] . identifier[save_plots] ( identifier[EQ] , identifier[files] )
keyword[elif] identifier[plot] == literal[int] :
identifier[fname] = identifier[os] . identifier[path] . identifier[split] ( identifier[fname] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[files] [ literal[string] ]= identifier[fname] + literal[string] + identifier[fmt]
identifier[pmagplotlib] . identifier[save_plots] ( identifier[EQ] , identifier[files] )
keyword[else] :
identifier[ans] = identifier[input] ( literal[string] )
keyword[if] identifier[ans] == literal[string] :
identifier[pmagplotlib] . identifier[save_plots] ( identifier[EQ] , identifier[files] ) | def main():
"""
NAME
eqarea.py
DESCRIPTION
makes equal area projections from declination/inclination data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
eqarea.py [options]
OPTIONS
-f FILE, specify file on command line
-sav save figure and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-s SIZE specify symbol size - default is 20
-Lsym SHAPE COLOR specify shape and color for lower hemisphere
-Usym SHAPE COLOR specify shape and color for upper hemisphere
shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond,
'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross
colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite
"""
title = ''
(files, fmt) = ({}, 'svg')
sym = {'lower': ['o', 'r'], 'upper': ['o', 'w']}
plot = 0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit # depends on [control=['if'], data=[]]
if '-sav' in sys.argv:
plot = 1 # depends on [control=['if'], data=[]]
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind + 1] # depends on [control=['if'], data=[]]
if '-s' in sys.argv:
ind = sys.argv.index('-s')
sym['size'] = int(sys.argv[ind + 1]) # depends on [control=['if'], data=[]]
else:
sym['size'] = 20
if '-Lsym' in sys.argv:
ind = sys.argv.index('-Lsym')
sym['lower'][0] = sys.argv[ind + 1]
sym['lower'][1] = sys.argv[ind + 2] # depends on [control=['if'], data=[]]
if '-Usym' in sys.argv:
ind = sys.argv.index('-Usym')
sym['upper'][0] = sys.argv[ind + 1]
sym['upper'][1] = sys.argv[ind + 2] # depends on [control=['if'], data=[]]
if '-f' in sys.argv: # ask for filename
ind = sys.argv.index('-f')
fname = sys.argv[ind + 1] # depends on [control=['if'], data=[]]
else:
print(main.__doc__)
print(' \n -f option required')
sys.exit() # graceful quit
DI = numpy.loadtxt(fname)
EQ = {'eq': 1}
pmagplotlib.plot_init(EQ['eq'], 5, 5)
pmagplotlib.plot_eq_sym(EQ['eq'], DI, 'Equal Area Plot', sym) # make plot
if plot == 0:
pmagplotlib.draw_figs(EQ) # make it visible # depends on [control=['if'], data=[]]
for key in list(EQ.keys()):
files[key] = key + '.' + fmt # depends on [control=['for'], data=['key']]
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['eq'] = 'Equal Area Plot'
EQ = pmagplotlib.add_borders(EQ, titles, black, purple)
pmagplotlib.save_plots(EQ, files) # depends on [control=['if'], data=[]]
elif plot == 1:
fname = os.path.split(fname)[1].split('.')[0]
files['eq'] = fname + '_eq.' + fmt
pmagplotlib.save_plots(EQ, files) # depends on [control=['if'], data=[]]
else:
ans = input(' S[a]ve to save plot, [q]uit without saving: ')
if ans == 'a':
pmagplotlib.save_plots(EQ, files) # depends on [control=['if'], data=[]] |
def matrix_iter(matrix, version, scale=1, border=None):
    """\
    Returns an iterator / generator over the provided matrix which includes
    the border and the scaling factor.
    If either the `scale` or `border` value is invalid, a :py:exc:`ValueError`
    is raised.
    :param matrix: An iterable of bytearrays.
    :param int version: A version constant.
    :param int scale: The scaling factor (default: ``1``).
    :param int border: The border size or ``None`` to specify the
            default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
    :raises: :py:exc:`ValueError` if an illegal scale or border value is provided
    """
    # Validate inputs up front so the generator cannot fail lazily mid-iteration.
    check_valid_border(border)
    scale = int(scale)
    check_valid_scale(scale)
    border = get_border(version, border)
    # Unscaled, borderless symbol dimensions; border and scale are applied
    # below by extending the index range and repeating rows/modules.
    width, height = get_symbol_size(version, scale=1, border=0)
    def get_bit(i, j):
        # Indices outside the matrix lie in the quiet zone -> light module (0x0).
        return 0x1 if (0 <= i < height and 0 <= j < width and matrix[i][j]) else 0x0
    # Each logical row is emitted `scale` times, and each module within the
    # row is repeated `scale` times, yielding the scaled bitmap row by row.
    for i in range(-border, height + border):
        for s in range(scale):
            yield chain.from_iterable(([get_bit(i, j)] * scale for j in range(-border, width + border))) | def function[matrix_iter, parameter[matrix, version, scale, border]]:
constant[ Returns an iterator / generator over the provided matrix which includes
the border and the scaling factor.
If either the `scale` or `border` value is invalid, a :py:exc:`ValueError`
is raised.
:param matrix: An iterable of bytearrays.
:param int version: A version constant.
:param int scale: The scaling factor (default: ``1``).
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:raises: :py:exc:`ValueError` if an illegal scale or border value is provided
]
call[name[check_valid_border], parameter[name[border]]]
variable[scale] assign[=] call[name[int], parameter[name[scale]]]
call[name[check_valid_scale], parameter[name[scale]]]
variable[border] assign[=] call[name[get_border], parameter[name[version], name[border]]]
<ast.Tuple object at 0x7da1b0b29930> assign[=] call[name[get_symbol_size], parameter[name[version]]]
def function[get_bit, parameter[i, j]]:
return[<ast.IfExp object at 0x7da1b0b2b550>]
for taget[name[i]] in starred[call[name[range], parameter[<ast.UnaryOp object at 0x7da2041d8be0>, binary_operation[name[height] + name[border]]]]] begin[:]
for taget[name[s]] in starred[call[name[range], parameter[name[scale]]]] begin[:]
<ast.Yield object at 0x7da2041db400> | keyword[def] identifier[matrix_iter] ( identifier[matrix] , identifier[version] , identifier[scale] = literal[int] , identifier[border] = keyword[None] ):
literal[string]
identifier[check_valid_border] ( identifier[border] )
identifier[scale] = identifier[int] ( identifier[scale] )
identifier[check_valid_scale] ( identifier[scale] )
identifier[border] = identifier[get_border] ( identifier[version] , identifier[border] )
identifier[width] , identifier[height] = identifier[get_symbol_size] ( identifier[version] , identifier[scale] = literal[int] , identifier[border] = literal[int] )
keyword[def] identifier[get_bit] ( identifier[i] , identifier[j] ):
keyword[return] literal[int] keyword[if] ( literal[int] <= identifier[i] < identifier[height] keyword[and] literal[int] <= identifier[j] < identifier[width] keyword[and] identifier[matrix] [ identifier[i] ][ identifier[j] ]) keyword[else] literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] (- identifier[border] , identifier[height] + identifier[border] ):
keyword[for] identifier[s] keyword[in] identifier[range] ( identifier[scale] ):
keyword[yield] identifier[chain] . identifier[from_iterable] (([ identifier[get_bit] ( identifier[i] , identifier[j] )]* identifier[scale] keyword[for] identifier[j] keyword[in] identifier[range] (- identifier[border] , identifier[width] + identifier[border] ))) | def matrix_iter(matrix, version, scale=1, border=None):
""" Returns an iterator / generator over the provided matrix which includes
the border and the scaling factor.
If either the `scale` or `border` value is invalid, a :py:exc:`ValueError`
is raised.
:param matrix: An iterable of bytearrays.
:param int version: A version constant.
:param int scale: The scaling factor (default: ``1``).
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:raises: :py:exc:`ValueError` if an illegal scale or border value is provided
"""
check_valid_border(border)
scale = int(scale)
check_valid_scale(scale)
border = get_border(version, border)
(width, height) = get_symbol_size(version, scale=1, border=0)
def get_bit(i, j):
return 1 if 0 <= i < height and 0 <= j < width and matrix[i][j] else 0
for i in range(-border, height + border):
for s in range(scale):
yield chain.from_iterable(([get_bit(i, j)] * scale for j in range(-border, width + border))) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['i']] |
def check_if_vislcg_is_in_path( self, vislcg_cmd1 ):
    ''' Checks whether given vislcg_cmd1 is in system's PATH. Returns True, there is
        a file named vislcg_cmd1 in the path, otherwise returns False;
        The idea borrows from: http://stackoverflow.com/a/377028
    '''
    for path in os.environ["PATH"].split( os.pathsep ):
        # Windows PATH entries may be wrapped in double quotes; strip them.
        path1 = path.strip('"')
        file1 = os.path.join(path1, vislcg_cmd1)
        # Accept both the bare name and the Windows '.exe' form.
        if os.path.isfile(file1) or os.path.isfile(file1+'.exe'):
            return True
    return False | def function[check_if_vislcg_is_in_path, parameter[self, vislcg_cmd1]]:
constant[ Checks whether given vislcg_cmd1 is in system's PATH. Returns True, there is
a file named vislcg_cmd1 in the path, otherwise returns False;
The idea borrows from: http://stackoverflow.com/a/377028
]
for taget[name[path]] in starred[call[call[name[os].environ][constant[PATH]].split, parameter[name[os].pathsep]]] begin[:]
variable[path1] assign[=] call[name[path].strip, parameter[constant["]]]
variable[file1] assign[=] call[name[os].path.join, parameter[name[path1], name[vislcg_cmd1]]]
if <ast.BoolOp object at 0x7da18f00e6e0> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[check_if_vislcg_is_in_path] ( identifier[self] , identifier[vislcg_cmd1] ):
literal[string]
keyword[for] identifier[path] keyword[in] identifier[os] . identifier[environ] [ literal[string] ]. identifier[split] ( identifier[os] . identifier[pathsep] ):
identifier[path1] = identifier[path] . identifier[strip] ( literal[string] )
identifier[file1] = identifier[os] . identifier[path] . identifier[join] ( identifier[path1] , identifier[vislcg_cmd1] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file1] ) keyword[or] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file1] + literal[string] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def check_if_vislcg_is_in_path(self, vislcg_cmd1):
""" Checks whether given vislcg_cmd1 is in system's PATH. Returns True, there is
a file named vislcg_cmd1 in the path, otherwise returns False;
The idea borrows from: http://stackoverflow.com/a/377028
"""
for path in os.environ['PATH'].split(os.pathsep):
path1 = path.strip('"')
file1 = os.path.join(path1, vislcg_cmd1)
if os.path.isfile(file1) or os.path.isfile(file1 + '.exe'):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
return False |
def get_obj(self, objpath, metahash, dst_path):
    """Get object from cache, write it to dst_path.
    Args:
      objpath: filename relative to buildroot
        (example: mini-boot/blahblah/somefile.bin)
      metahash: metahash. See targets/base.py
      dst_path: Absolute path where the file should be written.
    Raises:
      CacheMiss: if the item is not in the cache
    """
    incachepath = self.path_in_cache(objpath, metahash)
    if not os.path.exists(incachepath):
        raise CacheMiss('%s not in cache.' % incachepath)
    else:
        log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())
        # Create the destination directory tree on demand.
        if not os.path.exists(os.path.dirname(dst_path)):
            os.makedirs(os.path.dirname(dst_path))
        # NOTE(review): hard-links instead of copying, so dst_path must be on
        # the same filesystem as the cache -- confirm against callers.
        os.link(incachepath, dst_path) | def function[get_obj, parameter[self, objpath, metahash, dst_path]]:
constant[Get object from cache, write it to dst_path.
Args:
objpath: filename relative to buildroot
(example: mini-boot/blahblah/somefile.bin)
metahash: metahash. See targets/base.py
dst_path: Absolute path where the file should be written.
Raises:
CacheMiss: if the item is not in the cache
]
variable[incachepath] assign[=] call[name[self].path_in_cache, parameter[name[objpath], name[metahash]]]
if <ast.UnaryOp object at 0x7da18f09ccd0> begin[:]
<ast.Raise object at 0x7da18f09d720> | keyword[def] identifier[get_obj] ( identifier[self] , identifier[objpath] , identifier[metahash] , identifier[dst_path] ):
literal[string]
identifier[incachepath] = identifier[self] . identifier[path_in_cache] ( identifier[objpath] , identifier[metahash] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[incachepath] ):
keyword[raise] identifier[CacheMiss] ( literal[string] % identifier[incachepath] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[objpath] , identifier[metahash] . identifier[hexdigest] ())
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[dst_path] )):
identifier[os] . identifier[makedirs] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[dst_path] ))
identifier[os] . identifier[link] ( identifier[incachepath] , identifier[dst_path] ) | def get_obj(self, objpath, metahash, dst_path):
"""Get object from cache, write it to dst_path.
Args:
objpath: filename relative to buildroot
(example: mini-boot/blahblah/somefile.bin)
metahash: metahash. See targets/base.py
dst_path: Absolute path where the file should be written.
Raises:
CacheMiss: if the item is not in the cache
"""
incachepath = self.path_in_cache(objpath, metahash)
if not os.path.exists(incachepath):
raise CacheMiss('%s not in cache.' % incachepath) # depends on [control=['if'], data=[]]
else:
log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path)) # depends on [control=['if'], data=[]]
os.link(incachepath, dst_path) |
def autolink_role(typ, rawtext, etext, lineno, inliner,
                  options={}, content=[]):
    """Smart linking role.
    Expands to ':obj:`text`' if `text` is an object that can be imported;
    otherwise expands to '*text*'.
    """
    # NOTE(review): mutable default arguments ({} / []) are shared across
    # calls; here they are only read (content is rebound, never mutated),
    # so the usual aliasing hazard does not bite.
    env = inliner.document.settings.env
    # Delegate to the Python domain's :obj: role to build the cross-reference.
    r = env.get_domain('py').role('obj')(
        'obj', rawtext, etext, lineno, inliner, options, content)
    pnode = r[0][0]
    prefixes = get_import_prefixes_from_env(env)
    try:
        name, obj, parent = import_by_name(pnode['reftarget'], prefixes)
    except ImportError:
        # Target is not importable: replace the pending xref node with
        # plain emphasis so the output degrades to '*text*'.
        content = pnode[0]
        r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
                                 classes=content['classes'])
    return r | def function[autolink_role, parameter[typ, rawtext, etext, lineno, inliner, options, content]]:
constant[Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
]
variable[env] assign[=] name[inliner].document.settings.env
variable[r] assign[=] call[call[call[name[env].get_domain, parameter[constant[py]]].role, parameter[constant[obj]]], parameter[constant[obj], name[rawtext], name[etext], name[lineno], name[inliner], name[options], name[content]]]
variable[pnode] assign[=] call[call[name[r]][constant[0]]][constant[0]]
variable[prefixes] assign[=] call[name[get_import_prefixes_from_env], parameter[name[env]]]
<ast.Try object at 0x7da1b0284280>
return[name[r]] | keyword[def] identifier[autolink_role] ( identifier[typ] , identifier[rawtext] , identifier[etext] , identifier[lineno] , identifier[inliner] ,
identifier[options] ={}, identifier[content] =[]):
literal[string]
identifier[env] = identifier[inliner] . identifier[document] . identifier[settings] . identifier[env]
identifier[r] = identifier[env] . identifier[get_domain] ( literal[string] ). identifier[role] ( literal[string] )(
literal[string] , identifier[rawtext] , identifier[etext] , identifier[lineno] , identifier[inliner] , identifier[options] , identifier[content] )
identifier[pnode] = identifier[r] [ literal[int] ][ literal[int] ]
identifier[prefixes] = identifier[get_import_prefixes_from_env] ( identifier[env] )
keyword[try] :
identifier[name] , identifier[obj] , identifier[parent] = identifier[import_by_name] ( identifier[pnode] [ literal[string] ], identifier[prefixes] )
keyword[except] identifier[ImportError] :
identifier[content] = identifier[pnode] [ literal[int] ]
identifier[r] [ literal[int] ][ literal[int] ]= identifier[nodes] . identifier[emphasis] ( identifier[rawtext] , identifier[content] [ literal[int] ]. identifier[astext] (),
identifier[classes] = identifier[content] [ literal[string] ])
keyword[return] identifier[r] | def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
r = env.get_domain('py').role('obj')('obj', rawtext, etext, lineno, inliner, options, content)
pnode = r[0][0]
prefixes = get_import_prefixes_from_env(env)
try:
(name, obj, parent) = import_by_name(pnode['reftarget'], prefixes) # depends on [control=['try'], data=[]]
except ImportError:
content = pnode[0]
r[0][0] = nodes.emphasis(rawtext, content[0].astext(), classes=content['classes']) # depends on [control=['except'], data=[]]
return r |
def aggregate(d, y_size, x_size):
    """Average every 4 elements (2x2) in a 2D array"""
    if d.ndim != 2:
        # we can't guarantee what blocks we are getting and how
        # it should be reshaped to do the averaging.
        raise ValueError("Can't aggregrate (reduce) data arrays with "
                         "more than 2 dimensions.")
    # y_size / x_size arrive as floats; .is_integer() rejects fractional factors.
    if not (x_size.is_integer() and y_size.is_integer()):
        raise ValueError("Aggregation factors are not integers")
    # Every dask chunk along each axis must divide evenly by its factor so
    # each block can be reduced independently by map_blocks below.
    for agg_size, chunks in zip([y_size, x_size], d.chunks):
        for chunk_size in chunks:
            if chunk_size % agg_size != 0:
                raise ValueError("Aggregation requires arrays with "
                                 "shapes and chunks divisible by the "
                                 "factor")
    # Output chunk sizes shrink by the aggregation factor on each axis.
    new_chunks = (tuple(int(x / y_size) for x in d.chunks[0]),
                  tuple(int(x / x_size) for x in d.chunks[1]))
    return da.core.map_blocks(_mean, d, y_size, x_size, dtype=d.dtype, chunks=new_chunks) | def function[aggregate, parameter[d, y_size, x_size]]:
constant[Average every 4 elements (2x2) in a 2D array]
if compare[name[d].ndim not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b22ad450>
if <ast.UnaryOp object at 0x7da1b22ad360> begin[:]
<ast.Raise object at 0x7da1b22ad0c0>
for taget[tuple[[<ast.Name object at 0x7da1b22ad870>, <ast.Name object at 0x7da1b22adf00>]]] in starred[call[name[zip], parameter[list[[<ast.Name object at 0x7da1b22aeda0>, <ast.Name object at 0x7da1b22ad990>]], name[d].chunks]]] begin[:]
for taget[name[chunk_size]] in starred[name[chunks]] begin[:]
if compare[binary_operation[name[chunk_size] <ast.Mod object at 0x7da2590d6920> name[agg_size]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b22ade10>
variable[new_chunks] assign[=] tuple[[<ast.Call object at 0x7da1b22af040>, <ast.Call object at 0x7da1b22ad3c0>]]
return[call[name[da].core.map_blocks, parameter[name[_mean], name[d], name[y_size], name[x_size]]]] | keyword[def] identifier[aggregate] ( identifier[d] , identifier[y_size] , identifier[x_size] ):
literal[string]
keyword[if] identifier[d] . identifier[ndim] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] keyword[not] ( identifier[x_size] . identifier[is_integer] () keyword[and] identifier[y_size] . identifier[is_integer] ()):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[agg_size] , identifier[chunks] keyword[in] identifier[zip] ([ identifier[y_size] , identifier[x_size] ], identifier[d] . identifier[chunks] ):
keyword[for] identifier[chunk_size] keyword[in] identifier[chunks] :
keyword[if] identifier[chunk_size] % identifier[agg_size] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] )
identifier[new_chunks] =( identifier[tuple] ( identifier[int] ( identifier[x] / identifier[y_size] ) keyword[for] identifier[x] keyword[in] identifier[d] . identifier[chunks] [ literal[int] ]),
identifier[tuple] ( identifier[int] ( identifier[x] / identifier[x_size] ) keyword[for] identifier[x] keyword[in] identifier[d] . identifier[chunks] [ literal[int] ]))
keyword[return] identifier[da] . identifier[core] . identifier[map_blocks] ( identifier[_mean] , identifier[d] , identifier[y_size] , identifier[x_size] , identifier[dtype] = identifier[d] . identifier[dtype] , identifier[chunks] = identifier[new_chunks] ) | def aggregate(d, y_size, x_size):
"""Average every 4 elements (2x2) in a 2D array"""
if d.ndim != 2:
# we can't guarantee what blocks we are getting and how
# it should be reshaped to do the averaging.
raise ValueError("Can't aggregrate (reduce) data arrays with more than 2 dimensions.") # depends on [control=['if'], data=[]]
if not (x_size.is_integer() and y_size.is_integer()):
raise ValueError('Aggregation factors are not integers') # depends on [control=['if'], data=[]]
for (agg_size, chunks) in zip([y_size, x_size], d.chunks):
for chunk_size in chunks:
if chunk_size % agg_size != 0:
raise ValueError('Aggregation requires arrays with shapes and chunks divisible by the factor') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk_size']] # depends on [control=['for'], data=[]]
new_chunks = (tuple((int(x / y_size) for x in d.chunks[0])), tuple((int(x / x_size) for x in d.chunks[1])))
return da.core.map_blocks(_mean, d, y_size, x_size, dtype=d.dtype, chunks=new_chunks) |
async def on_raw_433(self, message):
    """ Nickname in use. """
    # Handler for IRC numeric 433; only retry while the connection is
    # still registering.
    if not self.registered:
        self._registration_attempts += 1
        # Attempt to set new nickname.
        if self._attempt_nicknames:
            await self.set_nickname(self._attempt_nicknames.pop(0))
        else:
            # All configured alternatives exhausted: fall back to the first
            # nickname plus one underscore per attempt beyond the list.
            await self.set_nickname(
                self._nicknames[0] + '_' * (self._registration_attempts - len(self._nicknames))) | <ast.AsyncFunctionDef object at 0x7da18f00e140>
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[registered] :
identifier[self] . identifier[_registration_attempts] += literal[int]
keyword[if] identifier[self] . identifier[_attempt_nicknames] :
keyword[await] identifier[self] . identifier[set_nickname] ( identifier[self] . identifier[_attempt_nicknames] . identifier[pop] ( literal[int] ))
keyword[else] :
keyword[await] identifier[self] . identifier[set_nickname] (
identifier[self] . identifier[_nicknames] [ literal[int] ]+ literal[string] *( identifier[self] . identifier[_registration_attempts] - identifier[len] ( identifier[self] . identifier[_nicknames] ))) | async def on_raw_433(self, message):
""" Nickname in use. """
if not self.registered:
self._registration_attempts += 1
# Attempt to set new nickname.
if self._attempt_nicknames:
await self.set_nickname(self._attempt_nicknames.pop(0)) # depends on [control=['if'], data=[]]
else:
await self.set_nickname(self._nicknames[0] + '_' * (self._registration_attempts - len(self._nicknames))) # depends on [control=['if'], data=[]] |
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2 | def function[BB, parameter[n]]:
constant[constructs the BB context]
if compare[name[n] less_or_equal[<=] constant[1]] begin[:]
return[call[name[Context], parameter[constant[0
1]]]] | keyword[def] identifier[BB] ( identifier[n] ):
literal[string]
keyword[if] ( identifier[n] <= literal[int] ): keyword[return] identifier[Context] ( literal[string] )
keyword[else] :
identifier[BB1] = identifier[BB] ( identifier[n] - literal[int] )
identifier[AA1] = identifier[AA] ( identifier[n] - literal[int] )
identifier[r1] = identifier[C1] (( identifier[n] - literal[int] )* literal[int] **( identifier[n] - literal[int] ), literal[int] **( identifier[n] - literal[int] ))- identifier[AA1] - identifier[BB1]
identifier[r2] = identifier[BB1] - identifier[C1] ( literal[int] **( identifier[n] - literal[int] ), literal[int] **( identifier[n] - literal[int] ))- identifier[BB1] ;
keyword[return] identifier[r1] + identifier[r2] | def BB(n):
"""constructs the BB context"""
if n <= 1:
return Context('0\n1') # depends on [control=['if'], data=[]]
else:
BB1 = BB(n - 1)
AA1 = AA(n - 1)
r1 = C1((n - 1) * 2 ** (n - 2), 2 ** (n - 1)) - AA1 - BB1
r2 = BB1 - C1(2 ** (n - 1), 2 ** (n - 1)) - BB1
return r1 + r2 |
def _do_download(version, download_base, to_dir, download_delay):
"""Download Setuptools."""
py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys)
tp = 'setuptools-{version}-{py_desig}.egg'
egg = os.path.join(to_dir, tp.format(**locals()))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
_unload_pkg_resources()
import setuptools
setuptools.bootstrap_install_from = egg | def function[_do_download, parameter[version, download_base, to_dir, download_delay]]:
constant[Download Setuptools.]
variable[py_desig] assign[=] call[constant[py{sys.version_info[0]}.{sys.version_info[1]}].format, parameter[]]
variable[tp] assign[=] constant[setuptools-{version}-{py_desig}.egg]
variable[egg] assign[=] call[name[os].path.join, parameter[name[to_dir], call[name[tp].format, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b18c1d50> begin[:]
variable[archive] assign[=] call[name[download_setuptools], parameter[name[version], name[download_base], name[to_dir], name[download_delay]]]
call[name[_build_egg], parameter[name[egg], name[archive], name[to_dir]]]
call[name[sys].path.insert, parameter[constant[0], name[egg]]]
if compare[constant[pkg_resources] in name[sys].modules] begin[:]
call[name[_unload_pkg_resources], parameter[]]
import module[setuptools]
name[setuptools].bootstrap_install_from assign[=] name[egg] | keyword[def] identifier[_do_download] ( identifier[version] , identifier[download_base] , identifier[to_dir] , identifier[download_delay] ):
literal[string]
identifier[py_desig] = literal[string] . identifier[format] ( identifier[sys] = identifier[sys] )
identifier[tp] = literal[string]
identifier[egg] = identifier[os] . identifier[path] . identifier[join] ( identifier[to_dir] , identifier[tp] . identifier[format] (** identifier[locals] ()))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[egg] ):
identifier[archive] = identifier[download_setuptools] ( identifier[version] , identifier[download_base] ,
identifier[to_dir] , identifier[download_delay] )
identifier[_build_egg] ( identifier[egg] , identifier[archive] , identifier[to_dir] )
identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[egg] )
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[modules] :
identifier[_unload_pkg_resources] ()
keyword[import] identifier[setuptools]
identifier[setuptools] . identifier[bootstrap_install_from] = identifier[egg] | def _do_download(version, download_base, to_dir, download_delay):
"""Download Setuptools."""
py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys)
tp = 'setuptools-{version}-{py_desig}.egg'
egg = os.path.join(to_dir, tp.format(**locals()))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base, to_dir, download_delay)
_build_egg(egg, archive, to_dir) # depends on [control=['if'], data=[]]
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
_unload_pkg_resources() # depends on [control=['if'], data=[]]
import setuptools
setuptools.bootstrap_install_from = egg |
def reorder(self, single_column=False):
"""Force a reorder of the displayed items"""
if single_column:
columns = self.sortOrder[:1]
else:
columns = self.sortOrder
for ascending,column in columns[::-1]:
# Python 2.2+ guarantees stable sort, so sort by each column in reverse
# order will order by the assigned columns
self.sorted.sort( key=column.get, reverse=(not ascending)) | def function[reorder, parameter[self, single_column]]:
constant[Force a reorder of the displayed items]
if name[single_column] begin[:]
variable[columns] assign[=] call[name[self].sortOrder][<ast.Slice object at 0x7da1b0473280>]
for taget[tuple[[<ast.Name object at 0x7da1b04704f0>, <ast.Name object at 0x7da1b0473ca0>]]] in starred[call[name[columns]][<ast.Slice object at 0x7da1b0470760>]] begin[:]
call[name[self].sorted.sort, parameter[]] | keyword[def] identifier[reorder] ( identifier[self] , identifier[single_column] = keyword[False] ):
literal[string]
keyword[if] identifier[single_column] :
identifier[columns] = identifier[self] . identifier[sortOrder] [: literal[int] ]
keyword[else] :
identifier[columns] = identifier[self] . identifier[sortOrder]
keyword[for] identifier[ascending] , identifier[column] keyword[in] identifier[columns] [::- literal[int] ]:
identifier[self] . identifier[sorted] . identifier[sort] ( identifier[key] = identifier[column] . identifier[get] , identifier[reverse] =( keyword[not] identifier[ascending] )) | def reorder(self, single_column=False):
"""Force a reorder of the displayed items"""
if single_column:
columns = self.sortOrder[:1] # depends on [control=['if'], data=[]]
else:
columns = self.sortOrder
for (ascending, column) in columns[::-1]: # Python 2.2+ guarantees stable sort, so sort by each column in reverse
# order will order by the assigned columns
self.sorted.sort(key=column.get, reverse=not ascending) # depends on [control=['for'], data=[]] |
def _connect(cls, url, token, timeout, results, i, job_is_done_event=None):
""" Connects to the specified cls with url and token. Stores the connection
information to results[i] in a threadsafe way.
Arguments:
cls: the class which is responsible for establishing connection, basically it's
:class:`~plexapi.client.PlexClient` or :class:`~plexapi.server.PlexServer`
url (str): url which should be passed as `baseurl` argument to cls.__init__()
token (str): authentication token which should be passed as `baseurl` argument to cls.__init__()
timeout (int): timeout which should be passed as `baseurl` argument to cls.__init__()
results (list): pre-filled list for results
i (int): index of current job, should be less than len(results)
job_is_done_event (:class:`~threading.Event`): is X_PLEX_ENABLE_FAST_CONNECT is True then the
event would be set as soon the connection is established
"""
starttime = time.time()
try:
device = cls(baseurl=url, token=token, timeout=timeout)
runtime = int(time.time() - starttime)
results[i] = (url, token, device, runtime)
if X_PLEX_ENABLE_FAST_CONNECT and job_is_done_event:
job_is_done_event.set()
except Exception as err:
runtime = int(time.time() - starttime)
log.error('%s: %s', url, err)
results[i] = (url, token, None, runtime) | def function[_connect, parameter[cls, url, token, timeout, results, i, job_is_done_event]]:
constant[ Connects to the specified cls with url and token. Stores the connection
information to results[i] in a threadsafe way.
Arguments:
cls: the class which is responsible for establishing connection, basically it's
:class:`~plexapi.client.PlexClient` or :class:`~plexapi.server.PlexServer`
url (str): url which should be passed as `baseurl` argument to cls.__init__()
token (str): authentication token which should be passed as `baseurl` argument to cls.__init__()
timeout (int): timeout which should be passed as `baseurl` argument to cls.__init__()
results (list): pre-filled list for results
i (int): index of current job, should be less than len(results)
job_is_done_event (:class:`~threading.Event`): is X_PLEX_ENABLE_FAST_CONNECT is True then the
event would be set as soon the connection is established
]
variable[starttime] assign[=] call[name[time].time, parameter[]]
<ast.Try object at 0x7da18eb57010> | keyword[def] identifier[_connect] ( identifier[cls] , identifier[url] , identifier[token] , identifier[timeout] , identifier[results] , identifier[i] , identifier[job_is_done_event] = keyword[None] ):
literal[string]
identifier[starttime] = identifier[time] . identifier[time] ()
keyword[try] :
identifier[device] = identifier[cls] ( identifier[baseurl] = identifier[url] , identifier[token] = identifier[token] , identifier[timeout] = identifier[timeout] )
identifier[runtime] = identifier[int] ( identifier[time] . identifier[time] ()- identifier[starttime] )
identifier[results] [ identifier[i] ]=( identifier[url] , identifier[token] , identifier[device] , identifier[runtime] )
keyword[if] identifier[X_PLEX_ENABLE_FAST_CONNECT] keyword[and] identifier[job_is_done_event] :
identifier[job_is_done_event] . identifier[set] ()
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[runtime] = identifier[int] ( identifier[time] . identifier[time] ()- identifier[starttime] )
identifier[log] . identifier[error] ( literal[string] , identifier[url] , identifier[err] )
identifier[results] [ identifier[i] ]=( identifier[url] , identifier[token] , keyword[None] , identifier[runtime] ) | def _connect(cls, url, token, timeout, results, i, job_is_done_event=None):
""" Connects to the specified cls with url and token. Stores the connection
information to results[i] in a threadsafe way.
Arguments:
cls: the class which is responsible for establishing connection, basically it's
:class:`~plexapi.client.PlexClient` or :class:`~plexapi.server.PlexServer`
url (str): url which should be passed as `baseurl` argument to cls.__init__()
token (str): authentication token which should be passed as `baseurl` argument to cls.__init__()
timeout (int): timeout which should be passed as `baseurl` argument to cls.__init__()
results (list): pre-filled list for results
i (int): index of current job, should be less than len(results)
job_is_done_event (:class:`~threading.Event`): is X_PLEX_ENABLE_FAST_CONNECT is True then the
event would be set as soon the connection is established
"""
starttime = time.time()
try:
device = cls(baseurl=url, token=token, timeout=timeout)
runtime = int(time.time() - starttime)
results[i] = (url, token, device, runtime)
if X_PLEX_ENABLE_FAST_CONNECT and job_is_done_event:
job_is_done_event.set() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as err:
runtime = int(time.time() - starttime)
log.error('%s: %s', url, err)
results[i] = (url, token, None, runtime) # depends on [control=['except'], data=['err']] |
def get_member_device(self, device):
"""Returns the given virtual media device object.
:param device: virtual media device to be queried
:returns virtual media device object.
"""
for vmedia_device in self.get_members():
if device in vmedia_device.media_types:
return vmedia_device | def function[get_member_device, parameter[self, device]]:
constant[Returns the given virtual media device object.
:param device: virtual media device to be queried
:returns virtual media device object.
]
for taget[name[vmedia_device]] in starred[call[name[self].get_members, parameter[]]] begin[:]
if compare[name[device] in name[vmedia_device].media_types] begin[:]
return[name[vmedia_device]] | keyword[def] identifier[get_member_device] ( identifier[self] , identifier[device] ):
literal[string]
keyword[for] identifier[vmedia_device] keyword[in] identifier[self] . identifier[get_members] ():
keyword[if] identifier[device] keyword[in] identifier[vmedia_device] . identifier[media_types] :
keyword[return] identifier[vmedia_device] | def get_member_device(self, device):
"""Returns the given virtual media device object.
:param device: virtual media device to be queried
:returns virtual media device object.
"""
for vmedia_device in self.get_members():
if device in vmedia_device.media_types:
return vmedia_device # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['vmedia_device']] |
def checkfiles(args):
"""Checks existence of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects()
# go through all files, check if they are available on the filesystem
good = []
bad = []
for f in r:
if os.path.exists(f.make_path(args.directory, args.extension)):
good.append(f)
else:
bad.append(f)
# report
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
if bad:
for f in bad:
output.write('Cannot find file "%s"\n' % (f.make_path(args.directory, args.extension),))
output.write('%d files (out of %d) were not found at "%s"\n' % \
(len(bad), len(r), args.directory))
return 0 | def function[checkfiles, parameter[args]]:
constant[Checks existence of files based on your criteria]
from relative_module[query] import module[Database]
variable[db] assign[=] call[name[Database], parameter[]]
variable[r] assign[=] call[name[db].objects, parameter[]]
variable[good] assign[=] list[[]]
variable[bad] assign[=] list[[]]
for taget[name[f]] in starred[name[r]] begin[:]
if call[name[os].path.exists, parameter[call[name[f].make_path, parameter[name[args].directory, name[args].extension]]]] begin[:]
call[name[good].append, parameter[name[f]]]
variable[output] assign[=] name[sys].stdout
if name[args].selftest begin[:]
from relative_module[bob.db.utils] import module[null]
variable[output] assign[=] call[name[null], parameter[]]
if name[bad] begin[:]
for taget[name[f]] in starred[name[bad]] begin[:]
call[name[output].write, parameter[binary_operation[constant[Cannot find file "%s"
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b164b940>]]]]]
call[name[output].write, parameter[binary_operation[constant[%d files (out of %d) were not found at "%s"
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b164b100>, <ast.Call object at 0x7da1b164aec0>, <ast.Attribute object at 0x7da1b164b1c0>]]]]]
return[constant[0]] | keyword[def] identifier[checkfiles] ( identifier[args] ):
literal[string]
keyword[from] . identifier[query] keyword[import] identifier[Database]
identifier[db] = identifier[Database] ()
identifier[r] = identifier[db] . identifier[objects] ()
identifier[good] =[]
identifier[bad] =[]
keyword[for] identifier[f] keyword[in] identifier[r] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[f] . identifier[make_path] ( identifier[args] . identifier[directory] , identifier[args] . identifier[extension] )):
identifier[good] . identifier[append] ( identifier[f] )
keyword[else] :
identifier[bad] . identifier[append] ( identifier[f] )
identifier[output] = identifier[sys] . identifier[stdout]
keyword[if] identifier[args] . identifier[selftest] :
keyword[from] identifier[bob] . identifier[db] . identifier[utils] keyword[import] identifier[null]
identifier[output] = identifier[null] ()
keyword[if] identifier[bad] :
keyword[for] identifier[f] keyword[in] identifier[bad] :
identifier[output] . identifier[write] ( literal[string] %( identifier[f] . identifier[make_path] ( identifier[args] . identifier[directory] , identifier[args] . identifier[extension] ),))
identifier[output] . identifier[write] ( literal[string] %( identifier[len] ( identifier[bad] ), identifier[len] ( identifier[r] ), identifier[args] . identifier[directory] ))
keyword[return] literal[int] | def checkfiles(args):
"""Checks existence of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects()
# go through all files, check if they are available on the filesystem
good = []
bad = []
for f in r:
if os.path.exists(f.make_path(args.directory, args.extension)):
good.append(f) # depends on [control=['if'], data=[]]
else:
bad.append(f) # depends on [control=['for'], data=['f']]
# report
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null() # depends on [control=['if'], data=[]]
if bad:
for f in bad:
output.write('Cannot find file "%s"\n' % (f.make_path(args.directory, args.extension),)) # depends on [control=['for'], data=['f']]
output.write('%d files (out of %d) were not found at "%s"\n' % (len(bad), len(r), args.directory)) # depends on [control=['if'], data=[]]
return 0 |
def _create_indexed_dictionary(self, group=0):
"""
Creates a dict of lists of Wells. Which way the labware is segmented
determines whether this is a dict of rows or dict of columns. If group
is 1, then it will collect wells that have the same alphabetic prefix
and therefore are considered to be in the same row. If group is 2, it
will collect wells that have the same numeric postfix and therefore
are considered to be in the same column.
"""
dict_list = defaultdict(list)
for index, well_obj in zip(self._ordering, self._wells):
dict_list[self._pattern.match(index).group(group)].append(well_obj)
return dict_list | def function[_create_indexed_dictionary, parameter[self, group]]:
constant[
Creates a dict of lists of Wells. Which way the labware is segmented
determines whether this is a dict of rows or dict of columns. If group
is 1, then it will collect wells that have the same alphabetic prefix
and therefore are considered to be in the same row. If group is 2, it
will collect wells that have the same numeric postfix and therefore
are considered to be in the same column.
]
variable[dict_list] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da18f09efb0>, <ast.Name object at 0x7da18f09f490>]]] in starred[call[name[zip], parameter[name[self]._ordering, name[self]._wells]]] begin[:]
call[call[name[dict_list]][call[call[name[self]._pattern.match, parameter[name[index]]].group, parameter[name[group]]]].append, parameter[name[well_obj]]]
return[name[dict_list]] | keyword[def] identifier[_create_indexed_dictionary] ( identifier[self] , identifier[group] = literal[int] ):
literal[string]
identifier[dict_list] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[index] , identifier[well_obj] keyword[in] identifier[zip] ( identifier[self] . identifier[_ordering] , identifier[self] . identifier[_wells] ):
identifier[dict_list] [ identifier[self] . identifier[_pattern] . identifier[match] ( identifier[index] ). identifier[group] ( identifier[group] )]. identifier[append] ( identifier[well_obj] )
keyword[return] identifier[dict_list] | def _create_indexed_dictionary(self, group=0):
"""
Creates a dict of lists of Wells. Which way the labware is segmented
determines whether this is a dict of rows or dict of columns. If group
is 1, then it will collect wells that have the same alphabetic prefix
and therefore are considered to be in the same row. If group is 2, it
will collect wells that have the same numeric postfix and therefore
are considered to be in the same column.
"""
dict_list = defaultdict(list)
for (index, well_obj) in zip(self._ordering, self._wells):
dict_list[self._pattern.match(index).group(group)].append(well_obj) # depends on [control=['for'], data=[]]
return dict_list |
def combinations(l):
"""Pure-Python implementation of itertools.combinations(l, 2)."""
result = []
for x in xrange(len(l) - 1):
ls = l[x + 1:]
for y in ls:
result.append((l[x], y))
return result | def function[combinations, parameter[l]]:
constant[Pure-Python implementation of itertools.combinations(l, 2).]
variable[result] assign[=] list[[]]
for taget[name[x]] in starred[call[name[xrange], parameter[binary_operation[call[name[len], parameter[name[l]]] - constant[1]]]]] begin[:]
variable[ls] assign[=] call[name[l]][<ast.Slice object at 0x7da1b020d270>]
for taget[name[y]] in starred[name[ls]] begin[:]
call[name[result].append, parameter[tuple[[<ast.Subscript object at 0x7da1b020ec50>, <ast.Name object at 0x7da1b020e4a0>]]]]
return[name[result]] | keyword[def] identifier[combinations] ( identifier[l] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[x] keyword[in] identifier[xrange] ( identifier[len] ( identifier[l] )- literal[int] ):
identifier[ls] = identifier[l] [ identifier[x] + literal[int] :]
keyword[for] identifier[y] keyword[in] identifier[ls] :
identifier[result] . identifier[append] (( identifier[l] [ identifier[x] ], identifier[y] ))
keyword[return] identifier[result] | def combinations(l):
"""Pure-Python implementation of itertools.combinations(l, 2)."""
result = []
for x in xrange(len(l) - 1):
ls = l[x + 1:]
for y in ls:
result.append((l[x], y)) # depends on [control=['for'], data=['y']] # depends on [control=['for'], data=['x']]
return result |
def recursive_file_count(files, item=None, checksum=False):
"""Given a filepath or list of filepaths, return the total number of files."""
if not isinstance(files, (list, set)):
files = [files]
total_files = 0
if checksum is True:
md5s = [f.get('md5') for f in item.files]
else:
md5s = list()
if isinstance(files, dict):
# make sure to use local filenames.
_files = files.values()
else:
if isinstance(files[0], tuple):
_files = dict(files).values()
else:
_files = files
for f in _files:
try:
is_dir = os.path.isdir(f)
except TypeError:
try:
f = f[0]
is_dir = os.path.isdir(f)
except (AttributeError, TypeError):
is_dir = False
if is_dir:
for x, _ in iter_directory(f):
lmd5 = get_md5(open(x, 'rb'))
if lmd5 in md5s:
continue
else:
total_files += 1
else:
try:
lmd5 = get_md5(open(f, 'rb'))
except TypeError:
# Support file-like objects.
lmd5 = get_md5(f)
if lmd5 in md5s:
continue
else:
total_files += 1
return total_files | def function[recursive_file_count, parameter[files, item, checksum]]:
constant[Given a filepath or list of filepaths, return the total number of files.]
if <ast.UnaryOp object at 0x7da18dc9a1a0> begin[:]
variable[files] assign[=] list[[<ast.Name object at 0x7da18dc9a4a0>]]
variable[total_files] assign[=] constant[0]
if compare[name[checksum] is constant[True]] begin[:]
variable[md5s] assign[=] <ast.ListComp object at 0x7da18dc9ab00>
if call[name[isinstance], parameter[name[files], name[dict]]] begin[:]
variable[_files] assign[=] call[name[files].values, parameter[]]
for taget[name[f]] in starred[name[_files]] begin[:]
<ast.Try object at 0x7da18dc9aaa0>
if name[is_dir] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18dc98c40>, <ast.Name object at 0x7da18dc98e50>]]] in starred[call[name[iter_directory], parameter[name[f]]]] begin[:]
variable[lmd5] assign[=] call[name[get_md5], parameter[call[name[open], parameter[name[x], constant[rb]]]]]
if compare[name[lmd5] in name[md5s]] begin[:]
continue
return[name[total_files]] | keyword[def] identifier[recursive_file_count] ( identifier[files] , identifier[item] = keyword[None] , identifier[checksum] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[files] ,( identifier[list] , identifier[set] )):
identifier[files] =[ identifier[files] ]
identifier[total_files] = literal[int]
keyword[if] identifier[checksum] keyword[is] keyword[True] :
identifier[md5s] =[ identifier[f] . identifier[get] ( literal[string] ) keyword[for] identifier[f] keyword[in] identifier[item] . identifier[files] ]
keyword[else] :
identifier[md5s] = identifier[list] ()
keyword[if] identifier[isinstance] ( identifier[files] , identifier[dict] ):
identifier[_files] = identifier[files] . identifier[values] ()
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[files] [ literal[int] ], identifier[tuple] ):
identifier[_files] = identifier[dict] ( identifier[files] ). identifier[values] ()
keyword[else] :
identifier[_files] = identifier[files]
keyword[for] identifier[f] keyword[in] identifier[_files] :
keyword[try] :
identifier[is_dir] = identifier[os] . identifier[path] . identifier[isdir] ( identifier[f] )
keyword[except] identifier[TypeError] :
keyword[try] :
identifier[f] = identifier[f] [ literal[int] ]
identifier[is_dir] = identifier[os] . identifier[path] . identifier[isdir] ( identifier[f] )
keyword[except] ( identifier[AttributeError] , identifier[TypeError] ):
identifier[is_dir] = keyword[False]
keyword[if] identifier[is_dir] :
keyword[for] identifier[x] , identifier[_] keyword[in] identifier[iter_directory] ( identifier[f] ):
identifier[lmd5] = identifier[get_md5] ( identifier[open] ( identifier[x] , literal[string] ))
keyword[if] identifier[lmd5] keyword[in] identifier[md5s] :
keyword[continue]
keyword[else] :
identifier[total_files] += literal[int]
keyword[else] :
keyword[try] :
identifier[lmd5] = identifier[get_md5] ( identifier[open] ( identifier[f] , literal[string] ))
keyword[except] identifier[TypeError] :
identifier[lmd5] = identifier[get_md5] ( identifier[f] )
keyword[if] identifier[lmd5] keyword[in] identifier[md5s] :
keyword[continue]
keyword[else] :
identifier[total_files] += literal[int]
keyword[return] identifier[total_files] | def recursive_file_count(files, item=None, checksum=False):
"""Given a filepath or list of filepaths, return the total number of files."""
if not isinstance(files, (list, set)):
files = [files] # depends on [control=['if'], data=[]]
total_files = 0
if checksum is True:
md5s = [f.get('md5') for f in item.files] # depends on [control=['if'], data=[]]
else:
md5s = list()
if isinstance(files, dict):
# make sure to use local filenames.
_files = files.values() # depends on [control=['if'], data=[]]
elif isinstance(files[0], tuple):
_files = dict(files).values() # depends on [control=['if'], data=[]]
else:
_files = files
for f in _files:
try:
is_dir = os.path.isdir(f) # depends on [control=['try'], data=[]]
except TypeError:
try:
f = f[0]
is_dir = os.path.isdir(f) # depends on [control=['try'], data=[]]
except (AttributeError, TypeError):
is_dir = False # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
if is_dir:
for (x, _) in iter_directory(f):
lmd5 = get_md5(open(x, 'rb'))
if lmd5 in md5s:
continue # depends on [control=['if'], data=[]]
else:
total_files += 1 # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
try:
lmd5 = get_md5(open(f, 'rb')) # depends on [control=['try'], data=[]]
except TypeError:
# Support file-like objects.
lmd5 = get_md5(f) # depends on [control=['except'], data=[]]
if lmd5 in md5s:
continue # depends on [control=['if'], data=[]]
else:
total_files += 1 # depends on [control=['for'], data=['f']]
return total_files |
def states(self):
"""Returns a set containing the enabled states."""
state_list = []
for state in States:
if state.value & self._states != 0:
state_list.append(state)
if (self._flashing_states & States.FILTER) != 0:
state_list.append(States.FILTER_LOW_SPEED)
return state_list | def function[states, parameter[self]]:
constant[Returns a set containing the enabled states.]
variable[state_list] assign[=] list[[]]
for taget[name[state]] in starred[name[States]] begin[:]
if compare[binary_operation[name[state].value <ast.BitAnd object at 0x7da2590d6b60> name[self]._states] not_equal[!=] constant[0]] begin[:]
call[name[state_list].append, parameter[name[state]]]
if compare[binary_operation[name[self]._flashing_states <ast.BitAnd object at 0x7da2590d6b60> name[States].FILTER] not_equal[!=] constant[0]] begin[:]
call[name[state_list].append, parameter[name[States].FILTER_LOW_SPEED]]
return[name[state_list]] | keyword[def] identifier[states] ( identifier[self] ):
literal[string]
identifier[state_list] =[]
keyword[for] identifier[state] keyword[in] identifier[States] :
keyword[if] identifier[state] . identifier[value] & identifier[self] . identifier[_states] != literal[int] :
identifier[state_list] . identifier[append] ( identifier[state] )
keyword[if] ( identifier[self] . identifier[_flashing_states] & identifier[States] . identifier[FILTER] )!= literal[int] :
identifier[state_list] . identifier[append] ( identifier[States] . identifier[FILTER_LOW_SPEED] )
keyword[return] identifier[state_list] | def states(self):
"""Returns a set containing the enabled states."""
state_list = []
for state in States:
if state.value & self._states != 0:
state_list.append(state) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['state']]
if self._flashing_states & States.FILTER != 0:
state_list.append(States.FILTER_LOW_SPEED) # depends on [control=['if'], data=[]]
return state_list |
def get_image(self, image_id, **kwargs):
"""Get details about an image.
:param int image: The ID of the image.
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
"""
if 'mask' not in kwargs:
kwargs['mask'] = IMAGE_MASK
return self.vgbdtg.getObject(id=image_id, **kwargs) | def function[get_image, parameter[self, image_id]]:
constant[Get details about an image.
:param int image: The ID of the image.
:param dict \*\*kwargs: response-level options (mask, limit, etc.)
]
if compare[constant[mask] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[mask]] assign[=] name[IMAGE_MASK]
return[call[name[self].vgbdtg.getObject, parameter[]]] | keyword[def] identifier[get_image] ( identifier[self] , identifier[image_id] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[IMAGE_MASK]
keyword[return] identifier[self] . identifier[vgbdtg] . identifier[getObject] ( identifier[id] = identifier[image_id] ,** identifier[kwargs] ) | def get_image(self, image_id, **kwargs):
"""Get details about an image.
:param int image: The ID of the image.
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
"""
if 'mask' not in kwargs:
kwargs['mask'] = IMAGE_MASK # depends on [control=['if'], data=['kwargs']]
return self.vgbdtg.getObject(id=image_id, **kwargs) |
def init_conv_weight(layer):
'''initilize conv layer weight.
'''
n_filters = layer.filters
filter_shape = (layer.kernel_size,) * get_n_dim(layer)
weight = np.zeros((n_filters, n_filters) + filter_shape)
center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
for i in range(n_filters):
filter_weight = np.zeros((n_filters,) + filter_shape)
index = (i,) + center
filter_weight[index] = 1
weight[i, ...] = filter_weight
bias = np.zeros(n_filters)
layer.set_weights(
(add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
) | def function[init_conv_weight, parameter[layer]]:
constant[initilize conv layer weight.
]
variable[n_filters] assign[=] name[layer].filters
variable[filter_shape] assign[=] binary_operation[tuple[[<ast.Attribute object at 0x7da2054a4be0>]] * call[name[get_n_dim], parameter[name[layer]]]]
variable[weight] assign[=] call[name[np].zeros, parameter[binary_operation[tuple[[<ast.Name object at 0x7da2054a4610>, <ast.Name object at 0x7da2054a6a70>]] + name[filter_shape]]]]
variable[center] assign[=] call[name[tuple], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da2054a6ef0>, name[filter_shape]]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[n_filters]]]] begin[:]
variable[filter_weight] assign[=] call[name[np].zeros, parameter[binary_operation[tuple[[<ast.Name object at 0x7da2054a59f0>]] + name[filter_shape]]]]
variable[index] assign[=] binary_operation[tuple[[<ast.Name object at 0x7da2054a45b0>]] + name[center]]
call[name[filter_weight]][name[index]] assign[=] constant[1]
call[name[weight]][tuple[[<ast.Name object at 0x7da2054a6260>, <ast.Constant object at 0x7da2054a7ee0>]]] assign[=] name[filter_weight]
variable[bias] assign[=] call[name[np].zeros, parameter[name[n_filters]]]
call[name[layer].set_weights, parameter[tuple[[<ast.Call object at 0x7da2054a5de0>, <ast.Call object at 0x7da2054a41c0>]]]] | keyword[def] identifier[init_conv_weight] ( identifier[layer] ):
literal[string]
identifier[n_filters] = identifier[layer] . identifier[filters]
identifier[filter_shape] =( identifier[layer] . identifier[kernel_size] ,)* identifier[get_n_dim] ( identifier[layer] )
identifier[weight] = identifier[np] . identifier[zeros] (( identifier[n_filters] , identifier[n_filters] )+ identifier[filter_shape] )
identifier[center] = identifier[tuple] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[int] (( identifier[x] - literal[int] )/ literal[int] ), identifier[filter_shape] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_filters] ):
identifier[filter_weight] = identifier[np] . identifier[zeros] (( identifier[n_filters] ,)+ identifier[filter_shape] )
identifier[index] =( identifier[i] ,)+ identifier[center]
identifier[filter_weight] [ identifier[index] ]= literal[int]
identifier[weight] [ identifier[i] ,...]= identifier[filter_weight]
identifier[bias] = identifier[np] . identifier[zeros] ( identifier[n_filters] )
identifier[layer] . identifier[set_weights] (
( identifier[add_noise] ( identifier[weight] , identifier[np] . identifier[array] ([ literal[int] , literal[int] ])), identifier[add_noise] ( identifier[bias] , identifier[np] . identifier[array] ([ literal[int] , literal[int] ])))
) | def init_conv_weight(layer):
"""initilize conv layer weight.
"""
n_filters = layer.filters
filter_shape = (layer.kernel_size,) * get_n_dim(layer)
weight = np.zeros((n_filters, n_filters) + filter_shape)
center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
for i in range(n_filters):
filter_weight = np.zeros((n_filters,) + filter_shape)
index = (i,) + center
filter_weight[index] = 1
weight[i, ...] = filter_weight # depends on [control=['for'], data=['i']]
bias = np.zeros(n_filters)
layer.set_weights((add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))) |
def _shouldOwn(self, param):
"""
Validates that the input param belongs to this Params instance.
"""
if not (self.uid == param.parent and self.hasParam(param.name)):
raise ValueError("Param %r does not belong to %r." % (param, self)) | def function[_shouldOwn, parameter[self, param]]:
constant[
Validates that the input param belongs to this Params instance.
]
if <ast.UnaryOp object at 0x7da20e955ed0> begin[:]
<ast.Raise object at 0x7da18f812650> | keyword[def] identifier[_shouldOwn] ( identifier[self] , identifier[param] ):
literal[string]
keyword[if] keyword[not] ( identifier[self] . identifier[uid] == identifier[param] . identifier[parent] keyword[and] identifier[self] . identifier[hasParam] ( identifier[param] . identifier[name] )):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[param] , identifier[self] )) | def _shouldOwn(self, param):
"""
Validates that the input param belongs to this Params instance.
"""
if not (self.uid == param.parent and self.hasParam(param.name)):
raise ValueError('Param %r does not belong to %r.' % (param, self)) # depends on [control=['if'], data=[]] |
def update_account(self, account_id, body, **kwargs): # noqa: E501
"""Update attributes of an existing account. # noqa: E501
An endpoint for updating an account. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id} -d '{\"phone_number\": \"12345678\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_account(account_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: The ID of the account to be updated. (required)
:param AccountUpdateRootReq body: Details of the account to be updated. (required)
:return: AccountInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_account_with_http_info(account_id, body, **kwargs) # noqa: E501
else:
(data) = self.update_account_with_http_info(account_id, body, **kwargs) # noqa: E501
return data | def function[update_account, parameter[self, account_id, body]]:
constant[Update attributes of an existing account. # noqa: E501
An endpoint for updating an account. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id} -d '{"phone_number": "12345678"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_account(account_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: The ID of the account to be updated. (required)
:param AccountUpdateRootReq body: Details of the account to be updated. (required)
:return: AccountInfo
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:]
return[call[name[self].update_account_with_http_info, parameter[name[account_id], name[body]]]] | keyword[def] identifier[update_account] ( identifier[self] , identifier[account_id] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[update_account_with_http_info] ( identifier[account_id] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[update_account_with_http_info] ( identifier[account_id] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def update_account(self, account_id, body, **kwargs): # noqa: E501
'Update attributes of an existing account. # noqa: E501\n\n An endpoint for updating an account. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id} -d \'{"phone_number": "12345678"}\' -H \'content-type: application/json\' -H \'Authorization: Bearer API_KEY\'` # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.update_account(account_id, body, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param str account_id: The ID of the account to be updated. (required)\n :param AccountUpdateRootReq body: Details of the account to be updated. (required)\n :return: AccountInfo\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_account_with_http_info(account_id, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.update_account_with_http_info(account_id, body, **kwargs) # noqa: E501
return data |
def modulation_type(self, value: int):
"""
0 - "ASK", 1 - "FSK", 2 - "PSK", 3 - "APSK (QAM)"
:param value:
:return:
"""
if self.__modulation_type != value:
self.__modulation_type = value
self._qad = None
self.modulation_type_changed.emit(self.__modulation_type)
if not self.block_protocol_update:
self.protocol_needs_update.emit() | def function[modulation_type, parameter[self, value]]:
constant[
0 - "ASK", 1 - "FSK", 2 - "PSK", 3 - "APSK (QAM)"
:param value:
:return:
]
if compare[name[self].__modulation_type not_equal[!=] name[value]] begin[:]
name[self].__modulation_type assign[=] name[value]
name[self]._qad assign[=] constant[None]
call[name[self].modulation_type_changed.emit, parameter[name[self].__modulation_type]]
if <ast.UnaryOp object at 0x7da1b1c3d240> begin[:]
call[name[self].protocol_needs_update.emit, parameter[]] | keyword[def] identifier[modulation_type] ( identifier[self] , identifier[value] : identifier[int] ):
literal[string]
keyword[if] identifier[self] . identifier[__modulation_type] != identifier[value] :
identifier[self] . identifier[__modulation_type] = identifier[value]
identifier[self] . identifier[_qad] = keyword[None]
identifier[self] . identifier[modulation_type_changed] . identifier[emit] ( identifier[self] . identifier[__modulation_type] )
keyword[if] keyword[not] identifier[self] . identifier[block_protocol_update] :
identifier[self] . identifier[protocol_needs_update] . identifier[emit] () | def modulation_type(self, value: int):
"""
0 - "ASK", 1 - "FSK", 2 - "PSK", 3 - "APSK (QAM)"
:param value:
:return:
"""
if self.__modulation_type != value:
self.__modulation_type = value
self._qad = None
self.modulation_type_changed.emit(self.__modulation_type)
if not self.block_protocol_update:
self.protocol_needs_update.emit() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['value']] |
def get_peb(self):
"""
Returns a copy of the PEB.
To dereference pointers in it call L{Process.read_structure}.
@rtype: L{win32.PEB}
@return: PEB structure.
@raise WindowsError: An exception is raised on error.
"""
self.get_handle( win32.PROCESS_VM_READ |
win32.PROCESS_QUERY_INFORMATION )
return self.read_structure(self.get_peb_address(), win32.PEB) | def function[get_peb, parameter[self]]:
constant[
Returns a copy of the PEB.
To dereference pointers in it call L{Process.read_structure}.
@rtype: L{win32.PEB}
@return: PEB structure.
@raise WindowsError: An exception is raised on error.
]
call[name[self].get_handle, parameter[binary_operation[name[win32].PROCESS_VM_READ <ast.BitOr object at 0x7da2590d6aa0> name[win32].PROCESS_QUERY_INFORMATION]]]
return[call[name[self].read_structure, parameter[call[name[self].get_peb_address, parameter[]], name[win32].PEB]]] | keyword[def] identifier[get_peb] ( identifier[self] ):
literal[string]
identifier[self] . identifier[get_handle] ( identifier[win32] . identifier[PROCESS_VM_READ] |
identifier[win32] . identifier[PROCESS_QUERY_INFORMATION] )
keyword[return] identifier[self] . identifier[read_structure] ( identifier[self] . identifier[get_peb_address] (), identifier[win32] . identifier[PEB] ) | def get_peb(self):
"""
Returns a copy of the PEB.
To dereference pointers in it call L{Process.read_structure}.
@rtype: L{win32.PEB}
@return: PEB structure.
@raise WindowsError: An exception is raised on error.
"""
self.get_handle(win32.PROCESS_VM_READ | win32.PROCESS_QUERY_INFORMATION)
return self.read_structure(self.get_peb_address(), win32.PEB) |
def setWidth(self, typeID, width):
"""setWidth(string, double) -> None
Sets the width in m of vehicles of this type.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_WIDTH, typeID, width) | def function[setWidth, parameter[self, typeID, width]]:
constant[setWidth(string, double) -> None
Sets the width in m of vehicles of this type.
]
call[name[self]._connection._sendDoubleCmd, parameter[name[tc].CMD_SET_VEHICLETYPE_VARIABLE, name[tc].VAR_WIDTH, name[typeID], name[width]]] | keyword[def] identifier[setWidth] ( identifier[self] , identifier[typeID] , identifier[width] ):
literal[string]
identifier[self] . identifier[_connection] . identifier[_sendDoubleCmd] (
identifier[tc] . identifier[CMD_SET_VEHICLETYPE_VARIABLE] , identifier[tc] . identifier[VAR_WIDTH] , identifier[typeID] , identifier[width] ) | def setWidth(self, typeID, width):
"""setWidth(string, double) -> None
Sets the width in m of vehicles of this type.
"""
self._connection._sendDoubleCmd(tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_WIDTH, typeID, width) |
def is_running(self, submissionid, user_check=True):
""" Tells if a submission is running/in queue """
submission = self.get_submission(submissionid, user_check)
return submission["status"] == "waiting" | def function[is_running, parameter[self, submissionid, user_check]]:
constant[ Tells if a submission is running/in queue ]
variable[submission] assign[=] call[name[self].get_submission, parameter[name[submissionid], name[user_check]]]
return[compare[call[name[submission]][constant[status]] equal[==] constant[waiting]]] | keyword[def] identifier[is_running] ( identifier[self] , identifier[submissionid] , identifier[user_check] = keyword[True] ):
literal[string]
identifier[submission] = identifier[self] . identifier[get_submission] ( identifier[submissionid] , identifier[user_check] )
keyword[return] identifier[submission] [ literal[string] ]== literal[string] | def is_running(self, submissionid, user_check=True):
""" Tells if a submission is running/in queue """
submission = self.get_submission(submissionid, user_check)
return submission['status'] == 'waiting' |
def _Vapor_Density(cls, T):
"""Auxiliary equation for the density of saturated vapor
Parameters
----------
T : float
Temperature, [K]
Returns
-------
rho : float
Saturated vapor density, [kg/m³]
References
----------
IAPWS, Revised Supplementary Release on Saturation Properties of
Ordinary Water Substance September 1992,
http://www.iapws.org/relguide/Supp-sat.html, Eq.3
"""
eq = cls._rhoG["eq"]
Tita = 1-T/cls.Tc
if eq == 4:
Tita = Tita**(1./3)
suma = 0
for n, x in zip(cls._rhoG["ao"], cls._rhoG["exp"]):
suma += n*Tita**x
Pr = exp(suma)
rho = Pr*cls.rhoc
return rho | def function[_Vapor_Density, parameter[cls, T]]:
constant[Auxiliary equation for the density of saturated vapor
Parameters
----------
T : float
Temperature, [K]
Returns
-------
rho : float
Saturated vapor density, [kg/m³]
References
----------
IAPWS, Revised Supplementary Release on Saturation Properties of
Ordinary Water Substance September 1992,
http://www.iapws.org/relguide/Supp-sat.html, Eq.3
]
variable[eq] assign[=] call[name[cls]._rhoG][constant[eq]]
variable[Tita] assign[=] binary_operation[constant[1] - binary_operation[name[T] / name[cls].Tc]]
if compare[name[eq] equal[==] constant[4]] begin[:]
variable[Tita] assign[=] binary_operation[name[Tita] ** binary_operation[constant[1.0] / constant[3]]]
variable[suma] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da2044c2a70>, <ast.Name object at 0x7da2044c3820>]]] in starred[call[name[zip], parameter[call[name[cls]._rhoG][constant[ao]], call[name[cls]._rhoG][constant[exp]]]]] begin[:]
<ast.AugAssign object at 0x7da2044c3580>
variable[Pr] assign[=] call[name[exp], parameter[name[suma]]]
variable[rho] assign[=] binary_operation[name[Pr] * name[cls].rhoc]
return[name[rho]] | keyword[def] identifier[_Vapor_Density] ( identifier[cls] , identifier[T] ):
literal[string]
identifier[eq] = identifier[cls] . identifier[_rhoG] [ literal[string] ]
identifier[Tita] = literal[int] - identifier[T] / identifier[cls] . identifier[Tc]
keyword[if] identifier[eq] == literal[int] :
identifier[Tita] = identifier[Tita] **( literal[int] / literal[int] )
identifier[suma] = literal[int]
keyword[for] identifier[n] , identifier[x] keyword[in] identifier[zip] ( identifier[cls] . identifier[_rhoG] [ literal[string] ], identifier[cls] . identifier[_rhoG] [ literal[string] ]):
identifier[suma] += identifier[n] * identifier[Tita] ** identifier[x]
identifier[Pr] = identifier[exp] ( identifier[suma] )
identifier[rho] = identifier[Pr] * identifier[cls] . identifier[rhoc]
keyword[return] identifier[rho] | def _Vapor_Density(cls, T):
"""Auxiliary equation for the density of saturated vapor
Parameters
----------
T : float
Temperature, [K]
Returns
-------
rho : float
Saturated vapor density, [kg/m³]
References
----------
IAPWS, Revised Supplementary Release on Saturation Properties of
Ordinary Water Substance September 1992,
http://www.iapws.org/relguide/Supp-sat.html, Eq.3
"""
eq = cls._rhoG['eq']
Tita = 1 - T / cls.Tc
if eq == 4:
Tita = Tita ** (1.0 / 3) # depends on [control=['if'], data=[]]
suma = 0
for (n, x) in zip(cls._rhoG['ao'], cls._rhoG['exp']):
suma += n * Tita ** x # depends on [control=['for'], data=[]]
Pr = exp(suma)
rho = Pr * cls.rhoc
return rho |
def _set_image(self, image, nodata=None):
"""
Set self._image.
:param image: supported: np.ma.array, np.array, TODO: PIL image
:param nodata: if provided image is array (not masked array), treat pixels with value=nodata as nodata
:return:
"""
# convert to masked array:
if isinstance(image, np.ma.core.MaskedArray):
masked = image
elif isinstance(image, np.core.ndarray):
masked = self._build_masked_array(image, nodata)
else:
raise GeoRaster2NotImplementedError('only ndarray or masked array supported, got %s' % type(image))
# make sure array is 3d:
if len(masked.shape) == 3:
self._image = masked
elif len(masked.shape) == 2:
self._image = masked[np.newaxis, :, :]
else:
raise GeoRaster2Error('expected 2d or 3d image, got shape=%s' % masked.shape)
# update shape
if self._shape is None:
self._set_shape(self._image.shape)
self._image_after_load_validations()
if self._image_readonly:
self._image.setflags(write=0) | def function[_set_image, parameter[self, image, nodata]]:
constant[
Set self._image.
:param image: supported: np.ma.array, np.array, TODO: PIL image
:param nodata: if provided image is array (not masked array), treat pixels with value=nodata as nodata
:return:
]
if call[name[isinstance], parameter[name[image], name[np].ma.core.MaskedArray]] begin[:]
variable[masked] assign[=] name[image]
if compare[call[name[len], parameter[name[masked].shape]] equal[==] constant[3]] begin[:]
name[self]._image assign[=] name[masked]
if compare[name[self]._shape is constant[None]] begin[:]
call[name[self]._set_shape, parameter[name[self]._image.shape]]
call[name[self]._image_after_load_validations, parameter[]]
if name[self]._image_readonly begin[:]
call[name[self]._image.setflags, parameter[]] | keyword[def] identifier[_set_image] ( identifier[self] , identifier[image] , identifier[nodata] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[image] , identifier[np] . identifier[ma] . identifier[core] . identifier[MaskedArray] ):
identifier[masked] = identifier[image]
keyword[elif] identifier[isinstance] ( identifier[image] , identifier[np] . identifier[core] . identifier[ndarray] ):
identifier[masked] = identifier[self] . identifier[_build_masked_array] ( identifier[image] , identifier[nodata] )
keyword[else] :
keyword[raise] identifier[GeoRaster2NotImplementedError] ( literal[string] % identifier[type] ( identifier[image] ))
keyword[if] identifier[len] ( identifier[masked] . identifier[shape] )== literal[int] :
identifier[self] . identifier[_image] = identifier[masked]
keyword[elif] identifier[len] ( identifier[masked] . identifier[shape] )== literal[int] :
identifier[self] . identifier[_image] = identifier[masked] [ identifier[np] . identifier[newaxis] ,:,:]
keyword[else] :
keyword[raise] identifier[GeoRaster2Error] ( literal[string] % identifier[masked] . identifier[shape] )
keyword[if] identifier[self] . identifier[_shape] keyword[is] keyword[None] :
identifier[self] . identifier[_set_shape] ( identifier[self] . identifier[_image] . identifier[shape] )
identifier[self] . identifier[_image_after_load_validations] ()
keyword[if] identifier[self] . identifier[_image_readonly] :
identifier[self] . identifier[_image] . identifier[setflags] ( identifier[write] = literal[int] ) | def _set_image(self, image, nodata=None):
"""
Set self._image.
:param image: supported: np.ma.array, np.array, TODO: PIL image
:param nodata: if provided image is array (not masked array), treat pixels with value=nodata as nodata
:return:
"""
# convert to masked array:
if isinstance(image, np.ma.core.MaskedArray):
masked = image # depends on [control=['if'], data=[]]
elif isinstance(image, np.core.ndarray):
masked = self._build_masked_array(image, nodata) # depends on [control=['if'], data=[]]
else:
raise GeoRaster2NotImplementedError('only ndarray or masked array supported, got %s' % type(image))
# make sure array is 3d:
if len(masked.shape) == 3:
self._image = masked # depends on [control=['if'], data=[]]
elif len(masked.shape) == 2:
self._image = masked[np.newaxis, :, :] # depends on [control=['if'], data=[]]
else:
raise GeoRaster2Error('expected 2d or 3d image, got shape=%s' % masked.shape)
# update shape
if self._shape is None:
self._set_shape(self._image.shape) # depends on [control=['if'], data=[]]
self._image_after_load_validations()
if self._image_readonly:
self._image.setflags(write=0) # depends on [control=['if'], data=[]] |
def _pfp__add_child(self, name, child, stream=None):
"""Add a child to the Union field
:name: The name of the child
:child: A :class:`.Field` instance
:returns: The resulting field
"""
res = super(Union, self)._pfp__add_child(name, child)
self._pfp__buff.seek(0, 0)
child._pfp__build(stream=self._pfp__buff)
size = len(self._pfp__buff.getvalue())
self._pfp__buff.seek(0, 0)
if stream is not None:
curr_pos = stream.tell()
stream.seek(curr_pos-size, 0)
return res | def function[_pfp__add_child, parameter[self, name, child, stream]]:
constant[Add a child to the Union field
:name: The name of the child
:child: A :class:`.Field` instance
:returns: The resulting field
]
variable[res] assign[=] call[call[name[super], parameter[name[Union], name[self]]]._pfp__add_child, parameter[name[name], name[child]]]
call[name[self]._pfp__buff.seek, parameter[constant[0], constant[0]]]
call[name[child]._pfp__build, parameter[]]
variable[size] assign[=] call[name[len], parameter[call[name[self]._pfp__buff.getvalue, parameter[]]]]
call[name[self]._pfp__buff.seek, parameter[constant[0], constant[0]]]
if compare[name[stream] is_not constant[None]] begin[:]
variable[curr_pos] assign[=] call[name[stream].tell, parameter[]]
call[name[stream].seek, parameter[binary_operation[name[curr_pos] - name[size]], constant[0]]]
return[name[res]] | keyword[def] identifier[_pfp__add_child] ( identifier[self] , identifier[name] , identifier[child] , identifier[stream] = keyword[None] ):
literal[string]
identifier[res] = identifier[super] ( identifier[Union] , identifier[self] ). identifier[_pfp__add_child] ( identifier[name] , identifier[child] )
identifier[self] . identifier[_pfp__buff] . identifier[seek] ( literal[int] , literal[int] )
identifier[child] . identifier[_pfp__build] ( identifier[stream] = identifier[self] . identifier[_pfp__buff] )
identifier[size] = identifier[len] ( identifier[self] . identifier[_pfp__buff] . identifier[getvalue] ())
identifier[self] . identifier[_pfp__buff] . identifier[seek] ( literal[int] , literal[int] )
keyword[if] identifier[stream] keyword[is] keyword[not] keyword[None] :
identifier[curr_pos] = identifier[stream] . identifier[tell] ()
identifier[stream] . identifier[seek] ( identifier[curr_pos] - identifier[size] , literal[int] )
keyword[return] identifier[res] | def _pfp__add_child(self, name, child, stream=None):
"""Add a child to the Union field
:name: The name of the child
:child: A :class:`.Field` instance
:returns: The resulting field
"""
res = super(Union, self)._pfp__add_child(name, child)
self._pfp__buff.seek(0, 0)
child._pfp__build(stream=self._pfp__buff)
size = len(self._pfp__buff.getvalue())
self._pfp__buff.seek(0, 0)
if stream is not None:
curr_pos = stream.tell()
stream.seek(curr_pos - size, 0) # depends on [control=['if'], data=['stream']]
return res |
def _prepare_summary_table(rows):
"""Create a new table that is a summary of the input rows.
All with the same (job-name or job-id, status) go together.
Args:
rows: the input rows, a list of dictionaries.
Returns:
A new row set of summary information.
"""
if not rows:
return []
# We either group on the job-name (if present) or fall back to the job-id
key_field = 'job-name'
if key_field not in rows[0]:
key_field = 'job-id'
# Group each of the rows based on (job-name or job-id, status)
grouped = collections.defaultdict(lambda: collections.defaultdict(lambda: []))
for row in rows:
grouped[row.get(key_field, '')][row.get('status', '')] += [row]
# Now that we have the rows grouped, create a summary table.
# Use the original table as the driver in order to preserve the order.
new_rows = []
for job_key in sorted(grouped.keys()):
group = grouped.get(job_key, None)
canonical_status = ['RUNNING', 'SUCCESS', 'FAILURE', 'CANCEL']
# Written this way to ensure that if somehow a new status is introduced,
# it shows up in our output.
for status in canonical_status + sorted(group.keys()):
if status not in group:
continue
task_count = len(group[status])
del group[status]
if task_count:
summary_row = collections.OrderedDict()
summary_row[key_field] = job_key
summary_row['status'] = status
summary_row['task-count'] = task_count
new_rows.append(summary_row)
return new_rows | def function[_prepare_summary_table, parameter[rows]]:
constant[Create a new table that is a summary of the input rows.
All with the same (job-name or job-id, status) go together.
Args:
rows: the input rows, a list of dictionaries.
Returns:
A new row set of summary information.
]
if <ast.UnaryOp object at 0x7da1b00528f0> begin[:]
return[list[[]]]
variable[key_field] assign[=] constant[job-name]
if compare[name[key_field] <ast.NotIn object at 0x7da2590d7190> call[name[rows]][constant[0]]] begin[:]
variable[key_field] assign[=] constant[job-id]
variable[grouped] assign[=] call[name[collections].defaultdict, parameter[<ast.Lambda object at 0x7da1b008abc0>]]
for taget[name[row]] in starred[name[rows]] begin[:]
<ast.AugAssign object at 0x7da1b0088280>
variable[new_rows] assign[=] list[[]]
for taget[name[job_key]] in starred[call[name[sorted], parameter[call[name[grouped].keys, parameter[]]]]] begin[:]
variable[group] assign[=] call[name[grouped].get, parameter[name[job_key], constant[None]]]
variable[canonical_status] assign[=] list[[<ast.Constant object at 0x7da1b0088220>, <ast.Constant object at 0x7da1b0088490>, <ast.Constant object at 0x7da1b0088250>, <ast.Constant object at 0x7da1b008bee0>]]
for taget[name[status]] in starred[binary_operation[name[canonical_status] + call[name[sorted], parameter[call[name[group].keys, parameter[]]]]]] begin[:]
if compare[name[status] <ast.NotIn object at 0x7da2590d7190> name[group]] begin[:]
continue
variable[task_count] assign[=] call[name[len], parameter[call[name[group]][name[status]]]]
<ast.Delete object at 0x7da1b008bf40>
if name[task_count] begin[:]
variable[summary_row] assign[=] call[name[collections].OrderedDict, parameter[]]
call[name[summary_row]][name[key_field]] assign[=] name[job_key]
call[name[summary_row]][constant[status]] assign[=] name[status]
call[name[summary_row]][constant[task-count]] assign[=] name[task_count]
call[name[new_rows].append, parameter[name[summary_row]]]
return[name[new_rows]] | keyword[def] identifier[_prepare_summary_table] ( identifier[rows] ):
literal[string]
keyword[if] keyword[not] identifier[rows] :
keyword[return] []
identifier[key_field] = literal[string]
keyword[if] identifier[key_field] keyword[not] keyword[in] identifier[rows] [ literal[int] ]:
identifier[key_field] = literal[string]
identifier[grouped] = identifier[collections] . identifier[defaultdict] ( keyword[lambda] : identifier[collections] . identifier[defaultdict] ( keyword[lambda] :[]))
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[grouped] [ identifier[row] . identifier[get] ( identifier[key_field] , literal[string] )][ identifier[row] . identifier[get] ( literal[string] , literal[string] )]+=[ identifier[row] ]
identifier[new_rows] =[]
keyword[for] identifier[job_key] keyword[in] identifier[sorted] ( identifier[grouped] . identifier[keys] ()):
identifier[group] = identifier[grouped] . identifier[get] ( identifier[job_key] , keyword[None] )
identifier[canonical_status] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[status] keyword[in] identifier[canonical_status] + identifier[sorted] ( identifier[group] . identifier[keys] ()):
keyword[if] identifier[status] keyword[not] keyword[in] identifier[group] :
keyword[continue]
identifier[task_count] = identifier[len] ( identifier[group] [ identifier[status] ])
keyword[del] identifier[group] [ identifier[status] ]
keyword[if] identifier[task_count] :
identifier[summary_row] = identifier[collections] . identifier[OrderedDict] ()
identifier[summary_row] [ identifier[key_field] ]= identifier[job_key]
identifier[summary_row] [ literal[string] ]= identifier[status]
identifier[summary_row] [ literal[string] ]= identifier[task_count]
identifier[new_rows] . identifier[append] ( identifier[summary_row] )
keyword[return] identifier[new_rows] | def _prepare_summary_table(rows):
"""Create a new table that is a summary of the input rows.
All with the same (job-name or job-id, status) go together.
Args:
rows: the input rows, a list of dictionaries.
Returns:
A new row set of summary information.
"""
if not rows:
return [] # depends on [control=['if'], data=[]]
# We either group on the job-name (if present) or fall back to the job-id
key_field = 'job-name'
if key_field not in rows[0]:
key_field = 'job-id' # depends on [control=['if'], data=['key_field']]
# Group each of the rows based on (job-name or job-id, status)
grouped = collections.defaultdict(lambda : collections.defaultdict(lambda : []))
for row in rows:
grouped[row.get(key_field, '')][row.get('status', '')] += [row] # depends on [control=['for'], data=['row']]
# Now that we have the rows grouped, create a summary table.
# Use the original table as the driver in order to preserve the order.
new_rows = []
for job_key in sorted(grouped.keys()):
group = grouped.get(job_key, None)
canonical_status = ['RUNNING', 'SUCCESS', 'FAILURE', 'CANCEL']
# Written this way to ensure that if somehow a new status is introduced,
# it shows up in our output.
for status in canonical_status + sorted(group.keys()):
if status not in group:
continue # depends on [control=['if'], data=[]]
task_count = len(group[status])
del group[status]
if task_count:
summary_row = collections.OrderedDict()
summary_row[key_field] = job_key
summary_row['status'] = status
summary_row['task-count'] = task_count
new_rows.append(summary_row) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['status']] # depends on [control=['for'], data=['job_key']]
return new_rows |
def safe_size_check(checked_path, error_detail, max_bytes=500000000):
    """Assert that the total size of files under *checked_path* stays below *max_bytes*.

    Useful as a guard before any recursive remove; raises AssertionError
    (with *error_detail* appended to the message) when the tree is larger
    than expected.
    """
    # Walk the entire tree and total the size of every regular file.
    actual_size = sum(
        os.path.getsize(os.path.join(root, filename))
        for root, _dirs, filenames in os.walk(checked_path)
        for filename in filenames
    )
    assert actual_size <= max_bytes, "Path {} size of {} >= {} bytes. {}".format(
        checked_path, actual_size, max_bytes, error_detail)
constant[Determines if a particular path is larger than expected. Useful before any recursive remove.]
variable[actual_size] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b0a4f430>, <ast.Name object at 0x7da1b0a4c610>, <ast.Name object at 0x7da1b0a4e0b0>]]] in starred[call[name[os].walk, parameter[name[checked_path]]]] begin[:]
for taget[name[f]] in starred[name[filenames]] begin[:]
variable[fp] assign[=] call[name[os].path.join, parameter[name[dirpath], name[f]]]
<ast.AugAssign object at 0x7da1b0a4d3f0>
assert[compare[name[actual_size] less_or_equal[<=] name[max_bytes]]] | keyword[def] identifier[safe_size_check] ( identifier[checked_path] , identifier[error_detail] , identifier[max_bytes] = literal[int] ):
literal[string]
identifier[actual_size] = literal[int]
keyword[for] identifier[dirpath] , identifier[dirnames] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[checked_path] ):
keyword[for] identifier[f] keyword[in] identifier[filenames] :
identifier[fp] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirpath] , identifier[f] )
identifier[actual_size] += identifier[os] . identifier[path] . identifier[getsize] ( identifier[fp] )
keyword[assert] identifier[actual_size] <= identifier[max_bytes] , literal[string] . identifier[format] (
identifier[checked_path] , identifier[actual_size] , identifier[max_bytes] , identifier[error_detail] ) | def safe_size_check(checked_path, error_detail, max_bytes=500000000):
"""Determines if a particular path is larger than expected. Useful before any recursive remove."""
actual_size = 0
for (dirpath, dirnames, filenames) in os.walk(checked_path):
for f in filenames:
fp = os.path.join(dirpath, f)
actual_size += os.path.getsize(fp) # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=[]]
assert actual_size <= max_bytes, 'Path {} size of {} >= {} bytes. {}'.format(checked_path, actual_size, max_bytes, error_detail) |
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_list
        An optional list of object properties used to return even more filtered managed object reference results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content matching the requested type/properties.
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # A dropped connection can surface either as BadStatusLine or as an
    # IOError with errno EPIPE (presumably a stale HTTP session); in both
    # cases retry the call exactly once. Any other IOError is re-raised.
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    # Flatten each result's propSet into a plain {name: value} dict, keeping
    # the managed object reference itself under the reserved 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
constant[
Returns a list containing properties and managed object references for the managed object.
service_instance
The Service Instance from which to obtain managed object references.
object_type
The type of content for which to obtain managed object references.
property_list
An optional list of object properties used to return even more filtered managed object reference results.
container_ref
An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
rootFolder.
traversal_spec
An optional TraversalSpec to be used instead of the standard
``Traverse All`` spec
local_properties
Flag specigying whether the properties to be retrieved are local to the
container. If that is the case, the traversal spec needs to be None.
]
variable[content_args] assign[=] list[[<ast.Name object at 0x7da1b1f66020>, <ast.Name object at 0x7da1b1f67190>]]
variable[content_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f67ac0>, <ast.Constant object at 0x7da1b1f65450>, <ast.Constant object at 0x7da1b1f65b10>, <ast.Constant object at 0x7da1b1f67cd0>], [<ast.Name object at 0x7da1b1f64c70>, <ast.Name object at 0x7da1b1f65660>, <ast.Name object at 0x7da1b1f65ae0>, <ast.Name object at 0x7da1b1f67b50>]]
<ast.Try object at 0x7da1b1f66a40>
variable[object_list] assign[=] list[[]]
for taget[name[obj]] in starred[name[content]] begin[:]
variable[properties] assign[=] dictionary[[], []]
for taget[name[prop]] in starred[name[obj].propSet] begin[:]
call[name[properties]][name[prop].name] assign[=] name[prop].val
call[name[properties]][constant[object]] assign[=] name[obj].obj
call[name[object_list].append, parameter[name[properties]]]
call[name[log].trace, parameter[constant[Retrieved %s objects], call[name[len], parameter[name[object_list]]]]]
return[name[object_list]] | keyword[def] identifier[get_mors_with_properties] ( identifier[service_instance] , identifier[object_type] , identifier[property_list] = keyword[None] ,
identifier[container_ref] = keyword[None] , identifier[traversal_spec] = keyword[None] ,
identifier[local_properties] = keyword[False] ):
literal[string]
identifier[content_args] =[ identifier[service_instance] , identifier[object_type] ]
identifier[content_kwargs] ={ literal[string] : identifier[property_list] ,
literal[string] : identifier[container_ref] ,
literal[string] : identifier[traversal_spec] ,
literal[string] : identifier[local_properties] }
keyword[try] :
identifier[content] = identifier[get_content] (* identifier[content_args] ,** identifier[content_kwargs] )
keyword[except] identifier[BadStatusLine] :
identifier[content] = identifier[get_content] (* identifier[content_args] ,** identifier[content_kwargs] )
keyword[except] identifier[IOError] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[errno] != identifier[errno] . identifier[EPIPE] :
keyword[raise] identifier[exc]
identifier[content] = identifier[get_content] (* identifier[content_args] ,** identifier[content_kwargs] )
identifier[object_list] =[]
keyword[for] identifier[obj] keyword[in] identifier[content] :
identifier[properties] ={}
keyword[for] identifier[prop] keyword[in] identifier[obj] . identifier[propSet] :
identifier[properties] [ identifier[prop] . identifier[name] ]= identifier[prop] . identifier[val]
identifier[properties] [ literal[string] ]= identifier[obj] . identifier[obj]
identifier[object_list] . identifier[append] ( identifier[properties] )
identifier[log] . identifier[trace] ( literal[string] , identifier[len] ( identifier[object_list] ))
keyword[return] identifier[object_list] | def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None, traversal_spec=None, local_properties=False):
"""
Returns a list containing properties and managed object references for the managed object.
service_instance
The Service Instance from which to obtain managed object references.
object_type
The type of content for which to obtain managed object references.
property_list
An optional list of object properties used to return even more filtered managed object reference results.
container_ref
An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
rootFolder.
traversal_spec
An optional TraversalSpec to be used instead of the standard
``Traverse All`` spec
local_properties
Flag specigying whether the properties to be retrieved are local to the
container. If that is the case, the traversal spec needs to be None.
"""
# Get all the content
content_args = [service_instance, object_type]
content_kwargs = {'property_list': property_list, 'container_ref': container_ref, 'traversal_spec': traversal_spec, 'local_properties': local_properties}
try:
content = get_content(*content_args, **content_kwargs) # depends on [control=['try'], data=[]]
except BadStatusLine:
content = get_content(*content_args, **content_kwargs) # depends on [control=['except'], data=[]]
except IOError as exc:
if exc.errno != errno.EPIPE:
raise exc # depends on [control=['if'], data=[]]
content = get_content(*content_args, **content_kwargs) # depends on [control=['except'], data=['exc']]
object_list = []
for obj in content:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val # depends on [control=['for'], data=['prop']]
properties['object'] = obj.obj
object_list.append(properties) # depends on [control=['for'], data=['obj']]
log.trace('Retrieved %s objects', len(object_list))
return object_list |
def join(*vectors):
    r"""
    Takes an arbitrary number of aligned vectors of the same length and combines
    them into a single vector (vertically).
    E.g. taking two 100-sample feature vectors of once 5 and once 7 features, a 100x12
    feature vector is created and returned.
    The feature vectors are expected to have the form samples*features i.e.::
        s1 s2 s3 [...]
    f1
    f2
    [...]
    Parameters
    ----------
    *vectors : sequences
        A number of vectors with the same number of samples.
    Returns
    -------
    vector : ndarray
        The combined vectors.
    """
    # A single argument has nothing to combine with: return it unchanged.
    if len(vectors) < 2:
        return vectors[0]
    # Normalise every input to a 2D samples*features array. numpy.asarray
    # avoids a copy when possible and, unlike numpy.array(..., copy=False),
    # keeps working on NumPy >= 2.0 where copy=False raises ValueError
    # whenever a copy would be required (e.g. for list inputs).
    arrays = []
    for vector in vectors:
        arr = numpy.asarray(vector)
        if arr.ndim == 1:
            # Promote a flat vector to a single-feature column (n, 1).
            arr = arr[:, numpy.newaxis]
        arrays.append(arr)
    combined = numpy.concatenate(arrays, 1)
    # Treat single-sample input special (no squeezing), since squeeze would
    # also collapse the length-1 sample axis.
    if 1 == len(arrays[0]):
        return combined
    return numpy.squeeze(combined)
constant[
Takes an arbitrary number of aligned vectors of the same length and combines
them into a single vector (vertically).
E.g. taking two 100-sample feature vectors of once 5 and once 7 features, a 100x12
feature vector is created and returned.
The feature vectors are expected to have the form samples*features i.e.::
s1 s2 s3 [...]
f1
f2
[...]
Parameters
----------
*vectors : sequences
A number of vectors with the same number of samples.
Returns
-------
vector : ndarray
The combined vectors.
]
if compare[call[name[len], parameter[name[vectors]]] less[<] constant[2]] begin[:]
return[call[name[vectors]][constant[0]]]
variable[vectors] assign[=] call[name[list], parameter[name[vectors]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[vectors]]]]]] begin[:]
call[name[vectors]][name[i]] assign[=] call[name[numpy].array, parameter[call[name[vectors]][name[i]]]]
if compare[call[name[vectors]][name[i]].ndim equal[==] constant[1]] begin[:]
call[name[vectors]][name[i]] assign[=] call[name[numpy].array, parameter[list[[<ast.Subscript object at 0x7da1b12d8280>]]]].T
if compare[constant[1] equal[==] call[name[len], parameter[call[name[vectors]][constant[0]]]]] begin[:]
return[call[name[numpy].concatenate, parameter[name[vectors], constant[1]]]]
return[call[name[numpy].squeeze, parameter[call[name[numpy].concatenate, parameter[name[vectors], constant[1]]]]]] | keyword[def] identifier[join] (* identifier[vectors] ):
literal[string]
keyword[if] identifier[len] ( identifier[vectors] )< literal[int] :
keyword[return] identifier[vectors] [ literal[int] ]
identifier[vectors] = identifier[list] ( identifier[vectors] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[vectors] )):
identifier[vectors] [ identifier[i] ]= identifier[numpy] . identifier[array] ( identifier[vectors] [ identifier[i] ], identifier[copy] = keyword[False] )
keyword[if] identifier[vectors] [ identifier[i] ]. identifier[ndim] == literal[int] :
identifier[vectors] [ identifier[i] ]= identifier[numpy] . identifier[array] ([ identifier[vectors] [ identifier[i] ]], identifier[copy] = keyword[False] ). identifier[T]
keyword[if] literal[int] == identifier[len] ( identifier[vectors] [ literal[int] ]):
keyword[return] identifier[numpy] . identifier[concatenate] ( identifier[vectors] , literal[int] )
keyword[return] identifier[numpy] . identifier[squeeze] ( identifier[numpy] . identifier[concatenate] ( identifier[vectors] , literal[int] )) | def join(*vectors):
"""
Takes an arbitrary number of aligned vectors of the same length and combines
them into a single vector (vertically).
E.g. taking two 100-sample feature vectors of once 5 and once 7 features, a 100x12
feature vector is created and returned.
The feature vectors are expected to have the form samples*features i.e.::
s1 s2 s3 [...]
f1
f2
[...]
Parameters
----------
*vectors : sequences
A number of vectors with the same number of samples.
Returns
-------
vector : ndarray
The combined vectors.
"""
# check supplied arguments
if len(vectors) < 2:
return vectors[0] # depends on [control=['if'], data=[]]
# process supplied arguments
vectors = list(vectors)
for i in range(len(vectors)):
vectors[i] = numpy.array(vectors[i], copy=False)
if vectors[i].ndim == 1:
vectors[i] = numpy.array([vectors[i]], copy=False).T # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# treat single-value cases special (no squeezing)
if 1 == len(vectors[0]):
return numpy.concatenate(vectors, 1) # depends on [control=['if'], data=[]]
return numpy.squeeze(numpy.concatenate(vectors, 1)) |
def create_releasenotes(project_dir=os.curdir, bugtracker_url=''):
    """
    Creates the release notes file, if not in a package.
    Args:
        project_dir(str): Path to the git repo of the project.
        bugtracker_url(str): Url to the bug tracker for the issues.
    Returns:
        None
    Raises:
        RuntimeError: If the release notes could not be retrieved
    """
    # A PKG-INFO file marks a built package; presumably nothing should be
    # (re)generated there, so bail out early.
    if os.path.exists(os.path.join(project_dir, 'PKG-INFO')):
        return
    with open('RELEASE_NOTES', 'wb') as notes_file:
        content = get_releasenotes(
            project_dir=project_dir,
            bugtracker_url=bugtracker_url,
        )
        notes_file.write(content.encode('utf-8') + b'\n')
constant[
Creates the release notes file, if not in a package.
Args:
project_dir(str): Path to the git repo of the project.
bugtracker_url(str): Url to the bug tracker for the issues.
Returns:
None
Raises:
RuntimeError: If the release notes could not be retrieved
]
variable[pkg_info_file] assign[=] call[name[os].path.join, parameter[name[project_dir], constant[PKG-INFO]]]
if call[name[os].path.exists, parameter[name[pkg_info_file]]] begin[:]
return[None]
with call[name[open], parameter[constant[RELEASE_NOTES], constant[wb]]] begin[:]
call[name[releasenotes_fd].write, parameter[binary_operation[call[call[name[get_releasenotes], parameter[]].encode, parameter[constant[utf-8]]] + constant[b'\n']]]] | keyword[def] identifier[create_releasenotes] ( identifier[project_dir] = identifier[os] . identifier[curdir] , identifier[bugtracker_url] = literal[string] ):
literal[string]
identifier[pkg_info_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[project_dir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[pkg_info_file] ):
keyword[return]
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[releasenotes_fd] :
identifier[releasenotes_fd] . identifier[write] (
identifier[get_releasenotes] (
identifier[project_dir] = identifier[project_dir] ,
identifier[bugtracker_url] = identifier[bugtracker_url] ,
). identifier[encode] ( literal[string] )+ literal[string]
) | def create_releasenotes(project_dir=os.curdir, bugtracker_url=''):
"""
Creates the release notes file, if not in a package.
Args:
project_dir(str): Path to the git repo of the project.
bugtracker_url(str): Url to the bug tracker for the issues.
Returns:
None
Raises:
RuntimeError: If the release notes could not be retrieved
"""
pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
if os.path.exists(pkg_info_file):
return # depends on [control=['if'], data=[]]
with open('RELEASE_NOTES', 'wb') as releasenotes_fd:
releasenotes_fd.write(get_releasenotes(project_dir=project_dir, bugtracker_url=bugtracker_url).encode('utf-8') + b'\n') # depends on [control=['with'], data=['releasenotes_fd']] |
def tryDynMod(name):
    '''
    Dynamically import a python module or exception.
    '''
    try:
        module = importlib.import_module(name)
    except ModuleNotFoundError:
        # Translate the stdlib failure into the project-specific exception.
        raise s_exc.NoSuchDyn(name=name)
    else:
        return module
constant[
Dynamically import a python module or exception.
]
<ast.Try object at 0x7da1b23ef0d0> | keyword[def] identifier[tryDynMod] ( identifier[name] ):
literal[string]
keyword[try] :
keyword[return] identifier[importlib] . identifier[import_module] ( identifier[name] )
keyword[except] identifier[ModuleNotFoundError] :
keyword[raise] identifier[s_exc] . identifier[NoSuchDyn] ( identifier[name] = identifier[name] ) | def tryDynMod(name):
"""
Dynamically import a python module or exception.
"""
try:
return importlib.import_module(name) # depends on [control=['try'], data=[]]
except ModuleNotFoundError:
raise s_exc.NoSuchDyn(name=name) # depends on [control=['except'], data=[]] |
def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
        """
        Finds named entities in the text along with entity types,
        salience, mentions for each entity, and other properties.
        :param document: Input document.
            If a dict is provided, it must be of the same form as the protobuf message Document
        :type document: dict or class google.cloud.language_v1.types.Document
        :param encoding_type: The encoding type used by the API to calculate offsets.
        :type encoding_type: google.cloud.language_v1.types.EncodingType
        :param retry: A retry object used to retry requests. If None is specified, requests will not be
            retried.
        :type retry: google.api_core.retry.Retry
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            retry is specified, the timeout applies to each individual attempt.
        :type timeout: float
        :param metadata: Additional metadata that is provided to the method.
        :type metadata: sequence[tuple[str, str]]]
        :rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse
        """
        # Thin delegation: fetch the language client and forward every
        # argument unchanged.
        conn = self.get_conn()
        return conn.analyze_entities(
            document=document,
            encoding_type=encoding_type,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
constant[
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse
]
variable[client] assign[=] call[name[self].get_conn, parameter[]]
return[call[name[client].analyze_entities, parameter[]]] | keyword[def] identifier[analyze_entities] ( identifier[self] , identifier[document] , identifier[encoding_type] = keyword[None] , identifier[retry] = keyword[None] , identifier[timeout] = keyword[None] , identifier[metadata] = keyword[None] ):
literal[string]
identifier[client] = identifier[self] . identifier[get_conn] ()
keyword[return] identifier[client] . identifier[analyze_entities] (
identifier[document] = identifier[document] , identifier[encoding_type] = identifier[encoding_type] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata]
) | def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
"""
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse
"""
client = self.get_conn()
return client.analyze_entities(document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata) |
def create(self, typ, data, return_response=False):
        """
        Create a new object of the given type via an HTTP POST.
        :param typ: resource type / endpoint to POST to
        :param data: payload describing the object to create
        :param return_response: if True, return the decoded JSON body of the
            POST response instead of re-fetching and loading the created object
        """
        res = self._request(typ, method='POST', data=data)
        # Anything but 201 Created is an error: hand the decoded body to
        # _throw so it can raise an appropriate exception.
        if res.status_code != 201:
            try:
                data = res.json()
                self._throw(res, data)
            except ValueError as e:
                # res.json() raises ValueError on a non-JSON body, in which
                # case we report the error with an empty payload. If the
                # ValueError is an InvalidRequestException (assumed here to be
                # a ValueError subclass raised by _throw itself), propagate it
                # untouched.
                if not isinstance(e, InvalidRequestException):
                    self._throw(res, {})
                else:
                    raise
        loc = res.headers.get("location", None)
        # A relative Location header presumably points at the newly created
        # resource; fetch and load it from that path.
        if loc and loc.startswith('/'):
            return self._load(self._request(None, url=self.url_update(path=loc)))
        if return_response:
            return res.json()
        return self._load(self._request(None, url=loc))
constant[
Create new type
Valid arguments:
skip : number of records to skip
limit : number of records to limit request to
]
variable[res] assign[=] call[name[self]._request, parameter[name[typ]]]
if compare[name[res].status_code not_equal[!=] constant[201]] begin[:]
<ast.Try object at 0x7da20c9908b0>
variable[loc] assign[=] call[name[res].headers.get, parameter[constant[location], constant[None]]]
if <ast.BoolOp object at 0x7da18bccbc40> begin[:]
return[call[name[self]._load, parameter[call[name[self]._request, parameter[constant[None]]]]]]
if name[return_response] begin[:]
return[call[name[res].json, parameter[]]]
return[call[name[self]._load, parameter[call[name[self]._request, parameter[constant[None]]]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[typ] , identifier[data] , identifier[return_response] = keyword[False] ):
literal[string]
identifier[res] = identifier[self] . identifier[_request] ( identifier[typ] , identifier[method] = literal[string] , identifier[data] = identifier[data] )
keyword[if] identifier[res] . identifier[status_code] != literal[int] :
keyword[try] :
identifier[data] = identifier[res] . identifier[json] ()
identifier[self] . identifier[_throw] ( identifier[res] , identifier[data] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[e] , identifier[InvalidRequestException] ):
identifier[self] . identifier[_throw] ( identifier[res] ,{})
keyword[else] :
keyword[raise]
identifier[loc] = identifier[res] . identifier[headers] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[loc] keyword[and] identifier[loc] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[self] . identifier[_load] ( identifier[self] . identifier[_request] ( keyword[None] , identifier[url] = identifier[self] . identifier[url_update] ( identifier[path] = identifier[loc] )))
keyword[if] identifier[return_response] :
keyword[return] identifier[res] . identifier[json] ()
keyword[return] identifier[self] . identifier[_load] ( identifier[self] . identifier[_request] ( keyword[None] , identifier[url] = identifier[loc] )) | def create(self, typ, data, return_response=False):
"""
Create new type
Valid arguments:
skip : number of records to skip
limit : number of records to limit request to
"""
res = self._request(typ, method='POST', data=data)
if res.status_code != 201:
try:
data = res.json()
self._throw(res, data) # depends on [control=['try'], data=[]]
except ValueError as e:
if not isinstance(e, InvalidRequestException):
self._throw(res, {}) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
loc = res.headers.get('location', None)
if loc and loc.startswith('/'):
return self._load(self._request(None, url=self.url_update(path=loc))) # depends on [control=['if'], data=[]]
if return_response:
return res.json() # depends on [control=['if'], data=[]]
return self._load(self._request(None, url=loc)) |
def get_target(cls, path, index, chain=None):
        """
        Calculate the target difficulty at a particular difficulty interval (index).
        This follows the Bitcoin-style retargeting scheme: the target for an
        interval is derived from how long the previous BLOCK_DIFFICULTY_CHUNK_SIZE
        blocks actually took versus the expected BLOCK_DIFFICULTY_INTERVAL.
        path is the header-file path read via SPVClient.read_header; chain is an
        optional list of in-memory header dicts used when the last header of the
        interval is not yet on disk.
        Return (bits, target) on success
        NOTE(review): this arithmetic relies on Python 2 integer division --
        under Python 3 `bits/MM` and `c /= 256` produce floats. Confirm before
        porting.
        """
        if chain is None:
            chain = []  # Do not use mutables as default values!
        # Maximum (difficulty-1) target; returned verbatim for interval 0.
        max_target = 0x00000000FFFF0000000000000000000000000000000000000000000000000000
        if index == 0:
            return 0x1d00ffff, max_target
        # First and last headers of the previous retarget window.
        first = SPVClient.read_header( path, (index-1)*BLOCK_DIFFICULTY_CHUNK_SIZE)
        last = SPVClient.read_header( path, index*BLOCK_DIFFICULTY_CHUNK_SIZE - 1, allow_none=True)
        if last is None:
            # Not on disk yet -- look for it among the caller-supplied headers.
            for h in chain:
                if h.get('block_height') == index*BLOCK_DIFFICULTY_CHUNK_SIZE - 1:
                    last = h
        nActualTimespan = last.get('timestamp') - first.get('timestamp')
        nTargetTimespan = BLOCK_DIFFICULTY_INTERVAL
        # Clamp the measured timespan to [expected/4, expected*4] so one
        # interval can move the difficulty by at most a factor of four.
        nActualTimespan = max(nActualTimespan, nTargetTimespan/4)
        nActualTimespan = min(nActualTimespan, nTargetTimespan*4)
        bits = last.get('bits')
        # Decode the compact 'bits' encoding into a bignum: the top byte of
        # `bits` is an exponent, the low three bytes (bits % MM) the mantissa.
        MM = 256*256*256
        a = bits%MM
        if a < 0x8000:
            a *= 256
        target = (a) * pow(2, 8 * (bits/MM - 3))
        # Scale the old target by the (clamped) timespan ratio, capped at
        # the maximum target.
        new_target = min( max_target, (target * nActualTimespan)/nTargetTimespan )
        # Re-encode the new target into the compact bits form: strip leading
        # zero bytes to find the exponent, then keep a 3-byte mantissa.
        c = ("%064X"%new_target)[2:]
        i = 31
        while c[0:2]=="00":
            c = c[2:]
            i -= 1
        c = int('0x'+c[0:6],16)
        if c >= 0x800000:
            # Mantissa would look negative (sign bit set); shift into the
            # exponent instead.
            c /= 256
            i += 1
        new_bits = c + MM * i
        return new_bits, new_target
constant[
Calculate the target difficulty at a particular difficulty interval (index).
Return (bits, target) on success
]
if compare[name[chain] is constant[None]] begin[:]
variable[chain] assign[=] list[[]]
variable[max_target] assign[=] constant[26959535291011309493156476344723991336010898738574164086137773096960]
if compare[name[index] equal[==] constant[0]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b2820ee0>, <ast.Name object at 0x7da1b2820790>]]]
variable[first] assign[=] call[name[SPVClient].read_header, parameter[name[path], binary_operation[binary_operation[name[index] - constant[1]] * name[BLOCK_DIFFICULTY_CHUNK_SIZE]]]]
variable[last] assign[=] call[name[SPVClient].read_header, parameter[name[path], binary_operation[binary_operation[name[index] * name[BLOCK_DIFFICULTY_CHUNK_SIZE]] - constant[1]]]]
if compare[name[last] is constant[None]] begin[:]
for taget[name[h]] in starred[name[chain]] begin[:]
if compare[call[name[h].get, parameter[constant[block_height]]] equal[==] binary_operation[binary_operation[name[index] * name[BLOCK_DIFFICULTY_CHUNK_SIZE]] - constant[1]]] begin[:]
variable[last] assign[=] name[h]
variable[nActualTimespan] assign[=] binary_operation[call[name[last].get, parameter[constant[timestamp]]] - call[name[first].get, parameter[constant[timestamp]]]]
variable[nTargetTimespan] assign[=] name[BLOCK_DIFFICULTY_INTERVAL]
variable[nActualTimespan] assign[=] call[name[max], parameter[name[nActualTimespan], binary_operation[name[nTargetTimespan] / constant[4]]]]
variable[nActualTimespan] assign[=] call[name[min], parameter[name[nActualTimespan], binary_operation[name[nTargetTimespan] * constant[4]]]]
variable[bits] assign[=] call[name[last].get, parameter[constant[bits]]]
variable[MM] assign[=] binary_operation[binary_operation[constant[256] * constant[256]] * constant[256]]
variable[a] assign[=] binary_operation[name[bits] <ast.Mod object at 0x7da2590d6920> name[MM]]
if compare[name[a] less[<] constant[32768]] begin[:]
<ast.AugAssign object at 0x7da1b287a080>
variable[target] assign[=] binary_operation[name[a] * call[name[pow], parameter[constant[2], binary_operation[constant[8] * binary_operation[binary_operation[name[bits] / name[MM]] - constant[3]]]]]]
variable[new_target] assign[=] call[name[min], parameter[name[max_target], binary_operation[binary_operation[name[target] * name[nActualTimespan]] / name[nTargetTimespan]]]]
variable[c] assign[=] call[binary_operation[constant[%064X] <ast.Mod object at 0x7da2590d6920> name[new_target]]][<ast.Slice object at 0x7da1b2841750>]
variable[i] assign[=] constant[31]
while compare[call[name[c]][<ast.Slice object at 0x7da1b28421d0>] equal[==] constant[00]] begin[:]
variable[c] assign[=] call[name[c]][<ast.Slice object at 0x7da1b28425c0>]
<ast.AugAssign object at 0x7da1b2842020>
variable[c] assign[=] call[name[int], parameter[binary_operation[constant[0x] + call[name[c]][<ast.Slice object at 0x7da1b2841930>]], constant[16]]]
if compare[name[c] greater_or_equal[>=] constant[8388608]] begin[:]
<ast.AugAssign object at 0x7da1b28434c0>
<ast.AugAssign object at 0x7da1b2842cb0>
variable[new_bits] assign[=] binary_operation[name[c] + binary_operation[name[MM] * name[i]]]
return[tuple[[<ast.Name object at 0x7da1b2840ee0>, <ast.Name object at 0x7da1b2841000>]]] | keyword[def] identifier[get_target] ( identifier[cls] , identifier[path] , identifier[index] , identifier[chain] = keyword[None] ):
literal[string]
keyword[if] identifier[chain] keyword[is] keyword[None] :
identifier[chain] =[]
identifier[max_target] = literal[int]
keyword[if] identifier[index] == literal[int] :
keyword[return] literal[int] , identifier[max_target]
identifier[first] = identifier[SPVClient] . identifier[read_header] ( identifier[path] ,( identifier[index] - literal[int] )* identifier[BLOCK_DIFFICULTY_CHUNK_SIZE] )
identifier[last] = identifier[SPVClient] . identifier[read_header] ( identifier[path] , identifier[index] * identifier[BLOCK_DIFFICULTY_CHUNK_SIZE] - literal[int] , identifier[allow_none] = keyword[True] )
keyword[if] identifier[last] keyword[is] keyword[None] :
keyword[for] identifier[h] keyword[in] identifier[chain] :
keyword[if] identifier[h] . identifier[get] ( literal[string] )== identifier[index] * identifier[BLOCK_DIFFICULTY_CHUNK_SIZE] - literal[int] :
identifier[last] = identifier[h]
identifier[nActualTimespan] = identifier[last] . identifier[get] ( literal[string] )- identifier[first] . identifier[get] ( literal[string] )
identifier[nTargetTimespan] = identifier[BLOCK_DIFFICULTY_INTERVAL]
identifier[nActualTimespan] = identifier[max] ( identifier[nActualTimespan] , identifier[nTargetTimespan] / literal[int] )
identifier[nActualTimespan] = identifier[min] ( identifier[nActualTimespan] , identifier[nTargetTimespan] * literal[int] )
identifier[bits] = identifier[last] . identifier[get] ( literal[string] )
identifier[MM] = literal[int] * literal[int] * literal[int]
identifier[a] = identifier[bits] % identifier[MM]
keyword[if] identifier[a] < literal[int] :
identifier[a] *= literal[int]
identifier[target] =( identifier[a] )* identifier[pow] ( literal[int] , literal[int] *( identifier[bits] / identifier[MM] - literal[int] ))
identifier[new_target] = identifier[min] ( identifier[max_target] ,( identifier[target] * identifier[nActualTimespan] )/ identifier[nTargetTimespan] )
identifier[c] =( literal[string] % identifier[new_target] )[ literal[int] :]
identifier[i] = literal[int]
keyword[while] identifier[c] [ literal[int] : literal[int] ]== literal[string] :
identifier[c] = identifier[c] [ literal[int] :]
identifier[i] -= literal[int]
identifier[c] = identifier[int] ( literal[string] + identifier[c] [ literal[int] : literal[int] ], literal[int] )
keyword[if] identifier[c] >= literal[int] :
identifier[c] /= literal[int]
identifier[i] += literal[int]
identifier[new_bits] = identifier[c] + identifier[MM] * identifier[i]
keyword[return] identifier[new_bits] , identifier[new_target] | def get_target(cls, path, index, chain=None):
"""
Calculate the target difficulty at a particular difficulty interval (index).
Return (bits, target) on success
"""
if chain is None:
chain = [] # Do not use mutables as default values! # depends on [control=['if'], data=['chain']]
max_target = 26959535291011309493156476344723991336010898738574164086137773096960
if index == 0:
return (486604799, max_target) # depends on [control=['if'], data=[]]
first = SPVClient.read_header(path, (index - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE)
last = SPVClient.read_header(path, index * BLOCK_DIFFICULTY_CHUNK_SIZE - 1, allow_none=True)
if last is None:
for h in chain:
if h.get('block_height') == index * BLOCK_DIFFICULTY_CHUNK_SIZE - 1:
last = h # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['h']] # depends on [control=['if'], data=['last']]
nActualTimespan = last.get('timestamp') - first.get('timestamp')
nTargetTimespan = BLOCK_DIFFICULTY_INTERVAL
nActualTimespan = max(nActualTimespan, nTargetTimespan / 4)
nActualTimespan = min(nActualTimespan, nTargetTimespan * 4)
bits = last.get('bits')
# convert to bignum
MM = 256 * 256 * 256
a = bits % MM
if a < 32768:
a *= 256 # depends on [control=['if'], data=['a']]
target = a * pow(2, 8 * (bits / MM - 3))
# new target
new_target = min(max_target, target * nActualTimespan / nTargetTimespan)
# convert it to bits
c = ('%064X' % new_target)[2:]
i = 31
while c[0:2] == '00':
c = c[2:]
i -= 1 # depends on [control=['while'], data=[]]
c = int('0x' + c[0:6], 16)
if c >= 8388608:
c /= 256
i += 1 # depends on [control=['if'], data=['c']]
new_bits = c + MM * i
return (new_bits, new_target) |
def segment(self, tokens):
"""
Segments a sequence of tokens into a sequence of segments.
:Parameters:
tokens : `list` ( :class:`~deltas.Token` )
"""
look_ahead = LookAhead(tokens)
segments = Segment()
while not look_ahead.empty():
if look_ahead.peek().type not in self.whitespace: # Paragraph!
paragraph = MatchableSegment(look_ahead.i)
while not look_ahead.empty() and \
look_ahead.peek().type not in self.paragraph_end:
if look_ahead.peek().type == "tab_open": # Table
tab_depth = 1
sentence = MatchableSegment(
look_ahead.i, [next(look_ahead)])
while not look_ahead.empty() and tab_depth > 0:
tab_depth += look_ahead.peek().type == "tab_open"
tab_depth -= look_ahead.peek().type == "tab_close"
sentence.append(next(look_ahead))
paragraph.append(sentence)
elif look_ahead.peek().type not in self.whitespace: # Sentence!
sentence = MatchableSegment(
look_ahead.i, [next(look_ahead)])
sub_depth = int(sentence[0].type in SUB_OPEN)
while not look_ahead.empty():
sub_depth += look_ahead.peek().type in SUB_OPEN
sub_depth -= look_ahead.peek().type in SUB_CLOSE
sentence.append(next(look_ahead))
if sentence[-1].type in self.sentence_end and sub_depth <= 0:
non_whitespace = sum(s.type not in self.whitespace for s in sentence)
if non_whitespace >= self.min_sentence:
break
paragraph.append(sentence)
else: # look_ahead.peek().type in self.whitespace
whitespace = Segment(look_ahead.i, [next(look_ahead)])
paragraph.append(whitespace)
segments.append(paragraph)
else: # look_ahead.peek().type in self.whitespace
whitespace = Segment(look_ahead.i, [next(look_ahead)])
segments.append(whitespace)
return segments | def function[segment, parameter[self, tokens]]:
constant[
Segments a sequence of tokens into a sequence of segments.
:Parameters:
tokens : `list` ( :class:`~deltas.Token` )
]
variable[look_ahead] assign[=] call[name[LookAhead], parameter[name[tokens]]]
variable[segments] assign[=] call[name[Segment], parameter[]]
while <ast.UnaryOp object at 0x7da20c797730> begin[:]
if compare[call[name[look_ahead].peek, parameter[]].type <ast.NotIn object at 0x7da2590d7190> name[self].whitespace] begin[:]
variable[paragraph] assign[=] call[name[MatchableSegment], parameter[name[look_ahead].i]]
while <ast.BoolOp object at 0x7da1b0a33d60> begin[:]
if compare[call[name[look_ahead].peek, parameter[]].type equal[==] constant[tab_open]] begin[:]
variable[tab_depth] assign[=] constant[1]
variable[sentence] assign[=] call[name[MatchableSegment], parameter[name[look_ahead].i, list[[<ast.Call object at 0x7da1b0b834c0>]]]]
while <ast.BoolOp object at 0x7da1b0b80bb0> begin[:]
<ast.AugAssign object at 0x7da1b0b821d0>
<ast.AugAssign object at 0x7da1b0b82e30>
call[name[sentence].append, parameter[call[name[next], parameter[name[look_ahead]]]]]
call[name[paragraph].append, parameter[name[sentence]]]
call[name[segments].append, parameter[name[paragraph]]]
return[name[segments]] | keyword[def] identifier[segment] ( identifier[self] , identifier[tokens] ):
literal[string]
identifier[look_ahead] = identifier[LookAhead] ( identifier[tokens] )
identifier[segments] = identifier[Segment] ()
keyword[while] keyword[not] identifier[look_ahead] . identifier[empty] ():
keyword[if] identifier[look_ahead] . identifier[peek] (). identifier[type] keyword[not] keyword[in] identifier[self] . identifier[whitespace] :
identifier[paragraph] = identifier[MatchableSegment] ( identifier[look_ahead] . identifier[i] )
keyword[while] keyword[not] identifier[look_ahead] . identifier[empty] () keyword[and] identifier[look_ahead] . identifier[peek] (). identifier[type] keyword[not] keyword[in] identifier[self] . identifier[paragraph_end] :
keyword[if] identifier[look_ahead] . identifier[peek] (). identifier[type] == literal[string] :
identifier[tab_depth] = literal[int]
identifier[sentence] = identifier[MatchableSegment] (
identifier[look_ahead] . identifier[i] ,[ identifier[next] ( identifier[look_ahead] )])
keyword[while] keyword[not] identifier[look_ahead] . identifier[empty] () keyword[and] identifier[tab_depth] > literal[int] :
identifier[tab_depth] += identifier[look_ahead] . identifier[peek] (). identifier[type] == literal[string]
identifier[tab_depth] -= identifier[look_ahead] . identifier[peek] (). identifier[type] == literal[string]
identifier[sentence] . identifier[append] ( identifier[next] ( identifier[look_ahead] ))
identifier[paragraph] . identifier[append] ( identifier[sentence] )
keyword[elif] identifier[look_ahead] . identifier[peek] (). identifier[type] keyword[not] keyword[in] identifier[self] . identifier[whitespace] :
identifier[sentence] = identifier[MatchableSegment] (
identifier[look_ahead] . identifier[i] ,[ identifier[next] ( identifier[look_ahead] )])
identifier[sub_depth] = identifier[int] ( identifier[sentence] [ literal[int] ]. identifier[type] keyword[in] identifier[SUB_OPEN] )
keyword[while] keyword[not] identifier[look_ahead] . identifier[empty] ():
identifier[sub_depth] += identifier[look_ahead] . identifier[peek] (). identifier[type] keyword[in] identifier[SUB_OPEN]
identifier[sub_depth] -= identifier[look_ahead] . identifier[peek] (). identifier[type] keyword[in] identifier[SUB_CLOSE]
identifier[sentence] . identifier[append] ( identifier[next] ( identifier[look_ahead] ))
keyword[if] identifier[sentence] [- literal[int] ]. identifier[type] keyword[in] identifier[self] . identifier[sentence_end] keyword[and] identifier[sub_depth] <= literal[int] :
identifier[non_whitespace] = identifier[sum] ( identifier[s] . identifier[type] keyword[not] keyword[in] identifier[self] . identifier[whitespace] keyword[for] identifier[s] keyword[in] identifier[sentence] )
keyword[if] identifier[non_whitespace] >= identifier[self] . identifier[min_sentence] :
keyword[break]
identifier[paragraph] . identifier[append] ( identifier[sentence] )
keyword[else] :
identifier[whitespace] = identifier[Segment] ( identifier[look_ahead] . identifier[i] ,[ identifier[next] ( identifier[look_ahead] )])
identifier[paragraph] . identifier[append] ( identifier[whitespace] )
identifier[segments] . identifier[append] ( identifier[paragraph] )
keyword[else] :
identifier[whitespace] = identifier[Segment] ( identifier[look_ahead] . identifier[i] ,[ identifier[next] ( identifier[look_ahead] )])
identifier[segments] . identifier[append] ( identifier[whitespace] )
keyword[return] identifier[segments] | def segment(self, tokens):
"""
Segments a sequence of tokens into a sequence of segments.
:Parameters:
tokens : `list` ( :class:`~deltas.Token` )
"""
look_ahead = LookAhead(tokens)
segments = Segment()
while not look_ahead.empty():
if look_ahead.peek().type not in self.whitespace: # Paragraph!
paragraph = MatchableSegment(look_ahead.i)
while not look_ahead.empty() and look_ahead.peek().type not in self.paragraph_end:
if look_ahead.peek().type == 'tab_open': # Table
tab_depth = 1
sentence = MatchableSegment(look_ahead.i, [next(look_ahead)])
while not look_ahead.empty() and tab_depth > 0:
tab_depth += look_ahead.peek().type == 'tab_open'
tab_depth -= look_ahead.peek().type == 'tab_close'
sentence.append(next(look_ahead)) # depends on [control=['while'], data=[]]
paragraph.append(sentence) # depends on [control=['if'], data=[]]
elif look_ahead.peek().type not in self.whitespace: # Sentence!
sentence = MatchableSegment(look_ahead.i, [next(look_ahead)])
sub_depth = int(sentence[0].type in SUB_OPEN)
while not look_ahead.empty():
sub_depth += look_ahead.peek().type in SUB_OPEN
sub_depth -= look_ahead.peek().type in SUB_CLOSE
sentence.append(next(look_ahead))
if sentence[-1].type in self.sentence_end and sub_depth <= 0:
non_whitespace = sum((s.type not in self.whitespace for s in sentence))
if non_whitespace >= self.min_sentence:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
paragraph.append(sentence) # depends on [control=['if'], data=[]]
else: # look_ahead.peek().type in self.whitespace
whitespace = Segment(look_ahead.i, [next(look_ahead)])
paragraph.append(whitespace) # depends on [control=['while'], data=[]]
segments.append(paragraph) # depends on [control=['if'], data=[]]
else: # look_ahead.peek().type in self.whitespace
whitespace = Segment(look_ahead.i, [next(look_ahead)])
segments.append(whitespace) # depends on [control=['while'], data=[]]
return segments |
def command_max_run_time(self, event=None):
""" CPU burst max running time - self.runtime_cfg.max_run_time """
try:
max_run_time = self.max_run_time_var.get()
except ValueError:
max_run_time = self.runtime_cfg.max_run_time
self.runtime_cfg.max_run_time = max_run_time
self.max_run_time_var.set(self.runtime_cfg.max_run_time) | def function[command_max_run_time, parameter[self, event]]:
constant[ CPU burst max running time - self.runtime_cfg.max_run_time ]
<ast.Try object at 0x7da1b054bfa0>
name[self].runtime_cfg.max_run_time assign[=] name[max_run_time]
call[name[self].max_run_time_var.set, parameter[name[self].runtime_cfg.max_run_time]] | keyword[def] identifier[command_max_run_time] ( identifier[self] , identifier[event] = keyword[None] ):
literal[string]
keyword[try] :
identifier[max_run_time] = identifier[self] . identifier[max_run_time_var] . identifier[get] ()
keyword[except] identifier[ValueError] :
identifier[max_run_time] = identifier[self] . identifier[runtime_cfg] . identifier[max_run_time]
identifier[self] . identifier[runtime_cfg] . identifier[max_run_time] = identifier[max_run_time]
identifier[self] . identifier[max_run_time_var] . identifier[set] ( identifier[self] . identifier[runtime_cfg] . identifier[max_run_time] ) | def command_max_run_time(self, event=None):
""" CPU burst max running time - self.runtime_cfg.max_run_time """
try:
max_run_time = self.max_run_time_var.get() # depends on [control=['try'], data=[]]
except ValueError:
max_run_time = self.runtime_cfg.max_run_time # depends on [control=['except'], data=[]]
self.runtime_cfg.max_run_time = max_run_time
self.max_run_time_var.set(self.runtime_cfg.max_run_time) |
def connect_external_kernel(self, shellwidget):
"""
Connect an external kernel to the Variable Explorer and Help, if
it is a Spyder kernel.
"""
sw = shellwidget
kc = shellwidget.kernel_client
if self.main.help is not None:
self.main.help.set_shell(sw)
if self.main.variableexplorer is not None:
self.main.variableexplorer.add_shellwidget(sw)
sw.set_namespace_view_settings()
sw.refresh_namespacebrowser()
kc.stopped_channels.connect(lambda :
self.main.variableexplorer.remove_shellwidget(id(sw))) | def function[connect_external_kernel, parameter[self, shellwidget]]:
constant[
Connect an external kernel to the Variable Explorer and Help, if
it is a Spyder kernel.
]
variable[sw] assign[=] name[shellwidget]
variable[kc] assign[=] name[shellwidget].kernel_client
if compare[name[self].main.help is_not constant[None]] begin[:]
call[name[self].main.help.set_shell, parameter[name[sw]]]
if compare[name[self].main.variableexplorer is_not constant[None]] begin[:]
call[name[self].main.variableexplorer.add_shellwidget, parameter[name[sw]]]
call[name[sw].set_namespace_view_settings, parameter[]]
call[name[sw].refresh_namespacebrowser, parameter[]]
call[name[kc].stopped_channels.connect, parameter[<ast.Lambda object at 0x7da204345d80>]] | keyword[def] identifier[connect_external_kernel] ( identifier[self] , identifier[shellwidget] ):
literal[string]
identifier[sw] = identifier[shellwidget]
identifier[kc] = identifier[shellwidget] . identifier[kernel_client]
keyword[if] identifier[self] . identifier[main] . identifier[help] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[main] . identifier[help] . identifier[set_shell] ( identifier[sw] )
keyword[if] identifier[self] . identifier[main] . identifier[variableexplorer] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[main] . identifier[variableexplorer] . identifier[add_shellwidget] ( identifier[sw] )
identifier[sw] . identifier[set_namespace_view_settings] ()
identifier[sw] . identifier[refresh_namespacebrowser] ()
identifier[kc] . identifier[stopped_channels] . identifier[connect] ( keyword[lambda] :
identifier[self] . identifier[main] . identifier[variableexplorer] . identifier[remove_shellwidget] ( identifier[id] ( identifier[sw] ))) | def connect_external_kernel(self, shellwidget):
"""
Connect an external kernel to the Variable Explorer and Help, if
it is a Spyder kernel.
"""
sw = shellwidget
kc = shellwidget.kernel_client
if self.main.help is not None:
self.main.help.set_shell(sw) # depends on [control=['if'], data=[]]
if self.main.variableexplorer is not None:
self.main.variableexplorer.add_shellwidget(sw)
sw.set_namespace_view_settings()
sw.refresh_namespacebrowser()
kc.stopped_channels.connect(lambda : self.main.variableexplorer.remove_shellwidget(id(sw))) # depends on [control=['if'], data=[]] |
def is_unwrapped(f):
"""If `f` was imported and then unwrapped, this function might return True.
.. |is_unwrapped| replace:: :py:func:`is_unwrapped`"""
try:
g = look_up(object_name(f))
return g != f and unwrap(g) == f
except (AttributeError, TypeError, ImportError):
return False | def function[is_unwrapped, parameter[f]]:
constant[If `f` was imported and then unwrapped, this function might return True.
.. |is_unwrapped| replace:: :py:func:`is_unwrapped`]
<ast.Try object at 0x7da20c991420> | keyword[def] identifier[is_unwrapped] ( identifier[f] ):
literal[string]
keyword[try] :
identifier[g] = identifier[look_up] ( identifier[object_name] ( identifier[f] ))
keyword[return] identifier[g] != identifier[f] keyword[and] identifier[unwrap] ( identifier[g] )== identifier[f]
keyword[except] ( identifier[AttributeError] , identifier[TypeError] , identifier[ImportError] ):
keyword[return] keyword[False] | def is_unwrapped(f):
"""If `f` was imported and then unwrapped, this function might return True.
.. |is_unwrapped| replace:: :py:func:`is_unwrapped`"""
try:
g = look_up(object_name(f))
return g != f and unwrap(g) == f # depends on [control=['try'], data=[]]
except (AttributeError, TypeError, ImportError):
return False # depends on [control=['except'], data=[]] |
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs) | def function[rolling, parameter[self]]:
constant[
Return a rolling grouper, providing rolling functionality per group.
]
from relative_module[pandas.core.window] import module[RollingGroupby]
return[call[name[RollingGroupby], parameter[name[self], <ast.Starred object at 0x7da1b26af970>]]] | keyword[def] identifier[rolling] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[pandas] . identifier[core] . identifier[window] keyword[import] identifier[RollingGroupby]
keyword[return] identifier[RollingGroupby] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ) | def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs) |
def flag(self, diagnostic, thresh=None):
'''
Returns indices of diagnostic that satisfy (return True from) the
threshold predicate. Will use class-level default threshold if
None provided.
Args:
diagnostic (str): name of the diagnostic
thresh (func): threshold function (boolean predicate) to apply to
each element
'''
if thresh is None:
thresh = self.defaults[diagnostic]
result = self.results[diagnostic]
if isinstance(result, pd.DataFrame):
if diagnostic == 'CorrelationMatrix':
result = result.copy()
np.fill_diagonal(result.values, 0)
return result.applymap(thresh).sum().nonzero()[0]
else:
return result.apply(thresh).nonzero()[0] | def function[flag, parameter[self, diagnostic, thresh]]:
constant[
Returns indices of diagnostic that satisfy (return True from) the
threshold predicate. Will use class-level default threshold if
None provided.
Args:
diagnostic (str): name of the diagnostic
thresh (func): threshold function (boolean predicate) to apply to
each element
]
if compare[name[thresh] is constant[None]] begin[:]
variable[thresh] assign[=] call[name[self].defaults][name[diagnostic]]
variable[result] assign[=] call[name[self].results][name[diagnostic]]
if call[name[isinstance], parameter[name[result], name[pd].DataFrame]] begin[:]
if compare[name[diagnostic] equal[==] constant[CorrelationMatrix]] begin[:]
variable[result] assign[=] call[name[result].copy, parameter[]]
call[name[np].fill_diagonal, parameter[name[result].values, constant[0]]]
return[call[call[call[call[name[result].applymap, parameter[name[thresh]]].sum, parameter[]].nonzero, parameter[]]][constant[0]]] | keyword[def] identifier[flag] ( identifier[self] , identifier[diagnostic] , identifier[thresh] = keyword[None] ):
literal[string]
keyword[if] identifier[thresh] keyword[is] keyword[None] :
identifier[thresh] = identifier[self] . identifier[defaults] [ identifier[diagnostic] ]
identifier[result] = identifier[self] . identifier[results] [ identifier[diagnostic] ]
keyword[if] identifier[isinstance] ( identifier[result] , identifier[pd] . identifier[DataFrame] ):
keyword[if] identifier[diagnostic] == literal[string] :
identifier[result] = identifier[result] . identifier[copy] ()
identifier[np] . identifier[fill_diagonal] ( identifier[result] . identifier[values] , literal[int] )
keyword[return] identifier[result] . identifier[applymap] ( identifier[thresh] ). identifier[sum] (). identifier[nonzero] ()[ literal[int] ]
keyword[else] :
keyword[return] identifier[result] . identifier[apply] ( identifier[thresh] ). identifier[nonzero] ()[ literal[int] ] | def flag(self, diagnostic, thresh=None):
"""
Returns indices of diagnostic that satisfy (return True from) the
threshold predicate. Will use class-level default threshold if
None provided.
Args:
diagnostic (str): name of the diagnostic
thresh (func): threshold function (boolean predicate) to apply to
each element
"""
if thresh is None:
thresh = self.defaults[diagnostic] # depends on [control=['if'], data=['thresh']]
result = self.results[diagnostic]
if isinstance(result, pd.DataFrame):
if diagnostic == 'CorrelationMatrix':
result = result.copy()
np.fill_diagonal(result.values, 0) # depends on [control=['if'], data=[]]
return result.applymap(thresh).sum().nonzero()[0] # depends on [control=['if'], data=[]]
else:
return result.apply(thresh).nonzero()[0] |
def to_cldf(self, dest, mdname='cldf-metadata.json'):
"""
Write the data from the db to a CLDF dataset according to the metadata in `self.dataset`.
:param dest:
:param mdname:
:return: path of the metadata file
"""
dest = Path(dest)
if not dest.exists():
dest.mkdir()
data = self.read()
if data[self.source_table_name]:
sources = Sources()
for src in data[self.source_table_name]:
sources.add(Source(
src['genre'],
src['id'],
**{k: v for k, v in src.items() if k not in ['id', 'genre']}))
sources.write(dest / self.dataset.properties.get('dc:source', 'sources.bib'))
for table_type, items in data.items():
try:
table = self.dataset[table_type]
table.common_props['dc:extent'] = table.write(
[self.retranslate(table, item) for item in items],
base=dest)
except KeyError:
assert table_type == self.source_table_name, table_type
return self.dataset.write_metadata(dest / mdname) | def function[to_cldf, parameter[self, dest, mdname]]:
constant[
Write the data from the db to a CLDF dataset according to the metadata in `self.dataset`.
:param dest:
:param mdname:
:return: path of the metadata file
]
variable[dest] assign[=] call[name[Path], parameter[name[dest]]]
if <ast.UnaryOp object at 0x7da1afe180d0> begin[:]
call[name[dest].mkdir, parameter[]]
variable[data] assign[=] call[name[self].read, parameter[]]
if call[name[data]][name[self].source_table_name] begin[:]
variable[sources] assign[=] call[name[Sources], parameter[]]
for taget[name[src]] in starred[call[name[data]][name[self].source_table_name]] begin[:]
call[name[sources].add, parameter[call[name[Source], parameter[call[name[src]][constant[genre]], call[name[src]][constant[id]]]]]]
call[name[sources].write, parameter[binary_operation[name[dest] / call[name[self].dataset.properties.get, parameter[constant[dc:source], constant[sources.bib]]]]]]
for taget[tuple[[<ast.Name object at 0x7da1afe1a140>, <ast.Name object at 0x7da1afe180a0>]]] in starred[call[name[data].items, parameter[]]] begin[:]
<ast.Try object at 0x7da1afe1b0a0>
return[call[name[self].dataset.write_metadata, parameter[binary_operation[name[dest] / name[mdname]]]]] | keyword[def] identifier[to_cldf] ( identifier[self] , identifier[dest] , identifier[mdname] = literal[string] ):
literal[string]
identifier[dest] = identifier[Path] ( identifier[dest] )
keyword[if] keyword[not] identifier[dest] . identifier[exists] ():
identifier[dest] . identifier[mkdir] ()
identifier[data] = identifier[self] . identifier[read] ()
keyword[if] identifier[data] [ identifier[self] . identifier[source_table_name] ]:
identifier[sources] = identifier[Sources] ()
keyword[for] identifier[src] keyword[in] identifier[data] [ identifier[self] . identifier[source_table_name] ]:
identifier[sources] . identifier[add] ( identifier[Source] (
identifier[src] [ literal[string] ],
identifier[src] [ literal[string] ],
**{ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[src] . identifier[items] () keyword[if] identifier[k] keyword[not] keyword[in] [ literal[string] , literal[string] ]}))
identifier[sources] . identifier[write] ( identifier[dest] / identifier[self] . identifier[dataset] . identifier[properties] . identifier[get] ( literal[string] , literal[string] ))
keyword[for] identifier[table_type] , identifier[items] keyword[in] identifier[data] . identifier[items] ():
keyword[try] :
identifier[table] = identifier[self] . identifier[dataset] [ identifier[table_type] ]
identifier[table] . identifier[common_props] [ literal[string] ]= identifier[table] . identifier[write] (
[ identifier[self] . identifier[retranslate] ( identifier[table] , identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[items] ],
identifier[base] = identifier[dest] )
keyword[except] identifier[KeyError] :
keyword[assert] identifier[table_type] == identifier[self] . identifier[source_table_name] , identifier[table_type]
keyword[return] identifier[self] . identifier[dataset] . identifier[write_metadata] ( identifier[dest] / identifier[mdname] ) | def to_cldf(self, dest, mdname='cldf-metadata.json'):
"""
Write the data from the db to a CLDF dataset according to the metadata in `self.dataset`.
:param dest:
:param mdname:
:return: path of the metadata file
"""
dest = Path(dest)
if not dest.exists():
dest.mkdir() # depends on [control=['if'], data=[]]
data = self.read()
if data[self.source_table_name]:
sources = Sources()
for src in data[self.source_table_name]:
sources.add(Source(src['genre'], src['id'], **{k: v for (k, v) in src.items() if k not in ['id', 'genre']})) # depends on [control=['for'], data=['src']]
sources.write(dest / self.dataset.properties.get('dc:source', 'sources.bib')) # depends on [control=['if'], data=[]]
for (table_type, items) in data.items():
try:
table = self.dataset[table_type]
table.common_props['dc:extent'] = table.write([self.retranslate(table, item) for item in items], base=dest) # depends on [control=['try'], data=[]]
except KeyError:
assert table_type == self.source_table_name, table_type # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return self.dataset.write_metadata(dest / mdname) |
def execute_replay() -> None:
"""
Execute all commands.
For every command that is found in replay/toDo, execute each of them
and move the file to the replay/archive directory.
"""
files = glob.glob('./replay/toDo/*')
sorted_files = sorted(files, key=os.path.getctime)
if not sorted_files: # list is not empty
LOG.debug('Found %s, beginning execution.', sorted_files)
for command_file in sorted_files:
with open(command_file, 'r') as command:
cmd = command.read()
LOG.debug('executing command: %s', cmd)
resp = run([cmd, '-v', 'DEBUG'], shell=True, check=True)
LOG.debug(resp)
LOG.debug('moving %s to archive', command.name)
move_command = 'mv {0} ./replay/archive/'.format(command.name)
run(move_command, shell=True, check=True)
LOG.info('LaunchDarkly is now up to date.')
else:
LOG.warning('No files found, nothing to replay.') | def function[execute_replay, parameter[]]:
constant[
Execute all commands.
For every command that is found in replay/toDo, execute each of them
and move the file to the replay/archive directory.
]
variable[files] assign[=] call[name[glob].glob, parameter[constant[./replay/toDo/*]]]
variable[sorted_files] assign[=] call[name[sorted], parameter[name[files]]]
if <ast.UnaryOp object at 0x7da1b18bb460> begin[:]
call[name[LOG].debug, parameter[constant[Found %s, beginning execution.], name[sorted_files]]]
for taget[name[command_file]] in starred[name[sorted_files]] begin[:]
with call[name[open], parameter[name[command_file], constant[r]]] begin[:]
variable[cmd] assign[=] call[name[command].read, parameter[]]
call[name[LOG].debug, parameter[constant[executing command: %s], name[cmd]]]
variable[resp] assign[=] call[name[run], parameter[list[[<ast.Name object at 0x7da18bcc8af0>, <ast.Constant object at 0x7da18bccad10>, <ast.Constant object at 0x7da18bccae60>]]]]
call[name[LOG].debug, parameter[name[resp]]]
call[name[LOG].debug, parameter[constant[moving %s to archive], name[command].name]]
variable[move_command] assign[=] call[constant[mv {0} ./replay/archive/].format, parameter[name[command].name]]
call[name[run], parameter[name[move_command]]]
call[name[LOG].info, parameter[constant[LaunchDarkly is now up to date.]]] | keyword[def] identifier[execute_replay] ()-> keyword[None] :
literal[string]
identifier[files] = identifier[glob] . identifier[glob] ( literal[string] )
identifier[sorted_files] = identifier[sorted] ( identifier[files] , identifier[key] = identifier[os] . identifier[path] . identifier[getctime] )
keyword[if] keyword[not] identifier[sorted_files] :
identifier[LOG] . identifier[debug] ( literal[string] , identifier[sorted_files] )
keyword[for] identifier[command_file] keyword[in] identifier[sorted_files] :
keyword[with] identifier[open] ( identifier[command_file] , literal[string] ) keyword[as] identifier[command] :
identifier[cmd] = identifier[command] . identifier[read] ()
identifier[LOG] . identifier[debug] ( literal[string] , identifier[cmd] )
identifier[resp] = identifier[run] ([ identifier[cmd] , literal[string] , literal[string] ], identifier[shell] = keyword[True] , identifier[check] = keyword[True] )
identifier[LOG] . identifier[debug] ( identifier[resp] )
identifier[LOG] . identifier[debug] ( literal[string] , identifier[command] . identifier[name] )
identifier[move_command] = literal[string] . identifier[format] ( identifier[command] . identifier[name] )
identifier[run] ( identifier[move_command] , identifier[shell] = keyword[True] , identifier[check] = keyword[True] )
identifier[LOG] . identifier[info] ( literal[string] )
keyword[else] :
identifier[LOG] . identifier[warning] ( literal[string] ) | def execute_replay() -> None:
"""
Execute all commands.
For every command that is found in replay/toDo, execute each of them
and move the file to the replay/archive directory.
"""
files = glob.glob('./replay/toDo/*')
sorted_files = sorted(files, key=os.path.getctime)
if not sorted_files: # list is not empty
LOG.debug('Found %s, beginning execution.', sorted_files)
for command_file in sorted_files:
with open(command_file, 'r') as command:
cmd = command.read()
LOG.debug('executing command: %s', cmd)
resp = run([cmd, '-v', 'DEBUG'], shell=True, check=True)
LOG.debug(resp)
LOG.debug('moving %s to archive', command.name)
move_command = 'mv {0} ./replay/archive/'.format(command.name)
run(move_command, shell=True, check=True) # depends on [control=['with'], data=['command']] # depends on [control=['for'], data=['command_file']]
LOG.info('LaunchDarkly is now up to date.') # depends on [control=['if'], data=[]]
else:
LOG.warning('No files found, nothing to replay.') |
def remove(self, name):
"""Remove an enum member by name"""
member = self[name]
serial = member.serial
value = member.value
bmask = member.bmask
success = idaapi.del_enum_member(self._eid, value, serial, bmask)
if not success:
raise exceptions.CantDeleteEnumMember("Can't delete enum member {!r}.".format(name)) | def function[remove, parameter[self, name]]:
constant[Remove an enum member by name]
variable[member] assign[=] call[name[self]][name[name]]
variable[serial] assign[=] name[member].serial
variable[value] assign[=] name[member].value
variable[bmask] assign[=] name[member].bmask
variable[success] assign[=] call[name[idaapi].del_enum_member, parameter[name[self]._eid, name[value], name[serial], name[bmask]]]
if <ast.UnaryOp object at 0x7da1b12f1ff0> begin[:]
<ast.Raise object at 0x7da1b12f34c0> | keyword[def] identifier[remove] ( identifier[self] , identifier[name] ):
literal[string]
identifier[member] = identifier[self] [ identifier[name] ]
identifier[serial] = identifier[member] . identifier[serial]
identifier[value] = identifier[member] . identifier[value]
identifier[bmask] = identifier[member] . identifier[bmask]
identifier[success] = identifier[idaapi] . identifier[del_enum_member] ( identifier[self] . identifier[_eid] , identifier[value] , identifier[serial] , identifier[bmask] )
keyword[if] keyword[not] identifier[success] :
keyword[raise] identifier[exceptions] . identifier[CantDeleteEnumMember] ( literal[string] . identifier[format] ( identifier[name] )) | def remove(self, name):
"""Remove an enum member by name"""
member = self[name]
serial = member.serial
value = member.value
bmask = member.bmask
success = idaapi.del_enum_member(self._eid, value, serial, bmask)
if not success:
raise exceptions.CantDeleteEnumMember("Can't delete enum member {!r}.".format(name)) # depends on [control=['if'], data=[]] |
def cut(list_, index=0):
"""Cut a list by index or arg"""
if isinstance(index, int):
cut_ = lambda x: x[index]
else:
cut_ = lambda x: getattr(x, index)
return list(map(cut_, list_)) | def function[cut, parameter[list_, index]]:
constant[Cut a list by index or arg]
if call[name[isinstance], parameter[name[index], name[int]]] begin[:]
variable[cut_] assign[=] <ast.Lambda object at 0x7da20c993a90>
return[call[name[list], parameter[call[name[map], parameter[name[cut_], name[list_]]]]]] | keyword[def] identifier[cut] ( identifier[list_] , identifier[index] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[index] , identifier[int] ):
identifier[cut_] = keyword[lambda] identifier[x] : identifier[x] [ identifier[index] ]
keyword[else] :
identifier[cut_] = keyword[lambda] identifier[x] : identifier[getattr] ( identifier[x] , identifier[index] )
keyword[return] identifier[list] ( identifier[map] ( identifier[cut_] , identifier[list_] )) | def cut(list_, index=0):
"""Cut a list by index or arg"""
if isinstance(index, int):
cut_ = lambda x: x[index] # depends on [control=['if'], data=[]]
else:
cut_ = lambda x: getattr(x, index)
return list(map(cut_, list_)) |
def NewWalker(self, reader):
"""Setup an xmltextReader to parse a preparsed XML document.
This reuses the existing @reader xmlTextReader. """
if reader is None: reader__o = None
else: reader__o = reader._o
ret = libxml2mod.xmlReaderNewWalker(reader__o, self._o)
return ret | def function[NewWalker, parameter[self, reader]]:
constant[Setup an xmltextReader to parse a preparsed XML document.
This reuses the existing @reader xmlTextReader. ]
if compare[name[reader] is constant[None]] begin[:]
variable[reader__o] assign[=] constant[None]
variable[ret] assign[=] call[name[libxml2mod].xmlReaderNewWalker, parameter[name[reader__o], name[self]._o]]
return[name[ret]] | keyword[def] identifier[NewWalker] ( identifier[self] , identifier[reader] ):
literal[string]
keyword[if] identifier[reader] keyword[is] keyword[None] : identifier[reader__o] = keyword[None]
keyword[else] : identifier[reader__o] = identifier[reader] . identifier[_o]
identifier[ret] = identifier[libxml2mod] . identifier[xmlReaderNewWalker] ( identifier[reader__o] , identifier[self] . identifier[_o] )
keyword[return] identifier[ret] | def NewWalker(self, reader):
"""Setup an xmltextReader to parse a preparsed XML document.
This reuses the existing @reader xmlTextReader. """
if reader is None:
reader__o = None # depends on [control=['if'], data=[]]
else:
reader__o = reader._o
ret = libxml2mod.xmlReaderNewWalker(reader__o, self._o)
return ret |
def clear(self):
    """Clear task output: remove value ``budget_inflation_adjusted`` from all :class:`Movie` objects.
    """
    self.mark_incomplete()
    # One bulk UPDATE over all Movie rows, then commit and release the session.
    db_session = client.get_client().create_session()
    db_session.query(models.Movie).update({'budget_inflation_adjusted': None})
    db_session.commit()
    db_session.close()
constant[Clear task output: remove value ``budget_inflation_adjusted`` from all :class:`Movie` objects.
]
call[name[self].mark_incomplete, parameter[]]
variable[session] assign[=] call[call[name[client].get_client, parameter[]].create_session, parameter[]]
variable[movies] assign[=] call[name[session].query, parameter[name[models].Movie]]
call[name[movies].update, parameter[dictionary[[<ast.Constant object at 0x7da1b24e37f0>], [<ast.Constant object at 0x7da1b24e0a90>]]]]
call[name[session].commit, parameter[]]
call[name[session].close, parameter[]] | keyword[def] identifier[clear] ( identifier[self] ):
literal[string]
identifier[self] . identifier[mark_incomplete] ()
identifier[session] = identifier[client] . identifier[get_client] (). identifier[create_session] ()
identifier[movies] = identifier[session] . identifier[query] ( identifier[models] . identifier[Movie] )
identifier[movies] . identifier[update] ({ literal[string] : keyword[None] })
identifier[session] . identifier[commit] ()
identifier[session] . identifier[close] () | def clear(self):
"""Clear task output: remove value ``budget_inflation_adjusted`` from all :class:`Movie` objects.
"""
self.mark_incomplete()
session = client.get_client().create_session()
movies = session.query(models.Movie)
movies.update({'budget_inflation_adjusted': None})
session.commit()
session.close() |
def cmd_command_int(self, args):
    '''execute supplied command_int

    Parses a frame, a command id, current/autocontinue flags, four float
    params and three int/float coordinates from *args* and sends a
    MAVLink COMMAND_INT to the target vehicle.
    '''
    if len(args) != 11:
        print("num args{0}".format(len(args)))
        print("Usage: command_int frame command current autocontinue param1 param2 param3 param4 x y z")
        print("e.g. command_int GLOBAL_RELATIVE_ALT DO_SET_HOME 0 0 0 0 0 0 -353632120 1491659330 0")
        print("e.g. command_int GLOBAL MAV_CMD_DO_SET_ROI 0 0 0 0 0 0 5000000 5000000 500")
        return
    # Initialise so a failed lookup reaches the friendly error below;
    # the original left `frame` unbound (NameError) when both eval
    # attempts raised AttributeError.
    frame = None
    if args[0].isdigit():
        frame = int(args[0])
    else:
        # NOTE: eval of operator-typed console input; acceptable for an
        # interactive command, never for untrusted data.
        try:
            # attempt to allow MAV_FRAME_GLOBAL for frame
            frame = eval("mavutil.mavlink." + args[0])
        except AttributeError:
            try:
                # attempt to allow GLOBAL for frame
                frame = eval("mavutil.mavlink.MAV_FRAME_" + args[0])
            except AttributeError:
                pass
    if frame is None:
        print("Unknown frame ({0})".format(args[0]))
        return
    command = None
    if args[1].isdigit():
        command = int(args[1])
    else:
        # let "command_int ... MAV_CMD_DO_SET_HOME ..." work
        try:
            command = eval("mavutil.mavlink." + args[1])
        except AttributeError:
            try:
                # let "command_int ... DO_SET_HOME" work
                command = eval("mavutil.mavlink.MAV_CMD_" + args[1])
            except AttributeError:
                pass
    if command is None:
        # mirror the frame handling instead of crashing inside
        # command_int_send with command=None
        print("Unknown command ({0})".format(args[1]))
        return
    current = int(args[2])
    autocontinue = int(args[3])
    param1 = float(args[4])
    param2 = float(args[5])
    param3 = float(args[6])
    param4 = float(args[7])
    x = int(args[8])
    y = int(args[9])
    z = float(args[10])
    # Forward the parsed current/autocontinue values; the original
    # hard-coded 0, 0 in those positions despite parsing them above.
    self.master.mav.command_int_send(self.settings.target_system,
                                     self.settings.target_component,
                                     frame,
                                     command,
                                     current,
                                     autocontinue,
                                     param1,
                                     param2,
                                     param3,
                                     param4,
                                     x,
                                     y,
                                     z)
constant[execute supplied command_int]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[11]] begin[:]
call[name[print], parameter[call[constant[num args{0}].format, parameter[call[name[len], parameter[name[args]]]]]]]
call[name[print], parameter[constant[Usage: command_int frame command current autocontinue param1 param2 param3 param4 x y z]]]
call[name[print], parameter[constant[e.g. command_int GLOBAL_RELATIVE_ALT DO_SET_HOME 0 0 0 0 0 0 -353632120 1491659330 0]]]
call[name[print], parameter[constant[e.g. command_int GLOBAL MAV_CMD_DO_SET_ROI 0 0 0 0 0 0 5000000 5000000 500]]]
return[None]
if call[call[name[args]][constant[0]].isdigit, parameter[]] begin[:]
variable[frame] assign[=] call[name[int], parameter[call[name[args]][constant[0]]]]
if compare[name[frame] is constant[None]] begin[:]
call[name[print], parameter[call[constant[Unknown frame ({0})].format, parameter[call[name[args]][constant[0]]]]]]
return[None]
variable[command] assign[=] constant[None]
if call[call[name[args]][constant[1]].isdigit, parameter[]] begin[:]
variable[command] assign[=] call[name[int], parameter[call[name[args]][constant[1]]]]
variable[current] assign[=] call[name[int], parameter[call[name[args]][constant[2]]]]
variable[autocontinue] assign[=] call[name[int], parameter[call[name[args]][constant[3]]]]
variable[param1] assign[=] call[name[float], parameter[call[name[args]][constant[4]]]]
variable[param2] assign[=] call[name[float], parameter[call[name[args]][constant[5]]]]
variable[param3] assign[=] call[name[float], parameter[call[name[args]][constant[6]]]]
variable[param4] assign[=] call[name[float], parameter[call[name[args]][constant[7]]]]
variable[x] assign[=] call[name[int], parameter[call[name[args]][constant[8]]]]
variable[y] assign[=] call[name[int], parameter[call[name[args]][constant[9]]]]
variable[z] assign[=] call[name[float], parameter[call[name[args]][constant[10]]]]
call[name[self].master.mav.command_int_send, parameter[name[self].settings.target_system, name[self].settings.target_component, name[frame], name[command], constant[0], constant[0], name[param1], name[param2], name[param3], name[param4], name[x], name[y], name[z]]] | keyword[def] identifier[cmd_command_int] ( identifier[self] , identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[print] ( literal[string] . identifier[format] ( identifier[len] ( identifier[args] )))
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[return]
keyword[if] identifier[args] [ literal[int] ]. identifier[isdigit] ():
identifier[frame] = identifier[int] ( identifier[args] [ literal[int] ])
keyword[else] :
keyword[try] :
identifier[frame] = identifier[eval] ( literal[string] + identifier[args] [ literal[int] ])
keyword[except] identifier[AttributeError] keyword[as] identifier[e] :
keyword[try] :
identifier[frame] = identifier[eval] ( literal[string] + identifier[args] [ literal[int] ])
keyword[except] identifier[AttributeError] keyword[as] identifier[e] :
keyword[pass]
keyword[if] identifier[frame] keyword[is] keyword[None] :
identifier[print] ( literal[string] . identifier[format] ( identifier[args] [ literal[int] ]))
keyword[return]
identifier[command] = keyword[None]
keyword[if] identifier[args] [ literal[int] ]. identifier[isdigit] ():
identifier[command] = identifier[int] ( identifier[args] [ literal[int] ])
keyword[else] :
keyword[try] :
identifier[command] = identifier[eval] ( literal[string] + identifier[args] [ literal[int] ])
keyword[except] identifier[AttributeError] keyword[as] identifier[e] :
keyword[try] :
identifier[command] = identifier[eval] ( literal[string] + identifier[args] [ literal[int] ])
keyword[except] identifier[AttributeError] keyword[as] identifier[e] :
keyword[pass]
identifier[current] = identifier[int] ( identifier[args] [ literal[int] ])
identifier[autocontinue] = identifier[int] ( identifier[args] [ literal[int] ])
identifier[param1] = identifier[float] ( identifier[args] [ literal[int] ])
identifier[param2] = identifier[float] ( identifier[args] [ literal[int] ])
identifier[param3] = identifier[float] ( identifier[args] [ literal[int] ])
identifier[param4] = identifier[float] ( identifier[args] [ literal[int] ])
identifier[x] = identifier[int] ( identifier[args] [ literal[int] ])
identifier[y] = identifier[int] ( identifier[args] [ literal[int] ])
identifier[z] = identifier[float] ( identifier[args] [ literal[int] ])
identifier[self] . identifier[master] . identifier[mav] . identifier[command_int_send] ( identifier[self] . identifier[settings] . identifier[target_system] ,
identifier[self] . identifier[settings] . identifier[target_component] ,
identifier[frame] ,
identifier[command] ,
literal[int] ,
literal[int] ,
identifier[param1] ,
identifier[param2] ,
identifier[param3] ,
identifier[param4] ,
identifier[x] ,
identifier[y] ,
identifier[z] ) | def cmd_command_int(self, args):
"""execute supplied command_int"""
if len(args) != 11:
print('num args{0}'.format(len(args)))
print('Usage: command_int frame command current autocontinue param1 param2 param3 param4 x y z')
print('e.g. command_int GLOBAL_RELATIVE_ALT DO_SET_HOME 0 0 0 0 0 0 -353632120 1491659330 0')
print('e.g. command_int GLOBAL MAV_CMD_DO_SET_ROI 0 0 0 0 0 0 5000000 5000000 500')
return # depends on [control=['if'], data=[]]
if args[0].isdigit():
frame = int(args[0]) # depends on [control=['if'], data=[]]
else:
try:
# attempt to allow MAV_FRAME_GLOBAL for frame
frame = eval('mavutil.mavlink.' + args[0]) # depends on [control=['try'], data=[]]
except AttributeError as e:
try:
# attempt to allow GLOBAL for frame
frame = eval('mavutil.mavlink.MAV_FRAME_' + args[0]) # depends on [control=['try'], data=[]]
except AttributeError as e:
pass # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
if frame is None:
print('Unknown frame ({0})'.format(args[0]))
return # depends on [control=['if'], data=[]]
command = None
if args[1].isdigit():
command = int(args[1]) # depends on [control=['if'], data=[]]
else:
# let "command_int ... MAV_CMD_DO_SET_HOME ..." work
try:
command = eval('mavutil.mavlink.' + args[1]) # depends on [control=['try'], data=[]]
except AttributeError as e:
try:
# let "command_int ... DO_SET_HOME" work
command = eval('mavutil.mavlink.MAV_CMD_' + args[1]) # depends on [control=['try'], data=[]]
except AttributeError as e:
pass # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
current = int(args[2])
autocontinue = int(args[3])
param1 = float(args[4])
param2 = float(args[5])
param3 = float(args[6])
param4 = float(args[7])
x = int(args[8])
y = int(args[9])
z = float(args[10])
self.master.mav.command_int_send(self.settings.target_system, self.settings.target_component, frame, command, 0, 0, param1, param2, param3, param4, x, y, z) |
def parse_elem(element):
    """Parse a OSM node XML element.
    Args:
        element (etree.Element): XML Element to parse
    Returns:
        Node: Object representing parsed element
    """
    # Pull the common OSM attributes, then splat the shared flag tuple.
    flags = _parse_flags(element)
    return Node(int(element.get('id')), element.get('lat'),
                element.get('lon'), *flags)
constant[Parse a OSM node XML element.
Args:
element (etree.Element): XML Element to parse
Returns:
Node: Object representing parsed element
]
variable[ident] assign[=] call[name[int], parameter[call[name[element].get, parameter[constant[id]]]]]
variable[latitude] assign[=] call[name[element].get, parameter[constant[lat]]]
variable[longitude] assign[=] call[name[element].get, parameter[constant[lon]]]
variable[flags] assign[=] call[name[_parse_flags], parameter[name[element]]]
return[call[name[Node], parameter[name[ident], name[latitude], name[longitude], <ast.Starred object at 0x7da20c796200>]]] | keyword[def] identifier[parse_elem] ( identifier[element] ):
literal[string]
identifier[ident] = identifier[int] ( identifier[element] . identifier[get] ( literal[string] ))
identifier[latitude] = identifier[element] . identifier[get] ( literal[string] )
identifier[longitude] = identifier[element] . identifier[get] ( literal[string] )
identifier[flags] = identifier[_parse_flags] ( identifier[element] )
keyword[return] identifier[Node] ( identifier[ident] , identifier[latitude] , identifier[longitude] ,* identifier[flags] ) | def parse_elem(element):
"""Parse a OSM node XML element.
Args:
element (etree.Element): XML Element to parse
Returns:
Node: Object representing parsed element
"""
ident = int(element.get('id'))
latitude = element.get('lat')
longitude = element.get('lon')
flags = _parse_flags(element)
return Node(ident, latitude, longitude, *flags) |
def create_network(self):
    """Get an instance of vlan services facade."""
    facade = Network(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap,
    )
    return facade
constant[Get an instance of vlan services facade.]
return[call[name[Network], parameter[name[self].networkapi_url, name[self].user, name[self].password, name[self].user_ldap]]] | keyword[def] identifier[create_network] ( identifier[self] ):
literal[string]
keyword[return] identifier[Network] (
identifier[self] . identifier[networkapi_url] ,
identifier[self] . identifier[user] ,
identifier[self] . identifier[password] ,
identifier[self] . identifier[user_ldap] ) | def create_network(self):
"""Get an instance of vlan services facade."""
return Network(self.networkapi_url, self.user, self.password, self.user_ldap) |
def _get_systemd_services(root):
    '''
    Use os.listdir() to get all the unit files
    '''
    units = set()
    for config_path in SYSTEM_CONFIG_PATHS + (LOCAL_CONFIG_PATH,):
        config_path = _root(config_path, root)
        # Skip unreadable paths; skip links because another entry in
        # SYSTEM_CONFIG_PATHS or LOCAL_CONFIG_PATH likely points to the
        # same location.
        if not os.access(config_path, os.R_OK) or os.path.islink(config_path):
            continue
        for fullname in os.listdir(config_path):
            unit_name, dot, unit_type = fullname.rpartition('.')
            if not dot:
                # no extension -> cannot be a unit file
                continue
            if unit_type in VALID_UNIT_TYPES:
                # plain services are stored without their suffix
                units.add(unit_name if unit_type == 'service' else fullname)
    return units
constant[
Use os.listdir() to get all the unit files
]
variable[ret] assign[=] call[name[set], parameter[]]
for taget[name[path]] in starred[binary_operation[name[SYSTEM_CONFIG_PATHS] + tuple[[<ast.Name object at 0x7da20c76dd20>]]]] begin[:]
variable[path] assign[=] call[name[_root], parameter[name[path], name[root]]]
if <ast.BoolOp object at 0x7da20c76dde0> begin[:]
for taget[name[fullname]] in starred[call[name[os].listdir, parameter[name[path]]]] begin[:]
<ast.Try object at 0x7da2043451e0>
if compare[name[unit_type] in name[VALID_UNIT_TYPES]] begin[:]
call[name[ret].add, parameter[<ast.IfExp object at 0x7da2043459f0>]]
return[name[ret]] | keyword[def] identifier[_get_systemd_services] ( identifier[root] ):
literal[string]
identifier[ret] = identifier[set] ()
keyword[for] identifier[path] keyword[in] identifier[SYSTEM_CONFIG_PATHS] +( identifier[LOCAL_CONFIG_PATH] ,):
identifier[path] = identifier[_root] ( identifier[path] , identifier[root] )
keyword[if] identifier[os] . identifier[access] ( identifier[path] , identifier[os] . identifier[R_OK] ) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[islink] ( identifier[path] ):
keyword[for] identifier[fullname] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ):
keyword[try] :
identifier[unit_name] , identifier[unit_type] = identifier[fullname] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[except] identifier[ValueError] :
keyword[continue]
keyword[if] identifier[unit_type] keyword[in] identifier[VALID_UNIT_TYPES] :
identifier[ret] . identifier[add] ( identifier[unit_name] keyword[if] identifier[unit_type] == literal[string] keyword[else] identifier[fullname] )
keyword[return] identifier[ret] | def _get_systemd_services(root):
"""
Use os.listdir() to get all the unit files
"""
ret = set()
for path in SYSTEM_CONFIG_PATHS + (LOCAL_CONFIG_PATH,):
# Make sure user has access to the path, and if the path is a
# link it's likely that another entry in SYSTEM_CONFIG_PATHS
# or LOCAL_CONFIG_PATH points to it, so we can ignore it.
path = _root(path, root)
if os.access(path, os.R_OK) and (not os.path.islink(path)):
for fullname in os.listdir(path):
try:
(unit_name, unit_type) = fullname.rsplit('.', 1) # depends on [control=['try'], data=[]]
except ValueError:
continue # depends on [control=['except'], data=[]]
if unit_type in VALID_UNIT_TYPES:
ret.add(unit_name if unit_type == 'service' else fullname) # depends on [control=['if'], data=['unit_type']] # depends on [control=['for'], data=['fullname']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
return ret |
def get_jwt_claims(self, auth_token):
    """Decodes the auth_token into JWT claims represented as a JSON object.
    This method first tries to look up the cache and returns the result
    immediately in case of a cache hit. When cache misses, the method tries to
    decode the given auth token, verify its signature, and check the existence
    of required JWT claims. When successful, the decoded JWT claims are loaded
    into the cache and then returned.
    Args:
      auth_token: the auth token to be decoded.
    Returns:
      The decoded JWT claims.
    Raises:
      UnauthenticatedException: When the signature verification fails, or when
        required claims are missing.
    """
    def _decode_and_verify():
        # Unverified unpack first, only to learn the issuer and check
        # required claims; the signature is verified below.
        claims = jwt.JWT().unpack(auth_token).payload()
        _verify_required_claims_exist(claims)
        signing_keys = self._jwks_supplier.supply(claims[u"iss"])
        try:
            return jws.JWS().verify_compact(auth_token, signing_keys)
        except (jwkest.BadSignature, jws.NoSuitableSigningKeys,
                jws.SignerAlgError) as error:
            raise suppliers.UnauthenticatedException(u"Signature verification failed",
                                                     error)
    # Cache keyed on the raw token; _decode_and_verify runs only on a miss.
    return self._cache.get_or_create(auth_token, _decode_and_verify)
constant[Decodes the auth_token into JWT claims represented as a JSON object.
This method first tries to look up the cache and returns the result
immediately in case of a cache hit. When cache misses, the method tries to
decode the given auth token, verify its signature, and check the existence
of required JWT claims. When successful, the decoded JWT claims are loaded
into the cache and then returned.
Args:
auth_token: the auth token to be decoded.
Returns:
The decoded JWT claims.
Raises:
UnauthenticatedException: When the signature verification fails, or when
required claims are missing.
]
def function[_decode_and_verify, parameter[]]:
variable[jwt_claims] assign[=] call[call[call[name[jwt].JWT, parameter[]].unpack, parameter[name[auth_token]]].payload, parameter[]]
call[name[_verify_required_claims_exist], parameter[name[jwt_claims]]]
variable[issuer] assign[=] call[name[jwt_claims]][constant[iss]]
variable[keys] assign[=] call[name[self]._jwks_supplier.supply, parameter[name[issuer]]]
<ast.Try object at 0x7da1b04730d0>
return[call[name[self]._cache.get_or_create, parameter[name[auth_token], name[_decode_and_verify]]]] | keyword[def] identifier[get_jwt_claims] ( identifier[self] , identifier[auth_token] ):
literal[string]
keyword[def] identifier[_decode_and_verify] ():
identifier[jwt_claims] = identifier[jwt] . identifier[JWT] (). identifier[unpack] ( identifier[auth_token] ). identifier[payload] ()
identifier[_verify_required_claims_exist] ( identifier[jwt_claims] )
identifier[issuer] = identifier[jwt_claims] [ literal[string] ]
identifier[keys] = identifier[self] . identifier[_jwks_supplier] . identifier[supply] ( identifier[issuer] )
keyword[try] :
keyword[return] identifier[jws] . identifier[JWS] (). identifier[verify_compact] ( identifier[auth_token] , identifier[keys] )
keyword[except] ( identifier[jwkest] . identifier[BadSignature] , identifier[jws] . identifier[NoSuitableSigningKeys] ,
identifier[jws] . identifier[SignerAlgError] ) keyword[as] identifier[exception] :
keyword[raise] identifier[suppliers] . identifier[UnauthenticatedException] ( literal[string] ,
identifier[exception] )
keyword[return] identifier[self] . identifier[_cache] . identifier[get_or_create] ( identifier[auth_token] , identifier[_decode_and_verify] ) | def get_jwt_claims(self, auth_token):
"""Decodes the auth_token into JWT claims represented as a JSON object.
This method first tries to look up the cache and returns the result
immediately in case of a cache hit. When cache misses, the method tries to
decode the given auth token, verify its signature, and check the existence
of required JWT claims. When successful, the decoded JWT claims are loaded
into the cache and then returned.
Args:
auth_token: the auth token to be decoded.
Returns:
The decoded JWT claims.
Raises:
UnauthenticatedException: When the signature verification fails, or when
required claims are missing.
"""
def _decode_and_verify():
jwt_claims = jwt.JWT().unpack(auth_token).payload()
_verify_required_claims_exist(jwt_claims)
issuer = jwt_claims[u'iss']
keys = self._jwks_supplier.supply(issuer)
try:
return jws.JWS().verify_compact(auth_token, keys) # depends on [control=['try'], data=[]]
except (jwkest.BadSignature, jws.NoSuitableSigningKeys, jws.SignerAlgError) as exception:
raise suppliers.UnauthenticatedException(u'Signature verification failed', exception) # depends on [control=['except'], data=['exception']]
return self._cache.get_or_create(auth_token, _decode_and_verify) |
def p_export(self, p):
    '''export : IDENTIFIER LPAREN opt_param_types RPAREN
              | IDENTIFIER
              | EXPORT LPAREN opt_param_types RPAREN
              | ORDER LPAREN opt_param_types RPAREN'''
    # PLY grammar action: p[0] becomes (name, signatures).
    # unlikely case: the IDENTIFIER is an otherwise reserved name
    if len(p) <= 2:
        # bare IDENTIFIER form -> no signatures at all
        sigs = ()
    else:
        # empty parameter list still means one nullary signature
        sigs = p[3] or ((),)
    p[0] = p[1], sigs
constant[export : IDENTIFIER LPAREN opt_param_types RPAREN
| IDENTIFIER
| EXPORT LPAREN opt_param_types RPAREN
| ORDER LPAREN opt_param_types RPAREN]
if compare[call[name[len], parameter[name[p]]] greater[>] constant[2]] begin[:]
variable[sigs] assign[=] <ast.BoolOp object at 0x7da2054a7be0>
call[name[p]][constant[0]] assign[=] tuple[[<ast.Subscript object at 0x7da2054a4790>, <ast.Name object at 0x7da2054a4100>]] | keyword[def] identifier[p_export] ( identifier[self] , identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )> literal[int] :
identifier[sigs] = identifier[p] [ literal[int] ] keyword[or] ((),)
keyword[else] :
identifier[sigs] =()
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ], identifier[sigs] | def p_export(self, p):
"""export : IDENTIFIER LPAREN opt_param_types RPAREN
| IDENTIFIER
| EXPORT LPAREN opt_param_types RPAREN
| ORDER LPAREN opt_param_types RPAREN"""
# unlikely case: the IDENTIFIER is an otherwise reserved name
if len(p) > 2:
sigs = p[3] or ((),) # depends on [control=['if'], data=[]]
else:
sigs = ()
p[0] = (p[1], sigs) |
def from_headers(self, headers):
    """Generate a SpanContext object using the trace context header.
    :type headers: dict
    :param headers: HTTP request headers.
    :rtype: :class:`~opencensus.trace.span_context.SpanContext`
    :returns: SpanContext generated from the trace context header.
    """
    if headers is None:
        return SpanContext()
    header = headers.get(_TRACE_CONTEXT_HEADER_NAME)
    if header is None:
        return SpanContext()
    # Normalize to text. The previous str(header.encode('utf-8'))
    # round-trip produced the literal "b'...'" string on Python 3,
    # corrupting the header before parsing.
    if isinstance(header, bytes):
        header = header.decode('utf-8')
    return self.from_header(str(header))
constant[Generate a SpanContext object using the trace context header.
:type headers: dict
:param headers: HTTP request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header.
]
if compare[name[headers] is constant[None]] begin[:]
return[call[name[SpanContext], parameter[]]]
variable[header] assign[=] call[name[headers].get, parameter[name[_TRACE_CONTEXT_HEADER_NAME]]]
if compare[name[header] is constant[None]] begin[:]
return[call[name[SpanContext], parameter[]]]
variable[header] assign[=] call[name[str], parameter[call[name[header].encode, parameter[constant[utf-8]]]]]
return[call[name[self].from_header, parameter[name[header]]]] | keyword[def] identifier[from_headers] ( identifier[self] , identifier[headers] ):
literal[string]
keyword[if] identifier[headers] keyword[is] keyword[None] :
keyword[return] identifier[SpanContext] ()
identifier[header] = identifier[headers] . identifier[get] ( identifier[_TRACE_CONTEXT_HEADER_NAME] )
keyword[if] identifier[header] keyword[is] keyword[None] :
keyword[return] identifier[SpanContext] ()
identifier[header] = identifier[str] ( identifier[header] . identifier[encode] ( literal[string] ))
keyword[return] identifier[self] . identifier[from_header] ( identifier[header] ) | def from_headers(self, headers):
"""Generate a SpanContext object using the trace context header.
:type headers: dict
:param headers: HTTP request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header.
"""
if headers is None:
return SpanContext() # depends on [control=['if'], data=[]]
header = headers.get(_TRACE_CONTEXT_HEADER_NAME)
if header is None:
return SpanContext() # depends on [control=['if'], data=[]]
header = str(header.encode('utf-8'))
return self.from_header(header) |
def close_files(subseqs):
    """Close file statements."""
    print(' . close_files')
    # Emit a Cython close_files method that fcloses every sequence's
    # file when its disk flag is set.
    code = Lines()
    code.add(1, 'cpdef inline close_files(self):')
    for sequence in subseqs:
        code.add(2, 'if self._%s_diskflag:' % sequence.name)
        code.add(3, 'fclose(self._%s_file)' % sequence.name)
    return code
constant[Close file statements.]
call[name[print], parameter[constant[ . close_files]]]
variable[lines] assign[=] call[name[Lines], parameter[]]
call[name[lines].add, parameter[constant[1], constant[cpdef inline close_files(self):]]]
for taget[name[seq]] in starred[name[subseqs]] begin[:]
call[name[lines].add, parameter[constant[2], binary_operation[constant[if self._%s_diskflag:] <ast.Mod object at 0x7da2590d6920> name[seq].name]]]
call[name[lines].add, parameter[constant[3], binary_operation[constant[fclose(self._%s_file)] <ast.Mod object at 0x7da2590d6920> name[seq].name]]]
return[name[lines]] | keyword[def] identifier[close_files] ( identifier[subseqs] ):
literal[string]
identifier[print] ( literal[string] )
identifier[lines] = identifier[Lines] ()
identifier[lines] . identifier[add] ( literal[int] , literal[string] )
keyword[for] identifier[seq] keyword[in] identifier[subseqs] :
identifier[lines] . identifier[add] ( literal[int] , literal[string] % identifier[seq] . identifier[name] )
identifier[lines] . identifier[add] ( literal[int] , literal[string] % identifier[seq] . identifier[name] )
keyword[return] identifier[lines] | def close_files(subseqs):
"""Close file statements."""
print(' . close_files')
lines = Lines()
lines.add(1, 'cpdef inline close_files(self):')
for seq in subseqs:
lines.add(2, 'if self._%s_diskflag:' % seq.name)
lines.add(3, 'fclose(self._%s_file)' % seq.name) # depends on [control=['for'], data=['seq']]
return lines |
def save_grid_data(self):
    """
    Save grid data in the data object.

    Walks every changed row recorded in ``self.grid.changes`` and pushes
    the cell values into the er_magic data model: a row with no backing
    object becomes a new item, a row with one updates the existing item.
    Column labels ending in '++' and the 'er_citation_names' column get
    special handling (see inline comments).
    """
    if not self.grid.changes:
        print('-I- No changes to save')
        return
    if self.grid_type == 'age':
        age_data_type = self.er_magic.age_type
        self.er_magic.write_ages = True
    # NOTE(review): return value unused; presumably called for its side
    # effect on the grid's column labels -- confirm
    starred_cols = self.grid.remove_starred_labels()
    self.grid.SaveEditControlValue() # locks in value in cell currently edited
    if self.grid.changes:
        num_cols = self.grid.GetNumberCols()
        for change in self.grid.changes:
            # -1 appears to be a sentinel entry in the changes set; skip it
            if change == -1:
                continue
            else:
                old_item = self.grid.row_items[change]
                new_item_name = self.grid.GetCellValue(change, 0)
                new_er_data = {}
                new_pmag_data = {}
                er_header = self.grid_headers[self.grid_type]['er'][0]
                pmag_header = self.grid_headers[self.grid_type]['pmag'][0]
                # column 1 holds the parent name when this grid type has
                # a parent, so data columns start at 2 in that case
                start_num = 2 if self.parent_type else 1
                result_data = {}
                for col in range(start_num, num_cols):
                    col_label = str(self.grid.GetColLabelValue(col))
                    value = str(self.grid.GetCellValue(change, col))
                    #new_data[col_label] = value
                    # a lone tab appears to be a placeholder for an
                    # empty cell -- treat it as empty
                    if value == '\t':
                        value = ''
                    # labels suffixed '++' mark pmag-only columns:
                    # strip the marker and store in the pmag dict
                    if '++' in col_label:
                        col_name = col_label[:-2]
                        new_pmag_data[col_name] = value
                        continue
                    # pmag_* files are new interpretations, so should only have "This study"
                    # er_* files can have multiple citations
                    if col_label == 'er_citation_names':
                        new_pmag_data[col_label] = 'This study'
                        new_er_data[col_label] = value
                        continue
                    if er_header and (col_label in er_header):
                        new_er_data[col_label] = value
                    if self.grid_type in ('specimen', 'sample', 'site'):
                        # er_magic.double lists columns excluded from the
                        # pmag data for these grid types
                        if pmag_header and (col_label in pmag_header) and (col_label not in self.er_magic.double):
                            new_pmag_data[col_label] = value
                    else:
                        if pmag_header and (col_label in pmag_header):
                            new_pmag_data[col_label] = value
                    # result rows also track the names of their children
                    if col_label in ('er_specimen_names', 'er_sample_names',
                                     'er_site_names', 'er_location_names'):
                        result_data[col_label] = value
                # if there is an item in the data, get its name
                if isinstance(old_item, str):
                    old_item_name = None
                else:
                    old_item_name = self.grid.row_items[change].name
                if self.parent_type:
                    new_parent_name = self.grid.GetCellValue(change, 1)
                else:
                    new_parent_name = ''
                # create a new item
                if new_item_name and not old_item_name:
                    print('-I- make new item named', new_item_name)
                    if self.grid_type == 'result':
                        specs, samps, sites, locs = self.get_result_children(result_data)
                        item = self.er_magic.add_result(new_item_name, specs, samps, sites,
                                                        locs, new_pmag_data)
                    else:
                        item = self.er_magic.add_methods[self.grid_type](new_item_name, new_parent_name,
                                                                         new_er_data, new_pmag_data)
                # update an existing item
                elif new_item_name and old_item_name:
                    print('-I- update existing {} formerly named {} to {}'.format(self.grid_type,
                                                                                 old_item_name,
                                                                                 new_item_name))
                    if self.grid_type == 'result':
                        specs, samps, sites, locs = self.get_result_children(result_data)
                        item = self.er_magic.update_methods['result'](old_item_name, new_item_name,
                                                                      new_er_data=None,
                                                                      new_pmag_data=new_pmag_data,
                                                                      spec_names=specs,
                                                                      samp_names=samps,
                                                                      site_names=sites,
                                                                      loc_names=locs,
                                                                      replace_data=True)
                    elif self.grid_type == 'age':
                        # age_data_type was captured at the top of this
                        # method, which only happens when grid_type == 'age'
                        item_type = age_data_type
                        item = self.er_magic.update_methods['age'](old_item_name, new_er_data,
                                                                   item_type, replace_data=True)
                    else:
                        item = self.er_magic.update_methods[self.grid_type](old_item_name, new_item_name,
                                                                            new_parent_name, new_er_data,
                                                                            new_pmag_data, replace_data=True)
constant[
Save grid data in the data object
]
if <ast.UnaryOp object at 0x7da1b26afb20> begin[:]
call[name[print], parameter[constant[-I- No changes to save]]]
return[None]
if compare[name[self].grid_type equal[==] constant[age]] begin[:]
variable[age_data_type] assign[=] name[self].er_magic.age_type
name[self].er_magic.write_ages assign[=] constant[True]
variable[starred_cols] assign[=] call[name[self].grid.remove_starred_labels, parameter[]]
call[name[self].grid.SaveEditControlValue, parameter[]]
if name[self].grid.changes begin[:]
variable[num_cols] assign[=] call[name[self].grid.GetNumberCols, parameter[]]
for taget[name[change]] in starred[name[self].grid.changes] begin[:]
if compare[name[change] equal[==] <ast.UnaryOp object at 0x7da1b26ae260>] begin[:]
continue | keyword[def] identifier[save_grid_data] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[grid] . identifier[changes] :
identifier[print] ( literal[string] )
keyword[return]
keyword[if] identifier[self] . identifier[grid_type] == literal[string] :
identifier[age_data_type] = identifier[self] . identifier[er_magic] . identifier[age_type]
identifier[self] . identifier[er_magic] . identifier[write_ages] = keyword[True]
identifier[starred_cols] = identifier[self] . identifier[grid] . identifier[remove_starred_labels] ()
identifier[self] . identifier[grid] . identifier[SaveEditControlValue] ()
keyword[if] identifier[self] . identifier[grid] . identifier[changes] :
identifier[num_cols] = identifier[self] . identifier[grid] . identifier[GetNumberCols] ()
keyword[for] identifier[change] keyword[in] identifier[self] . identifier[grid] . identifier[changes] :
keyword[if] identifier[change] ==- literal[int] :
keyword[continue]
keyword[else] :
identifier[old_item] = identifier[self] . identifier[grid] . identifier[row_items] [ identifier[change] ]
identifier[new_item_name] = identifier[self] . identifier[grid] . identifier[GetCellValue] ( identifier[change] , literal[int] )
identifier[new_er_data] ={}
identifier[new_pmag_data] ={}
identifier[er_header] = identifier[self] . identifier[grid_headers] [ identifier[self] . identifier[grid_type] ][ literal[string] ][ literal[int] ]
identifier[pmag_header] = identifier[self] . identifier[grid_headers] [ identifier[self] . identifier[grid_type] ][ literal[string] ][ literal[int] ]
identifier[start_num] = literal[int] keyword[if] identifier[self] . identifier[parent_type] keyword[else] literal[int]
identifier[result_data] ={}
keyword[for] identifier[col] keyword[in] identifier[range] ( identifier[start_num] , identifier[num_cols] ):
identifier[col_label] = identifier[str] ( identifier[self] . identifier[grid] . identifier[GetColLabelValue] ( identifier[col] ))
identifier[value] = identifier[str] ( identifier[self] . identifier[grid] . identifier[GetCellValue] ( identifier[change] , identifier[col] ))
keyword[if] identifier[value] == literal[string] :
identifier[value] = literal[string]
keyword[if] literal[string] keyword[in] identifier[col_label] :
identifier[col_name] = identifier[col_label] [:- literal[int] ]
identifier[new_pmag_data] [ identifier[col_name] ]= identifier[value]
keyword[continue]
keyword[if] identifier[col_label] == literal[string] :
identifier[new_pmag_data] [ identifier[col_label] ]= literal[string]
identifier[new_er_data] [ identifier[col_label] ]= identifier[value]
keyword[continue]
keyword[if] identifier[er_header] keyword[and] ( identifier[col_label] keyword[in] identifier[er_header] ):
identifier[new_er_data] [ identifier[col_label] ]= identifier[value]
keyword[if] identifier[self] . identifier[grid_type] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[pmag_header] keyword[and] ( identifier[col_label] keyword[in] identifier[pmag_header] ) keyword[and] ( identifier[col_label] keyword[not] keyword[in] identifier[self] . identifier[er_magic] . identifier[double] ):
identifier[new_pmag_data] [ identifier[col_label] ]= identifier[value]
keyword[else] :
keyword[if] identifier[pmag_header] keyword[and] ( identifier[col_label] keyword[in] identifier[pmag_header] ):
identifier[new_pmag_data] [ identifier[col_label] ]= identifier[value]
keyword[if] identifier[col_label] keyword[in] ( literal[string] , literal[string] ,
literal[string] , literal[string] ):
identifier[result_data] [ identifier[col_label] ]= identifier[value]
keyword[if] identifier[isinstance] ( identifier[old_item] , identifier[str] ):
identifier[old_item_name] = keyword[None]
keyword[else] :
identifier[old_item_name] = identifier[self] . identifier[grid] . identifier[row_items] [ identifier[change] ]. identifier[name]
keyword[if] identifier[self] . identifier[parent_type] :
identifier[new_parent_name] = identifier[self] . identifier[grid] . identifier[GetCellValue] ( identifier[change] , literal[int] )
keyword[else] :
identifier[new_parent_name] = literal[string]
keyword[if] identifier[new_item_name] keyword[and] keyword[not] identifier[old_item_name] :
identifier[print] ( literal[string] , identifier[new_item_name] )
keyword[if] identifier[self] . identifier[grid_type] == literal[string] :
identifier[specs] , identifier[samps] , identifier[sites] , identifier[locs] = identifier[self] . identifier[get_result_children] ( identifier[result_data] )
identifier[item] = identifier[self] . identifier[er_magic] . identifier[add_result] ( identifier[new_item_name] , identifier[specs] , identifier[samps] , identifier[sites] ,
identifier[locs] , identifier[new_pmag_data] )
keyword[else] :
identifier[item] = identifier[self] . identifier[er_magic] . identifier[add_methods] [ identifier[self] . identifier[grid_type] ]( identifier[new_item_name] , identifier[new_parent_name] ,
identifier[new_er_data] , identifier[new_pmag_data] )
keyword[elif] identifier[new_item_name] keyword[and] identifier[old_item_name] :
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[grid_type] ,
identifier[old_item_name] ,
identifier[new_item_name] ))
keyword[if] identifier[self] . identifier[grid_type] == literal[string] :
identifier[specs] , identifier[samps] , identifier[sites] , identifier[locs] = identifier[self] . identifier[get_result_children] ( identifier[result_data] )
identifier[item] = identifier[self] . identifier[er_magic] . identifier[update_methods] [ literal[string] ]( identifier[old_item_name] , identifier[new_item_name] ,
identifier[new_er_data] = keyword[None] ,
identifier[new_pmag_data] = identifier[new_pmag_data] ,
identifier[spec_names] = identifier[specs] ,
identifier[samp_names] = identifier[samps] ,
identifier[site_names] = identifier[sites] ,
identifier[loc_names] = identifier[locs] ,
identifier[replace_data] = keyword[True] )
keyword[elif] identifier[self] . identifier[grid_type] == literal[string] :
identifier[item_type] = identifier[age_data_type]
identifier[item] = identifier[self] . identifier[er_magic] . identifier[update_methods] [ literal[string] ]( identifier[old_item_name] , identifier[new_er_data] ,
identifier[item_type] , identifier[replace_data] = keyword[True] )
keyword[else] :
identifier[item] = identifier[self] . identifier[er_magic] . identifier[update_methods] [ identifier[self] . identifier[grid_type] ]( identifier[old_item_name] , identifier[new_item_name] ,
identifier[new_parent_name] , identifier[new_er_data] ,
identifier[new_pmag_data] , identifier[replace_data] = keyword[True] ) | def save_grid_data(self):
"""
Save grid data in the data object
"""
if not self.grid.changes:
print('-I- No changes to save')
return # depends on [control=['if'], data=[]]
if self.grid_type == 'age':
age_data_type = self.er_magic.age_type
self.er_magic.write_ages = True # depends on [control=['if'], data=[]]
starred_cols = self.grid.remove_starred_labels()
self.grid.SaveEditControlValue() # locks in value in cell currently edited
if self.grid.changes:
num_cols = self.grid.GetNumberCols()
for change in self.grid.changes:
if change == -1:
continue # depends on [control=['if'], data=[]]
else:
old_item = self.grid.row_items[change]
new_item_name = self.grid.GetCellValue(change, 0)
new_er_data = {}
new_pmag_data = {}
er_header = self.grid_headers[self.grid_type]['er'][0]
pmag_header = self.grid_headers[self.grid_type]['pmag'][0]
start_num = 2 if self.parent_type else 1
result_data = {}
for col in range(start_num, num_cols):
col_label = str(self.grid.GetColLabelValue(col))
value = str(self.grid.GetCellValue(change, col))
#new_data[col_label] = value
if value == '\t':
value = '' # depends on [control=['if'], data=['value']]
if '++' in col_label:
col_name = col_label[:-2]
new_pmag_data[col_name] = value
continue # depends on [control=['if'], data=['col_label']]
# pmag_* files are new interpretations, so should only have "This study"
# er_* files can have multiple citations
if col_label == 'er_citation_names':
new_pmag_data[col_label] = 'This study'
new_er_data[col_label] = value
continue # depends on [control=['if'], data=['col_label']]
if er_header and col_label in er_header:
new_er_data[col_label] = value # depends on [control=['if'], data=[]]
if self.grid_type in ('specimen', 'sample', 'site'):
if pmag_header and col_label in pmag_header and (col_label not in self.er_magic.double):
new_pmag_data[col_label] = value # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif pmag_header and col_label in pmag_header:
new_pmag_data[col_label] = value # depends on [control=['if'], data=[]]
if col_label in ('er_specimen_names', 'er_sample_names', 'er_site_names', 'er_location_names'):
result_data[col_label] = value # depends on [control=['if'], data=['col_label']] # depends on [control=['for'], data=['col']]
# if there is an item in the data, get its name
if isinstance(old_item, str):
old_item_name = None # depends on [control=['if'], data=[]]
else:
old_item_name = self.grid.row_items[change].name
if self.parent_type:
new_parent_name = self.grid.GetCellValue(change, 1) # depends on [control=['if'], data=[]]
else:
new_parent_name = ''
# create a new item
if new_item_name and (not old_item_name):
print('-I- make new item named', new_item_name)
if self.grid_type == 'result':
(specs, samps, sites, locs) = self.get_result_children(result_data)
item = self.er_magic.add_result(new_item_name, specs, samps, sites, locs, new_pmag_data) # depends on [control=['if'], data=[]]
else:
item = self.er_magic.add_methods[self.grid_type](new_item_name, new_parent_name, new_er_data, new_pmag_data) # depends on [control=['if'], data=[]]
# update an existing item
elif new_item_name and old_item_name:
print('-I- update existing {} formerly named {} to {}'.format(self.grid_type, old_item_name, new_item_name))
if self.grid_type == 'result':
(specs, samps, sites, locs) = self.get_result_children(result_data)
item = self.er_magic.update_methods['result'](old_item_name, new_item_name, new_er_data=None, new_pmag_data=new_pmag_data, spec_names=specs, samp_names=samps, site_names=sites, loc_names=locs, replace_data=True) # depends on [control=['if'], data=[]]
elif self.grid_type == 'age':
item_type = age_data_type
item = self.er_magic.update_methods['age'](old_item_name, new_er_data, item_type, replace_data=True) # depends on [control=['if'], data=[]]
else:
item = self.er_magic.update_methods[self.grid_type](old_item_name, new_item_name, new_parent_name, new_er_data, new_pmag_data, replace_data=True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['change']] # depends on [control=['if'], data=[]] |
def mapiv2(ol, map_func, *args, **kwargs):
    '''
    Map ``map_func`` over ``ol``, passing each element's index and value
    (plus optional extra arguments) and return the list of results.

    ``map_func`` is called as ``map_func(index, value, *extra)`` where
    ``extra`` is taken from positional ``*args`` when any are given,
    otherwise from the ``map_func_args`` keyword argument (default: none).

    from elist.elist import *
    ol = ['a','b','c','d']
    #1
    def map_func(index,value,*others):
        return(value * index + others[0] +others[-1])

    mapiv(ol,map_func,'tailA-','tailB')
    #2
    mapiv2(ol,lambda index,value,other:(value*index+other),['-'])
    mapiv2(ol,lambda index,value,other:(value*index+other),'-')
    mapiv2(ol,lambda index,value:(value*index))
    '''
    # Positional extras win over the keyword form, matching the original
    # precedence: args first, then kwargs['map_func_args'], then nothing.
    if args:
        map_func_args = list(args)
    else:
        map_func_args = kwargs.get('map_func_args', [])
    # enumerate + comprehension replaces the manual range(len(...)) loop.
    return [map_func(i, ele, *map_func_args) for i, ele in enumerate(ol)]
constant[
from elist.elist import *
ol = ['a','b','c','d']
#1
def map_func(index,value,*others):
return(value * index + others[0] +others[-1])
mapiv(ol,map_func,'tailA-','tailB')
#2
mapiv2(ol,lambda index,value,other:(value*index+other),['-'])
mapiv2(ol,lambda index,value,other:(value*index+other),'-')
mapiv2(ol,lambda index,value:(value*index))
]
variable[args] assign[=] call[name[list], parameter[name[args]]]
if compare[call[name[args].__len__, parameter[]] greater[>] constant[0]] begin[:]
variable[map_func_args] assign[=] name[args]
variable[lngth] assign[=] call[name[ol].__len__, parameter[]]
variable[rslt] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[lngth]]]] begin[:]
variable[ele] assign[=] call[name[map_func], parameter[name[i], call[name[ol]][name[i]], <ast.Starred object at 0x7da20c6aa3e0>]]
call[name[rslt].append, parameter[name[ele]]]
return[name[rslt]] | keyword[def] identifier[mapiv2] ( identifier[ol] , identifier[map_func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[args] = identifier[list] ( identifier[args] )
keyword[if] ( identifier[args] . identifier[__len__] ()> literal[int] ):
identifier[map_func_args] = identifier[args]
keyword[else] :
keyword[if] ( literal[string] keyword[in] identifier[kwargs] ):
identifier[map_func_args] = identifier[kwargs] [ literal[string] ]
keyword[else] :
identifier[map_func_args] =[]
identifier[lngth] = identifier[ol] . identifier[__len__] ()
identifier[rslt] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[lngth] ):
identifier[ele] = identifier[map_func] ( identifier[i] , identifier[ol] [ identifier[i] ],* identifier[map_func_args] )
identifier[rslt] . identifier[append] ( identifier[ele] )
keyword[return] ( identifier[rslt] ) | def mapiv2(ol, map_func, *args, **kwargs):
"""
from elist.elist import *
ol = ['a','b','c','d']
#1
def map_func(index,value,*others):
return(value * index + others[0] +others[-1])
mapiv(ol,map_func,'tailA-','tailB')
#2
mapiv2(ol,lambda index,value,other:(value*index+other),['-'])
mapiv2(ol,lambda index,value,other:(value*index+other),'-')
mapiv2(ol,lambda index,value:(value*index))
"""
args = list(args)
if args.__len__() > 0:
map_func_args = args # depends on [control=['if'], data=[]]
elif 'map_func_args' in kwargs:
map_func_args = kwargs['map_func_args'] # depends on [control=['if'], data=['kwargs']]
else:
map_func_args = []
lngth = ol.__len__()
rslt = []
for i in range(0, lngth):
ele = map_func(i, ol[i], *map_func_args)
rslt.append(ele) # depends on [control=['for'], data=['i']]
return rslt |
def extend(cls, name):
    """
    Derive a new subclass of leancloud.Object.

    :param name: name of the derived subclass
    :type name: string_types
    :return: the derived subclass
    :rtype: ObjectMeta
    """
    class_name = name
    # Python 2 requires class names to be byte strings (str), so encode
    # unicode names before handing them to type().
    if six.PY2 and isinstance(class_name, six.text_type):
        class_name = class_name.encode('utf-8')
    return type(class_name, (cls,), {})
constant[
派生一个新的 leancloud.Object 子类
:param name: 子类名称
:type name: string_types
:return: 派生的子类
:rtype: ObjectMeta
]
if <ast.BoolOp object at 0x7da1b0c89480> begin[:]
variable[name] assign[=] call[name[name].encode, parameter[constant[utf-8]]]
return[call[name[type], parameter[name[name], tuple[[<ast.Name object at 0x7da1b0c406d0>]], dictionary[[], []]]]] | keyword[def] identifier[extend] ( identifier[cls] , identifier[name] ):
literal[string]
keyword[if] identifier[six] . identifier[PY2] keyword[and] identifier[isinstance] ( identifier[name] , identifier[six] . identifier[text_type] ):
identifier[name] = identifier[name] . identifier[encode] ( literal[string] )
keyword[return] identifier[type] ( identifier[name] ,( identifier[cls] ,),{}) | def extend(cls, name):
"""
派生一个新的 leancloud.Object 子类
:param name: 子类名称
:type name: string_types
:return: 派生的子类
:rtype: ObjectMeta
"""
if six.PY2 and isinstance(name, six.text_type):
# In python2, class name must be a python2 str.
name = name.encode('utf-8') # depends on [control=['if'], data=[]]
return type(name, (cls,), {}) |
def restore_region(self, region, bbox=None, xy=None):
    """
    Restore the saved region. If bbox (instance of BboxBase, or
    its extents) is given, only the region specified by the bbox
    will be restored. *xy* (a tuple of two floats) optionally
    specifies the new position (the LLC of the original region,
    not the LLC of the bbox) where the region will be restored.

    >>> region = renderer.copy_from_bbox()
    >>> x1, y1, x2, y2 = region.get_extents()
    >>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
    ...                         xy=(x1-dx, y1))
    """
    # Fast path: no clipping box and no offset, blit the whole region back.
    if bbox is None and xy is None:
        self._renderer.restore_region(region)
        return

    # Resolve the clip rectangle: fall back to the region's own extents,
    # accept a BboxBase, or unpack a plain (x1, y1, x2, y2) sequence.
    if bbox is None:
        x1, y1, x2, y2 = region.get_extents()
    elif isinstance(bbox, BboxBase):
        x1, y1, x2, y2 = bbox.extents
    else:
        x1, y1, x2, y2 = bbox

    # Destination defaults to the region's original lower-left corner.
    ox, oy = (x1, y1) if xy is None else xy

    self._renderer.restore_region2(region, x1, y1, x2, y2, ox, oy)
constant[
Restore the saved region. If bbox (instance of BboxBase, or
its extents) is given, only the region specified by the bbox
will be restored. *xy* (a tuple of two floasts) optionally
specifies the new position (the LLC of the original region,
not the LLC of the bbox) where the region will be restored.
>>> region = renderer.copy_from_bbox()
>>> x1, y1, x2, y2 = region.get_extents()
>>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
... xy=(x1-dx, y1))
]
if <ast.BoolOp object at 0x7da1b1623d60> begin[:]
if compare[name[bbox] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b1623b20> assign[=] call[name[region].get_extents, parameter[]]
if compare[name[xy] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b1628cd0> assign[=] tuple[[<ast.Name object at 0x7da1b162bb80>, <ast.Name object at 0x7da1b162b280>]]
call[name[self]._renderer.restore_region2, parameter[name[region], name[x1], name[y1], name[x2], name[y2], name[ox], name[oy]]] | keyword[def] identifier[restore_region] ( identifier[self] , identifier[region] , identifier[bbox] = keyword[None] , identifier[xy] = keyword[None] ):
literal[string]
keyword[if] identifier[bbox] keyword[is] keyword[not] keyword[None] keyword[or] identifier[xy] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[bbox] keyword[is] keyword[None] :
identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] = identifier[region] . identifier[get_extents] ()
keyword[elif] identifier[isinstance] ( identifier[bbox] , identifier[BboxBase] ):
identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] = identifier[bbox] . identifier[extents]
keyword[else] :
identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] = identifier[bbox]
keyword[if] identifier[xy] keyword[is] keyword[None] :
identifier[ox] , identifier[oy] = identifier[x1] , identifier[y1]
keyword[else] :
identifier[ox] , identifier[oy] = identifier[xy]
identifier[self] . identifier[_renderer] . identifier[restore_region2] ( identifier[region] , identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] , identifier[ox] , identifier[oy] )
keyword[else] :
identifier[self] . identifier[_renderer] . identifier[restore_region] ( identifier[region] ) | def restore_region(self, region, bbox=None, xy=None):
"""
Restore the saved region. If bbox (instance of BboxBase, or
its extents) is given, only the region specified by the bbox
will be restored. *xy* (a tuple of two floasts) optionally
specifies the new position (the LLC of the original region,
not the LLC of the bbox) where the region will be restored.
>>> region = renderer.copy_from_bbox()
>>> x1, y1, x2, y2 = region.get_extents()
>>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
... xy=(x1-dx, y1))
"""
if bbox is not None or xy is not None:
if bbox is None:
(x1, y1, x2, y2) = region.get_extents() # depends on [control=['if'], data=[]]
elif isinstance(bbox, BboxBase):
(x1, y1, x2, y2) = bbox.extents # depends on [control=['if'], data=[]]
else:
(x1, y1, x2, y2) = bbox
if xy is None:
(ox, oy) = (x1, y1) # depends on [control=['if'], data=[]]
else:
(ox, oy) = xy
self._renderer.restore_region2(region, x1, y1, x2, y2, ox, oy) # depends on [control=['if'], data=[]]
else:
self._renderer.restore_region(region) |
def load_overrides(introspection_module):
    """Loads overrides for an introspection module.

    Either returns the same module again in case there are no overrides or a
    proxy module including overrides. Doesn't cache the result.
    """

    namespace = introspection_module.__name__.rsplit(".", 1)[-1]
    module_keys = [prefix + "." + namespace for prefix in const.PREFIX]

    # Remember the pre-existing sys.modules entry for *every* key so each can
    # be restored individually.  (The previous code only kept the state of the
    # last key in the loop and restored that single value for all keys, which
    # clobbered entries whenever the keys' previous states differed.)
    old_modules = {}
    for module_key in module_keys:
        if module_key in sys.modules:
            old_modules[module_key] = sys.modules[module_key]

    # Create a new sub type, so we can separate descriptors like
    # _DeprecatedAttribute for each namespace.
    proxy_type = type(namespace + "ProxyModule", (OverridesProxyModule, ), {})

    proxy = proxy_type(introspection_module)

    # We use sys.modules so overrides can import from gi.repository
    # but restore everything at the end so this doesn't have any side effects
    for module_key in module_keys:
        sys.modules[module_key] = proxy

    try:
        override_package_name = 'pgi.overrides.' + namespace

        # http://bugs.python.org/issue14710
        try:
            override_loader = get_loader(override_package_name)

        except AttributeError:
            override_loader = None

        # Avoid checking for an ImportError, an override might
        # depend on a missing module thus causing an ImportError
        if override_loader is None:
            return introspection_module

        override_mod = importlib.import_module(override_package_name)

    finally:
        # Undo the temporary sys.modules changes (also runs on the early
        # return above), restoring each key's own previous entry if any.
        for module_key in module_keys:
            del sys.modules[module_key]
            if module_key in old_modules:
                sys.modules[module_key] = old_modules[module_key]

    override_all = []
    if hasattr(override_mod, "__all__"):
        override_all = override_mod.__all__

    for var in override_all:
        try:
            item = getattr(override_mod, var)
        except (AttributeError, TypeError):
            # Gedit puts a non-string in __all__, so catch TypeError here
            continue

        # make sure new classes have a proper __module__
        try:
            if item.__module__.split(".")[-1] == namespace:
                item.__module__ = namespace
        except AttributeError:
            pass

        setattr(proxy, var, item)

    # Replace deprecated module level attributes with a descriptor
    # which emits a warning when accessed.
    for attr, replacement in _deprecated_attrs.pop(namespace, []):
        try:
            value = getattr(proxy, attr)
        except AttributeError:
            raise AssertionError(
                "%s was set deprecated but wasn't added to __all__" % attr)
        delattr(proxy, attr)
        deprecated_attr = _DeprecatedAttribute(
            namespace, attr, value, replacement)
        setattr(proxy_type, attr, deprecated_attr)

    return proxy
constant[Loads overrides for an introspection module.
Either returns the same module again in case there are no overrides or a
proxy module including overrides. Doesn't cache the result.
]
variable[namespace] assign[=] call[call[name[introspection_module].__name__.rsplit, parameter[constant[.], constant[1]]]][<ast.UnaryOp object at 0x7da1b10c7be0>]
variable[module_keys] assign[=] <ast.ListComp object at 0x7da1b10c5b70>
for taget[name[module_key]] in starred[name[module_keys]] begin[:]
variable[has_old] assign[=] compare[name[module_key] in name[sys].modules]
variable[old_module] assign[=] call[name[sys].modules.get, parameter[name[module_key]]]
variable[proxy_type] assign[=] call[name[type], parameter[binary_operation[name[namespace] + constant[ProxyModule]], tuple[[<ast.Name object at 0x7da1b10e53f0>]], dictionary[[], []]]]
variable[proxy] assign[=] call[name[proxy_type], parameter[name[introspection_module]]]
for taget[name[module_key]] in starred[name[module_keys]] begin[:]
call[name[sys].modules][name[module_key]] assign[=] name[proxy]
<ast.Try object at 0x7da1b10e7190>
variable[override_all] assign[=] list[[]]
if call[name[hasattr], parameter[name[override_mod], constant[__all__]]] begin[:]
variable[override_all] assign[=] name[override_mod].__all__
for taget[name[var]] in starred[name[override_all]] begin[:]
<ast.Try object at 0x7da1b10e4520>
<ast.Try object at 0x7da1b10e4a90>
call[name[setattr], parameter[name[proxy], name[var], name[item]]]
for taget[tuple[[<ast.Name object at 0x7da1b10e58a0>, <ast.Name object at 0x7da1b10e4490>]]] in starred[call[name[_deprecated_attrs].pop, parameter[name[namespace], list[[]]]]] begin[:]
<ast.Try object at 0x7da1b10e5810>
call[name[delattr], parameter[name[proxy], name[attr]]]
variable[deprecated_attr] assign[=] call[name[_DeprecatedAttribute], parameter[name[namespace], name[attr], name[value], name[replacement]]]
call[name[setattr], parameter[name[proxy_type], name[attr], name[deprecated_attr]]]
return[name[proxy]] | keyword[def] identifier[load_overrides] ( identifier[introspection_module] ):
literal[string]
identifier[namespace] = identifier[introspection_module] . identifier[__name__] . identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ]
identifier[module_keys] =[ identifier[prefix] + literal[string] + identifier[namespace] keyword[for] identifier[prefix] keyword[in] identifier[const] . identifier[PREFIX] ]
keyword[for] identifier[module_key] keyword[in] identifier[module_keys] :
identifier[has_old] = identifier[module_key] keyword[in] identifier[sys] . identifier[modules]
identifier[old_module] = identifier[sys] . identifier[modules] . identifier[get] ( identifier[module_key] )
identifier[proxy_type] = identifier[type] ( identifier[namespace] + literal[string] ,( identifier[OverridesProxyModule] ,),{})
identifier[proxy] = identifier[proxy_type] ( identifier[introspection_module] )
keyword[for] identifier[module_key] keyword[in] identifier[module_keys] :
identifier[sys] . identifier[modules] [ identifier[module_key] ]= identifier[proxy]
keyword[try] :
identifier[override_package_name] = literal[string] + identifier[namespace]
keyword[try] :
identifier[override_loader] = identifier[get_loader] ( identifier[override_package_name] )
keyword[except] identifier[AttributeError] :
identifier[override_loader] = keyword[None]
keyword[if] identifier[override_loader] keyword[is] keyword[None] :
keyword[return] identifier[introspection_module]
identifier[override_mod] = identifier[importlib] . identifier[import_module] ( identifier[override_package_name] )
keyword[finally] :
keyword[for] identifier[module_key] keyword[in] identifier[module_keys] :
keyword[del] identifier[sys] . identifier[modules] [ identifier[module_key] ]
keyword[if] identifier[has_old] :
identifier[sys] . identifier[modules] [ identifier[module_key] ]= identifier[old_module]
identifier[override_all] =[]
keyword[if] identifier[hasattr] ( identifier[override_mod] , literal[string] ):
identifier[override_all] = identifier[override_mod] . identifier[__all__]
keyword[for] identifier[var] keyword[in] identifier[override_all] :
keyword[try] :
identifier[item] = identifier[getattr] ( identifier[override_mod] , identifier[var] )
keyword[except] ( identifier[AttributeError] , identifier[TypeError] ):
keyword[continue]
keyword[try] :
keyword[if] identifier[item] . identifier[__module__] . identifier[split] ( literal[string] )[- literal[int] ]== identifier[namespace] :
identifier[item] . identifier[__module__] = identifier[namespace]
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[setattr] ( identifier[proxy] , identifier[var] , identifier[item] )
keyword[for] identifier[attr] , identifier[replacement] keyword[in] identifier[_deprecated_attrs] . identifier[pop] ( identifier[namespace] ,[]):
keyword[try] :
identifier[value] = identifier[getattr] ( identifier[proxy] , identifier[attr] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[AssertionError] (
literal[string] % identifier[attr] )
identifier[delattr] ( identifier[proxy] , identifier[attr] )
identifier[deprecated_attr] = identifier[_DeprecatedAttribute] (
identifier[namespace] , identifier[attr] , identifier[value] , identifier[replacement] )
identifier[setattr] ( identifier[proxy_type] , identifier[attr] , identifier[deprecated_attr] )
keyword[return] identifier[proxy] | def load_overrides(introspection_module):
"""Loads overrides for an introspection module.
Either returns the same module again in case there are no overrides or a
proxy module including overrides. Doesn't cache the result.
"""
namespace = introspection_module.__name__.rsplit('.', 1)[-1]
module_keys = [prefix + '.' + namespace for prefix in const.PREFIX]
# We use sys.modules so overrides can import from gi.repository
# but restore everything at the end so this doesn't have any side effects
for module_key in module_keys:
has_old = module_key in sys.modules
old_module = sys.modules.get(module_key) # depends on [control=['for'], data=['module_key']]
# Create a new sub type, so we can separate descriptors like
# _DeprecatedAttribute for each namespace.
proxy_type = type(namespace + 'ProxyModule', (OverridesProxyModule,), {})
proxy = proxy_type(introspection_module)
for module_key in module_keys:
sys.modules[module_key] = proxy # depends on [control=['for'], data=['module_key']]
try:
override_package_name = 'pgi.overrides.' + namespace
# http://bugs.python.org/issue14710
try:
override_loader = get_loader(override_package_name) # depends on [control=['try'], data=[]]
except AttributeError:
override_loader = None # depends on [control=['except'], data=[]]
# Avoid checking for an ImportError, an override might
# depend on a missing module thus causing an ImportError
if override_loader is None:
return introspection_module # depends on [control=['if'], data=[]]
override_mod = importlib.import_module(override_package_name) # depends on [control=['try'], data=[]]
finally:
for module_key in module_keys:
del sys.modules[module_key]
if has_old:
sys.modules[module_key] = old_module # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['module_key']]
override_all = []
if hasattr(override_mod, '__all__'):
override_all = override_mod.__all__ # depends on [control=['if'], data=[]]
for var in override_all:
try:
item = getattr(override_mod, var) # depends on [control=['try'], data=[]]
except (AttributeError, TypeError):
# Gedit puts a non-string in __all__, so catch TypeError here
continue # depends on [control=['except'], data=[]]
# make sure new classes have a proper __module__
try:
if item.__module__.split('.')[-1] == namespace:
item.__module__ = namespace # depends on [control=['if'], data=['namespace']] # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
setattr(proxy, var, item) # depends on [control=['for'], data=['var']]
# Replace deprecated module level attributes with a descriptor
# which emits a warning when accessed.
for (attr, replacement) in _deprecated_attrs.pop(namespace, []):
try:
value = getattr(proxy, attr) # depends on [control=['try'], data=[]]
except AttributeError:
raise AssertionError("%s was set deprecated but wasn't added to __all__" % attr) # depends on [control=['except'], data=[]]
delattr(proxy, attr)
deprecated_attr = _DeprecatedAttribute(namespace, attr, value, replacement)
setattr(proxy_type, attr, deprecated_attr) # depends on [control=['for'], data=[]]
return proxy |
def add_user(self, username, password, full_name=None, trusted=False, readonly=False):
    """ Add user to SQLite database.

        * `username` [string]
            Username of new user.
        * `password` [string]
            Password of new user.
        * `full_name` [string]
            Full name of new user.
        * `trusted` [boolean]
            Whether the new user should be trusted or not.
        * `readonly` [boolean]
            Whether the new user can only read or not

        Raises AuthError when the INSERT fails (e.g. the username
        already exists).
    """
    # Generate an 8-character alphanumeric salt.  SystemRandom draws from
    # the OS CSPRNG; the module-level random functions are predictable and
    # must not be used for password salts.
    char_set = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    salt = ''.join(rng.choice(char_set) for _ in range(8))

    sql = '''INSERT INTO user
        (username, pwd_salt, pwd_hash, full_name, trusted, readonly)
        VALUES
        (?, ?, ?, ?, ?, ?)'''
    try:
        self._db_curs.execute(sql, (username, salt,
                self._gen_hash(password, salt), full_name, trusted or False,
                readonly or False))
        self._db_conn.commit()
    except (sqlite3.OperationalError, sqlite3.IntegrityError) as error:
        # Surface database-level failures as the module's own error type.
        raise AuthError(error)
constant[ Add user to SQLite database.
* `username` [string]
Username of new user.
* `password` [string]
Password of new user.
* `full_name` [string]
Full name of new user.
* `trusted` [boolean]
Whether the new user should be trusted or not.
* `readonly` [boolean]
Whether the new user can only read or not
]
variable[char_set] assign[=] binary_operation[name[string].ascii_letters + name[string].digits]
variable[salt] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da2046232b0>]]
variable[sql] assign[=] constant[INSERT INTO user
(username, pwd_salt, pwd_hash, full_name, trusted, readonly)
VALUES
(?, ?, ?, ?, ?, ?)]
<ast.Try object at 0x7da204623850> | keyword[def] identifier[add_user] ( identifier[self] , identifier[username] , identifier[password] , identifier[full_name] = keyword[None] , identifier[trusted] = keyword[False] , identifier[readonly] = keyword[False] ):
literal[string]
identifier[char_set] = identifier[string] . identifier[ascii_letters] + identifier[string] . identifier[digits]
identifier[salt] = literal[string] . identifier[join] ( identifier[random] . identifier[choice] ( identifier[char_set] ) keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] ))
identifier[sql] = literal[string]
keyword[try] :
identifier[self] . identifier[_db_curs] . identifier[execute] ( identifier[sql] ,( identifier[username] , identifier[salt] ,
identifier[self] . identifier[_gen_hash] ( identifier[password] , identifier[salt] ), identifier[full_name] , identifier[trusted] keyword[or] keyword[False] ,
identifier[readonly] keyword[or] keyword[False] ))
identifier[self] . identifier[_db_conn] . identifier[commit] ()
keyword[except] ( identifier[sqlite3] . identifier[OperationalError] , identifier[sqlite3] . identifier[IntegrityError] ) keyword[as] identifier[error] :
keyword[raise] identifier[AuthError] ( identifier[error] ) | def add_user(self, username, password, full_name=None, trusted=False, readonly=False):
""" Add user to SQLite database.
* `username` [string]
Username of new user.
* `password` [string]
Password of new user.
* `full_name` [string]
Full name of new user.
* `trusted` [boolean]
Whether the new user should be trusted or not.
* `readonly` [boolean]
Whether the new user can only read or not
"""
# generate salt
char_set = string.ascii_letters + string.digits
salt = ''.join((random.choice(char_set) for x in range(8)))
sql = 'INSERT INTO user\n (username, pwd_salt, pwd_hash, full_name, trusted, readonly)\n VALUES\n (?, ?, ?, ?, ?, ?)'
try:
self._db_curs.execute(sql, (username, salt, self._gen_hash(password, salt), full_name, trusted or False, readonly or False))
self._db_conn.commit() # depends on [control=['try'], data=[]]
except (sqlite3.OperationalError, sqlite3.IntegrityError) as error:
raise AuthError(error) # depends on [control=['except'], data=['error']] |
def _default_make_pool(http, proxy_info):
    """Creates a urllib3.PoolManager object that has SSL verification enabled
    and uses the certifi certificates.

    Args:
        http: httplib2.Http-like object; its ``ca_certs`` and
            ``disable_ssl_certificate_validation`` attributes are consulted,
            and ``ca_certs`` is filled in from certifi when unset.
        proxy_info: httplib2 ProxyInfo-like object, a callable returning one,
            or a falsy value for a direct connection.

    Returns:
        urllib3.ProxyManager when proxy information is available, otherwise
        urllib3.PoolManager.
    """
    if not http.ca_certs:
        http.ca_certs = _certifi_where_for_ssl_version()

    ssl_disabled = http.disable_ssl_certificate_validation
    # Require certificate validation only when we have a CA bundle and the
    # caller has not explicitly disabled validation.
    cert_reqs = 'CERT_REQUIRED' if http.ca_certs and not ssl_disabled else None

    # Fix: ``collections.Callable`` was removed in Python 3.10 (it has lived
    # in ``collections.abc`` since 3.3).  The builtin ``callable()`` performs
    # the same check and works on every supported Python version.
    if callable(proxy_info):
        proxy_info = proxy_info()
    if proxy_info:
        if proxy_info.proxy_user and proxy_info.proxy_pass:
            proxy_url = 'http://{}:{}@{}:{}/'.format(
                proxy_info.proxy_user, proxy_info.proxy_pass,
                proxy_info.proxy_host, proxy_info.proxy_port,
            )
            proxy_headers = urllib3.util.request.make_headers(
                proxy_basic_auth='{}:{}'.format(
                    proxy_info.proxy_user, proxy_info.proxy_pass,
                )
            )
        else:
            proxy_url = 'http://{}:{}/'.format(
                proxy_info.proxy_host, proxy_info.proxy_port,
            )
            proxy_headers = {}

        return urllib3.ProxyManager(
            proxy_url=proxy_url,
            proxy_headers=proxy_headers,
            ca_certs=http.ca_certs,
            cert_reqs=cert_reqs,
        )
    return urllib3.PoolManager(
        ca_certs=http.ca_certs,
        cert_reqs=cert_reqs,
    )
constant[Creates a urllib3.PoolManager object that has SSL verification enabled
and uses the certifi certificates.]
if <ast.UnaryOp object at 0x7da20c6c76d0> begin[:]
name[http].ca_certs assign[=] call[name[_certifi_where_for_ssl_version], parameter[]]
variable[ssl_disabled] assign[=] name[http].disable_ssl_certificate_validation
variable[cert_reqs] assign[=] <ast.IfExp object at 0x7da20c6c72e0>
if call[name[isinstance], parameter[name[proxy_info], name[collections].Callable]] begin[:]
variable[proxy_info] assign[=] call[name[proxy_info], parameter[]]
if name[proxy_info] begin[:]
if <ast.BoolOp object at 0x7da20c795c60> begin[:]
variable[proxy_url] assign[=] call[constant[http://{}:{}@{}:{}/].format, parameter[name[proxy_info].proxy_user, name[proxy_info].proxy_pass, name[proxy_info].proxy_host, name[proxy_info].proxy_port]]
variable[proxy_headers] assign[=] call[name[urllib3].util.request.make_headers, parameter[]]
return[call[name[urllib3].ProxyManager, parameter[]]]
return[call[name[urllib3].PoolManager, parameter[]]] | keyword[def] identifier[_default_make_pool] ( identifier[http] , identifier[proxy_info] ):
literal[string]
keyword[if] keyword[not] identifier[http] . identifier[ca_certs] :
identifier[http] . identifier[ca_certs] = identifier[_certifi_where_for_ssl_version] ()
identifier[ssl_disabled] = identifier[http] . identifier[disable_ssl_certificate_validation]
identifier[cert_reqs] = literal[string] keyword[if] identifier[http] . identifier[ca_certs] keyword[and] keyword[not] identifier[ssl_disabled] keyword[else] keyword[None]
keyword[if] identifier[isinstance] ( identifier[proxy_info] , identifier[collections] . identifier[Callable] ):
identifier[proxy_info] = identifier[proxy_info] ()
keyword[if] identifier[proxy_info] :
keyword[if] identifier[proxy_info] . identifier[proxy_user] keyword[and] identifier[proxy_info] . identifier[proxy_pass] :
identifier[proxy_url] = literal[string] . identifier[format] (
identifier[proxy_info] . identifier[proxy_user] , identifier[proxy_info] . identifier[proxy_pass] ,
identifier[proxy_info] . identifier[proxy_host] , identifier[proxy_info] . identifier[proxy_port] ,
)
identifier[proxy_headers] = identifier[urllib3] . identifier[util] . identifier[request] . identifier[make_headers] (
identifier[proxy_basic_auth] = literal[string] . identifier[format] (
identifier[proxy_info] . identifier[proxy_user] , identifier[proxy_info] . identifier[proxy_pass] ,
)
)
keyword[else] :
identifier[proxy_url] = literal[string] . identifier[format] (
identifier[proxy_info] . identifier[proxy_host] , identifier[proxy_info] . identifier[proxy_port] ,
)
identifier[proxy_headers] ={}
keyword[return] identifier[urllib3] . identifier[ProxyManager] (
identifier[proxy_url] = identifier[proxy_url] ,
identifier[proxy_headers] = identifier[proxy_headers] ,
identifier[ca_certs] = identifier[http] . identifier[ca_certs] ,
identifier[cert_reqs] = identifier[cert_reqs] ,
)
keyword[return] identifier[urllib3] . identifier[PoolManager] (
identifier[ca_certs] = identifier[http] . identifier[ca_certs] ,
identifier[cert_reqs] = identifier[cert_reqs] ,
) | def _default_make_pool(http, proxy_info):
"""Creates a urllib3.PoolManager object that has SSL verification enabled
and uses the certifi certificates."""
if not http.ca_certs:
http.ca_certs = _certifi_where_for_ssl_version() # depends on [control=['if'], data=[]]
ssl_disabled = http.disable_ssl_certificate_validation
cert_reqs = 'CERT_REQUIRED' if http.ca_certs and (not ssl_disabled) else None
if isinstance(proxy_info, collections.Callable):
proxy_info = proxy_info() # depends on [control=['if'], data=[]]
if proxy_info:
if proxy_info.proxy_user and proxy_info.proxy_pass:
proxy_url = 'http://{}:{}@{}:{}/'.format(proxy_info.proxy_user, proxy_info.proxy_pass, proxy_info.proxy_host, proxy_info.proxy_port)
proxy_headers = urllib3.util.request.make_headers(proxy_basic_auth='{}:{}'.format(proxy_info.proxy_user, proxy_info.proxy_pass)) # depends on [control=['if'], data=[]]
else:
proxy_url = 'http://{}:{}/'.format(proxy_info.proxy_host, proxy_info.proxy_port)
proxy_headers = {}
return urllib3.ProxyManager(proxy_url=proxy_url, proxy_headers=proxy_headers, ca_certs=http.ca_certs, cert_reqs=cert_reqs) # depends on [control=['if'], data=[]]
return urllib3.PoolManager(ca_certs=http.ca_certs, cert_reqs=cert_reqs) |
def install (self, scanner, target, vtarget):
    """ Installs the specified scanner on actual target 'target'.
        vtarget: virtual target from which 'target' was actualized; it is
        validated but not otherwise used here.
    """
    assert isinstance(scanner, Scanner)
    assert isinstance(target, basestring)
    assert isinstance(vtarget, basestring)

    engine = self.manager_.engine()
    # Tell the engine which include patterns to look for on this target.
    engine.set_target_variable(target, "HDRSCAN", scanner.pattern())

    exported_name = self.exported_scanners_.get(scanner)
    if exported_name is None:
        # First time this scanner is seen: export its 'process' method to
        # bjam under a freshly generated, unique rule name.
        exported_name = "scanner_" + str(self.count_)
        self.count_ += 1
        self.exported_scanners_[scanner] = exported_name
        bjam.import_rule("", exported_name, scanner.process)
    engine.set_target_variable(target, "HDRRULE", exported_name)

    # scanner reflects difference in properties affecting
    # binding of 'target', which will be known when processing
    # includes for it, will give information on how to
    # interpret quoted includes.
    engine.set_target_variable(target, "HDRGRIST", str(id(scanner)))
constant[ Installs the specified scanner on actual target 'target'.
vtarget: virtual target from which 'target' was actualized.
]
assert[call[name[isinstance], parameter[name[scanner], name[Scanner]]]]
assert[call[name[isinstance], parameter[name[target], name[basestring]]]]
assert[call[name[isinstance], parameter[name[vtarget], name[basestring]]]]
variable[engine] assign[=] call[name[self].manager_.engine, parameter[]]
call[name[engine].set_target_variable, parameter[name[target], constant[HDRSCAN], call[name[scanner].pattern, parameter[]]]]
if compare[name[scanner] <ast.NotIn object at 0x7da2590d7190> name[self].exported_scanners_] begin[:]
variable[exported_name] assign[=] binary_operation[constant[scanner_] + call[name[str], parameter[name[self].count_]]]
name[self].count_ assign[=] binary_operation[name[self].count_ + constant[1]]
call[name[self].exported_scanners_][name[scanner]] assign[=] name[exported_name]
call[name[bjam].import_rule, parameter[constant[], name[exported_name], name[scanner].process]]
call[name[engine].set_target_variable, parameter[name[target], constant[HDRRULE], name[exported_name]]]
call[name[engine].set_target_variable, parameter[name[target], constant[HDRGRIST], call[name[str], parameter[call[name[id], parameter[name[scanner]]]]]]]
pass | keyword[def] identifier[install] ( identifier[self] , identifier[scanner] , identifier[target] , identifier[vtarget] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[scanner] , identifier[Scanner] )
keyword[assert] identifier[isinstance] ( identifier[target] , identifier[basestring] )
keyword[assert] identifier[isinstance] ( identifier[vtarget] , identifier[basestring] )
identifier[engine] = identifier[self] . identifier[manager_] . identifier[engine] ()
identifier[engine] . identifier[set_target_variable] ( identifier[target] , literal[string] , identifier[scanner] . identifier[pattern] ())
keyword[if] identifier[scanner] keyword[not] keyword[in] identifier[self] . identifier[exported_scanners_] :
identifier[exported_name] = literal[string] + identifier[str] ( identifier[self] . identifier[count_] )
identifier[self] . identifier[count_] = identifier[self] . identifier[count_] + literal[int]
identifier[self] . identifier[exported_scanners_] [ identifier[scanner] ]= identifier[exported_name]
identifier[bjam] . identifier[import_rule] ( literal[string] , identifier[exported_name] , identifier[scanner] . identifier[process] )
keyword[else] :
identifier[exported_name] = identifier[self] . identifier[exported_scanners_] [ identifier[scanner] ]
identifier[engine] . identifier[set_target_variable] ( identifier[target] , literal[string] , identifier[exported_name] )
identifier[engine] . identifier[set_target_variable] ( identifier[target] , literal[string] , identifier[str] ( identifier[id] ( identifier[scanner] )))
keyword[pass] | def install(self, scanner, target, vtarget):
""" Installs the specified scanner on actual target 'target'.
vtarget: virtual target from which 'target' was actualized.
"""
assert isinstance(scanner, Scanner)
assert isinstance(target, basestring)
assert isinstance(vtarget, basestring)
engine = self.manager_.engine()
engine.set_target_variable(target, 'HDRSCAN', scanner.pattern())
if scanner not in self.exported_scanners_:
exported_name = 'scanner_' + str(self.count_)
self.count_ = self.count_ + 1
self.exported_scanners_[scanner] = exported_name
bjam.import_rule('', exported_name, scanner.process) # depends on [control=['if'], data=['scanner']]
else:
exported_name = self.exported_scanners_[scanner]
engine.set_target_variable(target, 'HDRRULE', exported_name)
# scanner reflects difference in properties affecting
# binding of 'target', which will be known when processing
# includes for it, will give information on how to
# interpret quoted includes.
engine.set_target_variable(target, 'HDRGRIST', str(id(scanner)))
pass |
def sync_out(self, release):
    """Sync our tree to the canonical location"""
    # Object files are pushed only when the release defines a command for
    # them; the destination directory is created on demand first.
    if release.get('rsync_out_objs'):
        canonical = release['canonical_dir']
        if not os.path.isdir(canonical):
            self.log.info('Creating %s', canonical)
            os.makedirs(canonical)
        self.call(release['rsync_out_objs'])
    # The remaining files are always synced.
    self.call(release['rsync_out_rest'])
constant[Sync our tree to the canonical location]
if call[name[release].get, parameter[constant[rsync_out_objs]]] begin[:]
variable[tree] assign[=] call[name[release]][constant[canonical_dir]]
if <ast.UnaryOp object at 0x7da1b28dae90> begin[:]
call[name[self].log.info, parameter[constant[Creating %s], name[tree]]]
call[name[os].makedirs, parameter[name[tree]]]
call[name[self].call, parameter[call[name[release]][constant[rsync_out_objs]]]]
call[name[self].call, parameter[call[name[release]][constant[rsync_out_rest]]]] | keyword[def] identifier[sync_out] ( identifier[self] , identifier[release] ):
literal[string]
keyword[if] identifier[release] . identifier[get] ( literal[string] ):
identifier[tree] = identifier[release] [ literal[string] ]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[tree] ):
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[tree] )
identifier[os] . identifier[makedirs] ( identifier[tree] )
identifier[self] . identifier[call] ( identifier[release] [ literal[string] ])
identifier[self] . identifier[call] ( identifier[release] [ literal[string] ]) | def sync_out(self, release):
"""Sync our tree to the canonical location"""
if release.get('rsync_out_objs'):
tree = release['canonical_dir']
if not os.path.isdir(tree):
self.log.info('Creating %s', tree)
os.makedirs(tree) # depends on [control=['if'], data=[]]
self.call(release['rsync_out_objs'])
self.call(release['rsync_out_rest']) # depends on [control=['if'], data=[]] |
def readVersion(self):
    """Read the document format version from the root element.

    The designspace root carries it as an attribute::

        <designspace format="3">

    Stores the value on ``self.documentFormatVersion`` as an int when
    possible, falling back to float (fontTools >= 3.27 writes e.g. "4.0").
    """
    # Select the (root) element carrying a 'format' attribute.
    versioned = self.root.findall("[@format]")[0]
    raw_format = versioned.attrib['format']
    try:
        version = int(raw_format)
    except ValueError:
        # as of fontTools >= 3.27 'format' is formatted as a float "4.0"
        version = float(raw_format)
    self.documentFormatVersion = version
constant[ Read the document version.
::
<designspace format="3">
]
variable[ds] assign[=] call[call[name[self].root.findall, parameter[constant[[@format]]]]][constant[0]]
variable[raw_format] assign[=] call[name[ds].attrib][constant[format]]
<ast.Try object at 0x7da2045673a0> | keyword[def] identifier[readVersion] ( identifier[self] ):
literal[string]
identifier[ds] = identifier[self] . identifier[root] . identifier[findall] ( literal[string] )[ literal[int] ]
identifier[raw_format] = identifier[ds] . identifier[attrib] [ literal[string] ]
keyword[try] :
identifier[self] . identifier[documentFormatVersion] = identifier[int] ( identifier[raw_format] )
keyword[except] identifier[ValueError] :
identifier[self] . identifier[documentFormatVersion] = identifier[float] ( identifier[raw_format] ) | def readVersion(self):
""" Read the document version.
::
<designspace format="3">
"""
ds = self.root.findall('[@format]')[0]
raw_format = ds.attrib['format']
try:
self.documentFormatVersion = int(raw_format) # depends on [control=['try'], data=[]]
except ValueError:
# as of fontTools >= 3.27 'format' is formatted as a float "4.0"
self.documentFormatVersion = float(raw_format) # depends on [control=['except'], data=[]] |
def callback(self, callback):
    """Register a handler for callback queries.

    Used bare (``@bot.callback``) it installs *callback* as the default
    handler; used with a pattern string (``@bot.callback(r"...")``) it
    returns a decorator that registers the decorated function for queries
    matching the pattern.

    :Example:
    >>> @bot.callback
    >>> def echo(chat, cq):
    >>>     return cq.answer()
    >>> @bot.callback(r"buttonclick-(.+)")
    >>> def echo(chat, cq, match):
    >>>     return chat.reply(match.group(1))
    """
    if callable(callback):
        # Bare decorator form: the argument is the handler itself.
        self._default_callback = callback
        return callback

    if isinstance(callback, str):
        # Parametrized form: return a decorator that registers the function
        # under the given pattern and hands it back unchanged.
        def register(fn):
            self.add_callback(callback, fn)
            return fn
        return register

    raise TypeError("str expected {} given".format(type(callback)))
constant[
Set callback for callback queries
:Example:
>>> @bot.callback
>>> def echo(chat, cq):
>>> return cq.answer()
>>> @bot.callback(r"buttonclick-(.+)")
>>> def echo(chat, cq, match):
>>> return chat.reply(match.group(1))
]
if call[name[callable], parameter[name[callback]]] begin[:]
name[self]._default_callback assign[=] name[callback]
return[name[callback]] | keyword[def] identifier[callback] ( identifier[self] , identifier[callback] ):
literal[string]
keyword[if] identifier[callable] ( identifier[callback] ):
identifier[self] . identifier[_default_callback] = identifier[callback]
keyword[return] identifier[callback]
keyword[elif] identifier[isinstance] ( identifier[callback] , identifier[str] ):
keyword[def] identifier[decorator] ( identifier[fn] ):
identifier[self] . identifier[add_callback] ( identifier[callback] , identifier[fn] )
keyword[return] identifier[fn]
keyword[return] identifier[decorator]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[callback] ))) | def callback(self, callback):
"""
Set callback for callback queries
:Example:
>>> @bot.callback
>>> def echo(chat, cq):
>>> return cq.answer()
>>> @bot.callback(r"buttonclick-(.+)")
>>> def echo(chat, cq, match):
>>> return chat.reply(match.group(1))
"""
if callable(callback):
self._default_callback = callback
return callback # depends on [control=['if'], data=[]]
elif isinstance(callback, str):
def decorator(fn):
self.add_callback(callback, fn)
return fn
return decorator # depends on [control=['if'], data=[]]
else:
raise TypeError('str expected {} given'.format(type(callback))) |
def spatial_firing_rate(self,label_x='x',label_y='y',bins=None,resolution=1.0,geometry=None,weight_function=None,normalize_time=True,normalize_n=False,start_units_with_0=True):
    """
    Compute a 2d spatial histogram (firing-rate map) of spike positions.

    `bins` can be either a number of bins (has to be int) or a list of edges.
    `bins` can be a tuple (for x and y respectively).
    imshow(s.spatial_firing_rate(weight_function=lambda x: (x[:,1]>0.5)*(x[:,1]<0.6)))

    Returns the ``(H, xedges, yedges)`` triple produced by np.histogram2d;
    when the object is falsy, returns 1x1 / length-1 placeholder arrays.

    NOTE(review): `resolution` and `geometry` are accepted but unused here.
    NOTE(review): `normalize_n=True` references `cells_per_bin`, which is
    not defined anywhere in this function -- that path raises NameError.
    NOTE(review): bins_x is derived from `label_y` and bins_y from
    `label_x` -- looks swapped; confirm this is intended.
    """
    if bool(self):
        # Resolve the bin specification: a tuple gives (x, y) specs
        # individually; a bare int asks the label objects for evenly spaced
        # edges; anything else is passed through to np.histogram2d as-is.
        if type(bins) is tuple:
            if type(bins[0]) is int:
                bins_x = self.spike_times.get_label(label_y).linspace_bins(bins=bins[0])
            else:
                bins_x = bins[0]
            if type(bins[1]) is int:
                bins_y = self.spike_times.get_label(label_x).linspace_bins(bins=bins[1])
            else:
                bins_y = bins[1]
        else:
            if type(bins) is int:
                bins_x = self.spike_times.get_label(label_y).linspace_bins(bins=bins)
                bins_y = self.spike_times.get_label(label_x).linspace_bins(bins=bins)
            else:
                bins_x = bins
                bins_y = bins
        if weight_function is None:
            # Unweighted histogram: plain spike counts per spatial bin.
            H,xed,yed = np.histogram2d(self.spike_times[label_x],self.spike_times[label_y],bins=(bins_x,bins_y))
        else:
            # Weighted histogram: weights are computed either from
            # (unit-index, spike-time) pairs or from the raw positions.
            if start_units_with_0:
                ## TODO : reintroduce a neuron number thing
                # Shift unit indices so the first unit in range is 0.
                spike_numbers = np.transpose([self.spikes[:,0]-self.n_range[0],self.spikes[:,1]])
                weights = weight_function(spike_numbers)
            else:
                weights = weight_function(self.spike_times[label_x],self.spike_times[label_y])
            H,xed,yed = np.histogram2d(self.spike_times[label_x],self.spike_times[label_y],bins=(bins_x,bins_y),weights=weights)
        if normalize_time:
            # Convert counts to a rate by dividing by total duration in s.
            H = H/(self.spike_times.labels[0].convert('s').len())
        if normalize_n:
            # NOTE(review): `cells_per_bin` is undefined -- raises NameError.
            H = H/cells_per_bin
        return H,xed,yed
    else:
        # Empty container: return minimal placeholder histogram and edges.
        return (np.array([[0]]),np.array([0]),np.array([0]))
constant[
`bins` can be either a number of bins (has to be int) or a list of edges.
`bins` can be a tuple (for x and y respectively).
imshow(s.spatial_firing_rate(weight_function=lambda x: (x[:,1]>0.5)*(x[:,1]<0.6)))
]
if call[name[bool], parameter[name[self]]] begin[:]
if compare[call[name[type], parameter[name[bins]]] is name[tuple]] begin[:]
if compare[call[name[type], parameter[call[name[bins]][constant[0]]]] is name[int]] begin[:]
variable[bins_x] assign[=] call[call[name[self].spike_times.get_label, parameter[name[label_y]]].linspace_bins, parameter[]]
if compare[call[name[type], parameter[call[name[bins]][constant[1]]]] is name[int]] begin[:]
variable[bins_y] assign[=] call[call[name[self].spike_times.get_label, parameter[name[label_x]]].linspace_bins, parameter[]]
if compare[name[weight_function] is constant[None]] begin[:]
<ast.Tuple object at 0x7da20c6e5030> assign[=] call[name[np].histogram2d, parameter[call[name[self].spike_times][name[label_x]], call[name[self].spike_times][name[label_y]]]]
if name[normalize_time] begin[:]
variable[H] assign[=] binary_operation[name[H] / call[call[call[name[self].spike_times.labels][constant[0]].convert, parameter[constant[s]]].len, parameter[]]]
if name[normalize_n] begin[:]
variable[H] assign[=] binary_operation[name[H] / name[cells_per_bin]]
return[tuple[[<ast.Name object at 0x7da2044c1540>, <ast.Name object at 0x7da2044c3760>, <ast.Name object at 0x7da2044c3850>]]] | keyword[def] identifier[spatial_firing_rate] ( identifier[self] , identifier[label_x] = literal[string] , identifier[label_y] = literal[string] , identifier[bins] = keyword[None] , identifier[resolution] = literal[int] , identifier[geometry] = keyword[None] , identifier[weight_function] = keyword[None] , identifier[normalize_time] = keyword[True] , identifier[normalize_n] = keyword[False] , identifier[start_units_with_0] = keyword[True] ):
literal[string]
keyword[if] identifier[bool] ( identifier[self] ):
keyword[if] identifier[type] ( identifier[bins] ) keyword[is] identifier[tuple] :
keyword[if] identifier[type] ( identifier[bins] [ literal[int] ]) keyword[is] identifier[int] :
identifier[bins_x] = identifier[self] . identifier[spike_times] . identifier[get_label] ( identifier[label_y] ). identifier[linspace_bins] ( identifier[bins] = identifier[bins] [ literal[int] ])
keyword[else] :
identifier[bins_x] = identifier[bins] [ literal[int] ]
keyword[if] identifier[type] ( identifier[bins] [ literal[int] ]) keyword[is] identifier[int] :
identifier[bins_y] = identifier[self] . identifier[spike_times] . identifier[get_label] ( identifier[label_x] ). identifier[linspace_bins] ( identifier[bins] = identifier[bins] [ literal[int] ])
keyword[else] :
identifier[bins_y] = identifier[bins] [ literal[int] ]
keyword[else] :
keyword[if] identifier[type] ( identifier[bins] ) keyword[is] identifier[int] :
identifier[bins_x] = identifier[self] . identifier[spike_times] . identifier[get_label] ( identifier[label_y] ). identifier[linspace_bins] ( identifier[bins] = identifier[bins] )
identifier[bins_y] = identifier[self] . identifier[spike_times] . identifier[get_label] ( identifier[label_x] ). identifier[linspace_bins] ( identifier[bins] = identifier[bins] )
keyword[else] :
identifier[bins_x] = identifier[bins]
identifier[bins_y] = identifier[bins]
keyword[if] identifier[weight_function] keyword[is] keyword[None] :
identifier[H] , identifier[xed] , identifier[yed] = identifier[np] . identifier[histogram2d] ( identifier[self] . identifier[spike_times] [ identifier[label_x] ], identifier[self] . identifier[spike_times] [ identifier[label_y] ], identifier[bins] =( identifier[bins_x] , identifier[bins_y] ))
keyword[else] :
keyword[if] identifier[start_units_with_0] :
identifier[spike_numbers] = identifier[np] . identifier[transpose] ([ identifier[self] . identifier[spikes] [:, literal[int] ]- identifier[self] . identifier[n_range] [ literal[int] ], identifier[self] . identifier[spikes] [:, literal[int] ]])
identifier[weights] = identifier[weight_function] ( identifier[spike_numbers] )
keyword[else] :
identifier[weights] = identifier[weight_function] ( identifier[self] . identifier[spike_times] [ identifier[label_x] ], identifier[self] . identifier[spike_times] [ identifier[label_y] ])
identifier[H] , identifier[xed] , identifier[yed] = identifier[np] . identifier[histogram2d] ( identifier[self] . identifier[spike_times] [ identifier[label_x] ], identifier[self] . identifier[spike_times] [ identifier[label_y] ], identifier[bins] =( identifier[bins_x] , identifier[bins_y] ), identifier[weights] = identifier[weights] )
keyword[if] identifier[normalize_time] :
identifier[H] = identifier[H] /( identifier[self] . identifier[spike_times] . identifier[labels] [ literal[int] ]. identifier[convert] ( literal[string] ). identifier[len] ())
keyword[if] identifier[normalize_n] :
identifier[H] = identifier[H] / identifier[cells_per_bin]
keyword[return] identifier[H] , identifier[xed] , identifier[yed]
keyword[else] :
keyword[return] ( identifier[np] . identifier[array] ([[ literal[int] ]]), identifier[np] . identifier[array] ([ literal[int] ]), identifier[np] . identifier[array] ([ literal[int] ])) | def spatial_firing_rate(self, label_x='x', label_y='y', bins=None, resolution=1.0, geometry=None, weight_function=None, normalize_time=True, normalize_n=False, start_units_with_0=True):
"""
`bins` can be either a number of bins (has to be int) or a list of edges.
`bins` can be a tuple (for x and y respectively).
imshow(s.spatial_firing_rate(weight_function=lambda x: (x[:,1]>0.5)*(x[:,1]<0.6)))
"""
if bool(self):
if type(bins) is tuple:
if type(bins[0]) is int:
bins_x = self.spike_times.get_label(label_y).linspace_bins(bins=bins[0]) # depends on [control=['if'], data=[]]
else:
bins_x = bins[0]
if type(bins[1]) is int:
bins_y = self.spike_times.get_label(label_x).linspace_bins(bins=bins[1]) # depends on [control=['if'], data=[]]
else:
bins_y = bins[1] # depends on [control=['if'], data=[]]
elif type(bins) is int:
bins_x = self.spike_times.get_label(label_y).linspace_bins(bins=bins)
bins_y = self.spike_times.get_label(label_x).linspace_bins(bins=bins) # depends on [control=['if'], data=[]]
else:
bins_x = bins
bins_y = bins
if weight_function is None:
(H, xed, yed) = np.histogram2d(self.spike_times[label_x], self.spike_times[label_y], bins=(bins_x, bins_y)) # depends on [control=['if'], data=[]]
else:
if start_units_with_0:
## TODO : reintroduce a neuron number thing
spike_numbers = np.transpose([self.spikes[:, 0] - self.n_range[0], self.spikes[:, 1]])
weights = weight_function(spike_numbers) # depends on [control=['if'], data=[]]
else:
weights = weight_function(self.spike_times[label_x], self.spike_times[label_y])
(H, xed, yed) = np.histogram2d(self.spike_times[label_x], self.spike_times[label_y], bins=(bins_x, bins_y), weights=weights)
if normalize_time:
H = H / self.spike_times.labels[0].convert('s').len() # depends on [control=['if'], data=[]]
if normalize_n:
H = H / cells_per_bin # depends on [control=['if'], data=[]]
return (H, xed, yed) # depends on [control=['if'], data=[]]
else:
return (np.array([[0]]), np.array([0]), np.array([0])) |
def parse_list_parts(data, bucket_name, object_name, upload_id):
    """
    Parser for list parts response.
    :param data: Response data for list parts.
    :param bucket_name: Response for the bucket.
    :param object_name: Response for the object.
    :param upload_id: Upload id of object name for
       the active multipart session.
    :return: Three components:
       - List of :class:`UploadPart <UploadPart>`.
       - True if list is truncated, False otherwise.
       - Next part marker for the next request if the
         list was truncated.
    """
    root = S3Element.fromstring('ListPartsResult', data)

    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    # Marker may be absent on the last page, hence strict=False.
    part_marker = root.get_child_text('NextPartNumberMarker', strict=False)

    def build_part(part):
        # Each <Part> element carries number, etag, modification time, size.
        return UploadPart(
            bucket_name, object_name, upload_id,
            part.get_int_elem('PartNumber'),
            part.get_etag_elem(),
            part.get_localized_time_elem('LastModified'),
            part.get_int_elem('Size'),
        )

    parts = [build_part(part) for part in root.findall('Part')]
    return parts, is_truncated, part_marker
constant[
Parser for list parts response.
:param data: Response data for list parts.
:param bucket_name: Response for the bucket.
:param object_name: Response for the object.
:param upload_id: Upload id of object name for
the active multipart session.
:return: Replies back three distinctive components.
- List of :class:`UploadPart <UploadPart>`.
- True if list is truncated, False otherwise.
- Next part marker for the next request if the
list was truncated.
]
variable[root] assign[=] call[name[S3Element].fromstring, parameter[constant[ListPartsResult], name[data]]]
variable[is_truncated] assign[=] compare[call[call[name[root].get_child_text, parameter[constant[IsTruncated]]].lower, parameter[]] equal[==] constant[true]]
variable[part_marker] assign[=] call[name[root].get_child_text, parameter[constant[NextPartNumberMarker]]]
variable[parts] assign[=] <ast.ListComp object at 0x7da1b1ecd840>
return[tuple[[<ast.Name object at 0x7da1b1ecd540>, <ast.Name object at 0x7da1b1eceec0>, <ast.Name object at 0x7da1b1eccfa0>]]] | keyword[def] identifier[parse_list_parts] ( identifier[data] , identifier[bucket_name] , identifier[object_name] , identifier[upload_id] ):
literal[string]
identifier[root] = identifier[S3Element] . identifier[fromstring] ( literal[string] , identifier[data] )
identifier[is_truncated] = identifier[root] . identifier[get_child_text] ( literal[string] ). identifier[lower] ()== literal[string]
identifier[part_marker] = identifier[root] . identifier[get_child_text] ( literal[string] , identifier[strict] = keyword[False] )
identifier[parts] =[
identifier[UploadPart] ( identifier[bucket_name] , identifier[object_name] , identifier[upload_id] ,
identifier[part] . identifier[get_int_elem] ( literal[string] ),
identifier[part] . identifier[get_etag_elem] (),
identifier[part] . identifier[get_localized_time_elem] ( literal[string] ),
identifier[part] . identifier[get_int_elem] ( literal[string] ))
keyword[for] identifier[part] keyword[in] identifier[root] . identifier[findall] ( literal[string] )
]
keyword[return] identifier[parts] , identifier[is_truncated] , identifier[part_marker] | def parse_list_parts(data, bucket_name, object_name, upload_id):
"""
Parser for list parts response.
:param data: Response data for list parts.
:param bucket_name: Response for the bucket.
:param object_name: Response for the object.
:param upload_id: Upload id of object name for
the active multipart session.
:return: Replies back three distinctive components.
- List of :class:`UploadPart <UploadPart>`.
- True if list is truncated, False otherwise.
- Next part marker for the next request if the
list was truncated.
"""
root = S3Element.fromstring('ListPartsResult', data)
is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
part_marker = root.get_child_text('NextPartNumberMarker', strict=False)
parts = [UploadPart(bucket_name, object_name, upload_id, part.get_int_elem('PartNumber'), part.get_etag_elem(), part.get_localized_time_elem('LastModified'), part.get_int_elem('Size')) for part in root.findall('Part')]
return (parts, is_truncated, part_marker) |
def wrap_stub(elf_file):
    """ Wrap an ELF file into a stub 'dict' """
    # NOTE: this progress line goes to stdout while the summary below goes
    # to stderr, matching the original behaviour.
    print('Wrapping ELF file %s...' % elf_file)

    elf = esptool.ELFFile(elf_file)
    text_section = elf.get_section('.text')
    try:
        data_section = elf.get_section('.data')
    except ValueError:
        # ELF without a .data section is fine; stub just has no data part.
        data_section = None

    stub = {
        'text': text_section.data,
        'text_start': text_section.addr,
        'entry': elf.entrypoint,
    }
    if data_section is not None:
        stub['data'] = data_section.data
        stub['data_start'] = data_section.addr

    # Pad text with NOPs to mod 4.
    remainder = len(stub['text']) % 4
    if remainder != 0:
        stub['text'] += (4 - remainder) * '\0'

    print('Stub text: %d @ 0x%08x, data: %d @ 0x%08x, entry @ 0x%x' % (
        len(stub['text']), stub['text_start'],
        len(stub.get('data', '')), stub.get('data_start', 0),
        stub['entry']), file=sys.stderr)
    return stub
constant[ Wrap an ELF file into a stub 'dict' ]
call[name[print], parameter[binary_operation[constant[Wrapping ELF file %s...] <ast.Mod object at 0x7da2590d6920> name[elf_file]]]]
variable[e] assign[=] call[name[esptool].ELFFile, parameter[name[elf_file]]]
variable[text_section] assign[=] call[name[e].get_section, parameter[constant[.text]]]
<ast.Try object at 0x7da1b2345000>
variable[stub] assign[=] dictionary[[<ast.Constant object at 0x7da1b2346b90>, <ast.Constant object at 0x7da1b23452a0>, <ast.Constant object at 0x7da1b2347520>], [<ast.Attribute object at 0x7da1b2344670>, <ast.Attribute object at 0x7da1b23475e0>, <ast.Attribute object at 0x7da1b2345ff0>]]
if compare[name[data_section] is_not constant[None]] begin[:]
call[name[stub]][constant[data]] assign[=] name[data_section].data
call[name[stub]][constant[data_start]] assign[=] name[data_section].addr
if compare[binary_operation[call[name[len], parameter[call[name[stub]][constant[text]]]] <ast.Mod object at 0x7da2590d6920> constant[4]] not_equal[!=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b2346920>
call[name[print], parameter[binary_operation[constant[Stub text: %d @ 0x%08x, data: %d @ 0x%08x, entry @ 0x%x] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b2346140>, <ast.Subscript object at 0x7da1b23469b0>, <ast.Call object at 0x7da1b2345e40>, <ast.Call object at 0x7da1b23460e0>, <ast.Subscript object at 0x7da1b2345e70>]]]]]
return[name[stub]] | keyword[def] identifier[wrap_stub] ( identifier[elf_file] ):
literal[string]
identifier[print] ( literal[string] % identifier[elf_file] )
identifier[e] = identifier[esptool] . identifier[ELFFile] ( identifier[elf_file] )
identifier[text_section] = identifier[e] . identifier[get_section] ( literal[string] )
keyword[try] :
identifier[data_section] = identifier[e] . identifier[get_section] ( literal[string] )
keyword[except] identifier[ValueError] :
identifier[data_section] = keyword[None]
identifier[stub] ={
literal[string] : identifier[text_section] . identifier[data] ,
literal[string] : identifier[text_section] . identifier[addr] ,
literal[string] : identifier[e] . identifier[entrypoint] ,
}
keyword[if] identifier[data_section] keyword[is] keyword[not] keyword[None] :
identifier[stub] [ literal[string] ]= identifier[data_section] . identifier[data]
identifier[stub] [ literal[string] ]= identifier[data_section] . identifier[addr]
keyword[if] identifier[len] ( identifier[stub] [ literal[string] ])% literal[int] != literal[int] :
identifier[stub] [ literal[string] ]+=( literal[int] -( identifier[len] ( identifier[stub] [ literal[string] ])% literal[int] ))* literal[string]
identifier[print] ( literal[string] %(
identifier[len] ( identifier[stub] [ literal[string] ]), identifier[stub] [ literal[string] ],
identifier[len] ( identifier[stub] . identifier[get] ( literal[string] , literal[string] )), identifier[stub] . identifier[get] ( literal[string] , literal[int] ),
identifier[stub] [ literal[string] ]), identifier[file] = identifier[sys] . identifier[stderr] )
keyword[return] identifier[stub] | def wrap_stub(elf_file):
""" Wrap an ELF file into a stub 'dict' """
print('Wrapping ELF file %s...' % elf_file)
e = esptool.ELFFile(elf_file)
text_section = e.get_section('.text')
try:
data_section = e.get_section('.data') # depends on [control=['try'], data=[]]
except ValueError:
data_section = None # depends on [control=['except'], data=[]]
stub = {'text': text_section.data, 'text_start': text_section.addr, 'entry': e.entrypoint}
if data_section is not None:
stub['data'] = data_section.data
stub['data_start'] = data_section.addr # depends on [control=['if'], data=['data_section']]
# Pad text with NOPs to mod 4.
if len(stub['text']) % 4 != 0:
stub['text'] += (4 - len(stub['text']) % 4) * '\x00' # depends on [control=['if'], data=[]]
print('Stub text: %d @ 0x%08x, data: %d @ 0x%08x, entry @ 0x%x' % (len(stub['text']), stub['text_start'], len(stub.get('data', '')), stub.get('data_start', 0), stub['entry']), file=sys.stderr)
return stub |
def run(self, blocking: bool=True):
    """
    Run the internal control loop.

    Args:
        blocking (bool): Defaults to `True`. If set to `False`, a daemon
            thread is spawned to run the control loop in the background.

    Raises:
        RuntimeError: If called and not using the internal control loop
            via `self._run_control_loop`, set in the initializer of the
            class
    """
    if not self._run_control_loop:
        err = ("`run` called, but not using the internal control loop. Use"
               " `start` instead")
        raise RuntimeError(err)
    self._setup()
    self._heartbeat_reciever.start()
    if blocking:
        return self.loop.start()
    # Non-blocking: run the loop on a background daemon thread.
    # NOTE: Thread.start() spawns the thread; the original code called
    # .run() (and on the wrong attribute, `self._thread`), which would
    # have executed the loop synchronously in the caller's thread.
    self._run_thread = _threading.Thread(target=self.loop.start,
                                         daemon=True)
    self._run_thread.start()
constant[
Run the internal control loop.
Args:
blocking (bool): Defaults to `True`. If set to `False`, will
intialize a thread to run the control loop.
Raises:
RuntimeError: If called and not using the internal control loop
via `self._run_control_loop`, set in the intializer of the
class
]
if <ast.UnaryOp object at 0x7da1b0bcb460> begin[:]
variable[err] assign[=] constant[`run` called, but not using the internal control loop. Use `start` instead]
<ast.Raise object at 0x7da1b0bcbee0>
call[name[self]._setup, parameter[]]
call[name[self]._heartbeat_reciever.start, parameter[]]
if name[blocking] begin[:]
return[call[name[self].loop.start, parameter[]]] | keyword[def] identifier[run] ( identifier[self] , identifier[blocking] : identifier[bool] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_run_control_loop] :
identifier[err] =( literal[string]
literal[string] )
keyword[raise] identifier[RuntimeError] ( identifier[err] )
identifier[self] . identifier[_setup] ()
identifier[self] . identifier[_heartbeat_reciever] . identifier[start] ()
keyword[if] identifier[blocking] :
keyword[return] identifier[self] . identifier[loop] . identifier[start] ()
keyword[else] :
identifier[self] . identifier[_run_thread] = identifier[_threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[loop] . identifier[start] ,
identifier[daemon] = keyword[True] )
identifier[self] . identifier[_thread] . identifier[run] () | def run(self, blocking: bool=True):
"""
Run the internal control loop.
Args:
blocking (bool): Defaults to `True`. If set to `False`, will
intialize a thread to run the control loop.
Raises:
RuntimeError: If called and not using the internal control loop
via `self._run_control_loop`, set in the intializer of the
class
"""
if not self._run_control_loop:
err = '`run` called, but not using the internal control loop. Use `start` instead'
raise RuntimeError(err) # depends on [control=['if'], data=[]]
self._setup()
self._heartbeat_reciever.start()
if blocking:
return self.loop.start() # depends on [control=['if'], data=[]]
else:
self._run_thread = _threading.Thread(target=self.loop.start, daemon=True)
self._thread.run() |
def transform_alias(self, alias, rest=''):
    """Expand *alias* into the system command string it stands for."""
    nargs, cmd = self.alias_table[alias]
    # Quote the command when it is a path that contains spaces.
    if ' ' in cmd and os.path.isfile(cmd):
        cmd = '"%s"' % cmd
    # The '%l' marker receives the user's entire remaining input line.
    if cmd.find('%l') >= 0:
        cmd = cmd.replace('%l', rest)
        rest = ''
    if nargs == 0:
        # Argument-less alias: append the rest of the line verbatim.
        return '%s %s' % (cmd, rest)
    # Split off exactly `nargs` positional arguments; whatever remains is
    # appended untouched after substitution.
    args = rest.split(None, nargs)
    if len(args) < nargs:
        raise AliasError('Alias <%s> requires %s arguments, %s given.' %
                         (alias, nargs, len(args)))
    return '%s %s' % (cmd % tuple(args[:nargs]), ' '.join(args[nargs:]))
constant[Transform alias to system command string.]
<ast.Tuple object at 0x7da2043463e0> assign[=] call[name[self].alias_table][name[alias]]
if <ast.BoolOp object at 0x7da204347190> begin[:]
variable[cmd] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[cmd]]
if compare[call[name[cmd].find, parameter[constant[%l]]] greater_or_equal[>=] constant[0]] begin[:]
variable[cmd] assign[=] call[name[cmd].replace, parameter[constant[%l], name[rest]]]
variable[rest] assign[=] constant[]
if compare[name[nargs] equal[==] constant[0]] begin[:]
variable[cmd] assign[=] binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b021de70>, <ast.Name object at 0x7da1b021ca30>]]]
return[name[cmd]] | keyword[def] identifier[transform_alias] ( identifier[self] , identifier[alias] , identifier[rest] = literal[string] ):
literal[string]
identifier[nargs] , identifier[cmd] = identifier[self] . identifier[alias_table] [ identifier[alias] ]
keyword[if] literal[string] keyword[in] identifier[cmd] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[cmd] ):
identifier[cmd] = literal[string] % identifier[cmd]
keyword[if] identifier[cmd] . identifier[find] ( literal[string] )>= literal[int] :
identifier[cmd] = identifier[cmd] . identifier[replace] ( literal[string] , identifier[rest] )
identifier[rest] = literal[string]
keyword[if] identifier[nargs] == literal[int] :
identifier[cmd] = literal[string] %( identifier[cmd] , identifier[rest] )
keyword[else] :
identifier[args] = identifier[rest] . identifier[split] ( keyword[None] , identifier[nargs] )
keyword[if] identifier[len] ( identifier[args] )< identifier[nargs] :
keyword[raise] identifier[AliasError] ( literal[string] %
( identifier[alias] , identifier[nargs] , identifier[len] ( identifier[args] )))
identifier[cmd] = literal[string] %( identifier[cmd] % identifier[tuple] ( identifier[args] [: identifier[nargs] ]), literal[string] . identifier[join] ( identifier[args] [ identifier[nargs] :]))
keyword[return] identifier[cmd] | def transform_alias(self, alias, rest=''):
"""Transform alias to system command string."""
(nargs, cmd) = self.alias_table[alias]
if ' ' in cmd and os.path.isfile(cmd):
cmd = '"%s"' % cmd # depends on [control=['if'], data=[]]
# Expand the %l special to be the user's input line
if cmd.find('%l') >= 0:
cmd = cmd.replace('%l', rest)
rest = '' # depends on [control=['if'], data=[]]
if nargs == 0:
# Simple, argument-less aliases
cmd = '%s %s' % (cmd, rest) # depends on [control=['if'], data=[]]
else:
# Handle aliases with positional arguments
args = rest.split(None, nargs)
if len(args) < nargs:
raise AliasError('Alias <%s> requires %s arguments, %s given.' % (alias, nargs, len(args))) # depends on [control=['if'], data=['nargs']]
cmd = '%s %s' % (cmd % tuple(args[:nargs]), ' '.join(args[nargs:]))
return cmd |
def Fgamma(m, x):
    """
    Incomplete gamma function
    >>> np.isclose(Fgamma(0,0),1.0)
    True
    """
    # Clamp x away from zero to avoid a singularity in x**(-m-0.5).
    SMALL = 1e-12
    if x < SMALL:
        x = SMALL
    return 0.5 * x ** (-m - 0.5) * gamm_inc(m + 0.5, x)
constant[
Incomplete gamma function
>>> np.isclose(Fgamma(0,0),1.0)
True
]
variable[SMALL] assign[=] constant[1e-12]
variable[x] assign[=] call[name[max], parameter[name[x], name[SMALL]]]
return[binary_operation[binary_operation[constant[0.5] * call[name[pow], parameter[name[x], binary_operation[<ast.UnaryOp object at 0x7da20c6e4a90> - constant[0.5]]]]] * call[name[gamm_inc], parameter[binary_operation[name[m] + constant[0.5]], name[x]]]]] | keyword[def] identifier[Fgamma] ( identifier[m] , identifier[x] ):
literal[string]
identifier[SMALL] = literal[int]
identifier[x] = identifier[max] ( identifier[x] , identifier[SMALL] )
keyword[return] literal[int] * identifier[pow] ( identifier[x] ,- identifier[m] - literal[int] )* identifier[gamm_inc] ( identifier[m] + literal[int] , identifier[x] ) | def Fgamma(m, x):
"""
Incomplete gamma function
>>> np.isclose(Fgamma(0,0),1.0)
True
"""
SMALL = 1e-12
x = max(x, SMALL)
return 0.5 * pow(x, -m - 0.5) * gamm_inc(m + 0.5, x) |
def validate(self):
    """
    Validate the current file against the SLD schema. This first normalizes
    the SLD document, then validates it. Any schema validation error messages
    are logged at the INFO level.
    @rtype: boolean
    @return: A flag indicating if the SLD is valid.
    """
    self.normalize()
    if self._node is None:
        logging.debug('The node is empty, and cannot be validated.')
        return False
    # Build (and cache) the schema validator on first use.
    schema = self._schema
    if schema is None:
        schema = self._schema = XMLSchema(self._schemadoc)
    result = schema.validate(self._node)
    # Surface every schema complaint at INFO level.
    for entry in schema.error_log:
        logging.info('Line:%d, Column:%d -- %s', entry.line, entry.column, entry.message)
    return result
constant[
Validate the current file against the SLD schema. This first normalizes
the SLD document, then validates it. Any schema validation error messages
are logged at the INFO level.
@rtype: boolean
@return: A flag indicating if the SLD is valid.
]
call[name[self].normalize, parameter[]]
if compare[name[self]._node is constant[None]] begin[:]
call[name[logging].debug, parameter[constant[The node is empty, and cannot be validated.]]]
return[constant[False]]
if compare[name[self]._schema is constant[None]] begin[:]
name[self]._schema assign[=] call[name[XMLSchema], parameter[name[self]._schemadoc]]
variable[is_valid] assign[=] call[name[self]._schema.validate, parameter[name[self]._node]]
for taget[name[msg]] in starred[name[self]._schema.error_log] begin[:]
call[name[logging].info, parameter[constant[Line:%d, Column:%d -- %s], name[msg].line, name[msg].column, name[msg].message]]
return[name[is_valid]] | keyword[def] identifier[validate] ( identifier[self] ):
literal[string]
identifier[self] . identifier[normalize] ()
keyword[if] identifier[self] . identifier[_node] keyword[is] keyword[None] :
identifier[logging] . identifier[debug] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_schema] keyword[is] keyword[None] :
identifier[self] . identifier[_schema] = identifier[XMLSchema] ( identifier[self] . identifier[_schemadoc] )
identifier[is_valid] = identifier[self] . identifier[_schema] . identifier[validate] ( identifier[self] . identifier[_node] )
keyword[for] identifier[msg] keyword[in] identifier[self] . identifier[_schema] . identifier[error_log] :
identifier[logging] . identifier[info] ( literal[string] , identifier[msg] . identifier[line] , identifier[msg] . identifier[column] , identifier[msg] . identifier[message] )
keyword[return] identifier[is_valid] | def validate(self):
"""
Validate the current file against the SLD schema. This first normalizes
the SLD document, then validates it. Any schema validation error messages
are logged at the INFO level.
@rtype: boolean
@return: A flag indicating if the SLD is valid.
"""
self.normalize()
if self._node is None:
logging.debug('The node is empty, and cannot be validated.')
return False # depends on [control=['if'], data=[]]
if self._schema is None:
self._schema = XMLSchema(self._schemadoc) # depends on [control=['if'], data=[]]
is_valid = self._schema.validate(self._node)
for msg in self._schema.error_log:
logging.info('Line:%d, Column:%d -- %s', msg.line, msg.column, msg.message) # depends on [control=['for'], data=['msg']]
return is_valid |
def C(self):
    '''
    Third vertex of triangle, Point subclass.
    '''
    # Lazily create the default vertex at (0, 1) on first access.
    if not hasattr(self, '_C'):
        self._C = Point(0, 1)
    return self._C
constant[
Third vertex of triangle, Point subclass.
]
<ast.Try object at 0x7da18fe930a0>
name[self]._C assign[=] call[name[Point], parameter[constant[0], constant[1]]]
return[name[self]._C] | keyword[def] identifier[C] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_C]
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[self] . identifier[_C] = identifier[Point] ( literal[int] , literal[int] )
keyword[return] identifier[self] . identifier[_C] | def C(self):
"""
Third vertex of triangle, Point subclass.
"""
try:
return self._C # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
self._C = Point(0, 1)
return self._C |
def render_search_template(self, id=None, body=None, params=None):
    """
    `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-template.html>`_
    :arg id: The id of the stored search template
    :arg body: The search definition template and its params
    """
    # Render against the stored template id (if given) via the _render API.
    endpoint = _make_path("_render", "template", id)
    return self.transport.perform_request("GET", endpoint, params=params, body=body)
constant[
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg id: The id of the stored search template
:arg body: The search definition template and its params
]
return[call[name[self].transport.perform_request, parameter[constant[GET], call[name[_make_path], parameter[constant[_render], constant[template], name[id]]]]]] | keyword[def] identifier[render_search_template] ( identifier[self] , identifier[id] = keyword[None] , identifier[body] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] (
literal[string] , identifier[_make_path] ( literal[string] , literal[string] , identifier[id] ), identifier[params] = identifier[params] , identifier[body] = identifier[body]
) | def render_search_template(self, id=None, body=None, params=None):
"""
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg id: The id of the stored search template
:arg body: The search definition template and its params
"""
return self.transport.perform_request('GET', _make_path('_render', 'template', id), params=params, body=body) |
def _build_graph(self, tags):
    """Builds a graph from the entities included in the tags.
    Note this is used internally.
    Args:
        tags (list): A list of the tags to include in graph
    Returns:
        graph : this is the resulting graph of the tagged entities.
    """
    graph = SimpleGraph()
    for index, left_tag in enumerate(tags):
        for left_idx in xrange(len(left_tag.get('entities'))):
            left_name = graph_key_from_tag(left_tag, left_idx)
            match_tokens = self.tokenizer.tokenize(
                left_tag.get('entities', [])[left_idx].get('match'))
            # Only connect to tags that start after this entity's tokens end.
            boundary = left_tag.get('start_token') + len(match_tokens)
            for right_tag in tags[index + 1:]:
                if right_tag.get('start_token') >= boundary:
                    for right_idx in xrange(len(right_tag.get('entities'))):
                        graph.add_edge(
                            left_name, graph_key_from_tag(right_tag, right_idx))
    return graph
constant[Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities.
]
variable[graph] assign[=] call[name[SimpleGraph], parameter[]]
for taget[name[tag_index]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[tags]]]]]] begin[:]
for taget[name[entity_index]] in starred[call[name[xrange], parameter[call[name[len], parameter[call[call[name[tags]][name[tag_index]].get, parameter[constant[entities]]]]]]]] begin[:]
variable[a_entity_name] assign[=] call[name[graph_key_from_tag], parameter[call[name[tags]][name[tag_index]], name[entity_index]]]
variable[tokens] assign[=] call[name[self].tokenizer.tokenize, parameter[call[call[call[call[name[tags]][name[tag_index]].get, parameter[constant[entities], list[[]]]]][name[entity_index]].get, parameter[constant[match]]]]]
for taget[name[tag]] in starred[call[name[tags]][<ast.Slice object at 0x7da1b0863fd0>]] begin[:]
variable[start_token] assign[=] call[name[tag].get, parameter[constant[start_token]]]
if compare[name[start_token] greater_or_equal[>=] binary_operation[call[call[name[tags]][name[tag_index]].get, parameter[constant[start_token]]] + call[name[len], parameter[name[tokens]]]]] begin[:]
for taget[name[b_entity_index]] in starred[call[name[xrange], parameter[call[name[len], parameter[call[name[tag].get, parameter[constant[entities]]]]]]]] begin[:]
variable[b_entity_name] assign[=] call[name[graph_key_from_tag], parameter[name[tag], name[b_entity_index]]]
call[name[graph].add_edge, parameter[name[a_entity_name], name[b_entity_name]]]
return[name[graph]] | keyword[def] identifier[_build_graph] ( identifier[self] , identifier[tags] ):
literal[string]
identifier[graph] = identifier[SimpleGraph] ()
keyword[for] identifier[tag_index] keyword[in] identifier[xrange] ( identifier[len] ( identifier[tags] )):
keyword[for] identifier[entity_index] keyword[in] identifier[xrange] ( identifier[len] ( identifier[tags] [ identifier[tag_index] ]. identifier[get] ( literal[string] ))):
identifier[a_entity_name] = identifier[graph_key_from_tag] ( identifier[tags] [ identifier[tag_index] ], identifier[entity_index] )
identifier[tokens] = identifier[self] . identifier[tokenizer] . identifier[tokenize] ( identifier[tags] [ identifier[tag_index] ]. identifier[get] ( literal[string] ,[])[ identifier[entity_index] ]. identifier[get] ( literal[string] ))
keyword[for] identifier[tag] keyword[in] identifier[tags] [ identifier[tag_index] + literal[int] :]:
identifier[start_token] = identifier[tag] . identifier[get] ( literal[string] )
keyword[if] identifier[start_token] >= identifier[tags] [ identifier[tag_index] ]. identifier[get] ( literal[string] )+ identifier[len] ( identifier[tokens] ):
keyword[for] identifier[b_entity_index] keyword[in] identifier[xrange] ( identifier[len] ( identifier[tag] . identifier[get] ( literal[string] ))):
identifier[b_entity_name] = identifier[graph_key_from_tag] ( identifier[tag] , identifier[b_entity_index] )
identifier[graph] . identifier[add_edge] ( identifier[a_entity_name] , identifier[b_entity_name] )
keyword[return] identifier[graph] | def _build_graph(self, tags):
"""Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities.
"""
graph = SimpleGraph()
for tag_index in xrange(len(tags)):
for entity_index in xrange(len(tags[tag_index].get('entities'))):
a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)
tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))
for tag in tags[tag_index + 1:]:
start_token = tag.get('start_token')
if start_token >= tags[tag_index].get('start_token') + len(tokens):
for b_entity_index in xrange(len(tag.get('entities'))):
b_entity_name = graph_key_from_tag(tag, b_entity_index)
graph.add_edge(a_entity_name, b_entity_name) # depends on [control=['for'], data=['b_entity_index']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tag']] # depends on [control=['for'], data=['entity_index']] # depends on [control=['for'], data=['tag_index']]
return graph |
def update_hints(obj, data):
    """Update the hints on the root-node of a workflow. Usually, schedule
    hints are fixed per function. Sometimes a user may want to set hints
    manually on a specific promised object. :func:`update_hints` uses the
    `update` method on the hints dictionary with `data` as its argument.
    :param obj: a :py:class:`PromisedObject`.
    :param data: a :py:class:`dict` containing additional hints.
    The hints are modified, in place, on the node. All workflows that contain
    the node are affected."""
    workflow = obj._workflow
    workflow.nodes[workflow.root].hints.update(data)
constant[Update the hints on the root-node of a workflow. Usually, schedule
hints are fixed per function. Sometimes a user may want to set hints
manually on a specific promised object. :func:`update_hints` uses the
`update` method on the hints dictionary with `data` as its argument.
:param obj: a :py:class:`PromisedObject`.
:param data: a :py:class:`dict` containing additional hints.
The hints are modified, in place, on the node. All workflows that contain
the node are affected.]
variable[root] assign[=] name[obj]._workflow.root
call[call[name[obj]._workflow.nodes][name[root]].hints.update, parameter[name[data]]] | keyword[def] identifier[update_hints] ( identifier[obj] , identifier[data] ):
literal[string]
identifier[root] = identifier[obj] . identifier[_workflow] . identifier[root]
identifier[obj] . identifier[_workflow] . identifier[nodes] [ identifier[root] ]. identifier[hints] . identifier[update] ( identifier[data] ) | def update_hints(obj, data):
"""Update the hints on the root-node of a workflow. Usually, schedule
hints are fixed per function. Sometimes a user may want to set hints
manually on a specific promised object. :func:`update_hints` uses the
`update` method on the hints dictionary with `data` as its argument.
:param obj: a :py:class:`PromisedObject`.
:param data: a :py:class:`dict` containing additional hints.
The hints are modified, in place, on the node. All workflows that contain
the node are affected."""
root = obj._workflow.root
obj._workflow.nodes[root].hints.update(data) |
def _get_app(self, appname):
"""
returns app object or None
"""
try:
app = APPS.get_app_config(appname)
except Exception as e:
self.err(e)
return
return app | def function[_get_app, parameter[self, appname]]:
constant[
returns app object or None
]
<ast.Try object at 0x7da18eb56ef0>
return[name[app]] | keyword[def] identifier[_get_app] ( identifier[self] , identifier[appname] ):
literal[string]
keyword[try] :
identifier[app] = identifier[APPS] . identifier[get_app_config] ( identifier[appname] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[err] ( identifier[e] )
keyword[return]
keyword[return] identifier[app] | def _get_app(self, appname):
"""
returns app object or None
"""
try:
app = APPS.get_app_config(appname) # depends on [control=['try'], data=[]]
except Exception as e:
self.err(e)
return # depends on [control=['except'], data=['e']]
return app |
def GetAnalyzerInstances(cls, analyzer_names):
    """Retrieves instances for all the specified analyzers.
    Args:
        analyzer_names (list[str]): names of the analyzers to retrieve.
    Returns:
        list[BaseAnalyzer]: analyzer instances.
    """
    # Instantiate only the registered analyzers whose names were requested.
    return [
        analyzer_class()
        for analyzer_name, analyzer_class in cls.GetAnalyzers()
        if analyzer_name in analyzer_names
    ]
constant[Retrieves instances for all the specified analyzers.
Args:
analyzer_names (list[str]): names of the analyzers to retrieve.
Returns:
list[BaseAnalyzer]: analyzer instances.
]
variable[analyzer_instances] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c7c9330>, <ast.Name object at 0x7da20c7cbfd0>]]] in starred[call[name[iter], parameter[call[name[cls].GetAnalyzers, parameter[]]]]] begin[:]
if compare[name[analyzer_name] in name[analyzer_names]] begin[:]
call[name[analyzer_instances].append, parameter[call[name[analyzer_class], parameter[]]]]
return[name[analyzer_instances]] | keyword[def] identifier[GetAnalyzerInstances] ( identifier[cls] , identifier[analyzer_names] ):
literal[string]
identifier[analyzer_instances] =[]
keyword[for] identifier[analyzer_name] , identifier[analyzer_class] keyword[in] identifier[iter] ( identifier[cls] . identifier[GetAnalyzers] ()):
keyword[if] identifier[analyzer_name] keyword[in] identifier[analyzer_names] :
identifier[analyzer_instances] . identifier[append] ( identifier[analyzer_class] ())
keyword[return] identifier[analyzer_instances] | def GetAnalyzerInstances(cls, analyzer_names):
"""Retrieves instances for all the specified analyzers.
Args:
analyzer_names (list[str]): names of the analyzers to retrieve.
Returns:
list[BaseAnalyzer]: analyzer instances.
"""
analyzer_instances = []
for (analyzer_name, analyzer_class) in iter(cls.GetAnalyzers()):
if analyzer_name in analyzer_names:
analyzer_instances.append(analyzer_class()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return analyzer_instances |
def config_unset(name,
                 value_regex=None,
                 repo=None,
                 user=None,
                 password=None,
                 output_encoding=None,
                 **kwargs):
    r'''
    .. versionadded:: 2015.8.0

    Ensure that the named config key is not present

    name
        The name of the configuration key to unset. This value can be a regex,
        but the regex must match the entire key name. For example, ``foo\.``
        would not match all keys in the ``foo`` section, it would be necessary
        to use ``foo\..+`` to do so.

    value_regex
        Regex indicating the values to unset for the matching key(s)

        .. note::
            This option behaves differently depending on whether or not ``all``
            is set to ``True``. If it is, then all values matching the regex
            will be deleted (this is the only way to delete multiple values
            from a multivar). If ``all`` is set to ``False``, then this state
            will fail if the regex matches more than one value in a multivar.

    all : False
        If ``True``, unset all matches

    repo
        Location of the git repository for which the config value should be
        set. Required unless ``global`` is set to ``True``.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    global : False
        If ``True``, this will set a global git config option

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1

    **Examples:**

    .. code-block:: yaml

        # Value matching 'baz'
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - value_regex: 'baz'
            - repo: /path/to/repo

        # Ensure entire multivar is unset
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - all: True

        # Ensure all variables in 'foo' section are unset, including multivars
        mylocalrepo:
          git.config_unset:
            - name: 'foo\..+'
            - all: True

        # Ensure that global config value is unset
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - global: True
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'No matching keys are set'}

    # Sanitize kwargs and make sure that no invalid ones were passed. This
    # allows us to accept 'global' as an argument to this function without
    # shadowing global(), while also not allowing unwanted arguments to be
    # passed.
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    global_ = kwargs.pop('global', False)
    all_ = kwargs.pop('all', False)
    if kwargs:
        return _fail(
            ret,
            salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not global_ and not repo:
        return _fail(
            ret,
            'Non-global config options require the \'repo\' argument to be '
            'set'
        )

    # Normalize name/value_regex to text so the git CLI receives strings.
    if not isinstance(name, six.string_types):
        name = six.text_type(name)
    if value_regex is not None:
        if not isinstance(value_regex, six.string_types):
            value_regex = six.text_type(value_regex)

    # Ensure that the key regex matches the full key name
    key = '^' + name.lstrip('^').rstrip('$') + '$'

    # Get matching keys/values
    pre_matches = __salt__['git.config_get_regexp'](
        cwd=repo,
        key=key,
        value_regex=value_regex,
        user=user,
        password=password,
        ignore_retcode=True,
        output_encoding=output_encoding,
        **{'global': global_}
    )

    if not pre_matches:
        # No changes need to be made
        return ret

    # Perform sanity check on the matches. We can't proceed if the value_regex
    # matches more than one value in a given key, and 'all' is not set to True
    if not all_:
        greedy_matches = ['{0} ({1})'.format(x, ', '.join(y))
                          for x, y in six.iteritems(pre_matches)
                          if len(y) > 1]
        if greedy_matches:
            if value_regex is not None:
                return _fail(
                    ret,
                    'Multiple values are matched by value_regex for the '
                    'following keys (set \'all\' to True to force removal): '
                    '{0}'.format('; '.join(greedy_matches))
                )
            else:
                return _fail(
                    ret,
                    'Multivar(s) matched by the key expression (set \'all\' '
                    'to True to force removal): {0}'.format(
                        '; '.join(greedy_matches)
                    )
                )

    if __opts__['test']:
        ret['changes'] = pre_matches
        return _neutral_test(
            ret,
            '{0} key(s) would have value(s) unset'.format(len(pre_matches))
        )

    if value_regex is None:
        pre = pre_matches
    else:
        # Get all keys matching the key expression, so we can accurately report
        # on changes made.
        pre = __salt__['git.config_get_regexp'](
            cwd=repo,
            key=key,
            value_regex=None,
            user=user,
            password=password,
            ignore_retcode=True,
            output_encoding=output_encoding,
            **{'global': global_}
        )

    failed = []
    # Unset the specified value(s). There is no unset for regexes so loop
    # through the pre_matches dict and unset each matching key individually.
    for key_name in pre_matches:
        try:
            __salt__['git.config_unset'](
                cwd=repo,
                key=name,
                value_regex=value_regex,
                all=all_,
                user=user,
                password=password,
                output_encoding=output_encoding,
                **{'global': global_}
            )
        except CommandExecutionError as exc:
            msg = 'Failed to unset \'{0}\''.format(key_name)
            if value_regex is not None:
                # Bugfix: the original appended a literal, never-substituted
                # '{1}' placeholder here; format the regex in explicitly.
                msg += ' using value_regex \'{0}\''.format(value_regex)
            msg += ': ' + _strip_exc(exc)
            log.error(msg)
            failed.append(key_name)

    if failed:
        return _fail(
            ret,
            'Error(s) occurred unsetting values for the following keys (see '
            'the minion log for details): {0}'.format(', '.join(failed))
        )

    post = __salt__['git.config_get_regexp'](
        cwd=repo,
        key=key,
        value_regex=None,
        user=user,
        password=password,
        ignore_retcode=True,
        output_encoding=output_encoding,
        **{'global': global_}
    )

    for key_name in pre:
        if key_name not in post:
            # Entire key was removed; report all of its former values.
            ret['changes'][key_name] = pre[key_name]
        else:
            # Key still exists; report only the values that went away. The
            # 'else' guard also prevents a KeyError on post[key_name] when
            # the key was unset entirely (it is absent from 'post' then).
            unset = [x for x in pre[key_name] if x not in post[key_name]]
            if unset:
                ret['changes'][key_name] = unset

    if value_regex is None:
        post_matches = post
    else:
        post_matches = __salt__['git.config_get_regexp'](
            cwd=repo,
            key=key,
            value_regex=value_regex,
            user=user,
            password=password,
            ignore_retcode=True,
            output_encoding=output_encoding,
            **{'global': global_}
        )

    if post_matches:
        # Something the caller asked to remove is still present: fail.
        failed = ['{0} ({1})'.format(x, ', '.join(y))
                  for x, y in six.iteritems(post_matches)]
        return _fail(
            ret,
            'Failed to unset value(s): {0}'.format('; '.join(failed))
        )

    ret['comment'] = 'Value(s) successfully unset'
    return ret
constant[
.. versionadded:: 2015.8.0
Ensure that the named config key is not present
name
The name of the configuration key to unset. This value can be a regex,
but the regex must match the entire key name. For example, ``foo\.``
would not match all keys in the ``foo`` section, it would be necessary
to use ``foo\..+`` to do so.
value_regex
Regex indicating the values to unset for the matching key(s)
.. note::
This option behaves differently depending on whether or not ``all``
is set to ``True``. If it is, then all values matching the regex
will be deleted (this is the only way to delete multiple values
from a multivar). If ``all`` is set to ``False``, then this state
will fail if the regex matches more than one value in a multivar.
all : False
If ``True``, unset all matches
repo
Location of the git repository for which the config value should be
set. Required unless ``global`` is set to ``True``.
user
User under which to run git commands. By default, commands are run by
the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
global : False
If ``True``, this will set a global git config option
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
**Examples:**
.. code-block:: yaml
# Value matching 'baz'
mylocalrepo:
git.config_unset:
- name: foo.bar
- value_regex: 'baz'
- repo: /path/to/repo
# Ensure entire multivar is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- all: True
# Ensure all variables in 'foo' section are unset, including multivars
mylocalrepo:
git.config_unset:
- name: 'foo\..+'
- all: True
# Ensure that global config value is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- global: True
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f6f580>, <ast.Constant object at 0x7da1b1f6f550>, <ast.Constant object at 0x7da1b1f6f520>, <ast.Constant object at 0x7da1b1f6f4f0>], [<ast.Name object at 0x7da1b1f6f4c0>, <ast.Dict object at 0x7da1b1f6f490>, <ast.Constant object at 0x7da1b1f6f460>, <ast.Constant object at 0x7da1b1f6f430>]]
variable[kwargs] assign[=] call[name[salt].utils.args.clean_kwargs, parameter[]]
variable[global_] assign[=] call[name[kwargs].pop, parameter[constant[global], constant[False]]]
variable[all_] assign[=] call[name[kwargs].pop, parameter[constant[all], constant[False]]]
if name[kwargs] begin[:]
return[call[name[_fail], parameter[name[ret], call[name[salt].utils.args.invalid_kwargs, parameter[name[kwargs]]]]]]
if <ast.BoolOp object at 0x7da1b1f6ecb0> begin[:]
return[call[name[_fail], parameter[name[ret], constant[Non-global config options require the 'repo' argument to be set]]]]
if <ast.UnaryOp object at 0x7da1b1f6ea70> begin[:]
variable[name] assign[=] call[name[six].text_type, parameter[name[name]]]
if compare[name[value_regex] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b1f6e740> begin[:]
variable[value_regex] assign[=] call[name[six].text_type, parameter[name[value_regex]]]
variable[key] assign[=] binary_operation[binary_operation[constant[^] + call[call[name[name].lstrip, parameter[constant[^]]].rstrip, parameter[constant[$]]]] + constant[$]]
variable[pre_matches] assign[=] call[call[name[__salt__]][constant[git.config_get_regexp]], parameter[]]
if <ast.UnaryOp object at 0x7da1b1f6dde0> begin[:]
return[name[ret]]
if <ast.UnaryOp object at 0x7da1b1f6dcc0> begin[:]
variable[greedy_matches] assign[=] <ast.ListComp object at 0x7da1b1f6dc00>
if name[greedy_matches] begin[:]
if compare[name[value_regex] is_not constant[None]] begin[:]
return[call[name[_fail], parameter[name[ret], call[constant[Multiple values are matched by value_regex for the following keys (set 'all' to True to force removal): {0}].format, parameter[call[constant[; ].join, parameter[name[greedy_matches]]]]]]]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[changes]] assign[=] name[pre_matches]
return[call[name[_neutral_test], parameter[name[ret], call[constant[{0} key(s) would have value(s) unset].format, parameter[call[name[len], parameter[name[pre_matches]]]]]]]]
if compare[name[value_regex] is constant[None]] begin[:]
variable[pre] assign[=] name[pre_matches]
variable[failed] assign[=] list[[]]
for taget[name[key_name]] in starred[name[pre_matches]] begin[:]
<ast.Try object at 0x7da1b1f6c760>
if name[failed] begin[:]
return[call[name[_fail], parameter[name[ret], call[constant[Error(s) occurred unsetting values for the following keys (see the minion log for details): {0}].format, parameter[call[constant[, ].join, parameter[name[failed]]]]]]]]
variable[post] assign[=] call[call[name[__salt__]][constant[git.config_get_regexp]], parameter[]]
for taget[name[key_name]] in starred[name[pre]] begin[:]
if compare[name[key_name] <ast.NotIn object at 0x7da2590d7190> name[post]] begin[:]
call[call[name[ret]][constant[changes]]][name[key_name]] assign[=] call[name[pre]][name[key_name]]
variable[unset] assign[=] <ast.ListComp object at 0x7da1b1f48a60>
if name[unset] begin[:]
call[call[name[ret]][constant[changes]]][name[key_name]] assign[=] name[unset]
if compare[name[value_regex] is constant[None]] begin[:]
variable[post_matches] assign[=] name[post]
if name[post_matches] begin[:]
variable[failed] assign[=] <ast.ListComp object at 0x7da1b2034c70>
return[call[name[_fail], parameter[name[ret], call[constant[Failed to unset value(s): {0}].format, parameter[call[constant[; ].join, parameter[name[failed]]]]]]]]
call[name[ret]][constant[comment]] assign[=] constant[Value(s) successfully unset]
return[name[ret]] | keyword[def] identifier[config_unset] ( identifier[name] ,
identifier[value_regex] = keyword[None] ,
identifier[repo] = keyword[None] ,
identifier[user] = keyword[None] ,
identifier[password] = keyword[None] ,
identifier[output_encoding] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[True] ,
literal[string] : literal[string] }
identifier[kwargs] = identifier[salt] . identifier[utils] . identifier[args] . identifier[clean_kwargs] (** identifier[kwargs] )
identifier[global_] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[all_] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
keyword[if] identifier[kwargs] :
keyword[return] identifier[_fail] (
identifier[ret] ,
identifier[salt] . identifier[utils] . identifier[args] . identifier[invalid_kwargs] ( identifier[kwargs] , identifier[raise_exc] = keyword[False] )
)
keyword[if] keyword[not] identifier[global_] keyword[and] keyword[not] identifier[repo] :
keyword[return] identifier[_fail] (
identifier[ret] ,
literal[string]
literal[string]
)
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[six] . identifier[string_types] ):
identifier[name] = identifier[six] . identifier[text_type] ( identifier[name] )
keyword[if] identifier[value_regex] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[value_regex] , identifier[six] . identifier[string_types] ):
identifier[value_regex] = identifier[six] . identifier[text_type] ( identifier[value_regex] )
identifier[key] = literal[string] + identifier[name] . identifier[lstrip] ( literal[string] ). identifier[rstrip] ( literal[string] )+ literal[string]
identifier[pre_matches] = identifier[__salt__] [ literal[string] ](
identifier[cwd] = identifier[repo] ,
identifier[key] = identifier[key] ,
identifier[value_regex] = identifier[value_regex] ,
identifier[user] = identifier[user] ,
identifier[password] = identifier[password] ,
identifier[ignore_retcode] = keyword[True] ,
identifier[output_encoding] = identifier[output_encoding] ,
**{ literal[string] : identifier[global_] }
)
keyword[if] keyword[not] identifier[pre_matches] :
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[all_] :
identifier[greedy_matches] =[ literal[string] . identifier[format] ( identifier[x] , literal[string] . identifier[join] ( identifier[y] ))
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[six] . identifier[iteritems] ( identifier[pre_matches] )
keyword[if] identifier[len] ( identifier[y] )> literal[int] ]
keyword[if] identifier[greedy_matches] :
keyword[if] identifier[value_regex] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[_fail] (
identifier[ret] ,
literal[string]
literal[string]
literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[greedy_matches] ))
)
keyword[else] :
keyword[return] identifier[_fail] (
identifier[ret] ,
literal[string]
literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[greedy_matches] )
)
)
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= identifier[pre_matches]
keyword[return] identifier[_neutral_test] (
identifier[ret] ,
literal[string] . identifier[format] ( identifier[len] ( identifier[pre_matches] ))
)
keyword[if] identifier[value_regex] keyword[is] keyword[None] :
identifier[pre] = identifier[pre_matches]
keyword[else] :
identifier[pre] = identifier[__salt__] [ literal[string] ](
identifier[cwd] = identifier[repo] ,
identifier[key] = identifier[key] ,
identifier[value_regex] = keyword[None] ,
identifier[user] = identifier[user] ,
identifier[password] = identifier[password] ,
identifier[ignore_retcode] = keyword[True] ,
identifier[output_encoding] = identifier[output_encoding] ,
**{ literal[string] : identifier[global_] }
)
identifier[failed] =[]
keyword[for] identifier[key_name] keyword[in] identifier[pre_matches] :
keyword[try] :
identifier[__salt__] [ literal[string] ](
identifier[cwd] = identifier[repo] ,
identifier[key] = identifier[name] ,
identifier[value_regex] = identifier[value_regex] ,
identifier[all] = identifier[all_] ,
identifier[user] = identifier[user] ,
identifier[password] = identifier[password] ,
identifier[output_encoding] = identifier[output_encoding] ,
**{ literal[string] : identifier[global_] }
)
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[msg] = literal[string] . identifier[format] ( identifier[key_name] )
keyword[if] identifier[value_regex] keyword[is] keyword[not] keyword[None] :
identifier[msg] += literal[string]
identifier[msg] += literal[string] + identifier[_strip_exc] ( identifier[exc] )
identifier[log] . identifier[error] ( identifier[msg] )
identifier[failed] . identifier[append] ( identifier[key_name] )
keyword[if] identifier[failed] :
keyword[return] identifier[_fail] (
identifier[ret] ,
literal[string]
literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[failed] ))
)
identifier[post] = identifier[__salt__] [ literal[string] ](
identifier[cwd] = identifier[repo] ,
identifier[key] = identifier[key] ,
identifier[value_regex] = keyword[None] ,
identifier[user] = identifier[user] ,
identifier[password] = identifier[password] ,
identifier[ignore_retcode] = keyword[True] ,
identifier[output_encoding] = identifier[output_encoding] ,
**{ literal[string] : identifier[global_] }
)
keyword[for] identifier[key_name] keyword[in] identifier[pre] :
keyword[if] identifier[key_name] keyword[not] keyword[in] identifier[post] :
identifier[ret] [ literal[string] ][ identifier[key_name] ]= identifier[pre] [ identifier[key_name] ]
identifier[unset] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[pre] [ identifier[key_name] ] keyword[if] identifier[x] keyword[not] keyword[in] identifier[post] [ identifier[key_name] ]]
keyword[if] identifier[unset] :
identifier[ret] [ literal[string] ][ identifier[key_name] ]= identifier[unset]
keyword[if] identifier[value_regex] keyword[is] keyword[None] :
identifier[post_matches] = identifier[post]
keyword[else] :
identifier[post_matches] = identifier[__salt__] [ literal[string] ](
identifier[cwd] = identifier[repo] ,
identifier[key] = identifier[key] ,
identifier[value_regex] = identifier[value_regex] ,
identifier[user] = identifier[user] ,
identifier[password] = identifier[password] ,
identifier[ignore_retcode] = keyword[True] ,
identifier[output_encoding] = identifier[output_encoding] ,
**{ literal[string] : identifier[global_] }
)
keyword[if] identifier[post_matches] :
identifier[failed] =[ literal[string] . identifier[format] ( identifier[x] , literal[string] . identifier[join] ( identifier[y] ))
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[six] . identifier[iteritems] ( identifier[post_matches] )]
keyword[return] identifier[_fail] (
identifier[ret] ,
literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[failed] ))
)
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret] | def config_unset(name, value_regex=None, repo=None, user=None, password=None, output_encoding=None, **kwargs):
"""
.. versionadded:: 2015.8.0
Ensure that the named config key is not present
name
The name of the configuration key to unset. This value can be a regex,
but the regex must match the entire key name. For example, ``foo\\.``
would not match all keys in the ``foo`` section, it would be necessary
to use ``foo\\..+`` to do so.
value_regex
Regex indicating the values to unset for the matching key(s)
.. note::
This option behaves differently depending on whether or not ``all``
is set to ``True``. If it is, then all values matching the regex
will be deleted (this is the only way to delete multiple values
from a multivar). If ``all`` is set to ``False``, then this state
will fail if the regex matches more than one value in a multivar.
all : False
If ``True``, unset all matches
repo
Location of the git repository for which the config value should be
set. Required unless ``global`` is set to ``True``.
user
User under which to run git commands. By default, commands are run by
the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
global : False
If ``True``, this will set a global git config option
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
**Examples:**
.. code-block:: yaml
# Value matching 'baz'
mylocalrepo:
git.config_unset:
- name: foo.bar
- value_regex: 'baz'
- repo: /path/to/repo
# Ensure entire multivar is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- all: True
# Ensure all variables in 'foo' section are unset, including multivars
mylocalrepo:
git.config_unset:
- name: 'foo\\..+'
- all: True
# Ensure that global config value is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- global: True
"""
ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'No matching keys are set'}
# Sanitize kwargs and make sure that no invalid ones were passed. This
# allows us to accept 'global' as an argument to this function without
# shadowing global(), while also not allowing unwanted arguments to be
# passed.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
global_ = kwargs.pop('global', False)
all_ = kwargs.pop('all', False)
if kwargs:
return _fail(ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)) # depends on [control=['if'], data=[]]
if not global_ and (not repo):
return _fail(ret, "Non-global config options require the 'repo' argument to be set") # depends on [control=['if'], data=[]]
if not isinstance(name, six.string_types):
name = six.text_type(name) # depends on [control=['if'], data=[]]
if value_regex is not None:
if not isinstance(value_regex, six.string_types):
value_regex = six.text_type(value_regex) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['value_regex']]
# Ensure that the key regex matches the full key name
key = '^' + name.lstrip('^').rstrip('$') + '$'
# Get matching keys/values
pre_matches = __salt__['git.config_get_regexp'](cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_})
if not pre_matches:
# No changes need to be made
return ret # depends on [control=['if'], data=[]]
# Perform sanity check on the matches. We can't proceed if the value_regex
# matches more than one value in a given key, and 'all' is not set to True
if not all_:
greedy_matches = ['{0} ({1})'.format(x, ', '.join(y)) for (x, y) in six.iteritems(pre_matches) if len(y) > 1]
if greedy_matches:
if value_regex is not None:
return _fail(ret, "Multiple values are matched by value_regex for the following keys (set 'all' to True to force removal): {0}".format('; '.join(greedy_matches))) # depends on [control=['if'], data=[]]
else:
return _fail(ret, "Multivar(s) matched by the key expression (set 'all' to True to force removal): {0}".format('; '.join(greedy_matches))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if __opts__['test']:
ret['changes'] = pre_matches
return _neutral_test(ret, '{0} key(s) would have value(s) unset'.format(len(pre_matches))) # depends on [control=['if'], data=[]]
if value_regex is None:
pre = pre_matches # depends on [control=['if'], data=[]]
else:
# Get all keys matching the key expression, so we can accurately report
# on changes made.
pre = __salt__['git.config_get_regexp'](cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_})
failed = []
# Unset the specified value(s). There is no unset for regexes so loop
# through the pre_matches dict and unset each matching key individually.
for key_name in pre_matches:
try:
__salt__['git.config_unset'](cwd=repo, key=name, value_regex=value_regex, all=all_, user=user, password=password, output_encoding=output_encoding, **{'global': global_}) # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
msg = "Failed to unset '{0}'".format(key_name)
if value_regex is not None:
msg += " using value_regex '{1}'" # depends on [control=['if'], data=[]]
msg += ': ' + _strip_exc(exc)
log.error(msg)
failed.append(key_name) # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=['key_name']]
if failed:
return _fail(ret, 'Error(s) occurred unsetting values for the following keys (see the minion log for details): {0}'.format(', '.join(failed))) # depends on [control=['if'], data=[]]
post = __salt__['git.config_get_regexp'](cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_})
for key_name in pre:
if key_name not in post:
ret['changes'][key_name] = pre[key_name] # depends on [control=['if'], data=['key_name']]
unset = [x for x in pre[key_name] if x not in post[key_name]]
if unset:
ret['changes'][key_name] = unset # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key_name']]
if value_regex is None:
post_matches = post # depends on [control=['if'], data=[]]
else:
post_matches = __salt__['git.config_get_regexp'](cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_})
if post_matches:
failed = ['{0} ({1})'.format(x, ', '.join(y)) for (x, y) in six.iteritems(post_matches)]
return _fail(ret, 'Failed to unset value(s): {0}'.format('; '.join(failed))) # depends on [control=['if'], data=[]]
ret['comment'] = 'Value(s) successfully unset'
return ret |
def encode(self, word):
    """Compute the Kölner Phonetik code of *word* as a numeric string.

    Although every character of the result is a digit, the value is kept
    as ``str`` because a leading ``'0'`` (vowel code) is significant.

    Parameters
    ----------
    word : str
        The word to encode

    Returns
    -------
    str
        The Kölner Phonetik code

    Example
    -------
    >>> pe = Koelner()
    >>> pe.encode('Christopher')
    '478237'
    >>> pe.encode('Niall')
    '65'
    >>> pe.encode('Smith')
    '862'
    >>> pe.encode('Schmidt')
    '862'
    >>> pe.encode('Müller')
    '657'
    >>> pe.encode('Zimmermann')
    '86766'

    """

    def _prev_in(text, idx, chars):
        """Return True if the letter before text[idx] is in *chars*.

        Parameters
        ----------
        text : str
            The word being encoded
        idx : int
            Position within text
        chars : set
            Candidate preceding letters

        Returns
        -------
        bool
            True if text[idx - 1] exists and is in chars

        """
        return idx > 0 and text[idx - 1] in chars

    def _next_in(text, idx, chars):
        """Return True if the letter after text[idx] is in *chars*.

        Parameters
        ----------
        text : str
            The word being encoded
        idx : int
            Position within text
        chars : set
            Candidate following letters

        Returns
        -------
        bool
            True if text[idx + 1] exists and is in chars

        """
        return idx + 1 < len(text) and text[idx + 1] in chars

    word = unicode_normalize('NFKD', text_type(word.upper()))
    # Expand German special characters before filtering to A-Z.
    for src, tgt in (('ß', 'SS'), ('Ä', 'AE'), ('Ö', 'OE'), ('Ü', 'UE')):
        word = word.replace(src, tgt)
    word = ''.join(ch for ch in word if ch in self._uc_set)

    # Base case: nothing left to encode.
    if not word:
        return ''

    # Letters whose code never depends on context.
    unconditional = {'B': '1', 'L': '5', 'R': '7'}

    codes = []
    for pos, ch in enumerate(word):
        if ch in self._uc_v_set:
            codes.append('0')
        elif ch in unconditional:
            codes.append(unconditional[ch])
        elif ch == 'P':
            # 'PH' sounds like F (code 3), otherwise like B/P (code 1).
            codes.append('3' if _next_in(word, pos, {'H'}) else '1')
        elif ch in 'DT':
            # D/T before a sibilant is coded with it as 8.
            codes.append('8' if _next_in(word, pos, {'C', 'S', 'Z'}) else '2')
        elif ch in 'FVW':
            codes.append('3')
        elif ch in 'GKQ':
            codes.append('4')
        elif ch == 'C':
            # C is the most context-sensitive letter of the scheme.
            if _prev_in(word, pos, {'S', 'Z'}):
                codes.append('8')
            elif pos == 0:
                hard = _next_in(
                    word, pos, {'A', 'H', 'K', 'L', 'O', 'Q', 'R', 'U', 'X'}
                )
                codes.append('4' if hard else '8')
            elif _next_in(word, pos, {'A', 'H', 'K', 'O', 'Q', 'U', 'X'}):
                codes.append('4')
            else:
                codes.append('8')
        elif ch == 'X':
            # X after C/K/Q contributes only the sibilant; otherwise K+S.
            codes.append('8' if _prev_in(word, pos, {'C', 'K', 'Q'}) else '48')
        elif ch in 'MN':
            codes.append('6')
        elif ch in 'SZ':
            codes.append('8')
        # H and any unlisted letter contribute no code at all.

    sdx = self._delete_consecutive_repeats(''.join(codes))
    # Drop vowel codes everywhere except in the leading position.
    if sdx:
        sdx = sdx[:1] + sdx[1:].replace('0', '')
    return sdx
constant[Return the Kölner Phonetik (numeric output) code for a word.
While the output code is numeric, it is still a str because 0s can lead
the code.
Parameters
----------
word : str
The word to transform
Returns
-------
str
The Kölner Phonetik value as a numeric string
Example
-------
>>> pe = Koelner()
>>> pe.encode('Christopher')
'478237'
>>> pe.encode('Niall')
'65'
>>> pe.encode('Smith')
'862'
>>> pe.encode('Schmidt')
'862'
>>> pe.encode('Müller')
'657'
>>> pe.encode('Zimmermann')
'86766'
]
def function[_after, parameter[word, pos, letters]]:
constant[Return True if word[pos] follows one of the supplied letters.
Parameters
----------
word : str
The word to check
pos : int
Position within word to check
letters : str
Letters to confirm precede word[pos]
Returns
-------
bool
True if word[pos] follows a value in letters
]
return[<ast.BoolOp object at 0x7da1b01e4d00>]
def function[_before, parameter[word, pos, letters]]:
constant[Return True if word[pos] precedes one of the supplied letters.
Parameters
----------
word : str
The word to check
pos : int
Position within word to check
letters : str
Letters to confirm follow word[pos]
Returns
-------
bool
True if word[pos] precedes a value in letters
]
return[<ast.BoolOp object at 0x7da1b01e48e0>]
variable[sdx] assign[=] constant[]
variable[word] assign[=] call[name[unicode_normalize], parameter[constant[NFKD], call[name[text_type], parameter[call[name[word].upper, parameter[]]]]]]
variable[word] assign[=] call[name[word].replace, parameter[constant[ß], constant[SS]]]
variable[word] assign[=] call[name[word].replace, parameter[constant[Ä], constant[AE]]]
variable[word] assign[=] call[name[word].replace, parameter[constant[Ö], constant[OE]]]
variable[word] assign[=] call[name[word].replace, parameter[constant[Ü], constant[UE]]]
variable[word] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1b01e69b0>]]
if <ast.UnaryOp object at 0x7da1b01e4e20> begin[:]
return[name[sdx]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[word]]]]]] begin[:]
if compare[call[name[word]][name[i]] in name[self]._uc_v_set] begin[:]
<ast.AugAssign object at 0x7da1b01e7ca0>
variable[sdx] assign[=] call[name[self]._delete_consecutive_repeats, parameter[name[sdx]]]
if name[sdx] begin[:]
variable[sdx] assign[=] binary_operation[call[name[sdx]][<ast.Slice object at 0x7da1b0190e50>] + call[call[name[sdx]][<ast.Slice object at 0x7da1b0190f70>].replace, parameter[constant[0], constant[]]]]
return[name[sdx]] | keyword[def] identifier[encode] ( identifier[self] , identifier[word] ):
literal[string]
keyword[def] identifier[_after] ( identifier[word] , identifier[pos] , identifier[letters] ):
literal[string]
keyword[return] identifier[pos] > literal[int] keyword[and] identifier[word] [ identifier[pos] - literal[int] ] keyword[in] identifier[letters]
keyword[def] identifier[_before] ( identifier[word] , identifier[pos] , identifier[letters] ):
literal[string]
keyword[return] identifier[pos] + literal[int] < identifier[len] ( identifier[word] ) keyword[and] identifier[word] [ identifier[pos] + literal[int] ] keyword[in] identifier[letters]
identifier[sdx] = literal[string]
identifier[word] = identifier[unicode_normalize] ( literal[string] , identifier[text_type] ( identifier[word] . identifier[upper] ()))
identifier[word] = identifier[word] . identifier[replace] ( literal[string] , literal[string] )
identifier[word] = identifier[word] . identifier[replace] ( literal[string] , literal[string] )
identifier[word] = identifier[word] . identifier[replace] ( literal[string] , literal[string] )
identifier[word] = identifier[word] . identifier[replace] ( literal[string] , literal[string] )
identifier[word] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[word] keyword[if] identifier[c] keyword[in] identifier[self] . identifier[_uc_set] )
keyword[if] keyword[not] identifier[word] :
keyword[return] identifier[sdx]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[word] )):
keyword[if] identifier[word] [ identifier[i] ] keyword[in] identifier[self] . identifier[_uc_v_set] :
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ]== literal[string] :
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ]== literal[string] :
keyword[if] identifier[_before] ( identifier[word] , identifier[i] ,{ literal[string] }):
identifier[sdx] += literal[string]
keyword[else] :
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ] keyword[in] { literal[string] , literal[string] }:
keyword[if] identifier[_before] ( identifier[word] , identifier[i] ,{ literal[string] , literal[string] , literal[string] }):
identifier[sdx] += literal[string]
keyword[else] :
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ] keyword[in] { literal[string] , literal[string] , literal[string] }:
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ] keyword[in] { literal[string] , literal[string] , literal[string] }:
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ]== literal[string] :
keyword[if] identifier[_after] ( identifier[word] , identifier[i] ,{ literal[string] , literal[string] }):
identifier[sdx] += literal[string]
keyword[elif] identifier[i] == literal[int] :
keyword[if] identifier[_before] (
identifier[word] , identifier[i] ,{ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] }
):
identifier[sdx] += literal[string]
keyword[else] :
identifier[sdx] += literal[string]
keyword[elif] identifier[_before] ( identifier[word] , identifier[i] ,{ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] }):
identifier[sdx] += literal[string]
keyword[else] :
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ]== literal[string] :
keyword[if] identifier[_after] ( identifier[word] , identifier[i] ,{ literal[string] , literal[string] , literal[string] }):
identifier[sdx] += literal[string]
keyword[else] :
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ]== literal[string] :
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ] keyword[in] { literal[string] , literal[string] }:
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ]== literal[string] :
identifier[sdx] += literal[string]
keyword[elif] identifier[word] [ identifier[i] ] keyword[in] { literal[string] , literal[string] }:
identifier[sdx] += literal[string]
identifier[sdx] = identifier[self] . identifier[_delete_consecutive_repeats] ( identifier[sdx] )
keyword[if] identifier[sdx] :
identifier[sdx] = identifier[sdx] [: literal[int] ]+ identifier[sdx] [ literal[int] :]. identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[sdx] | def encode(self, word):
"""Return the Kölner Phonetik (numeric output) code for a word.
While the output code is numeric, it is still a str because 0s can lead
the code.
Parameters
----------
word : str
The word to transform
Returns
-------
str
The Kölner Phonetik value as a numeric string
Example
-------
>>> pe = Koelner()
>>> pe.encode('Christopher')
'478237'
>>> pe.encode('Niall')
'65'
>>> pe.encode('Smith')
'862'
>>> pe.encode('Schmidt')
'862'
>>> pe.encode('Müller')
'657'
>>> pe.encode('Zimmermann')
'86766'
"""
def _after(word, pos, letters):
"""Return True if word[pos] follows one of the supplied letters.
Parameters
----------
word : str
The word to check
pos : int
Position within word to check
letters : str
Letters to confirm precede word[pos]
Returns
-------
bool
True if word[pos] follows a value in letters
"""
return pos > 0 and word[pos - 1] in letters
def _before(word, pos, letters):
"""Return True if word[pos] precedes one of the supplied letters.
Parameters
----------
word : str
The word to check
pos : int
Position within word to check
letters : str
Letters to confirm follow word[pos]
Returns
-------
bool
True if word[pos] precedes a value in letters
"""
return pos + 1 < len(word) and word[pos + 1] in letters
sdx = ''
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = word.replace('Ä', 'AE')
word = word.replace('Ö', 'OE')
word = word.replace('Ü', 'UE')
word = ''.join((c for c in word if c in self._uc_set))
# Nothing to convert, return base case
if not word:
return sdx # depends on [control=['if'], data=[]]
for i in range(len(word)):
if word[i] in self._uc_v_set:
sdx += '0' # depends on [control=['if'], data=[]]
elif word[i] == 'B':
sdx += '1' # depends on [control=['if'], data=[]]
elif word[i] == 'P':
if _before(word, i, {'H'}):
sdx += '3' # depends on [control=['if'], data=[]]
else:
sdx += '1' # depends on [control=['if'], data=[]]
elif word[i] in {'D', 'T'}:
if _before(word, i, {'C', 'S', 'Z'}):
sdx += '8' # depends on [control=['if'], data=[]]
else:
sdx += '2' # depends on [control=['if'], data=[]]
elif word[i] in {'F', 'V', 'W'}:
sdx += '3' # depends on [control=['if'], data=[]]
elif word[i] in {'G', 'K', 'Q'}:
sdx += '4' # depends on [control=['if'], data=[]]
elif word[i] == 'C':
if _after(word, i, {'S', 'Z'}):
sdx += '8' # depends on [control=['if'], data=[]]
elif i == 0:
if _before(word, i, {'A', 'H', 'K', 'L', 'O', 'Q', 'R', 'U', 'X'}):
sdx += '4' # depends on [control=['if'], data=[]]
else:
sdx += '8' # depends on [control=['if'], data=['i']]
elif _before(word, i, {'A', 'H', 'K', 'O', 'Q', 'U', 'X'}):
sdx += '4' # depends on [control=['if'], data=[]]
else:
sdx += '8' # depends on [control=['if'], data=[]]
elif word[i] == 'X':
if _after(word, i, {'C', 'K', 'Q'}):
sdx += '8' # depends on [control=['if'], data=[]]
else:
sdx += '48' # depends on [control=['if'], data=[]]
elif word[i] == 'L':
sdx += '5' # depends on [control=['if'], data=[]]
elif word[i] in {'M', 'N'}:
sdx += '6' # depends on [control=['if'], data=[]]
elif word[i] == 'R':
sdx += '7' # depends on [control=['if'], data=[]]
elif word[i] in {'S', 'Z'}:
sdx += '8' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
sdx = self._delete_consecutive_repeats(sdx)
if sdx:
sdx = sdx[:1] + sdx[1:].replace('0', '') # depends on [control=['if'], data=[]]
return sdx |
def create_setup_cfg(self, path):  # type: (str) -> None
    """Write a minimal setup.cfg for this project at *path*.

    :param path: destination file path for the generated setup.cfg
    :return: None
    """
    template = "[metadata]\nname = {0}\nversion=0.0.1\n"
    contents = template.format(self.project)
    with io.open(path, "w", encoding="utf-8") as config_file:
        config_file.write(contents)
constant[
Just setup.cfg
:param path:
:return:
]
variable[source] assign[=] call[constant[[metadata]
name = {0}
version=0.0.1
].format, parameter[name[self].project]]
with call[name[io].open, parameter[name[path], constant[w]]] begin[:]
call[name[outfile].write, parameter[name[source]]] | keyword[def] identifier[create_setup_cfg] ( identifier[self] , identifier[path] ):
literal[string]
identifier[source] = literal[string] . identifier[format] (
identifier[self] . identifier[project]
)
keyword[with] identifier[io] . identifier[open] ( identifier[path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[outfile] :
identifier[outfile] . identifier[write] ( identifier[source] ) | def create_setup_cfg(self, path): # type: (str) -> None
'\n Just setup.cfg\n :param path:\n :return:\n '
source = '[metadata]\nname = {0}\nversion=0.0.1 \n'.format(self.project)
with io.open(path, 'w', encoding='utf-8') as outfile:
outfile.write(source) # depends on [control=['with'], data=['outfile']] |
def registration_agency(self, ids, **kwargs):
    '''
    Determine the registration agency for DOIs.

    :param ids: [Array] DOIs (digital object identifiers) or other identifiers
    :param kwargs: additional named arguments passed on to `requests.get`, e.g., field
        queries (see examples)
    :return: list of DOI minting agency labels

    Usage::

        from habanero import Crossref
        cr = Crossref()
        cr.registration_agency('10.1371/journal.pone.0033693')
        cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993'])
    '''
    # These query parameters are not supported by the registration-agency
    # route; reject them up front with a clear error.
    check_kwargs(["query", "filter", "offset", "limit", "sample", "sort",
                  "order", "facet", "works"], kwargs)
    res = request(self.mailto, self.base_url, "/works/", ids,
                  None, None, None, None, None, None,
                  None, None, None, None, None, None, True, **kwargs)
    # A single identifier yields a bare dict while multiple identifiers yield
    # a list; normalize to a list so the comprehension handles both shapes.
    # isinstance() is the idiomatic check (and also accepts list subclasses),
    # unlike comparing __class__ directly.
    results = res if isinstance(res, list) else [res]
    return [item['message']['agency']['label'] for item in results]
constant[
Determine registration agency for DOIs
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: list of DOI minting agencies
Usage::
from habanero import Crossref
cr = Crossref()
cr.registration_agency('10.1371/journal.pone.0033693')
cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993'])
]
call[name[check_kwargs], parameter[list[[<ast.Constant object at 0x7da1b0538340>, <ast.Constant object at 0x7da1b0538430>, <ast.Constant object at 0x7da1b05384f0>, <ast.Constant object at 0x7da1b053abf0>, <ast.Constant object at 0x7da1b053bb80>, <ast.Constant object at 0x7da1b053aa40>, <ast.Constant object at 0x7da1b0538190>, <ast.Constant object at 0x7da1b05388e0>, <ast.Constant object at 0x7da1b053add0>]], name[kwargs]]]
variable[res] assign[=] call[name[request], parameter[name[self].mailto, name[self].base_url, constant[/works/], name[ids], constant[None], constant[None], constant[None], constant[None], constant[None], constant[None], constant[None], constant[None], constant[None], constant[None], constant[None], constant[None], constant[True]]]
if compare[name[res].__class__ not_equal[!=] name[list]] begin[:]
variable[k] assign[=] list[[]]
call[name[k].append, parameter[name[res]]]
return[<ast.ListComp object at 0x7da1b0447760>] | keyword[def] identifier[registration_agency] ( identifier[self] , identifier[ids] ,** identifier[kwargs] ):
literal[string]
identifier[check_kwargs] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ], identifier[kwargs] )
identifier[res] = identifier[request] ( identifier[self] . identifier[mailto] , identifier[self] . identifier[base_url] , literal[string] , identifier[ids] ,
keyword[None] , keyword[None] , keyword[None] , keyword[None] , keyword[None] , keyword[None] ,
keyword[None] , keyword[None] , keyword[None] , keyword[None] , keyword[None] , keyword[None] , keyword[True] ,** identifier[kwargs] )
keyword[if] identifier[res] . identifier[__class__] != identifier[list] :
identifier[k] =[]
identifier[k] . identifier[append] ( identifier[res] )
keyword[else] :
identifier[k] = identifier[res]
keyword[return] [ identifier[z] [ literal[string] ][ literal[string] ][ literal[string] ] keyword[for] identifier[z] keyword[in] identifier[k] ] | def registration_agency(self, ids, **kwargs):
"""
Determine registration agency for DOIs
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: list of DOI minting agencies
Usage::
from habanero import Crossref
cr = Crossref()
cr.registration_agency('10.1371/journal.pone.0033693')
cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993'])
"""
check_kwargs(['query', 'filter', 'offset', 'limit', 'sample', 'sort', 'order', 'facet', 'works'], kwargs)
res = request(self.mailto, self.base_url, '/works/', ids, None, None, None, None, None, None, None, None, None, None, None, None, True, **kwargs)
if res.__class__ != list:
k = []
k.append(res) # depends on [control=['if'], data=[]]
else:
k = res
return [z['message']['agency']['label'] for z in k] |
def build_simple_cnn_text_classifier(tok2vec, nr_class, exclusive_classes=False, **cfg):
    """
    Build a simple CNN text classifier, given a token-to-vector model as inputs.
    If exclusive_classes=True, a softmax non-linearity is applied, so that the
    outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
    is applied instead, so that outputs are in the range [0, 1].
    """
    # ">>" is bound to `chain` only inside this block, so the pipelines below
    # read left-to-right as layer composition.
    with Model.define_operators({">>": chain}):
        if exclusive_classes:
            # Mutually exclusive labels: softmax yields a distribution
            # over the nr_class outputs.
            output_layer = Softmax(nr_class, tok2vec.nO)
        else:
            # Independent labels: zero-initialized affine projection followed
            # by a per-class logistic, so each output lies in [0, 1].
            output_layer = (
                zero_init(Affine(nr_class, tok2vec.nO, drop_factor=0.0)) >> logistic
            )
        # Pool the per-token vectors into one document vector (mean over
        # tokens), then classify it.
        model = tok2vec >> flatten_add_lengths >> Pooling(mean_pool) >> output_layer
    # Expose the token-to-vector sub-model for downstream components.
    model.tok2vec = chain(tok2vec, flatten)
    # nO records the model's output width (number of classes).
    model.nO = nr_class
    return model
constant[
Build a simple CNN text classifier, given a token-to-vector model as inputs.
If exclusive_classes=True, a softmax non-linearity is applied, so that the
outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
is applied instead, so that outputs are in the range [0, 1].
]
with call[name[Model].define_operators, parameter[dictionary[[<ast.Constant object at 0x7da1b1ef8370>], [<ast.Name object at 0x7da1b1ef82b0>]]]] begin[:]
if name[exclusive_classes] begin[:]
variable[output_layer] assign[=] call[name[Softmax], parameter[name[nr_class], name[tok2vec].nO]]
variable[model] assign[=] binary_operation[binary_operation[binary_operation[name[tok2vec] <ast.RShift object at 0x7da2590d6a40> name[flatten_add_lengths]] <ast.RShift object at 0x7da2590d6a40> call[name[Pooling], parameter[name[mean_pool]]]] <ast.RShift object at 0x7da2590d6a40> name[output_layer]]
name[model].tok2vec assign[=] call[name[chain], parameter[name[tok2vec], name[flatten]]]
name[model].nO assign[=] name[nr_class]
return[name[model]] | keyword[def] identifier[build_simple_cnn_text_classifier] ( identifier[tok2vec] , identifier[nr_class] , identifier[exclusive_classes] = keyword[False] ,** identifier[cfg] ):
literal[string]
keyword[with] identifier[Model] . identifier[define_operators] ({ literal[string] : identifier[chain] }):
keyword[if] identifier[exclusive_classes] :
identifier[output_layer] = identifier[Softmax] ( identifier[nr_class] , identifier[tok2vec] . identifier[nO] )
keyword[else] :
identifier[output_layer] =(
identifier[zero_init] ( identifier[Affine] ( identifier[nr_class] , identifier[tok2vec] . identifier[nO] , identifier[drop_factor] = literal[int] ))>> identifier[logistic]
)
identifier[model] = identifier[tok2vec] >> identifier[flatten_add_lengths] >> identifier[Pooling] ( identifier[mean_pool] )>> identifier[output_layer]
identifier[model] . identifier[tok2vec] = identifier[chain] ( identifier[tok2vec] , identifier[flatten] )
identifier[model] . identifier[nO] = identifier[nr_class]
keyword[return] identifier[model] | def build_simple_cnn_text_classifier(tok2vec, nr_class, exclusive_classes=False, **cfg):
"""
Build a simple CNN text classifier, given a token-to-vector model as inputs.
If exclusive_classes=True, a softmax non-linearity is applied, so that the
outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
is applied instead, so that outputs are in the range [0, 1].
"""
with Model.define_operators({'>>': chain}):
if exclusive_classes:
output_layer = Softmax(nr_class, tok2vec.nO) # depends on [control=['if'], data=[]]
else:
output_layer = zero_init(Affine(nr_class, tok2vec.nO, drop_factor=0.0)) >> logistic
model = tok2vec >> flatten_add_lengths >> Pooling(mean_pool) >> output_layer # depends on [control=['with'], data=[]]
model.tok2vec = chain(tok2vec, flatten)
model.nO = nr_class
return model |
def URL(base, path, segments=None, defaults=None):
    """
    URL segment handler capable of getting and setting segments by name.

    The URL is constructed by joining base, path and segments. For each named
    segment a property capable of getting and setting that segment is attached
    dynamically to a per-call copy of the Segments class.
    """
    if segments is None:
        segments = []
    if defaults is None:
        defaults = []
    # Clone Segments so the dynamically attached properties never leak onto
    # the shared base class.
    url_cls = type(Segments.__name__, Segments.__bases__, dict(Segments.__dict__))
    for segment_name in segments:
        setattr(url_cls, segment_name, url_cls._segment(segment_name))
    # Instantiate the customized class with the actual parameters.
    return url_cls(base, path, segments, defaults)
constant[
URL segment handler capable of getting and setting segments by name. The
URL is constructed by joining base, path and segments.
For each segment a property capable of getting and setting that segment is
created dynamically.
]
variable[url_class] assign[=] call[name[type], parameter[name[Segments].__name__, name[Segments].__bases__, call[name[dict], parameter[name[Segments].__dict__]]]]
variable[segments] assign[=] <ast.IfExp object at 0x7da1b2345f60>
variable[defaults] assign[=] <ast.IfExp object at 0x7da1b23470d0>
for taget[name[segment]] in starred[name[segments]] begin[:]
call[name[setattr], parameter[name[url_class], name[segment], call[name[url_class]._segment, parameter[name[segment]]]]]
return[call[name[url_class], parameter[name[base], name[path], name[segments], name[defaults]]]] | keyword[def] identifier[URL] ( identifier[base] , identifier[path] , identifier[segments] = keyword[None] , identifier[defaults] = keyword[None] ):
literal[string]
identifier[url_class] = identifier[type] ( identifier[Segments] . identifier[__name__] , identifier[Segments] . identifier[__bases__] ,
identifier[dict] ( identifier[Segments] . identifier[__dict__] ))
identifier[segments] =[] keyword[if] identifier[segments] keyword[is] keyword[None] keyword[else] identifier[segments]
identifier[defaults] =[] keyword[if] identifier[defaults] keyword[is] keyword[None] keyword[else] identifier[defaults]
keyword[for] identifier[segment] keyword[in] identifier[segments] :
identifier[setattr] ( identifier[url_class] , identifier[segment] , identifier[url_class] . identifier[_segment] ( identifier[segment] ))
keyword[return] identifier[url_class] ( identifier[base] , identifier[path] , identifier[segments] , identifier[defaults] ) | def URL(base, path, segments=None, defaults=None):
"""
URL segment handler capable of getting and setting segments by name. The
URL is constructed by joining base, path and segments.
For each segment a property capable of getting and setting that segment is
created dynamically.
"""
# Make a copy of the Segments class
url_class = type(Segments.__name__, Segments.__bases__, dict(Segments.__dict__))
segments = [] if segments is None else segments
defaults = [] if defaults is None else defaults
# For each segment attach a property capable of getting and setting it
for segment in segments:
setattr(url_class, segment, url_class._segment(segment)) # depends on [control=['for'], data=['segment']]
# Instantiate the class with the actual parameters
return url_class(base, path, segments, defaults) |
def list(self, date_created_after=values.unset,
         date_created_before=values.unset, track=values.unset,
         publisher=values.unset, kind=values.unset, limit=None, page_size=None):
    """
    Eagerly fetch SubscribedTrackInstance records from the API as a list.

    Unlike stream(), this operation loads up to `limit` records into memory
    before returning.

    :param datetime date_created_after: The date_created_after
    :param datetime date_created_before: The date_created_before
    :param unicode track: The track
    :param unicode publisher: The publisher
    :param SubscribedTrackInstance.Kind kind: The kind
    :param int limit: Upper limit for the number of records to return. list() guarantees
                      never to return more than limit.  Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records.  If no page_size is defined
                          but a limit is defined, list() will attempt to read the limit
                          with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance]
    """
    record_stream = self.stream(
        date_created_after=date_created_after,
        date_created_before=date_created_before,
        track=track,
        publisher=publisher,
        kind=kind,
        limit=limit,
        page_size=page_size,
    )
    # Materialize the lazy stream into a concrete list.
    return [record for record in record_stream]
constant[
Lists SubscribedTrackInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param datetime date_created_after: The date_created_after
:param datetime date_created_before: The date_created_before
:param unicode track: The track
:param unicode publisher: The publisher
:param SubscribedTrackInstance.Kind kind: The kind
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance]
]
return[call[name[list], parameter[call[name[self].stream, parameter[]]]]] | keyword[def] identifier[list] ( identifier[self] , identifier[date_created_after] = identifier[values] . identifier[unset] ,
identifier[date_created_before] = identifier[values] . identifier[unset] , identifier[track] = identifier[values] . identifier[unset] ,
identifier[publisher] = identifier[values] . identifier[unset] , identifier[kind] = identifier[values] . identifier[unset] , identifier[limit] = keyword[None] , identifier[page_size] = keyword[None] ):
literal[string]
keyword[return] identifier[list] ( identifier[self] . identifier[stream] (
identifier[date_created_after] = identifier[date_created_after] ,
identifier[date_created_before] = identifier[date_created_before] ,
identifier[track] = identifier[track] ,
identifier[publisher] = identifier[publisher] ,
identifier[kind] = identifier[kind] ,
identifier[limit] = identifier[limit] ,
identifier[page_size] = identifier[page_size] ,
)) | def list(self, date_created_after=values.unset, date_created_before=values.unset, track=values.unset, publisher=values.unset, kind=values.unset, limit=None, page_size=None):
"""
Lists SubscribedTrackInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param datetime date_created_after: The date_created_after
:param datetime date_created_before: The date_created_before
:param unicode track: The track
:param unicode publisher: The publisher
:param SubscribedTrackInstance.Kind kind: The kind
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance]
"""
return list(self.stream(date_created_after=date_created_after, date_created_before=date_created_before, track=track, publisher=publisher, kind=kind, limit=limit, page_size=page_size)) |
def path_exists_or_creatable_portable(pathname: str) -> bool:
    """OS-portable check for whether current path exists or is creatable.

    This function is guaranteed to _never_ raise exceptions.

    Returns
    ------
    `True` if the passed pathname is a valid pathname on the current OS _and_
    either currently exists or is hypothetically creatable in a cross-platform
    manner optimized for POSIX-unfriendly filesystems; `False` otherwise.
    """
    try:
        # Validate first so the "os" module calls below never see a malformed
        # pathname and raise undesirable exceptions.
        if not is_pathname_valid(pathname):
            return False
        return os.path.exists(pathname) or is_path_sibling_creatable(pathname)
    except OSError:
        # Non-fatal filesystem complaints (e.g., connection timeouts,
        # permissions issues) imply this path to be inaccessible. All other
        # exceptions are unrelated fatal issues and are not caught here.
        return False
constant[OS-portable check for whether current path exists or is creatable.
This function is guaranteed to _never_ raise exceptions.
Returns
------
`True` if the passed pathname is a valid pathname on the current OS _and_
either currently exists or is hypothetically creatable in a cross-platform
manner optimized for POSIX-unfriendly filesystems; `False` otherwise.
]
<ast.Try object at 0x7da1b1443640> | keyword[def] identifier[path_exists_or_creatable_portable] ( identifier[pathname] : identifier[str] )-> identifier[bool] :
literal[string]
keyword[try] :
keyword[return] identifier[is_pathname_valid] ( identifier[pathname] ) keyword[and] (
identifier[os] . identifier[path] . identifier[exists] ( identifier[pathname] ) keyword[or] identifier[is_path_sibling_creatable] ( identifier[pathname] ))
keyword[except] identifier[OSError] :
keyword[return] keyword[False] | def path_exists_or_creatable_portable(pathname: str) -> bool:
"""OS-portable check for whether current path exists or is creatable.
This function is guaranteed to _never_ raise exceptions.
Returns
------
`True` if the passed pathname is a valid pathname on the current OS _and_
either currently exists or is hypothetically creatable in a cross-platform
manner optimized for POSIX-unfriendly filesystems; `False` otherwise.
"""
try:
# To prevent "os" module calls from raising undesirable exceptions on
# invalid pathnames, is_pathname_valid() is explicitly called first.
return is_pathname_valid(pathname) and (os.path.exists(pathname) or is_path_sibling_creatable(pathname)) # depends on [control=['try'], data=[]]
# Report failure on non-fatal filesystem complaints (e.g., connection
# timeouts, permissions issues) implying this path to be inaccessible. All
# other exceptions are unrelated fatal issues and should not be caught
# here.
except OSError:
return False # depends on [control=['except'], data=[]] |
def _read_byte(self):
    """Read a single byte from the configured input (serial, TCP, or file).

    The byte is echoed to both the module logger and the instance debug
    channel, and appended to the bounded ``_logdata`` history buffer.

    :return: the byte read, as a length-1 ``bytes`` object
    """
    to_return = ""
    if (self._mode == PROP_MODE_SERIAL):
        to_return = self._serial.read(1)
    elif (self._mode == PROP_MODE_TCP):
        to_return = self._socket.recv(1)
    elif (self._mode == PROP_MODE_FILE):
        # File mode stores one decimal byte value per line.
        to_return = struct.pack("B", int(self._file.readline()))
    # NOTE(review): if self._mode matches none of the branches, or a
    # serial/socket read returns an empty result (timeout/closed peer),
    # ord() below raises — confirm callers guarantee a byte is available.
    byte_value = ord(to_return)
    # Lazy %-style args avoid building the message when DEBUG is disabled.
    _LOGGER.debug("READ: %s", byte_value)
    self._logdata.append(byte_value)
    if (len(self._logdata) > self._logdatalen):
        # Trim the history so only the newest _logdatalen entries remain.
        self._logdata = self._logdata[len(self._logdata) - self._logdatalen:]
    self._debug(PROP_LOGLEVEL_TRACE, "READ: " + str(byte_value))
    return to_return
constant[Read a byte from input.]
variable[to_return] assign[=] constant[]
if compare[name[self]._mode equal[==] name[PROP_MODE_SERIAL]] begin[:]
variable[to_return] assign[=] call[name[self]._serial.read, parameter[constant[1]]]
call[name[_LOGGER].debug, parameter[binary_operation[constant[READ: ] + call[name[str], parameter[call[name[ord], parameter[name[to_return]]]]]]]]
call[name[self]._logdata.append, parameter[call[name[ord], parameter[name[to_return]]]]]
if compare[call[name[len], parameter[name[self]._logdata]] greater[>] name[self]._logdatalen] begin[:]
name[self]._logdata assign[=] call[name[self]._logdata][<ast.Slice object at 0x7da1b2864850>]
call[name[self]._debug, parameter[name[PROP_LOGLEVEL_TRACE], binary_operation[constant[READ: ] + call[name[str], parameter[call[name[ord], parameter[name[to_return]]]]]]]]
return[name[to_return]] | keyword[def] identifier[_read_byte] ( identifier[self] ):
literal[string]
identifier[to_return] = literal[string]
keyword[if] ( identifier[self] . identifier[_mode] == identifier[PROP_MODE_SERIAL] ):
identifier[to_return] = identifier[self] . identifier[_serial] . identifier[read] ( literal[int] )
keyword[elif] ( identifier[self] . identifier[_mode] == identifier[PROP_MODE_TCP] ):
identifier[to_return] = identifier[self] . identifier[_socket] . identifier[recv] ( literal[int] )
keyword[elif] ( identifier[self] . identifier[_mode] == identifier[PROP_MODE_FILE] ):
identifier[to_return] = identifier[struct] . identifier[pack] ( literal[string] , identifier[int] ( identifier[self] . identifier[_file] . identifier[readline] ()))
identifier[_LOGGER] . identifier[debug] ( literal[string] + identifier[str] ( identifier[ord] ( identifier[to_return] )))
identifier[self] . identifier[_logdata] . identifier[append] ( identifier[ord] ( identifier[to_return] ))
keyword[if] ( identifier[len] ( identifier[self] . identifier[_logdata] )> identifier[self] . identifier[_logdatalen] ):
identifier[self] . identifier[_logdata] = identifier[self] . identifier[_logdata] [ identifier[len] ( identifier[self] . identifier[_logdata] )- identifier[self] . identifier[_logdatalen] :]
identifier[self] . identifier[_debug] ( identifier[PROP_LOGLEVEL_TRACE] , literal[string] + identifier[str] ( identifier[ord] ( identifier[to_return] )))
keyword[return] identifier[to_return] | def _read_byte(self):
"""Read a byte from input."""
to_return = ''
if self._mode == PROP_MODE_SERIAL:
to_return = self._serial.read(1) # depends on [control=['if'], data=[]]
elif self._mode == PROP_MODE_TCP:
to_return = self._socket.recv(1) # depends on [control=['if'], data=[]]
elif self._mode == PROP_MODE_FILE:
to_return = struct.pack('B', int(self._file.readline())) # depends on [control=['if'], data=[]]
_LOGGER.debug('READ: ' + str(ord(to_return)))
self._logdata.append(ord(to_return))
if len(self._logdata) > self._logdatalen:
self._logdata = self._logdata[len(self._logdata) - self._logdatalen:] # depends on [control=['if'], data=[]]
self._debug(PROP_LOGLEVEL_TRACE, 'READ: ' + str(ord(to_return)))
return to_return |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.