code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _set_cpu_throttling(self):
"""
Limits the CPU usage for current QEMU process.
"""
if not self.is_running():
return
try:
if sys.platform.startswith("win") and hasattr(sys, "frozen"):
cpulimit_exec = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), "cpulimit", "cpulimit.exe")
else:
cpulimit_exec = "cpulimit"
subprocess.Popen([cpulimit_exec, "--lazy", "--pid={}".format(self._process.pid), "--limit={}".format(self._cpu_throttling)], cwd=self.working_dir)
log.info("CPU throttled to {}%".format(self._cpu_throttling))
except FileNotFoundError:
raise QemuError("cpulimit could not be found, please install it or deactivate CPU throttling")
except (OSError, subprocess.SubprocessError) as e:
raise QemuError("Could not throttle CPU: {}".format(e)) | def function[_set_cpu_throttling, parameter[self]]:
constant[
Limits the CPU usage for current QEMU process.
]
if <ast.UnaryOp object at 0x7da20c6c58a0> begin[:]
return[None]
<ast.Try object at 0x7da20c6c7b80> | keyword[def] identifier[_set_cpu_throttling] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_running] ():
keyword[return]
keyword[try] :
keyword[if] identifier[sys] . identifier[platform] . identifier[startswith] ( literal[string] ) keyword[and] identifier[hasattr] ( identifier[sys] , literal[string] ):
identifier[cpulimit_exec] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[sys] . identifier[executable] )), literal[string] , literal[string] )
keyword[else] :
identifier[cpulimit_exec] = literal[string]
identifier[subprocess] . identifier[Popen] ([ identifier[cpulimit_exec] , literal[string] , literal[string] . identifier[format] ( identifier[self] . identifier[_process] . identifier[pid] ), literal[string] . identifier[format] ( identifier[self] . identifier[_cpu_throttling] )], identifier[cwd] = identifier[self] . identifier[working_dir] )
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[_cpu_throttling] ))
keyword[except] identifier[FileNotFoundError] :
keyword[raise] identifier[QemuError] ( literal[string] )
keyword[except] ( identifier[OSError] , identifier[subprocess] . identifier[SubprocessError] ) keyword[as] identifier[e] :
keyword[raise] identifier[QemuError] ( literal[string] . identifier[format] ( identifier[e] )) | def _set_cpu_throttling(self):
"""
Limits the CPU usage for current QEMU process.
"""
if not self.is_running():
return # depends on [control=['if'], data=[]]
try:
if sys.platform.startswith('win') and hasattr(sys, 'frozen'):
cpulimit_exec = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), 'cpulimit', 'cpulimit.exe') # depends on [control=['if'], data=[]]
else:
cpulimit_exec = 'cpulimit'
subprocess.Popen([cpulimit_exec, '--lazy', '--pid={}'.format(self._process.pid), '--limit={}'.format(self._cpu_throttling)], cwd=self.working_dir)
log.info('CPU throttled to {}%'.format(self._cpu_throttling)) # depends on [control=['try'], data=[]]
except FileNotFoundError:
raise QemuError('cpulimit could not be found, please install it or deactivate CPU throttling') # depends on [control=['except'], data=[]]
except (OSError, subprocess.SubprocessError) as e:
raise QemuError('Could not throttle CPU: {}'.format(e)) # depends on [control=['except'], data=['e']] |
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
with self._services.lifecycle_lock:
for service, service_thread in service_thread_map.items():
self._logger.info('terminating pantsd service: {}'.format(service))
service.terminate()
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
self._logger.info('terminating pantsd')
self._kill_switch.set() | def function[shutdown, parameter[self, service_thread_map]]:
constant[Gracefully terminate all services and kill the main PantsDaemon loop.]
with name[self]._services.lifecycle_lock begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b22a74f0>, <ast.Name object at 0x7da1b22a6f50>]]] in starred[call[name[service_thread_map].items, parameter[]]] begin[:]
call[name[self]._logger.info, parameter[call[constant[terminating pantsd service: {}].format, parameter[name[service]]]]]
call[name[service].terminate, parameter[]]
call[name[service_thread].join, parameter[name[self].JOIN_TIMEOUT_SECONDS]]
call[name[self]._logger.info, parameter[constant[terminating pantsd]]]
call[name[self]._kill_switch.set, parameter[]] | keyword[def] identifier[shutdown] ( identifier[self] , identifier[service_thread_map] ):
literal[string]
keyword[with] identifier[self] . identifier[_services] . identifier[lifecycle_lock] :
keyword[for] identifier[service] , identifier[service_thread] keyword[in] identifier[service_thread_map] . identifier[items] ():
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[service] ))
identifier[service] . identifier[terminate] ()
identifier[service_thread] . identifier[join] ( identifier[self] . identifier[JOIN_TIMEOUT_SECONDS] )
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[_kill_switch] . identifier[set] () | def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
with self._services.lifecycle_lock:
for (service, service_thread) in service_thread_map.items():
self._logger.info('terminating pantsd service: {}'.format(service))
service.terminate()
service_thread.join(self.JOIN_TIMEOUT_SECONDS) # depends on [control=['for'], data=[]]
self._logger.info('terminating pantsd')
self._kill_switch.set() # depends on [control=['with'], data=[]] |
def ASRS(self, params):
"""
ASRS [Ra,] Ra, Rc
ASRS [Ra,] Rb, #imm5_counting
Arithmetic shift right Rb by Rc or imm5_counting and store the result in Ra
imm5 counting is [1, 32]
In the register shift, the first two operands must be the same register
Ra, Rb, and Rc must be low registers
If Ra is omitted, then it is assumed to be Rb
"""
# This instruction allows for an optional destination register
# If it is omitted, then it is assumed to be Rb
# As defined in http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0662b/index.html
try:
Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_COMMA_SEPARATED, params)
except iarm.exceptions.ParsingError:
Rb, Rc = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
Ra = Rb
if self.is_register(Rc):
# ASRS Ra, Ra, Rb
self.check_arguments(low_registers=(Ra, Rc))
self.match_first_two_parameters(Ra, Rb)
def ASRS_func():
# Set the C flag, or the last shifted out bit
if (self.register[Rc] > 0) and (self.register[Rb] & (1 << (self.register[Rc] - 1))):
self.set_APSR_flag_to_value('C', 1)
else:
self.set_APSR_flag_to_value('C', 0)
if self.register[Ra] & (1 << (self._bit_width - 1)):
self.register[Ra] = (self.register[Ra] >> self.register[Rc]) | (
int('1' * self.register[Rc], 2) << (self._bit_width - self.register[Rc]))
else:
self.register[Ra] = self.register[Ra] >> self.register[Rc]
self.set_NZ_flags(self.register[Ra])
else:
# ASRS Ra, Rb, #imm5_counting
self.check_arguments(low_registers=(Ra, Rb), imm5_counting=(Rc,))
shift_amount = self.check_immediate(Rc)
def ASRS_func():
# Set the C flag, or the last shifted out bit
if self.register[Rb] & (1 << (shift_amount - 1)):
self.set_APSR_flag_to_value('C', 1)
else:
self.set_APSR_flag_to_value('C', 0)
if self.register[Ra] & (1 << (self._bit_width - 1)):
self.register[Ra] = (self.register[Ra] >> shift_amount) | (
int('1' * shift_amount, 2) << (self._bit_width - shift_amount))
else:
self.register[Ra] = self.register[Rb] >> shift_amount
self.set_NZ_flags(self.register[Ra])
return ASRS_func | def function[ASRS, parameter[self, params]]:
constant[
ASRS [Ra,] Ra, Rc
ASRS [Ra,] Rb, #imm5_counting
Arithmetic shift right Rb by Rc or imm5_counting and store the result in Ra
imm5 counting is [1, 32]
In the register shift, the first two operands must be the same register
Ra, Rb, and Rc must be low registers
If Ra is omitted, then it is assumed to be Rb
]
<ast.Try object at 0x7da18f58cfa0>
if call[name[self].is_register, parameter[name[Rc]]] begin[:]
call[name[self].check_arguments, parameter[]]
call[name[self].match_first_two_parameters, parameter[name[Ra], name[Rb]]]
def function[ASRS_func, parameter[]]:
if <ast.BoolOp object at 0x7da18f58df90> begin[:]
call[name[self].set_APSR_flag_to_value, parameter[constant[C], constant[1]]]
if binary_operation[call[name[self].register][name[Ra]] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> binary_operation[name[self]._bit_width - constant[1]]]] begin[:]
call[name[self].register][name[Ra]] assign[=] binary_operation[binary_operation[call[name[self].register][name[Ra]] <ast.RShift object at 0x7da2590d6a40> call[name[self].register][name[Rc]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[int], parameter[binary_operation[constant[1] * call[name[self].register][name[Rc]]], constant[2]]] <ast.LShift object at 0x7da2590d69e0> binary_operation[name[self]._bit_width - call[name[self].register][name[Rc]]]]]
call[name[self].set_NZ_flags, parameter[call[name[self].register][name[Ra]]]]
return[name[ASRS_func]] | keyword[def] identifier[ASRS] ( identifier[self] , identifier[params] ):
literal[string]
keyword[try] :
identifier[Ra] , identifier[Rb] , identifier[Rc] = identifier[self] . identifier[get_three_parameters] ( identifier[self] . identifier[THREE_PARAMETER_COMMA_SEPARATED] , identifier[params] )
keyword[except] identifier[iarm] . identifier[exceptions] . identifier[ParsingError] :
identifier[Rb] , identifier[Rc] = identifier[self] . identifier[get_two_parameters] ( identifier[self] . identifier[TWO_PARAMETER_COMMA_SEPARATED] , identifier[params] )
identifier[Ra] = identifier[Rb]
keyword[if] identifier[self] . identifier[is_register] ( identifier[Rc] ):
identifier[self] . identifier[check_arguments] ( identifier[low_registers] =( identifier[Ra] , identifier[Rc] ))
identifier[self] . identifier[match_first_two_parameters] ( identifier[Ra] , identifier[Rb] )
keyword[def] identifier[ASRS_func] ():
keyword[if] ( identifier[self] . identifier[register] [ identifier[Rc] ]> literal[int] ) keyword[and] ( identifier[self] . identifier[register] [ identifier[Rb] ]&( literal[int] <<( identifier[self] . identifier[register] [ identifier[Rc] ]- literal[int] ))):
identifier[self] . identifier[set_APSR_flag_to_value] ( literal[string] , literal[int] )
keyword[else] :
identifier[self] . identifier[set_APSR_flag_to_value] ( literal[string] , literal[int] )
keyword[if] identifier[self] . identifier[register] [ identifier[Ra] ]&( literal[int] <<( identifier[self] . identifier[_bit_width] - literal[int] )):
identifier[self] . identifier[register] [ identifier[Ra] ]=( identifier[self] . identifier[register] [ identifier[Ra] ]>> identifier[self] . identifier[register] [ identifier[Rc] ])|(
identifier[int] ( literal[string] * identifier[self] . identifier[register] [ identifier[Rc] ], literal[int] )<<( identifier[self] . identifier[_bit_width] - identifier[self] . identifier[register] [ identifier[Rc] ]))
keyword[else] :
identifier[self] . identifier[register] [ identifier[Ra] ]= identifier[self] . identifier[register] [ identifier[Ra] ]>> identifier[self] . identifier[register] [ identifier[Rc] ]
identifier[self] . identifier[set_NZ_flags] ( identifier[self] . identifier[register] [ identifier[Ra] ])
keyword[else] :
identifier[self] . identifier[check_arguments] ( identifier[low_registers] =( identifier[Ra] , identifier[Rb] ), identifier[imm5_counting] =( identifier[Rc] ,))
identifier[shift_amount] = identifier[self] . identifier[check_immediate] ( identifier[Rc] )
keyword[def] identifier[ASRS_func] ():
keyword[if] identifier[self] . identifier[register] [ identifier[Rb] ]&( literal[int] <<( identifier[shift_amount] - literal[int] )):
identifier[self] . identifier[set_APSR_flag_to_value] ( literal[string] , literal[int] )
keyword[else] :
identifier[self] . identifier[set_APSR_flag_to_value] ( literal[string] , literal[int] )
keyword[if] identifier[self] . identifier[register] [ identifier[Ra] ]&( literal[int] <<( identifier[self] . identifier[_bit_width] - literal[int] )):
identifier[self] . identifier[register] [ identifier[Ra] ]=( identifier[self] . identifier[register] [ identifier[Ra] ]>> identifier[shift_amount] )|(
identifier[int] ( literal[string] * identifier[shift_amount] , literal[int] )<<( identifier[self] . identifier[_bit_width] - identifier[shift_amount] ))
keyword[else] :
identifier[self] . identifier[register] [ identifier[Ra] ]= identifier[self] . identifier[register] [ identifier[Rb] ]>> identifier[shift_amount]
identifier[self] . identifier[set_NZ_flags] ( identifier[self] . identifier[register] [ identifier[Ra] ])
keyword[return] identifier[ASRS_func] | def ASRS(self, params):
"""
ASRS [Ra,] Ra, Rc
ASRS [Ra,] Rb, #imm5_counting
Arithmetic shift right Rb by Rc or imm5_counting and store the result in Ra
imm5 counting is [1, 32]
In the register shift, the first two operands must be the same register
Ra, Rb, and Rc must be low registers
If Ra is omitted, then it is assumed to be Rb
"""
# This instruction allows for an optional destination register
# If it is omitted, then it is assumed to be Rb
# As defined in http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0662b/index.html
try:
(Ra, Rb, Rc) = self.get_three_parameters(self.THREE_PARAMETER_COMMA_SEPARATED, params) # depends on [control=['try'], data=[]]
except iarm.exceptions.ParsingError:
(Rb, Rc) = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
Ra = Rb # depends on [control=['except'], data=[]]
if self.is_register(Rc):
# ASRS Ra, Ra, Rb
self.check_arguments(low_registers=(Ra, Rc))
self.match_first_two_parameters(Ra, Rb)
def ASRS_func():
# Set the C flag, or the last shifted out bit
if self.register[Rc] > 0 and self.register[Rb] & 1 << self.register[Rc] - 1:
self.set_APSR_flag_to_value('C', 1) # depends on [control=['if'], data=[]]
else:
self.set_APSR_flag_to_value('C', 0)
if self.register[Ra] & 1 << self._bit_width - 1:
self.register[Ra] = self.register[Ra] >> self.register[Rc] | int('1' * self.register[Rc], 2) << self._bit_width - self.register[Rc] # depends on [control=['if'], data=[]]
else:
self.register[Ra] = self.register[Ra] >> self.register[Rc]
self.set_NZ_flags(self.register[Ra]) # depends on [control=['if'], data=[]]
else:
# ASRS Ra, Rb, #imm5_counting
self.check_arguments(low_registers=(Ra, Rb), imm5_counting=(Rc,))
shift_amount = self.check_immediate(Rc)
def ASRS_func():
# Set the C flag, or the last shifted out bit
if self.register[Rb] & 1 << shift_amount - 1:
self.set_APSR_flag_to_value('C', 1) # depends on [control=['if'], data=[]]
else:
self.set_APSR_flag_to_value('C', 0)
if self.register[Ra] & 1 << self._bit_width - 1:
self.register[Ra] = self.register[Ra] >> shift_amount | int('1' * shift_amount, 2) << self._bit_width - shift_amount # depends on [control=['if'], data=[]]
else:
self.register[Ra] = self.register[Rb] >> shift_amount
self.set_NZ_flags(self.register[Ra])
return ASRS_func |
def get_chat_members(
self,
chat_id: Union[int, str],
offset: int = 0,
limit: int = 200,
query: str = "",
filter: str = Filters.ALL
) -> "pyrogram.ChatMembers":
"""Use this method to get a chunk of the members list of a chat.
You can get up to 200 chat members at once.
A chat can be either a basic group, a supergroup or a channel.
You must be admin to retrieve the members list of a channel (also known as "subscribers").
For a more convenient way of getting chat members see :meth:`iter_chat_members`.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
offset (``int``, *optional*):
Sequential number of the first member to be returned.
Defaults to 0 [1]_.
limit (``int``, *optional*):
Limits the number of members to be retrieved.
Defaults to 200, which is also the maximum server limit allowed per method call.
query (``str``, *optional*):
Query string to filter members based on their display names and usernames.
Defaults to "" (empty string) [2]_.
filter (``str``, *optional*):
Filter used to select the kind of members you want to retrieve. Only applicable for supergroups
and channels. It can be any of the followings:
*"all"* - all kind of members,
*"kicked"* - kicked (banned) members only,
*"restricted"* - restricted members only,
*"bots"* - bots only,
*"recent"* - recent members only,
*"administrators"* - chat administrators only.
Defaults to *"all"*.
.. [1] Server limit: on supergroups, you can get up to 10,000 members for a single query and up to 200 members
on channels.
.. [2] A query string is applicable only for *"all"*, *"kicked"* and *"restricted"* filters only.
Returns:
On success, a :obj:`ChatMembers` object is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` if you used an invalid filter or a chat_id that belongs to a user.
"""
peer = self.resolve_peer(chat_id)
if isinstance(peer, types.InputPeerChat):
return pyrogram.ChatMembers._parse(
self,
self.send(
functions.messages.GetFullChat(
chat_id=peer.chat_id
)
)
)
elif isinstance(peer, types.InputPeerChannel):
filter = filter.lower()
if filter == Filters.ALL:
filter = types.ChannelParticipantsSearch(q=query)
elif filter == Filters.KICKED:
filter = types.ChannelParticipantsKicked(q=query)
elif filter == Filters.RESTRICTED:
filter = types.ChannelParticipantsBanned(q=query)
elif filter == Filters.BOTS:
filter = types.ChannelParticipantsBots()
elif filter == Filters.RECENT:
filter = types.ChannelParticipantsRecent()
elif filter == Filters.ADMINISTRATORS:
filter = types.ChannelParticipantsAdmins()
else:
raise ValueError("Invalid filter \"{}\"".format(filter))
while True:
try:
return pyrogram.ChatMembers._parse(
self,
self.send(
functions.channels.GetParticipants(
channel=peer,
filter=filter,
offset=offset,
limit=limit,
hash=0
)
)
)
except FloodWait as e:
log.warning("Sleeping for {}s".format(e.x))
time.sleep(e.x)
else:
raise ValueError("The chat_id \"{}\" belongs to a user".format(chat_id)) | def function[get_chat_members, parameter[self, chat_id, offset, limit, query, filter]]:
constant[Use this method to get a chunk of the members list of a chat.
You can get up to 200 chat members at once.
A chat can be either a basic group, a supergroup or a channel.
You must be admin to retrieve the members list of a channel (also known as "subscribers").
For a more convenient way of getting chat members see :meth:`iter_chat_members`.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
offset (``int``, *optional*):
Sequential number of the first member to be returned.
Defaults to 0 [1]_.
limit (``int``, *optional*):
Limits the number of members to be retrieved.
Defaults to 200, which is also the maximum server limit allowed per method call.
query (``str``, *optional*):
Query string to filter members based on their display names and usernames.
Defaults to "" (empty string) [2]_.
filter (``str``, *optional*):
Filter used to select the kind of members you want to retrieve. Only applicable for supergroups
and channels. It can be any of the followings:
*"all"* - all kind of members,
*"kicked"* - kicked (banned) members only,
*"restricted"* - restricted members only,
*"bots"* - bots only,
*"recent"* - recent members only,
*"administrators"* - chat administrators only.
Defaults to *"all"*.
.. [1] Server limit: on supergroups, you can get up to 10,000 members for a single query and up to 200 members
on channels.
.. [2] A query string is applicable only for *"all"*, *"kicked"* and *"restricted"* filters only.
Returns:
On success, a :obj:`ChatMembers` object is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` if you used an invalid filter or a chat_id that belongs to a user.
]
variable[peer] assign[=] call[name[self].resolve_peer, parameter[name[chat_id]]]
if call[name[isinstance], parameter[name[peer], name[types].InputPeerChat]] begin[:]
return[call[name[pyrogram].ChatMembers._parse, parameter[name[self], call[name[self].send, parameter[call[name[functions].messages.GetFullChat, parameter[]]]]]]] | keyword[def] identifier[get_chat_members] (
identifier[self] ,
identifier[chat_id] : identifier[Union] [ identifier[int] , identifier[str] ],
identifier[offset] : identifier[int] = literal[int] ,
identifier[limit] : identifier[int] = literal[int] ,
identifier[query] : identifier[str] = literal[string] ,
identifier[filter] : identifier[str] = identifier[Filters] . identifier[ALL]
)-> literal[string] :
literal[string]
identifier[peer] = identifier[self] . identifier[resolve_peer] ( identifier[chat_id] )
keyword[if] identifier[isinstance] ( identifier[peer] , identifier[types] . identifier[InputPeerChat] ):
keyword[return] identifier[pyrogram] . identifier[ChatMembers] . identifier[_parse] (
identifier[self] ,
identifier[self] . identifier[send] (
identifier[functions] . identifier[messages] . identifier[GetFullChat] (
identifier[chat_id] = identifier[peer] . identifier[chat_id]
)
)
)
keyword[elif] identifier[isinstance] ( identifier[peer] , identifier[types] . identifier[InputPeerChannel] ):
identifier[filter] = identifier[filter] . identifier[lower] ()
keyword[if] identifier[filter] == identifier[Filters] . identifier[ALL] :
identifier[filter] = identifier[types] . identifier[ChannelParticipantsSearch] ( identifier[q] = identifier[query] )
keyword[elif] identifier[filter] == identifier[Filters] . identifier[KICKED] :
identifier[filter] = identifier[types] . identifier[ChannelParticipantsKicked] ( identifier[q] = identifier[query] )
keyword[elif] identifier[filter] == identifier[Filters] . identifier[RESTRICTED] :
identifier[filter] = identifier[types] . identifier[ChannelParticipantsBanned] ( identifier[q] = identifier[query] )
keyword[elif] identifier[filter] == identifier[Filters] . identifier[BOTS] :
identifier[filter] = identifier[types] . identifier[ChannelParticipantsBots] ()
keyword[elif] identifier[filter] == identifier[Filters] . identifier[RECENT] :
identifier[filter] = identifier[types] . identifier[ChannelParticipantsRecent] ()
keyword[elif] identifier[filter] == identifier[Filters] . identifier[ADMINISTRATORS] :
identifier[filter] = identifier[types] . identifier[ChannelParticipantsAdmins] ()
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[filter] ))
keyword[while] keyword[True] :
keyword[try] :
keyword[return] identifier[pyrogram] . identifier[ChatMembers] . identifier[_parse] (
identifier[self] ,
identifier[self] . identifier[send] (
identifier[functions] . identifier[channels] . identifier[GetParticipants] (
identifier[channel] = identifier[peer] ,
identifier[filter] = identifier[filter] ,
identifier[offset] = identifier[offset] ,
identifier[limit] = identifier[limit] ,
identifier[hash] = literal[int]
)
)
)
keyword[except] identifier[FloodWait] keyword[as] identifier[e] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[e] . identifier[x] ))
identifier[time] . identifier[sleep] ( identifier[e] . identifier[x] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[chat_id] )) | def get_chat_members(self, chat_id: Union[int, str], offset: int=0, limit: int=200, query: str='', filter: str=Filters.ALL) -> 'pyrogram.ChatMembers':
"""Use this method to get a chunk of the members list of a chat.
You can get up to 200 chat members at once.
A chat can be either a basic group, a supergroup or a channel.
You must be admin to retrieve the members list of a channel (also known as "subscribers").
For a more convenient way of getting chat members see :meth:`iter_chat_members`.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
offset (``int``, *optional*):
Sequential number of the first member to be returned.
Defaults to 0 [1]_.
limit (``int``, *optional*):
Limits the number of members to be retrieved.
Defaults to 200, which is also the maximum server limit allowed per method call.
query (``str``, *optional*):
Query string to filter members based on their display names and usernames.
Defaults to "" (empty string) [2]_.
filter (``str``, *optional*):
Filter used to select the kind of members you want to retrieve. Only applicable for supergroups
and channels. It can be any of the followings:
*"all"* - all kind of members,
*"kicked"* - kicked (banned) members only,
*"restricted"* - restricted members only,
*"bots"* - bots only,
*"recent"* - recent members only,
*"administrators"* - chat administrators only.
Defaults to *"all"*.
.. [1] Server limit: on supergroups, you can get up to 10,000 members for a single query and up to 200 members
on channels.
.. [2] A query string is applicable only for *"all"*, *"kicked"* and *"restricted"* filters only.
Returns:
On success, a :obj:`ChatMembers` object is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` if you used an invalid filter or a chat_id that belongs to a user.
"""
peer = self.resolve_peer(chat_id)
if isinstance(peer, types.InputPeerChat):
return pyrogram.ChatMembers._parse(self, self.send(functions.messages.GetFullChat(chat_id=peer.chat_id))) # depends on [control=['if'], data=[]]
elif isinstance(peer, types.InputPeerChannel):
filter = filter.lower()
if filter == Filters.ALL:
filter = types.ChannelParticipantsSearch(q=query) # depends on [control=['if'], data=['filter']]
elif filter == Filters.KICKED:
filter = types.ChannelParticipantsKicked(q=query) # depends on [control=['if'], data=['filter']]
elif filter == Filters.RESTRICTED:
filter = types.ChannelParticipantsBanned(q=query) # depends on [control=['if'], data=['filter']]
elif filter == Filters.BOTS:
filter = types.ChannelParticipantsBots() # depends on [control=['if'], data=['filter']]
elif filter == Filters.RECENT:
filter = types.ChannelParticipantsRecent() # depends on [control=['if'], data=['filter']]
elif filter == Filters.ADMINISTRATORS:
filter = types.ChannelParticipantsAdmins() # depends on [control=['if'], data=['filter']]
else:
raise ValueError('Invalid filter "{}"'.format(filter))
while True:
try:
return pyrogram.ChatMembers._parse(self, self.send(functions.channels.GetParticipants(channel=peer, filter=filter, offset=offset, limit=limit, hash=0))) # depends on [control=['try'], data=[]]
except FloodWait as e:
log.warning('Sleeping for {}s'.format(e.x))
time.sleep(e.x) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('The chat_id "{}" belongs to a user'.format(chat_id)) |
def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims,
**distribution_kwargs):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
log_prob = tf.reduce_sum(
input_tensor=log_prob, axis=self._reduce_event_indices)
log_prob += tf.cast(ildj, log_prob.dtype)
if self._is_maybe_event_override and isinstance(event_ndims, int):
tensorshape_util.set_shape(
log_prob,
tf.broadcast_static_shape(
tensorshape_util.with_rank_at_least(y.shape, 1)[:-event_ndims],
self.batch_shape))
return log_prob | def function[_finish_log_prob_for_one_fiber, parameter[self, y, x, ildj, event_ndims]]:
constant[Finish computation of log_prob on one element of the inverse image.]
variable[x] assign[=] call[name[self]._maybe_rotate_dims, parameter[name[x]]]
variable[log_prob] assign[=] call[name[self].distribution.log_prob, parameter[name[x]]]
if name[self]._is_maybe_event_override begin[:]
variable[log_prob] assign[=] call[name[tf].reduce_sum, parameter[]]
<ast.AugAssign object at 0x7da1b03e3e50>
if <ast.BoolOp object at 0x7da1b03e2fe0> begin[:]
call[name[tensorshape_util].set_shape, parameter[name[log_prob], call[name[tf].broadcast_static_shape, parameter[call[call[name[tensorshape_util].with_rank_at_least, parameter[name[y].shape, constant[1]]]][<ast.Slice object at 0x7da1b03e0340>], name[self].batch_shape]]]]
return[name[log_prob]] | keyword[def] identifier[_finish_log_prob_for_one_fiber] ( identifier[self] , identifier[y] , identifier[x] , identifier[ildj] , identifier[event_ndims] ,
** identifier[distribution_kwargs] ):
literal[string]
identifier[x] = identifier[self] . identifier[_maybe_rotate_dims] ( identifier[x] , identifier[rotate_right] = keyword[True] )
identifier[log_prob] = identifier[self] . identifier[distribution] . identifier[log_prob] ( identifier[x] ,** identifier[distribution_kwargs] )
keyword[if] identifier[self] . identifier[_is_maybe_event_override] :
identifier[log_prob] = identifier[tf] . identifier[reduce_sum] (
identifier[input_tensor] = identifier[log_prob] , identifier[axis] = identifier[self] . identifier[_reduce_event_indices] )
identifier[log_prob] += identifier[tf] . identifier[cast] ( identifier[ildj] , identifier[log_prob] . identifier[dtype] )
keyword[if] identifier[self] . identifier[_is_maybe_event_override] keyword[and] identifier[isinstance] ( identifier[event_ndims] , identifier[int] ):
identifier[tensorshape_util] . identifier[set_shape] (
identifier[log_prob] ,
identifier[tf] . identifier[broadcast_static_shape] (
identifier[tensorshape_util] . identifier[with_rank_at_least] ( identifier[y] . identifier[shape] , literal[int] )[:- identifier[event_ndims] ],
identifier[self] . identifier[batch_shape] ))
keyword[return] identifier[log_prob] | def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims, **distribution_kwargs):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
log_prob = tf.reduce_sum(input_tensor=log_prob, axis=self._reduce_event_indices) # depends on [control=['if'], data=[]]
log_prob += tf.cast(ildj, log_prob.dtype)
if self._is_maybe_event_override and isinstance(event_ndims, int):
tensorshape_util.set_shape(log_prob, tf.broadcast_static_shape(tensorshape_util.with_rank_at_least(y.shape, 1)[:-event_ndims], self.batch_shape)) # depends on [control=['if'], data=[]]
return log_prob |
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._changed['pos']:
self.pos_buf.set_data(self._pos)
self._changed['pos'] = False
if self._changed['color']:
self.color_buf.set_data(self._color)
self._program.vert['color'] = self.color_buf
self._changed['color'] = False
return True | def function[_prepare_draw, parameter[self, view]]:
constant[This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
]
if call[name[self]._changed][constant[pos]] begin[:]
call[name[self].pos_buf.set_data, parameter[name[self]._pos]]
call[name[self]._changed][constant[pos]] assign[=] constant[False]
if call[name[self]._changed][constant[color]] begin[:]
call[name[self].color_buf.set_data, parameter[name[self]._color]]
call[name[self]._program.vert][constant[color]] assign[=] name[self].color_buf
call[name[self]._changed][constant[color]] assign[=] constant[False]
return[constant[True]] | keyword[def] identifier[_prepare_draw] ( identifier[self] , identifier[view] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_changed] [ literal[string] ]:
identifier[self] . identifier[pos_buf] . identifier[set_data] ( identifier[self] . identifier[_pos] )
identifier[self] . identifier[_changed] [ literal[string] ]= keyword[False]
keyword[if] identifier[self] . identifier[_changed] [ literal[string] ]:
identifier[self] . identifier[color_buf] . identifier[set_data] ( identifier[self] . identifier[_color] )
identifier[self] . identifier[_program] . identifier[vert] [ literal[string] ]= identifier[self] . identifier[color_buf]
identifier[self] . identifier[_changed] [ literal[string] ]= keyword[False]
keyword[return] keyword[True] | def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._changed['pos']:
self.pos_buf.set_data(self._pos)
self._changed['pos'] = False # depends on [control=['if'], data=[]]
if self._changed['color']:
self.color_buf.set_data(self._color)
self._program.vert['color'] = self.color_buf
self._changed['color'] = False # depends on [control=['if'], data=[]]
return True |
def nodeid(self, iv, quantifier=False):
    """
    Return the nodeid of the predication selected by *iv*.

    Args:
        iv: the intrinsic variable of the predication to select
        quantifier: if `True`, treat *iv* as a bound variable and
            find its quantifier; otherwise the non-quantifier will
            be returned

    Returns:
        The first matching nodeid, or `None` if no predication is
        selected by *iv*.
    """
    # Delegate to nodeids() and take the first match, if any.
    matches = self.nodeids(ivs=[iv], quantifier=quantifier)
    for found in matches:
        return found
    return None
constant[
Return the nodeid of the predication selected by *iv*.
Args:
iv: the intrinsic variable of the predication to select
quantifier: if `True`, treat *iv* as a bound variable and
find its quantifier; otherwise the non-quantifier will
be returned
]
return[call[name[next], parameter[call[name[iter], parameter[call[name[self].nodeids, parameter[]]]], constant[None]]]] | keyword[def] identifier[nodeid] ( identifier[self] , identifier[iv] , identifier[quantifier] = keyword[False] ):
literal[string]
keyword[return] identifier[next] ( identifier[iter] ( identifier[self] . identifier[nodeids] ( identifier[ivs] =[ identifier[iv] ], identifier[quantifier] = identifier[quantifier] )), keyword[None] ) | def nodeid(self, iv, quantifier=False):
"""
Return the nodeid of the predication selected by *iv*.
Args:
iv: the intrinsic variable of the predication to select
quantifier: if `True`, treat *iv* as a bound variable and
find its quantifier; otherwise the non-quantifier will
be returned
"""
return next(iter(self.nodeids(ivs=[iv], quantifier=quantifier)), None) |
def post(self, request, bot_id, id, format=None):
    """
    Add a new telegram recipient to a handler
    ---
    serializer: TelegramRecipientSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
        - code: 400
          message: Not valid request
    """
    # NOTE: the docstring above is swagger YAML consumed by the API-doc
    # generator; do not reword it casually.
    parent = super(TelegramRecipientList, self)
    return parent.post(request, bot_id, id, format)
constant[
Add a new telegram recipient to a handler
---
serializer: TelegramRecipientSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
]
return[call[call[name[super], parameter[name[TelegramRecipientList], name[self]]].post, parameter[name[request], name[bot_id], name[id], name[format]]]] | keyword[def] identifier[post] ( identifier[self] , identifier[request] , identifier[bot_id] , identifier[id] , identifier[format] = keyword[None] ):
literal[string]
keyword[return] identifier[super] ( identifier[TelegramRecipientList] , identifier[self] ). identifier[post] ( identifier[request] , identifier[bot_id] , identifier[id] , identifier[format] ) | def post(self, request, bot_id, id, format=None):
"""
Add a new telegram recipient to a handler
---
serializer: TelegramRecipientSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
return super(TelegramRecipientList, self).post(request, bot_id, id, format) |
def get_site_pattern(agent):
    """Construct a dictionary of Monomer site states from an Agent.

    This creates the mapping to the associated PySB monomer from an
    INDRA Agent object. Non-Agent inputs yield an empty pattern.
    """
    if not isinstance(agent, ist.Agent):
        return {}
    site_pattern = {}
    # Bound conditions: we assume the binding site is simply named
    # after the binding partner.
    for cond in agent.bound_conditions:
        site_pattern[get_binding_site_name(cond.agent)] = \
            ANY if cond.is_bound else None
    # Modifications: site name is residue+position (falling back to the
    # modification-type abbreviation when no residue is given).
    for mod in agent.mods:
        res_str = abbrevs[mod.mod_type]
        if mod.residue is not None:
            res_str = mod.residue
        pos_str = '' if mod.position is None else mod.position
        site_states = states[mod.mod_type]
        mod_state = site_states[1] if mod.is_modified else site_states[0]
        site_pattern['%s%s' % (res_str, pos_str)] = (mod_state, WILD)
    # Mutations: site named from the original residue (+position), with
    # the mutated residue as the site state.
    for mut in agent.mutations:
        src_res = mut.residue_from if mut.residue_from else 'mut'
        dst_res = mut.residue_to if mut.residue_to else 'X'
        if mut.position is None:
            mut_site = src_res
        else:
            mut_site = src_res + mut.position
        site_pattern[mut_site] = dst_res
    # Cellular location
    if agent.location is not None:
        site_pattern['loc'] = _n(agent.location)
    # Activity condition
    if agent.activity is not None:
        act_state = 'active' if agent.activity.is_active else 'inactive'
        site_pattern[agent.activity.activity_type] = act_state
    return site_pattern
constant[Construct a dictionary of Monomer site states from an Agent.
This crates the mapping to the associated PySB monomer from an
INDRA Agent object.]
if <ast.UnaryOp object at 0x7da20c991ff0> begin[:]
return[dictionary[[], []]]
variable[pattern] assign[=] dictionary[[], []]
for taget[name[bc]] in starred[name[agent].bound_conditions] begin[:]
if name[bc].is_bound begin[:]
call[name[pattern]][call[name[get_binding_site_name], parameter[name[bc].agent]]] assign[=] name[ANY]
for taget[name[mod]] in starred[name[agent].mods] begin[:]
variable[mod_site_str] assign[=] call[name[abbrevs]][name[mod].mod_type]
if compare[name[mod].residue is_not constant[None]] begin[:]
variable[mod_site_str] assign[=] name[mod].residue
variable[mod_pos_str] assign[=] <ast.IfExp object at 0x7da1b26ace50>
variable[mod_site] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26adf00>, <ast.Name object at 0x7da1b26ae320>]]]
variable[site_states] assign[=] call[name[states]][name[mod].mod_type]
if name[mod].is_modified begin[:]
call[name[pattern]][name[mod_site]] assign[=] tuple[[<ast.Subscript object at 0x7da1b26ac940>, <ast.Name object at 0x7da1b26ad3c0>]]
for taget[name[mc]] in starred[name[agent].mutations] begin[:]
variable[res_from] assign[=] <ast.IfExp object at 0x7da1b26ae980>
variable[res_to] assign[=] <ast.IfExp object at 0x7da1b26afbb0>
if compare[name[mc].position is constant[None]] begin[:]
variable[mut_site_name] assign[=] name[res_from]
call[name[pattern]][name[mut_site_name]] assign[=] name[res_to]
if compare[name[agent].location is_not constant[None]] begin[:]
call[name[pattern]][constant[loc]] assign[=] call[name[_n], parameter[name[agent].location]]
if compare[name[agent].activity is_not constant[None]] begin[:]
variable[active_site_name] assign[=] name[agent].activity.activity_type
if name[agent].activity.is_active begin[:]
variable[active_site_state] assign[=] constant[active]
call[name[pattern]][name[active_site_name]] assign[=] name[active_site_state]
return[name[pattern]] | keyword[def] identifier[get_site_pattern] ( identifier[agent] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[agent] , identifier[ist] . identifier[Agent] ):
keyword[return] {}
identifier[pattern] ={}
keyword[for] identifier[bc] keyword[in] identifier[agent] . identifier[bound_conditions] :
keyword[if] identifier[bc] . identifier[is_bound] :
identifier[pattern] [ identifier[get_binding_site_name] ( identifier[bc] . identifier[agent] )]= identifier[ANY]
keyword[else] :
identifier[pattern] [ identifier[get_binding_site_name] ( identifier[bc] . identifier[agent] )]= keyword[None]
keyword[for] identifier[mod] keyword[in] identifier[agent] . identifier[mods] :
identifier[mod_site_str] = identifier[abbrevs] [ identifier[mod] . identifier[mod_type] ]
keyword[if] identifier[mod] . identifier[residue] keyword[is] keyword[not] keyword[None] :
identifier[mod_site_str] = identifier[mod] . identifier[residue]
identifier[mod_pos_str] = identifier[mod] . identifier[position] keyword[if] identifier[mod] . identifier[position] keyword[is] keyword[not] keyword[None] keyword[else] literal[string]
identifier[mod_site] =( literal[string] %( identifier[mod_site_str] , identifier[mod_pos_str] ))
identifier[site_states] = identifier[states] [ identifier[mod] . identifier[mod_type] ]
keyword[if] identifier[mod] . identifier[is_modified] :
identifier[pattern] [ identifier[mod_site] ]=( identifier[site_states] [ literal[int] ], identifier[WILD] )
keyword[else] :
identifier[pattern] [ identifier[mod_site] ]=( identifier[site_states] [ literal[int] ], identifier[WILD] )
keyword[for] identifier[mc] keyword[in] identifier[agent] . identifier[mutations] :
identifier[res_from] = identifier[mc] . identifier[residue_from] keyword[if] identifier[mc] . identifier[residue_from] keyword[else] literal[string]
identifier[res_to] = identifier[mc] . identifier[residue_to] keyword[if] identifier[mc] . identifier[residue_to] keyword[else] literal[string]
keyword[if] identifier[mc] . identifier[position] keyword[is] keyword[None] :
identifier[mut_site_name] = identifier[res_from]
keyword[else] :
identifier[mut_site_name] = identifier[res_from] + identifier[mc] . identifier[position]
identifier[pattern] [ identifier[mut_site_name] ]= identifier[res_to]
keyword[if] identifier[agent] . identifier[location] keyword[is] keyword[not] keyword[None] :
identifier[pattern] [ literal[string] ]= identifier[_n] ( identifier[agent] . identifier[location] )
keyword[if] identifier[agent] . identifier[activity] keyword[is] keyword[not] keyword[None] :
identifier[active_site_name] = identifier[agent] . identifier[activity] . identifier[activity_type]
keyword[if] identifier[agent] . identifier[activity] . identifier[is_active] :
identifier[active_site_state] = literal[string]
keyword[else] :
identifier[active_site_state] = literal[string]
identifier[pattern] [ identifier[active_site_name] ]= identifier[active_site_state]
keyword[return] identifier[pattern] | def get_site_pattern(agent):
"""Construct a dictionary of Monomer site states from an Agent.
This crates the mapping to the associated PySB monomer from an
INDRA Agent object."""
if not isinstance(agent, ist.Agent):
return {} # depends on [control=['if'], data=[]]
pattern = {}
# Handle bound conditions
for bc in agent.bound_conditions:
# Here we make the assumption that the binding site
# is simply named after the binding partner
if bc.is_bound:
pattern[get_binding_site_name(bc.agent)] = ANY # depends on [control=['if'], data=[]]
else:
pattern[get_binding_site_name(bc.agent)] = None # depends on [control=['for'], data=['bc']]
# Handle modifications
for mod in agent.mods:
mod_site_str = abbrevs[mod.mod_type]
if mod.residue is not None:
mod_site_str = mod.residue # depends on [control=['if'], data=[]]
mod_pos_str = mod.position if mod.position is not None else ''
mod_site = '%s%s' % (mod_site_str, mod_pos_str)
site_states = states[mod.mod_type]
if mod.is_modified:
pattern[mod_site] = (site_states[1], WILD) # depends on [control=['if'], data=[]]
else:
pattern[mod_site] = (site_states[0], WILD) # depends on [control=['for'], data=['mod']]
# Handle mutations
for mc in agent.mutations:
res_from = mc.residue_from if mc.residue_from else 'mut'
res_to = mc.residue_to if mc.residue_to else 'X'
if mc.position is None:
mut_site_name = res_from # depends on [control=['if'], data=[]]
else:
mut_site_name = res_from + mc.position
pattern[mut_site_name] = res_to # depends on [control=['for'], data=['mc']]
# Handle location
if agent.location is not None:
pattern['loc'] = _n(agent.location) # depends on [control=['if'], data=[]]
# Handle activity
if agent.activity is not None:
active_site_name = agent.activity.activity_type
if agent.activity.is_active:
active_site_state = 'active' # depends on [control=['if'], data=[]]
else:
active_site_state = 'inactive'
pattern[active_site_name] = active_site_state # depends on [control=['if'], data=[]]
return pattern |
def fluctuability(netin, calc='global'):
    r"""
    Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_
    This is the unique number of edges through time divided by the overall number of edges.

    Parameters
    ----------
    netin : array or dict
        Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')
    calc : str
        Version of fluctuability to calculate. 'global' (currently the only option)

    Returns
    -------
    fluct : array
        Fluctuability

    Notes
    ------
    Fluctuability quantifies the variability of edges.
    Given x number of edges, F is higher when those are repeated edges among a smaller set of edges
    and lower when there are distributed across more edges.

    .. math:: F = {{\sum_{i,j} H_{i,j}} \over {\sum_{i,j,t} G_{i,j,t}}}

    where :math:`H_{i,j}` is a binary matrix where it is 1 if there is at least one t such that G_{i,j,t} = 1 (i.e. at least one temporal edge exists).

    F is not normalized which makes comparisons of F across very different networks difficult (could be added).

    Examples
    --------

    This example compares the fluctuability of two different networks with the same number of edges.
    Below two temporal networks, both with 3 nodes and 3 time-points.
    Both get 3 connections.

    >>> import teneto
    >>> import numpy as np
    >>> # Manually specify node (i,j) and temporal (t) indicies.
    >>> ind_highF_i = [0,0,1]
    >>> ind_highF_j = [1,2,2]
    >>> ind_highF_t = [1,2,2]
    >>> ind_lowF_i = [0,0,0]
    >>> ind_lowF_j = [1,1,1]
    >>> ind_lowF_t = [0,1,2]
    >>> # Define 2 networks below and set above edges to 1
    >>> G_highF = np.zeros([3,3,3])
    >>> G_lowF = np.zeros([3,3,3])
    >>> G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
    >>> G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1

    The two different networks look like this:

    .. plot::

        import teneto
        import numpy as np
        import matplotlib.pyplot as plt
        # Manually specify node (i,j) and temporal (t) indicies.
        ind_highF_i = [0,0,1]
        ind_highF_j = [1,2,2]
        ind_highF_t = [1,2,2]
        ind_lowF_i = [0,0,0]
        ind_lowF_j = [1,1,1]
        ind_lowF_t = [0,1,2]
        # Define 2 networks below and set above edges to 1
        G_highF = np.zeros([3,3,3])
        G_lowF = np.zeros([3,3,3])
        G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
        G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
        fig, ax = plt.subplots(1,2)
        teneto.plot.slice_plot(G_highF, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
        teneto.plot.slice_plot(G_lowF, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
        ax[0].set_title('G_highF')
        ax[1].set_title('G_lowF')
        ax[0].set_ylim([-0.25,2.25])
        ax[1].set_ylim([-0.25,2.25])
        plt.tight_layout()
        fig.show()

    Now calculate the fluctuability of the two networks above.

    >>> F_high = teneto.networkmeasures.fluctuability(G_highF)
    >>> F_high
    1.0

    >>> F_low = teneto.networkmeasures.fluctuability(G_lowF)
    >>> F_low
    0.3333333333333333

    Here we see that the network with more unique connections has the higher fluctuability.

    Reference
    ---------
    .. [fluct-1] Thompson et al (2017) "From static to temporal network theory applications to functional brain connectivity." Network Neuroscience, 2: 1. p.69-99 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/NETN_a_00011>`_]
    """
    # Get input type (C or G); always work on the graphlet representation.
    netin, _ = process_input(netin, ['C', 'G', 'TN'])
    # Binarize: any nonzero weight counts as an edge.
    netin[netin != 0] = 1
    # Collapse the time axis: an (i,j) pair is a "unique edge" if it
    # appears at least once across time. (The original code also reset
    # zero entries to 0 afterwards, which was a no-op and was removed.)
    unique_edges = np.sum(netin, axis=2)
    unique_edges[unique_edges > 0] = 1
    # Unique edges divided by total temporal edges.
    fluct = np.sum(unique_edges) / np.sum(netin)
    return fluct
constant[
Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_
This is the unique number of edges through time divided by the overall number of edges.
Parameters
----------
netin : array or dict
Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')
calc : str
Version of fluctuabiility to calcualte. 'global'
Returns
-------
fluct : array
Fluctuability
Notes
------
Fluctuability quantifies the variability of edges.
Given x number of edges, F is higher when those are repeated edges among a smaller set of edges
and lower when there are distributed across more edges.
.. math:: F = {{\sum_{i,j} H_{i,j}} \over {\sum_{i,j,t} G_{i,j,t}}}
where :math:`H_{i,j}` is a binary matrix where it is 1 if there is at least one t such that G_{i,j,t} = 1 (i.e. at least one temporal edge exists).
F is not normalized which makes comparisions of F across very different networks difficult (could be added).
Examples
--------
This example compares the fluctability of two different networks with the same number of edges.
Below two temporal networks, both with 3 nodes and 3 time-points.
Both get 3 connections.
>>> import teneto
>>> import numpy as np
>>> # Manually specify node (i,j) and temporal (t) indicies.
>>> ind_highF_i = [0,0,1]
>>> ind_highF_j = [1,2,2]
>>> ind_highF_t = [1,2,2]
>>> ind_lowF_i = [0,0,0]
>>> ind_lowF_j = [1,1,1]
>>> ind_lowF_t = [0,1,2]
>>> # Define 2 networks below and set above edges to 1
>>> G_highF = np.zeros([3,3,3])
>>> G_lowF = np.zeros([3,3,3])
>>> G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
>>> G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
The two different networks look like this:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
# Manually specify node (i,j) and temporal (t) indicies.
ind_highF_i = [0,0,1]
ind_highF_j = [1,2,2]
ind_highF_t = [1,2,2]
ind_lowF_i = [0,0,0]
ind_lowF_j = [1,1,1]
ind_lowF_t = [0,1,2]
# Define 2 networks below and set above edges to 1
G_highF = np.zeros([3,3,3])
G_lowF = np.zeros([3,3,3])
G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
fig, ax = plt.subplots(1,2)
teneto.plot.slice_plot(G_highF, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
teneto.plot.slice_plot(G_lowF, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
ax[0].set_title('G_highF')
ax[1].set_title('G_lowF')
ax[0].set_ylim([-0.25,2.25])
ax[1].set_ylim([-0.25,2.25])
plt.tight_layout()
fig.show()
Now calculate the fluctability of the two networks above.
>>> F_high = teneto.networkmeasures.fluctuability(G_highF)
>>> F_high
1.0
>>> F_low = teneto.networkmeasures.fluctuability(G_lowF)
>>> F_low
0.3333333333333333
Here we see that the network with more unique connections has the higher fluctuability.
Reference
---------
.. [fluct-1] Thompson et al (2017) "From static to temporal network theory applications to functional brain connectivity." Network Neuroscience, 2: 1. p.69-99 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/NETN_a_00011>`_]
]
<ast.Tuple object at 0x7da20c6ab1c0> assign[=] call[name[process_input], parameter[name[netin], list[[<ast.Constant object at 0x7da20c6ab400>, <ast.Constant object at 0x7da20c6a86a0>, <ast.Constant object at 0x7da20c6ab6a0>]]]]
call[name[netin]][compare[name[netin] not_equal[!=] constant[0]]] assign[=] constant[1]
variable[unique_edges] assign[=] call[name[np].sum, parameter[name[netin]]]
call[name[unique_edges]][compare[name[unique_edges] greater[>] constant[0]]] assign[=] constant[1]
call[name[unique_edges]][compare[name[unique_edges] equal[==] constant[0]]] assign[=] constant[0]
variable[fluct] assign[=] binary_operation[call[name[np].sum, parameter[name[unique_edges]]] / call[name[np].sum, parameter[name[netin]]]]
return[name[fluct]] | keyword[def] identifier[fluctuability] ( identifier[netin] , identifier[calc] = literal[string] ):
literal[string]
identifier[netin] , identifier[_] = identifier[process_input] ( identifier[netin] ,[ literal[string] , literal[string] , literal[string] ])
identifier[netin] [ identifier[netin] != literal[int] ]= literal[int]
identifier[unique_edges] = identifier[np] . identifier[sum] ( identifier[netin] , identifier[axis] = literal[int] )
identifier[unique_edges] [ identifier[unique_edges] > literal[int] ]= literal[int]
identifier[unique_edges] [ identifier[unique_edges] == literal[int] ]= literal[int]
identifier[fluct] =( identifier[np] . identifier[sum] ( identifier[unique_edges] ))/ identifier[np] . identifier[sum] ( identifier[netin] )
keyword[return] identifier[fluct] | def fluctuability(netin, calc='global'):
"""
Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_
This is the unique number of edges through time divided by the overall number of edges.
Parameters
----------
netin : array or dict
Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')
calc : str
Version of fluctuabiility to calcualte. 'global'
Returns
-------
fluct : array
Fluctuability
Notes
------
Fluctuability quantifies the variability of edges.
Given x number of edges, F is higher when those are repeated edges among a smaller set of edges
and lower when there are distributed across more edges.
.. math:: F = {{\\sum_{i,j} H_{i,j}} \\over {\\sum_{i,j,t} G_{i,j,t}}}
where :math:`H_{i,j}` is a binary matrix where it is 1 if there is at least one t such that G_{i,j,t} = 1 (i.e. at least one temporal edge exists).
F is not normalized which makes comparisions of F across very different networks difficult (could be added).
Examples
--------
This example compares the fluctability of two different networks with the same number of edges.
Below two temporal networks, both with 3 nodes and 3 time-points.
Both get 3 connections.
>>> import teneto
>>> import numpy as np
>>> # Manually specify node (i,j) and temporal (t) indicies.
>>> ind_highF_i = [0,0,1]
>>> ind_highF_j = [1,2,2]
>>> ind_highF_t = [1,2,2]
>>> ind_lowF_i = [0,0,0]
>>> ind_lowF_j = [1,1,1]
>>> ind_lowF_t = [0,1,2]
>>> # Define 2 networks below and set above edges to 1
>>> G_highF = np.zeros([3,3,3])
>>> G_lowF = np.zeros([3,3,3])
>>> G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
>>> G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
The two different networks look like this:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
# Manually specify node (i,j) and temporal (t) indicies.
ind_highF_i = [0,0,1]
ind_highF_j = [1,2,2]
ind_highF_t = [1,2,2]
ind_lowF_i = [0,0,0]
ind_lowF_j = [1,1,1]
ind_lowF_t = [0,1,2]
# Define 2 networks below and set above edges to 1
G_highF = np.zeros([3,3,3])
G_lowF = np.zeros([3,3,3])
G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
fig, ax = plt.subplots(1,2)
teneto.plot.slice_plot(G_highF, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
teneto.plot.slice_plot(G_lowF, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
ax[0].set_title('G_highF')
ax[1].set_title('G_lowF')
ax[0].set_ylim([-0.25,2.25])
ax[1].set_ylim([-0.25,2.25])
plt.tight_layout()
fig.show()
Now calculate the fluctability of the two networks above.
>>> F_high = teneto.networkmeasures.fluctuability(G_highF)
>>> F_high
1.0
>>> F_low = teneto.networkmeasures.fluctuability(G_lowF)
>>> F_low
0.3333333333333333
Here we see that the network with more unique connections has the higher fluctuability.
Reference
---------
.. [fluct-1] Thompson et al (2017) "From static to temporal network theory applications to functional brain connectivity." Network Neuroscience, 2: 1. p.69-99 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/NETN_a_00011>`_]
"""
# Get input type (C or G)
(netin, _) = process_input(netin, ['C', 'G', 'TN'])
netin[netin != 0] = 1
unique_edges = np.sum(netin, axis=2)
unique_edges[unique_edges > 0] = 1
unique_edges[unique_edges == 0] = 0
fluct = np.sum(unique_edges) / np.sum(netin)
return fluct |
def merge_list(list1, list2):
    """
    Merges the contents of two lists into a new list.

    All items of *list1* are kept (including duplicates), followed by
    those items of *list2* not already present in the result. Neither
    input list is modified.

    :param list1: the first list
    :type list1: list
    :param list2: the second list
    :type list2: list
    :returns: list
    """
    # Linear membership test keeps unhashable items (e.g. dicts) working.
    combined = list(list1)
    for item in list2:
        if item in combined:
            continue
        combined.append(item)
    return combined
constant[
Merges the contents of two lists into a new list.
:param list1: the first list
:type list1: list
:param list2: the second list
:type list2: list
:returns: list
]
variable[merged] assign[=] call[name[list], parameter[name[list1]]]
for taget[name[value]] in starred[name[list2]] begin[:]
if compare[name[value] <ast.NotIn object at 0x7da2590d7190> name[merged]] begin[:]
call[name[merged].append, parameter[name[value]]]
return[name[merged]] | keyword[def] identifier[merge_list] ( identifier[list1] , identifier[list2] ):
literal[string]
identifier[merged] = identifier[list] ( identifier[list1] )
keyword[for] identifier[value] keyword[in] identifier[list2] :
keyword[if] identifier[value] keyword[not] keyword[in] identifier[merged] :
identifier[merged] . identifier[append] ( identifier[value] )
keyword[return] identifier[merged] | def merge_list(list1, list2):
"""
Merges the contents of two lists into a new list.
:param list1: the first list
:type list1: list
:param list2: the second list
:type list2: list
:returns: list
"""
merged = list(list1)
for value in list2:
if value not in merged:
merged.append(value) # depends on [control=['if'], data=['value', 'merged']] # depends on [control=['for'], data=['value']]
return merged |
def execute_single(self, request):
    """
    Builds, sends and handles the response to a single request, returning
    the response.
    """
    if self.logger:
        self.logger.debug('Executing single request: %s', request)
    # The request is removed from the pending queue before being sent.
    self.removeRequest(request)
    payload = remoting.encode(self.getAMFRequest([request]),
                              strict=self.strict)
    http_request = urllib2.Request(self._root_url, payload.getvalue(),
                                   self._get_execute_headers())
    if self.proxy_args:
        http_request.set_proxy(*self.proxy_args)
    # The envelope maps request ids to their individual responses.
    return self._getResponse(http_request)[request.id]
constant[
Builds, sends and handles the response to a single request, returning
the response.
]
if name[self].logger begin[:]
call[name[self].logger.debug, parameter[constant[Executing single request: %s], name[request]]]
call[name[self].removeRequest, parameter[name[request]]]
variable[body] assign[=] call[name[remoting].encode, parameter[call[name[self].getAMFRequest, parameter[list[[<ast.Name object at 0x7da1b143ee90>]]]]]]
variable[http_request] assign[=] call[name[urllib2].Request, parameter[name[self]._root_url, call[name[body].getvalue, parameter[]], call[name[self]._get_execute_headers, parameter[]]]]
if name[self].proxy_args begin[:]
call[name[http_request].set_proxy, parameter[<ast.Starred object at 0x7da1b143e5f0>]]
variable[envelope] assign[=] call[name[self]._getResponse, parameter[name[http_request]]]
return[call[name[envelope]][name[request].id]] | keyword[def] identifier[execute_single] ( identifier[self] , identifier[request] ):
literal[string]
keyword[if] identifier[self] . identifier[logger] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] , identifier[request] )
identifier[self] . identifier[removeRequest] ( identifier[request] )
identifier[body] = identifier[remoting] . identifier[encode] ( identifier[self] . identifier[getAMFRequest] ([ identifier[request] ]), identifier[strict] = identifier[self] . identifier[strict] )
identifier[http_request] = identifier[urllib2] . identifier[Request] ( identifier[self] . identifier[_root_url] , identifier[body] . identifier[getvalue] (),
identifier[self] . identifier[_get_execute_headers] ())
keyword[if] identifier[self] . identifier[proxy_args] :
identifier[http_request] . identifier[set_proxy] (* identifier[self] . identifier[proxy_args] )
identifier[envelope] = identifier[self] . identifier[_getResponse] ( identifier[http_request] )
keyword[return] identifier[envelope] [ identifier[request] . identifier[id] ] | def execute_single(self, request):
"""
Builds, sends and handles the response to a single request, returning
the response.
"""
if self.logger:
self.logger.debug('Executing single request: %s', request) # depends on [control=['if'], data=[]]
self.removeRequest(request)
body = remoting.encode(self.getAMFRequest([request]), strict=self.strict)
http_request = urllib2.Request(self._root_url, body.getvalue(), self._get_execute_headers())
if self.proxy_args:
http_request.set_proxy(*self.proxy_args) # depends on [control=['if'], data=[]]
envelope = self._getResponse(http_request)
return envelope[request.id] |
def ping_send(self, time_usec, seq, target_system, target_component, force_mavlink1=False):
    '''
    A ping message either requesting or responding to a ping. This allows
    to measure the system latencies, including serial
    port, radio modem and UDP connections.

    time_usec                 : Unix timestamp in microseconds or since system boot if smaller than MAVLink epoch (1.1.2009) (uint64_t)
    seq                       : PING sequence (uint32_t)
    target_system             : 0: request ping from all receiving systems, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)
    target_component          : 0: request ping from all receiving components, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)

    '''
    # Encode first, then hand the packed message to the transport layer.
    msg = self.ping_encode(time_usec, seq, target_system, target_component)
    return self.send(msg, force_mavlink1=force_mavlink1)
constant[
A ping message either requesting or responding to a ping. This allows
to measure the system latencies, including serial
port, radio modem and UDP connections.
time_usec : Unix timestamp in microseconds or since system boot if smaller than MAVLink epoch (1.1.2009) (uint64_t)
seq : PING sequence (uint32_t)
target_system : 0: request ping from all receiving systems, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)
target_component : 0: request ping from all receiving components, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)
]
return[call[name[self].send, parameter[call[name[self].ping_encode, parameter[name[time_usec], name[seq], name[target_system], name[target_component]]]]]] | keyword[def] identifier[ping_send] ( identifier[self] , identifier[time_usec] , identifier[seq] , identifier[target_system] , identifier[target_component] , identifier[force_mavlink1] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[ping_encode] ( identifier[time_usec] , identifier[seq] , identifier[target_system] , identifier[target_component] ), identifier[force_mavlink1] = identifier[force_mavlink1] ) | def ping_send(self, time_usec, seq, target_system, target_component, force_mavlink1=False):
"""
A ping message either requesting or responding to a ping. This allows
to measure the system latencies, including serial
port, radio modem and UDP connections.
time_usec : Unix timestamp in microseconds or since system boot if smaller than MAVLink epoch (1.1.2009) (uint64_t)
seq : PING sequence (uint32_t)
target_system : 0: request ping from all receiving systems, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)
target_component : 0: request ping from all receiving components, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)
"""
return self.send(self.ping_encode(time_usec, seq, target_system, target_component), force_mavlink1=force_mavlink1) |
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 2:
return self.print_help()
input_file_path = self.actual_arguments[0]
output_file_path = self.actual_arguments[1]
if not self.check_input_file(input_file_path):
return self.ERROR_EXIT_CODE
if not self.check_output_file(output_file_path):
return self.ERROR_EXIT_CODE
try:
converter = FFMPEGWrapper(rconf=self.rconf, logger=self.logger)
converter.convert(input_file_path, output_file_path)
self.print_success(u"Converted '%s' into '%s'" % (input_file_path, output_file_path))
return self.NO_ERROR_EXIT_CODE
except FFMPEGPathError:
self.print_error(u"Unable to call the ffmpeg executable '%s'" % (self.rconf[RuntimeConfiguration.FFMPEG_PATH]))
self.print_error(u"Make sure the path to ffmpeg is correct")
except OSError:
self.print_error(u"Cannot convert file '%s' into '%s'" % (input_file_path, output_file_path))
self.print_error(u"Make sure the input file has a format supported by ffmpeg")
return self.ERROR_EXIT_CODE | def function[perform_command, parameter[self]]:
constant[
Perform command and return the appropriate exit code.
:rtype: int
]
if compare[call[name[len], parameter[name[self].actual_arguments]] less[<] constant[2]] begin[:]
return[call[name[self].print_help, parameter[]]]
variable[input_file_path] assign[=] call[name[self].actual_arguments][constant[0]]
variable[output_file_path] assign[=] call[name[self].actual_arguments][constant[1]]
if <ast.UnaryOp object at 0x7da207f9b8b0> begin[:]
return[name[self].ERROR_EXIT_CODE]
if <ast.UnaryOp object at 0x7da207f987f0> begin[:]
return[name[self].ERROR_EXIT_CODE]
<ast.Try object at 0x7da207f9b220>
return[name[self].ERROR_EXIT_CODE] | keyword[def] identifier[perform_command] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[actual_arguments] )< literal[int] :
keyword[return] identifier[self] . identifier[print_help] ()
identifier[input_file_path] = identifier[self] . identifier[actual_arguments] [ literal[int] ]
identifier[output_file_path] = identifier[self] . identifier[actual_arguments] [ literal[int] ]
keyword[if] keyword[not] identifier[self] . identifier[check_input_file] ( identifier[input_file_path] ):
keyword[return] identifier[self] . identifier[ERROR_EXIT_CODE]
keyword[if] keyword[not] identifier[self] . identifier[check_output_file] ( identifier[output_file_path] ):
keyword[return] identifier[self] . identifier[ERROR_EXIT_CODE]
keyword[try] :
identifier[converter] = identifier[FFMPEGWrapper] ( identifier[rconf] = identifier[self] . identifier[rconf] , identifier[logger] = identifier[self] . identifier[logger] )
identifier[converter] . identifier[convert] ( identifier[input_file_path] , identifier[output_file_path] )
identifier[self] . identifier[print_success] ( literal[string] %( identifier[input_file_path] , identifier[output_file_path] ))
keyword[return] identifier[self] . identifier[NO_ERROR_EXIT_CODE]
keyword[except] identifier[FFMPEGPathError] :
identifier[self] . identifier[print_error] ( literal[string] %( identifier[self] . identifier[rconf] [ identifier[RuntimeConfiguration] . identifier[FFMPEG_PATH] ]))
identifier[self] . identifier[print_error] ( literal[string] )
keyword[except] identifier[OSError] :
identifier[self] . identifier[print_error] ( literal[string] %( identifier[input_file_path] , identifier[output_file_path] ))
identifier[self] . identifier[print_error] ( literal[string] )
keyword[return] identifier[self] . identifier[ERROR_EXIT_CODE] | def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 2:
return self.print_help() # depends on [control=['if'], data=[]]
input_file_path = self.actual_arguments[0]
output_file_path = self.actual_arguments[1]
if not self.check_input_file(input_file_path):
return self.ERROR_EXIT_CODE # depends on [control=['if'], data=[]]
if not self.check_output_file(output_file_path):
return self.ERROR_EXIT_CODE # depends on [control=['if'], data=[]]
try:
converter = FFMPEGWrapper(rconf=self.rconf, logger=self.logger)
converter.convert(input_file_path, output_file_path)
self.print_success(u"Converted '%s' into '%s'" % (input_file_path, output_file_path))
return self.NO_ERROR_EXIT_CODE # depends on [control=['try'], data=[]]
except FFMPEGPathError:
self.print_error(u"Unable to call the ffmpeg executable '%s'" % self.rconf[RuntimeConfiguration.FFMPEG_PATH])
self.print_error(u'Make sure the path to ffmpeg is correct') # depends on [control=['except'], data=[]]
except OSError:
self.print_error(u"Cannot convert file '%s' into '%s'" % (input_file_path, output_file_path))
self.print_error(u'Make sure the input file has a format supported by ffmpeg') # depends on [control=['except'], data=[]]
return self.ERROR_EXIT_CODE |
def SURFstar_compute_scores(inst, attr, nan_entries, num_attributes, mcmap, NN_near, NN_far, headers, class_type, X, y, labels_std, data_type):
""" Unique scoring procedure for SURFstar algorithm. Scoring based on nearest neighbors within defined radius, as well as
'anti-scoring' of far instances outside of radius of current target instance"""
scores = np.zeros(num_attributes)
for feature_num in range(num_attributes):
if len(NN_near) > 0:
scores[feature_num] += compute_score(attr, mcmap, NN_near, feature_num, inst,
nan_entries, headers, class_type, X, y, labels_std, data_type)
# Note that we are using the near scoring loop in 'compute_score' and then just subtracting it here, in line with original SURF* paper.
if len(NN_far) > 0:
scores[feature_num] -= compute_score(attr, mcmap, NN_far, feature_num, inst,
nan_entries, headers, class_type, X, y, labels_std, data_type)
return scores | def function[SURFstar_compute_scores, parameter[inst, attr, nan_entries, num_attributes, mcmap, NN_near, NN_far, headers, class_type, X, y, labels_std, data_type]]:
constant[ Unique scoring procedure for SURFstar algorithm. Scoring based on nearest neighbors within defined radius, as well as
'anti-scoring' of far instances outside of radius of current target instance]
variable[scores] assign[=] call[name[np].zeros, parameter[name[num_attributes]]]
for taget[name[feature_num]] in starred[call[name[range], parameter[name[num_attributes]]]] begin[:]
if compare[call[name[len], parameter[name[NN_near]]] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b0b30490>
if compare[call[name[len], parameter[name[NN_far]]] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b0b30040>
return[name[scores]] | keyword[def] identifier[SURFstar_compute_scores] ( identifier[inst] , identifier[attr] , identifier[nan_entries] , identifier[num_attributes] , identifier[mcmap] , identifier[NN_near] , identifier[NN_far] , identifier[headers] , identifier[class_type] , identifier[X] , identifier[y] , identifier[labels_std] , identifier[data_type] ):
literal[string]
identifier[scores] = identifier[np] . identifier[zeros] ( identifier[num_attributes] )
keyword[for] identifier[feature_num] keyword[in] identifier[range] ( identifier[num_attributes] ):
keyword[if] identifier[len] ( identifier[NN_near] )> literal[int] :
identifier[scores] [ identifier[feature_num] ]+= identifier[compute_score] ( identifier[attr] , identifier[mcmap] , identifier[NN_near] , identifier[feature_num] , identifier[inst] ,
identifier[nan_entries] , identifier[headers] , identifier[class_type] , identifier[X] , identifier[y] , identifier[labels_std] , identifier[data_type] )
keyword[if] identifier[len] ( identifier[NN_far] )> literal[int] :
identifier[scores] [ identifier[feature_num] ]-= identifier[compute_score] ( identifier[attr] , identifier[mcmap] , identifier[NN_far] , identifier[feature_num] , identifier[inst] ,
identifier[nan_entries] , identifier[headers] , identifier[class_type] , identifier[X] , identifier[y] , identifier[labels_std] , identifier[data_type] )
keyword[return] identifier[scores] | def SURFstar_compute_scores(inst, attr, nan_entries, num_attributes, mcmap, NN_near, NN_far, headers, class_type, X, y, labels_std, data_type):
""" Unique scoring procedure for SURFstar algorithm. Scoring based on nearest neighbors within defined radius, as well as
'anti-scoring' of far instances outside of radius of current target instance"""
scores = np.zeros(num_attributes)
for feature_num in range(num_attributes):
if len(NN_near) > 0:
scores[feature_num] += compute_score(attr, mcmap, NN_near, feature_num, inst, nan_entries, headers, class_type, X, y, labels_std, data_type) # depends on [control=['if'], data=[]] # Note that we are using the near scoring loop in 'compute_score' and then just subtracting it here, in line with original SURF* paper.
if len(NN_far) > 0:
scores[feature_num] -= compute_score(attr, mcmap, NN_far, feature_num, inst, nan_entries, headers, class_type, X, y, labels_std, data_type) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['feature_num']]
return scores |
def match_and_print(name_list, context_name, do_approximate_matching, include_dubious, include_deprecated,
include_subtree, output):
"""Demonstrates how to read the response from a match_names query when peyotl's wrap_response option is
used.
If the context_name is not recognized, the attempt to match_names will generate a ValueError exception.
Here this is caught, and we call the tnrs/contexts web service to get the list of valid context_names
to provide the user of the script with some hints.
"""
from peyotl.sugar import tnrs
try:
# Perform the match_names, and return the peyotl wrapper around the response.
result = ot_tnrs_match_names(name_list,
context_name=context_name,
do_approximate_matching=do_approximate_matching,
include_dubious=include_dubious,
include_deprecated=include_deprecated,
tnrs_wrapper=tnrs)
except Exception as x:
msg = str(x)
if 'is not a valid context name' in msg and context_name is not None:
# Here is a wrapper around the call to get the context names
valid_contexts = tnrs.contexts()
m = 'The valid context names are the strings in the values of the following "tnrs/contexts" dict:\n'
sys.stderr.write(m)
epp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
epp.pprint(valid_contexts)
raise RuntimeError('ot-tnrs-match-names: exception raised. {}'.format(x))
# The code below demonstrates how to access the information from the response in the wrapper
# that is created by using the wrap_response option in the call
output.write('A v2/tnrs/match_names query was performed using: {} \n'.format(tnrs.endpoint))
output.write('The taxonomy being served by that server is:')
output.write(' {}'.format(result.taxonomy.source))
output.write(' by {}\n'.format(result.taxonomy.author))
output.write('Information for the taxonomy can be found at {}\n'.format(result.taxonomy.weburl))
output.write('{} out of {} queried name(s) were matched\n'.format(len(result.matched_name_ids), len(name_list)))
output.write('{} out of {} queried name(s) were unambiguously matched\n'.format(len(result.unambiguous_name_ids),
len(name_list)))
output.write('The context_name for the matched names was "{}"'.format(result.context))
if result.context_inferred:
output.write(' (this context was inferred based on the matches).\n')
else:
output.write(' (this context was supplied as an argument to speed up the name matching).\n')
output.write('The name matching result(s) used approximate/fuzzy string matching? {}\n'.format(
result.includes_approximate_matches))
output.write('The name matching result(s) included dubious names? {}\n'.format(result.includes_dubious_names))
output.write('The name matching result(s) included deprecated taxa? {}\n'.format(result.includes_deprecated_taxa))
for name in name_list:
match_tuple = result[name]
output.write('The query name "{}" produced {} result(s):\n'.format(name, len(match_tuple)))
for match_ind, match in enumerate(match_tuple):
output.write(' Match #{}\n'.format(match_ind))
output.write(' OTT ID (ot:ottId) = {}\n'.format(match.ott_id))
output.write(' name (ot:ottTaxonName) = "{}"\n'.format(match.name))
output.write(' query was matched using fuzzy/approximate string matching? {}\n'.format(
match.is_approximate_match))
output.write(' match score = {}\n'.format(match.score))
output.write(' query name is a junior synonym of this match? {}\n'.format(match.is_synonym))
output.write(' is deprecated from OTT? {}\n'.format(match.is_deprecated))
output.write(' is dubious taxon? {}\n'.format(match.is_dubious))
if match.synonyms:
output.write(' known synonyms: "{}"\n'.format('", "'.join(match.synonyms)))
else:
output.write(' known synonyms: \n')
output.write(' OTT flags for this taxon: {}\n'.format(match.flags))
output.write(' The taxonomic rank associated with this name is: {}\n'.format(match.rank))
output.write(' The nomenclatural code for this name is: {}\n'.format(match.nomenclature_code))
output.write(' The (unstable) node ID in the current taxomachine instance is: {}\n'.format(
match.taxomachine_node_id))
if len(match_tuple) == 1:
sys.stderr.write('\nOnly one match found, so we will request the info on the ancestors, too...\n')
match = match_tuple[0]
ott_id = match.ott_id
fetch_and_write_taxon_info(id_list=[ott_id], include_anc=True, list_tips=False, output=output)
if include_subtree:
from peyotl.sugar import taxonomy
subtree = taxonomy.subtree(ott_id)['subtree']
output.write('The taxononmic subtree is:\n')
output.write(subtree)
output.write('\n')
else:
if include_subtree:
sys.stderr.write(
'\nMultiple matches found - ancestor info and subtreesuppressed.\nSee ot-taxon-info.py and ot-taxon-subtree.py which can be called with an OTT ID\n')
else:
sys.stderr.write(
'\nMultiple matches found - ancestor info suppressed.\nSee ot-taxon-info.py which can be called with an OTT ID\n') | def function[match_and_print, parameter[name_list, context_name, do_approximate_matching, include_dubious, include_deprecated, include_subtree, output]]:
constant[Demonstrates how to read the response from a match_names query when peyotl's wrap_response option is
used.
If the context_name is not recognized, the attempt to match_names will generate a ValueError exception.
Here this is caught, and we call the tnrs/contexts web service to get the list of valid context_names
to provide the user of the script with some hints.
]
from relative_module[peyotl.sugar] import module[tnrs]
<ast.Try object at 0x7da18ede75e0>
call[name[output].write, parameter[call[constant[A v2/tnrs/match_names query was performed using: {}
].format, parameter[name[tnrs].endpoint]]]]
call[name[output].write, parameter[constant[The taxonomy being served by that server is:]]]
call[name[output].write, parameter[call[constant[ {}].format, parameter[name[result].taxonomy.source]]]]
call[name[output].write, parameter[call[constant[ by {}
].format, parameter[name[result].taxonomy.author]]]]
call[name[output].write, parameter[call[constant[Information for the taxonomy can be found at {}
].format, parameter[name[result].taxonomy.weburl]]]]
call[name[output].write, parameter[call[constant[{} out of {} queried name(s) were matched
].format, parameter[call[name[len], parameter[name[result].matched_name_ids]], call[name[len], parameter[name[name_list]]]]]]]
call[name[output].write, parameter[call[constant[{} out of {} queried name(s) were unambiguously matched
].format, parameter[call[name[len], parameter[name[result].unambiguous_name_ids]], call[name[len], parameter[name[name_list]]]]]]]
call[name[output].write, parameter[call[constant[The context_name for the matched names was "{}"].format, parameter[name[result].context]]]]
if name[result].context_inferred begin[:]
call[name[output].write, parameter[constant[ (this context was inferred based on the matches).
]]]
call[name[output].write, parameter[call[constant[The name matching result(s) used approximate/fuzzy string matching? {}
].format, parameter[name[result].includes_approximate_matches]]]]
call[name[output].write, parameter[call[constant[The name matching result(s) included dubious names? {}
].format, parameter[name[result].includes_dubious_names]]]]
call[name[output].write, parameter[call[constant[The name matching result(s) included deprecated taxa? {}
].format, parameter[name[result].includes_deprecated_taxa]]]]
for taget[name[name]] in starred[name[name_list]] begin[:]
variable[match_tuple] assign[=] call[name[result]][name[name]]
call[name[output].write, parameter[call[constant[The query name "{}" produced {} result(s):
].format, parameter[name[name], call[name[len], parameter[name[match_tuple]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da18ede5390>, <ast.Name object at 0x7da18ede6bc0>]]] in starred[call[name[enumerate], parameter[name[match_tuple]]]] begin[:]
call[name[output].write, parameter[call[constant[ Match #{}
].format, parameter[name[match_ind]]]]]
call[name[output].write, parameter[call[constant[ OTT ID (ot:ottId) = {}
].format, parameter[name[match].ott_id]]]]
call[name[output].write, parameter[call[constant[ name (ot:ottTaxonName) = "{}"
].format, parameter[name[match].name]]]]
call[name[output].write, parameter[call[constant[ query was matched using fuzzy/approximate string matching? {}
].format, parameter[name[match].is_approximate_match]]]]
call[name[output].write, parameter[call[constant[ match score = {}
].format, parameter[name[match].score]]]]
call[name[output].write, parameter[call[constant[ query name is a junior synonym of this match? {}
].format, parameter[name[match].is_synonym]]]]
call[name[output].write, parameter[call[constant[ is deprecated from OTT? {}
].format, parameter[name[match].is_deprecated]]]]
call[name[output].write, parameter[call[constant[ is dubious taxon? {}
].format, parameter[name[match].is_dubious]]]]
if name[match].synonyms begin[:]
call[name[output].write, parameter[call[constant[ known synonyms: "{}"
].format, parameter[call[constant[", "].join, parameter[name[match].synonyms]]]]]]
call[name[output].write, parameter[call[constant[ OTT flags for this taxon: {}
].format, parameter[name[match].flags]]]]
call[name[output].write, parameter[call[constant[ The taxonomic rank associated with this name is: {}
].format, parameter[name[match].rank]]]]
call[name[output].write, parameter[call[constant[ The nomenclatural code for this name is: {}
].format, parameter[name[match].nomenclature_code]]]]
call[name[output].write, parameter[call[constant[ The (unstable) node ID in the current taxomachine instance is: {}
].format, parameter[name[match].taxomachine_node_id]]]]
if compare[call[name[len], parameter[name[match_tuple]]] equal[==] constant[1]] begin[:]
call[name[sys].stderr.write, parameter[constant[
Only one match found, so we will request the info on the ancestors, too...
]]]
variable[match] assign[=] call[name[match_tuple]][constant[0]]
variable[ott_id] assign[=] name[match].ott_id
call[name[fetch_and_write_taxon_info], parameter[]]
if name[include_subtree] begin[:]
from relative_module[peyotl.sugar] import module[taxonomy]
variable[subtree] assign[=] call[call[name[taxonomy].subtree, parameter[name[ott_id]]]][constant[subtree]]
call[name[output].write, parameter[constant[The taxononmic subtree is:
]]]
call[name[output].write, parameter[name[subtree]]]
call[name[output].write, parameter[constant[
]]] | keyword[def] identifier[match_and_print] ( identifier[name_list] , identifier[context_name] , identifier[do_approximate_matching] , identifier[include_dubious] , identifier[include_deprecated] ,
identifier[include_subtree] , identifier[output] ):
literal[string]
keyword[from] identifier[peyotl] . identifier[sugar] keyword[import] identifier[tnrs]
keyword[try] :
identifier[result] = identifier[ot_tnrs_match_names] ( identifier[name_list] ,
identifier[context_name] = identifier[context_name] ,
identifier[do_approximate_matching] = identifier[do_approximate_matching] ,
identifier[include_dubious] = identifier[include_dubious] ,
identifier[include_deprecated] = identifier[include_deprecated] ,
identifier[tnrs_wrapper] = identifier[tnrs] )
keyword[except] identifier[Exception] keyword[as] identifier[x] :
identifier[msg] = identifier[str] ( identifier[x] )
keyword[if] literal[string] keyword[in] identifier[msg] keyword[and] identifier[context_name] keyword[is] keyword[not] keyword[None] :
identifier[valid_contexts] = identifier[tnrs] . identifier[contexts] ()
identifier[m] = literal[string]
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[m] )
identifier[epp] = identifier[pprint] . identifier[PrettyPrinter] ( identifier[indent] = literal[int] , identifier[stream] = identifier[sys] . identifier[stderr] )
identifier[epp] . identifier[pprint] ( identifier[valid_contexts] )
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[x] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[tnrs] . identifier[endpoint] ))
identifier[output] . identifier[write] ( literal[string] )
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[result] . identifier[taxonomy] . identifier[source] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[result] . identifier[taxonomy] . identifier[author] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[result] . identifier[taxonomy] . identifier[weburl] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[len] ( identifier[result] . identifier[matched_name_ids] ), identifier[len] ( identifier[name_list] )))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[len] ( identifier[result] . identifier[unambiguous_name_ids] ),
identifier[len] ( identifier[name_list] )))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[result] . identifier[context] ))
keyword[if] identifier[result] . identifier[context_inferred] :
identifier[output] . identifier[write] ( literal[string] )
keyword[else] :
identifier[output] . identifier[write] ( literal[string] )
identifier[output] . identifier[write] ( literal[string] . identifier[format] (
identifier[result] . identifier[includes_approximate_matches] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[result] . identifier[includes_dubious_names] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[result] . identifier[includes_deprecated_taxa] ))
keyword[for] identifier[name] keyword[in] identifier[name_list] :
identifier[match_tuple] = identifier[result] [ identifier[name] ]
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[name] , identifier[len] ( identifier[match_tuple] )))
keyword[for] identifier[match_ind] , identifier[match] keyword[in] identifier[enumerate] ( identifier[match_tuple] ):
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match_ind] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match] . identifier[ott_id] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match] . identifier[name] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] (
identifier[match] . identifier[is_approximate_match] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match] . identifier[score] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match] . identifier[is_synonym] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match] . identifier[is_deprecated] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match] . identifier[is_dubious] ))
keyword[if] identifier[match] . identifier[synonyms] :
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[match] . identifier[synonyms] )))
keyword[else] :
identifier[output] . identifier[write] ( literal[string] )
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match] . identifier[flags] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match] . identifier[rank] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[match] . identifier[nomenclature_code] ))
identifier[output] . identifier[write] ( literal[string] . identifier[format] (
identifier[match] . identifier[taxomachine_node_id] ))
keyword[if] identifier[len] ( identifier[match_tuple] )== literal[int] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] )
identifier[match] = identifier[match_tuple] [ literal[int] ]
identifier[ott_id] = identifier[match] . identifier[ott_id]
identifier[fetch_and_write_taxon_info] ( identifier[id_list] =[ identifier[ott_id] ], identifier[include_anc] = keyword[True] , identifier[list_tips] = keyword[False] , identifier[output] = identifier[output] )
keyword[if] identifier[include_subtree] :
keyword[from] identifier[peyotl] . identifier[sugar] keyword[import] identifier[taxonomy]
identifier[subtree] = identifier[taxonomy] . identifier[subtree] ( identifier[ott_id] )[ literal[string] ]
identifier[output] . identifier[write] ( literal[string] )
identifier[output] . identifier[write] ( identifier[subtree] )
identifier[output] . identifier[write] ( literal[string] )
keyword[else] :
keyword[if] identifier[include_subtree] :
identifier[sys] . identifier[stderr] . identifier[write] (
literal[string] )
keyword[else] :
identifier[sys] . identifier[stderr] . identifier[write] (
literal[string] ) | def match_and_print(name_list, context_name, do_approximate_matching, include_dubious, include_deprecated, include_subtree, output):
"""Demonstrates how to read the response from a match_names query when peyotl's wrap_response option is
used.
If the context_name is not recognized, the attempt to match_names will generate a ValueError exception.
Here this is caught, and we call the tnrs/contexts web service to get the list of valid context_names
to provide the user of the script with some hints.
"""
from peyotl.sugar import tnrs
try:
# Perform the match_names, and return the peyotl wrapper around the response.
result = ot_tnrs_match_names(name_list, context_name=context_name, do_approximate_matching=do_approximate_matching, include_dubious=include_dubious, include_deprecated=include_deprecated, tnrs_wrapper=tnrs) # depends on [control=['try'], data=[]]
except Exception as x:
msg = str(x)
if 'is not a valid context name' in msg and context_name is not None:
# Here is a wrapper around the call to get the context names
valid_contexts = tnrs.contexts()
m = 'The valid context names are the strings in the values of the following "tnrs/contexts" dict:\n'
sys.stderr.write(m)
epp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
epp.pprint(valid_contexts) # depends on [control=['if'], data=[]]
raise RuntimeError('ot-tnrs-match-names: exception raised. {}'.format(x)) # depends on [control=['except'], data=['x']]
# The code below demonstrates how to access the information from the response in the wrapper
# that is created by using the wrap_response option in the call
output.write('A v2/tnrs/match_names query was performed using: {} \n'.format(tnrs.endpoint))
output.write('The taxonomy being served by that server is:')
output.write(' {}'.format(result.taxonomy.source))
output.write(' by {}\n'.format(result.taxonomy.author))
output.write('Information for the taxonomy can be found at {}\n'.format(result.taxonomy.weburl))
output.write('{} out of {} queried name(s) were matched\n'.format(len(result.matched_name_ids), len(name_list)))
output.write('{} out of {} queried name(s) were unambiguously matched\n'.format(len(result.unambiguous_name_ids), len(name_list)))
output.write('The context_name for the matched names was "{}"'.format(result.context))
if result.context_inferred:
output.write(' (this context was inferred based on the matches).\n') # depends on [control=['if'], data=[]]
else:
output.write(' (this context was supplied as an argument to speed up the name matching).\n')
output.write('The name matching result(s) used approximate/fuzzy string matching? {}\n'.format(result.includes_approximate_matches))
output.write('The name matching result(s) included dubious names? {}\n'.format(result.includes_dubious_names))
output.write('The name matching result(s) included deprecated taxa? {}\n'.format(result.includes_deprecated_taxa))
for name in name_list:
match_tuple = result[name]
output.write('The query name "{}" produced {} result(s):\n'.format(name, len(match_tuple)))
for (match_ind, match) in enumerate(match_tuple):
output.write(' Match #{}\n'.format(match_ind))
output.write(' OTT ID (ot:ottId) = {}\n'.format(match.ott_id))
output.write(' name (ot:ottTaxonName) = "{}"\n'.format(match.name))
output.write(' query was matched using fuzzy/approximate string matching? {}\n'.format(match.is_approximate_match))
output.write(' match score = {}\n'.format(match.score))
output.write(' query name is a junior synonym of this match? {}\n'.format(match.is_synonym))
output.write(' is deprecated from OTT? {}\n'.format(match.is_deprecated))
output.write(' is dubious taxon? {}\n'.format(match.is_dubious))
if match.synonyms:
output.write(' known synonyms: "{}"\n'.format('", "'.join(match.synonyms))) # depends on [control=['if'], data=[]]
else:
output.write(' known synonyms: \n')
output.write(' OTT flags for this taxon: {}\n'.format(match.flags))
output.write(' The taxonomic rank associated with this name is: {}\n'.format(match.rank))
output.write(' The nomenclatural code for this name is: {}\n'.format(match.nomenclature_code))
output.write(' The (unstable) node ID in the current taxomachine instance is: {}\n'.format(match.taxomachine_node_id)) # depends on [control=['for'], data=[]]
if len(match_tuple) == 1:
sys.stderr.write('\nOnly one match found, so we will request the info on the ancestors, too...\n')
match = match_tuple[0]
ott_id = match.ott_id
fetch_and_write_taxon_info(id_list=[ott_id], include_anc=True, list_tips=False, output=output)
if include_subtree:
from peyotl.sugar import taxonomy
subtree = taxonomy.subtree(ott_id)['subtree']
output.write('The taxononmic subtree is:\n')
output.write(subtree)
output.write('\n') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif include_subtree:
sys.stderr.write('\nMultiple matches found - ancestor info and subtreesuppressed.\nSee ot-taxon-info.py and ot-taxon-subtree.py which can be called with an OTT ID\n') # depends on [control=['if'], data=[]]
else:
sys.stderr.write('\nMultiple matches found - ancestor info suppressed.\nSee ot-taxon-info.py which can be called with an OTT ID\n') # depends on [control=['for'], data=['name']] |
def init(banner, hidden, backup):
    """Initialize a manage shell in current directory
    $ manage init --banner="My awesome app shell"
    initializing manage...
    creating manage.yml
    """
    import copy  # local import: the file's import header is outside this block

    # Hidden mode writes the dot-file variant of the config filename.
    manage_file = HIDDEN_MANAGE_FILE if hidden else MANAGE_FILE
    if os.path.exists(manage_file):
        # Ask before clobbering an existing config; bail out on "no".
        if not click.confirm('Rewrite {0}?'.format(manage_file)):
            return
        if backup:
            # Preserve the current contents in a .bck_* sibling file first.
            bck = '.bck_{0}'.format(manage_file)
            with open(manage_file, 'r') as source, open(bck, 'w') as bck_file:
                bck_file.write(source.read())
    with open(manage_file, 'w') as output:
        # BUGFIX: deep-copy the template. The original aliased the shared
        # module-level default_manage_dict and then mutated it, so a banner
        # from one call leaked into every subsequent call in the process.
        data = copy.deepcopy(default_manage_dict)
        if banner:
            data['shell']['banner']['message'] = banner
        output.write(yaml.dump(data, default_flow_style=False))
constant[Initialize a manage shell in current directory
$ manage init --banner="My awesome app shell"
initializing manage...
creating manage.yml
]
variable[manage_file] assign[=] <ast.IfExp object at 0x7da1b0cf72e0>
if call[name[os].path.exists, parameter[name[manage_file]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0cf6860> begin[:]
return[None]
if name[backup] begin[:]
variable[bck] assign[=] call[constant[.bck_{0}].format, parameter[name[manage_file]]]
with call[name[open], parameter[name[manage_file], constant[r]]] begin[:]
call[name[bck_file].write, parameter[call[name[source].read, parameter[]]]]
with call[name[open], parameter[name[manage_file], constant[w]]] begin[:]
variable[data] assign[=] name[default_manage_dict]
if name[banner] begin[:]
call[call[call[name[data]][constant[shell]]][constant[banner]]][constant[message]] assign[=] name[banner]
call[name[output].write, parameter[call[name[yaml].dump, parameter[name[data]]]]] | keyword[def] identifier[init] ( identifier[banner] , identifier[hidden] , identifier[backup] ):
literal[string]
identifier[manage_file] = identifier[HIDDEN_MANAGE_FILE] keyword[if] identifier[hidden] keyword[else] identifier[MANAGE_FILE]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[manage_file] ):
keyword[if] keyword[not] identifier[click] . identifier[confirm] ( literal[string] . identifier[format] ( identifier[manage_file] )):
keyword[return]
keyword[if] identifier[backup] :
identifier[bck] = literal[string] . identifier[format] ( identifier[manage_file] )
keyword[with] identifier[open] ( identifier[manage_file] , literal[string] ) keyword[as] identifier[source] , identifier[open] ( identifier[bck] , literal[string] ) keyword[as] identifier[bck_file] :
identifier[bck_file] . identifier[write] ( identifier[source] . identifier[read] ())
keyword[with] identifier[open] ( identifier[manage_file] , literal[string] ) keyword[as] identifier[output] :
identifier[data] = identifier[default_manage_dict]
keyword[if] identifier[banner] :
identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[banner]
identifier[output] . identifier[write] ( identifier[yaml] . identifier[dump] ( identifier[data] , identifier[default_flow_style] = keyword[False] )) | def init(banner, hidden, backup):
"""Initialize a manage shell in current directory
$ manage init --banner="My awesome app shell"
initializing manage...
creating manage.yml
"""
manage_file = HIDDEN_MANAGE_FILE if hidden else MANAGE_FILE
if os.path.exists(manage_file):
if not click.confirm('Rewrite {0}?'.format(manage_file)):
return # depends on [control=['if'], data=[]]
if backup:
bck = '.bck_{0}'.format(manage_file)
with open(manage_file, 'r') as source, open(bck, 'w') as bck_file:
bck_file.write(source.read()) # depends on [control=['with'], data=['source']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
with open(manage_file, 'w') as output:
data = default_manage_dict
if banner:
data['shell']['banner']['message'] = banner # depends on [control=['if'], data=[]]
output.write(yaml.dump(data, default_flow_style=False)) # depends on [control=['with'], data=['output']] |
def cells(self):
    """The number of cells in the MOC.
    This gives the total number of cells at all orders,
    with cells from every order counted equally.
    >>> m = MOC(0, (1, 2))
    >>> m.cells
    2
    """
    # Iterating the MOC yields (order, cells) pairs; total up the
    # per-order collection sizes.  sum() over an empty iterator is 0,
    # matching the original accumulator's starting value.
    return sum(len(order_cells) for _, order_cells in self)
constant[The number of cells in the MOC.
This gives the total number of cells at all orders,
with cells from every order counted equally.
>>> m = MOC(0, (1, 2))
>>> m.cells
2
]
variable[n] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b0915600>, <ast.Name object at 0x7da1b0914a60>]]] in starred[name[self]] begin[:]
<ast.AugAssign object at 0x7da1b09154e0>
return[name[n]] | keyword[def] identifier[cells] ( identifier[self] ):
literal[string]
identifier[n] = literal[int]
keyword[for] ( identifier[order] , identifier[cells] ) keyword[in] identifier[self] :
identifier[n] += identifier[len] ( identifier[cells] )
keyword[return] identifier[n] | def cells(self):
"""The number of cells in the MOC.
This gives the total number of cells at all orders,
with cells from every order counted equally.
>>> m = MOC(0, (1, 2))
>>> m.cells
2
"""
n = 0
for (order, cells) in self:
n += len(cells) # depends on [control=['for'], data=[]]
return n |
def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10,
        attemptsBeforeDeactivation=3, path="/", statusRegex=None,
        bodyRegex=None, hostHeader=None):
    """
    Adds a health monitor to the load balancer. If a monitor already
    exists, it is updated with the supplied settings.
    """
    uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer)
    # Settings common to every monitor type.
    monitor = {
        "type": type,
        "delay": delay,
        "timeout": timeout,
        "attemptsBeforeDeactivation": attemptsBeforeDeactivation,
    }
    if type.upper().startswith("HTTP"):
        # HTTP/HTTPS monitors must match the load balancer's own protocol
        # and require the content-matching settings below.
        lb = self._get_lb(loadbalancer)
        if type.upper() != lb.protocol:
            raise exc.ProtocolMismatch("Cannot set the Health Monitor type "
                    "to '%s' when the Load Balancer's protocol is '%s'." %
                    (type, lb.protocol))
        if not (path and statusRegex and bodyRegex):
            raise exc.MissingHealthMonitorSettings("When creating an HTTP(S) "
                    "monitor, you must provide the 'path', 'statusRegex' and "
                    "'bodyRegex' parameters.")
        monitor.update({
            "path": path,
            "statusRegex": statusRegex,
            "bodyRegex": bodyRegex,
        })
        if hostHeader:
            monitor["hostHeader"] = hostHeader
    resp, body = self.api.method_put(uri, body={"healthMonitor": monitor})
    return body
constant[
Adds a health monitor to the load balancer. If a monitor already
exists, it is updated with the supplied settings.
]
variable[uri] assign[=] binary_operation[constant[/loadbalancers/%s/healthmonitor] <ast.Mod object at 0x7da2590d6920> call[name[utils].get_id, parameter[name[loadbalancer]]]]
variable[req_body] assign[=] dictionary[[<ast.Constant object at 0x7da1b056f280>], [<ast.Dict object at 0x7da1b056db40>]]
variable[uptype] assign[=] call[name[type].upper, parameter[]]
if call[name[uptype].startswith, parameter[constant[HTTP]]] begin[:]
variable[lb] assign[=] call[name[self]._get_lb, parameter[name[loadbalancer]]]
if compare[name[uptype] not_equal[!=] name[lb].protocol] begin[:]
<ast.Raise object at 0x7da1b056c100>
if <ast.UnaryOp object at 0x7da1b056e140> begin[:]
<ast.Raise object at 0x7da1b056fa00>
variable[body_hm] assign[=] call[name[req_body]][constant[healthMonitor]]
call[name[body_hm]][constant[path]] assign[=] name[path]
call[name[body_hm]][constant[statusRegex]] assign[=] name[statusRegex]
call[name[body_hm]][constant[bodyRegex]] assign[=] name[bodyRegex]
if name[hostHeader] begin[:]
call[name[body_hm]][constant[hostHeader]] assign[=] name[hostHeader]
<ast.Tuple object at 0x7da1b0559990> assign[=] call[name[self].api.method_put, parameter[name[uri]]]
return[name[body]] | keyword[def] identifier[add_health_monitor] ( identifier[self] , identifier[loadbalancer] , identifier[type] , identifier[delay] = literal[int] , identifier[timeout] = literal[int] ,
identifier[attemptsBeforeDeactivation] = literal[int] , identifier[path] = literal[string] , identifier[statusRegex] = keyword[None] ,
identifier[bodyRegex] = keyword[None] , identifier[hostHeader] = keyword[None] ):
literal[string]
identifier[uri] = literal[string] % identifier[utils] . identifier[get_id] ( identifier[loadbalancer] )
identifier[req_body] ={ literal[string] :{
literal[string] : identifier[type] ,
literal[string] : identifier[delay] ,
literal[string] : identifier[timeout] ,
literal[string] : identifier[attemptsBeforeDeactivation] ,
}}
identifier[uptype] = identifier[type] . identifier[upper] ()
keyword[if] identifier[uptype] . identifier[startswith] ( literal[string] ):
identifier[lb] = identifier[self] . identifier[_get_lb] ( identifier[loadbalancer] )
keyword[if] identifier[uptype] != identifier[lb] . identifier[protocol] :
keyword[raise] identifier[exc] . identifier[ProtocolMismatch] ( literal[string]
literal[string] %
( identifier[type] , identifier[lb] . identifier[protocol] ))
keyword[if] keyword[not] identifier[all] (( identifier[path] , identifier[statusRegex] , identifier[bodyRegex] )):
keyword[raise] identifier[exc] . identifier[MissingHealthMonitorSettings] ( literal[string]
literal[string]
literal[string] )
identifier[body_hm] = identifier[req_body] [ literal[string] ]
identifier[body_hm] [ literal[string] ]= identifier[path]
identifier[body_hm] [ literal[string] ]= identifier[statusRegex]
identifier[body_hm] [ literal[string] ]= identifier[bodyRegex]
keyword[if] identifier[hostHeader] :
identifier[body_hm] [ literal[string] ]= identifier[hostHeader]
identifier[resp] , identifier[body] = identifier[self] . identifier[api] . identifier[method_put] ( identifier[uri] , identifier[body] = identifier[req_body] )
keyword[return] identifier[body] | def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10, attemptsBeforeDeactivation=3, path='/', statusRegex=None, bodyRegex=None, hostHeader=None):
"""
Adds a health monitor to the load balancer. If a monitor already
exists, it is updated with the supplied settings.
"""
uri = '/loadbalancers/%s/healthmonitor' % utils.get_id(loadbalancer)
req_body = {'healthMonitor': {'type': type, 'delay': delay, 'timeout': timeout, 'attemptsBeforeDeactivation': attemptsBeforeDeactivation}}
uptype = type.upper()
if uptype.startswith('HTTP'):
lb = self._get_lb(loadbalancer)
if uptype != lb.protocol:
raise exc.ProtocolMismatch("Cannot set the Health Monitor type to '%s' when the Load Balancer's protocol is '%s'." % (type, lb.protocol)) # depends on [control=['if'], data=[]]
if not all((path, statusRegex, bodyRegex)):
raise exc.MissingHealthMonitorSettings("When creating an HTTP(S) monitor, you must provide the 'path', 'statusRegex' and 'bodyRegex' parameters.") # depends on [control=['if'], data=[]]
body_hm = req_body['healthMonitor']
body_hm['path'] = path
body_hm['statusRegex'] = statusRegex
body_hm['bodyRegex'] = bodyRegex
if hostHeader:
body_hm['hostHeader'] = hostHeader # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
(resp, body) = self.api.method_put(uri, body=req_body)
return body |
def get_stored_variation(self, experiment, user_profile):
""" Determine if the user has a stored variation available for the given experiment and return that.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_profile: UserProfile object representing the user's profile.
Returns:
Variation if available. None otherwise.
"""
user_id = user_profile.user_id
variation_id = user_profile.get_variation_for_experiment(experiment.id)
if variation_id:
variation = self.config.get_variation_from_id(experiment.key, variation_id)
if variation:
self.logger.info('Found a stored decision. User "%s" is in variation "%s" of experiment "%s".' % (
user_id,
variation.key,
experiment.key
))
return variation
return None | def function[get_stored_variation, parameter[self, experiment, user_profile]]:
constant[ Determine if the user has a stored variation available for the given experiment and return that.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_profile: UserProfile object representing the user's profile.
Returns:
Variation if available. None otherwise.
]
variable[user_id] assign[=] name[user_profile].user_id
variable[variation_id] assign[=] call[name[user_profile].get_variation_for_experiment, parameter[name[experiment].id]]
if name[variation_id] begin[:]
variable[variation] assign[=] call[name[self].config.get_variation_from_id, parameter[name[experiment].key, name[variation_id]]]
if name[variation] begin[:]
call[name[self].logger.info, parameter[binary_operation[constant[Found a stored decision. User "%s" is in variation "%s" of experiment "%s".] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b11ab3d0>, <ast.Attribute object at 0x7da1b11a8a30>, <ast.Attribute object at 0x7da1b11aae90>]]]]]
return[name[variation]]
return[constant[None]] | keyword[def] identifier[get_stored_variation] ( identifier[self] , identifier[experiment] , identifier[user_profile] ):
literal[string]
identifier[user_id] = identifier[user_profile] . identifier[user_id]
identifier[variation_id] = identifier[user_profile] . identifier[get_variation_for_experiment] ( identifier[experiment] . identifier[id] )
keyword[if] identifier[variation_id] :
identifier[variation] = identifier[self] . identifier[config] . identifier[get_variation_from_id] ( identifier[experiment] . identifier[key] , identifier[variation_id] )
keyword[if] identifier[variation] :
identifier[self] . identifier[logger] . identifier[info] ( literal[string] %(
identifier[user_id] ,
identifier[variation] . identifier[key] ,
identifier[experiment] . identifier[key]
))
keyword[return] identifier[variation]
keyword[return] keyword[None] | def get_stored_variation(self, experiment, user_profile):
""" Determine if the user has a stored variation available for the given experiment and return that.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_profile: UserProfile object representing the user's profile.
Returns:
Variation if available. None otherwise.
"""
user_id = user_profile.user_id
variation_id = user_profile.get_variation_for_experiment(experiment.id)
if variation_id:
variation = self.config.get_variation_from_id(experiment.key, variation_id)
if variation:
self.logger.info('Found a stored decision. User "%s" is in variation "%s" of experiment "%s".' % (user_id, variation.key, experiment.key))
return variation # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return None |
def _updatePropensities(self, lastState, lastAction, reward):
""" Update the propensities for all actions. The propensity for last
action chosen will be updated using the feedback value that resulted
from performing the action.
If j is the index of the last action chosen, r_j is the reward received
for performing j, i is the current action being updated, q_i is the
propensity for i, and phi is the recency parameter, then this update
function can be expressed as::
q_i = (1-phi) * q_i + E(i, r_j)
"""
phi = self.recency
for action in range(self.module.numActions):
carryOver = (1 - phi) * self.module.getValue(lastState, action)
experience = self._experience(lastState, action, lastAction,reward)
self.module.updateValue(lastState, action, carryOver + experience) | def function[_updatePropensities, parameter[self, lastState, lastAction, reward]]:
constant[ Update the propensities for all actions. The propensity for last
action chosen will be updated using the feedback value that resulted
from performing the action.
If j is the index of the last action chosen, r_j is the reward received
for performing j, i is the current action being updated, q_i is the
propensity for i, and phi is the recency parameter, then this update
function can be expressed as::
q_i = (1-phi) * q_i + E(i, r_j)
]
variable[phi] assign[=] name[self].recency
for taget[name[action]] in starred[call[name[range], parameter[name[self].module.numActions]]] begin[:]
variable[carryOver] assign[=] binary_operation[binary_operation[constant[1] - name[phi]] * call[name[self].module.getValue, parameter[name[lastState], name[action]]]]
variable[experience] assign[=] call[name[self]._experience, parameter[name[lastState], name[action], name[lastAction], name[reward]]]
call[name[self].module.updateValue, parameter[name[lastState], name[action], binary_operation[name[carryOver] + name[experience]]]] | keyword[def] identifier[_updatePropensities] ( identifier[self] , identifier[lastState] , identifier[lastAction] , identifier[reward] ):
literal[string]
identifier[phi] = identifier[self] . identifier[recency]
keyword[for] identifier[action] keyword[in] identifier[range] ( identifier[self] . identifier[module] . identifier[numActions] ):
identifier[carryOver] =( literal[int] - identifier[phi] )* identifier[self] . identifier[module] . identifier[getValue] ( identifier[lastState] , identifier[action] )
identifier[experience] = identifier[self] . identifier[_experience] ( identifier[lastState] , identifier[action] , identifier[lastAction] , identifier[reward] )
identifier[self] . identifier[module] . identifier[updateValue] ( identifier[lastState] , identifier[action] , identifier[carryOver] + identifier[experience] ) | def _updatePropensities(self, lastState, lastAction, reward):
""" Update the propensities for all actions. The propensity for last
action chosen will be updated using the feedback value that resulted
from performing the action.
If j is the index of the last action chosen, r_j is the reward received
for performing j, i is the current action being updated, q_i is the
propensity for i, and phi is the recency parameter, then this update
function can be expressed as::
q_i = (1-phi) * q_i + E(i, r_j)
"""
phi = self.recency
for action in range(self.module.numActions):
carryOver = (1 - phi) * self.module.getValue(lastState, action)
experience = self._experience(lastState, action, lastAction, reward)
self.module.updateValue(lastState, action, carryOver + experience) # depends on [control=['for'], data=['action']] |
def get_address(self, stream):
"""Text representation of the network address of a connection stream.
Notes
-----
This method is thread-safe
"""
try:
addr = ":".join(str(part) for part in stream.KATCPServer_address)
except AttributeError:
# Something weird happened, but keep trucking
addr = '<error>'
self._logger.warn('Could not determine address of stream',
exc_info=True)
return addr | def function[get_address, parameter[self, stream]]:
constant[Text representation of the network address of a connection stream.
Notes
-----
This method is thread-safe
]
<ast.Try object at 0x7da1b05d8040>
return[name[addr]] | keyword[def] identifier[get_address] ( identifier[self] , identifier[stream] ):
literal[string]
keyword[try] :
identifier[addr] = literal[string] . identifier[join] ( identifier[str] ( identifier[part] ) keyword[for] identifier[part] keyword[in] identifier[stream] . identifier[KATCPServer_address] )
keyword[except] identifier[AttributeError] :
identifier[addr] = literal[string]
identifier[self] . identifier[_logger] . identifier[warn] ( literal[string] ,
identifier[exc_info] = keyword[True] )
keyword[return] identifier[addr] | def get_address(self, stream):
"""Text representation of the network address of a connection stream.
Notes
-----
This method is thread-safe
"""
try:
addr = ':'.join((str(part) for part in stream.KATCPServer_address)) # depends on [control=['try'], data=[]]
except AttributeError:
# Something weird happened, but keep trucking
addr = '<error>'
self._logger.warn('Could not determine address of stream', exc_info=True) # depends on [control=['except'], data=[]]
return addr |
async def open(self):
"""Open receiver connection and authenticate session.
If the receiver is already open, this operation will do nothing.
This method will be called automatically when one starts to iterate
messages in the receiver, so there should be no need to call it directly.
A receiver opened with this method must be explicitly closed.
It is recommended to open a handler within a context manager as
opposed to calling the method directly.
.. note:: This operation is not thread-safe.
"""
if self.running:
return
self.running = True
try:
await self._handler.open_async(connection=self.connection)
self.message_iter = self._handler.receive_messages_iter_async()
while not await self._handler.auth_complete_async():
await asyncio.sleep(0.05)
await self._build_receiver()
while not await self._handler.client_ready_async():
await asyncio.sleep(0.05)
except Exception as e: # pylint: disable=broad-except
try:
await self._handle_exception(e)
except:
self.running = False
raise | <ast.AsyncFunctionDef object at 0x7da1b05be9e0> | keyword[async] keyword[def] identifier[open] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[running] :
keyword[return]
identifier[self] . identifier[running] = keyword[True]
keyword[try] :
keyword[await] identifier[self] . identifier[_handler] . identifier[open_async] ( identifier[connection] = identifier[self] . identifier[connection] )
identifier[self] . identifier[message_iter] = identifier[self] . identifier[_handler] . identifier[receive_messages_iter_async] ()
keyword[while] keyword[not] keyword[await] identifier[self] . identifier[_handler] . identifier[auth_complete_async] ():
keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
keyword[await] identifier[self] . identifier[_build_receiver] ()
keyword[while] keyword[not] keyword[await] identifier[self] . identifier[_handler] . identifier[client_ready_async] ():
keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[try] :
keyword[await] identifier[self] . identifier[_handle_exception] ( identifier[e] )
keyword[except] :
identifier[self] . identifier[running] = keyword[False]
keyword[raise] | async def open(self):
"""Open receiver connection and authenticate session.
If the receiver is already open, this operation will do nothing.
This method will be called automatically when one starts to iterate
messages in the receiver, so there should be no need to call it directly.
A receiver opened with this method must be explicitly closed.
It is recommended to open a handler within a context manager as
opposed to calling the method directly.
.. note:: This operation is not thread-safe.
"""
if self.running:
return # depends on [control=['if'], data=[]]
self.running = True
try:
await self._handler.open_async(connection=self.connection)
self.message_iter = self._handler.receive_messages_iter_async()
while not await self._handler.auth_complete_async():
await asyncio.sleep(0.05) # depends on [control=['while'], data=[]]
await self._build_receiver()
while not await self._handler.client_ready_async():
await asyncio.sleep(0.05) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e: # pylint: disable=broad-except
try:
await self._handle_exception(e) # depends on [control=['try'], data=[]]
except:
self.running = False
raise # depends on [control=['except'], data=[]] # depends on [control=['except'], data=['e']] |
def LazyField(lookup_name, scope):
"""Super non-standard stuff here. Dynamically changing the base
class using the scope and the lazy name when the class is
instantiated. This works as long as the original base class is
not directly inheriting from object (which we're not, since
our original base class is fields.Field).
"""
def __init__(self, stream=None):
base_cls = self._pfp__scope.get_id(self._pfp__lazy_name)
self.__class__.__bases__ = (base_cls,)
base_cls.__init__(self, stream)
new_class = type(lookup_name + "_lazy", (fields.Field,), {
"__init__" : __init__,
"_pfp__scope" : scope,
"_pfp__lazy_name" : lookup_name
})
return new_class | def function[LazyField, parameter[lookup_name, scope]]:
constant[Super non-standard stuff here. Dynamically changing the base
class using the scope and the lazy name when the class is
instantiated. This works as long as the original base class is
not directly inheriting from object (which we're not, since
our original base class is fields.Field).
]
def function[__init__, parameter[self, stream]]:
variable[base_cls] assign[=] call[name[self]._pfp__scope.get_id, parameter[name[self]._pfp__lazy_name]]
name[self].__class__.__bases__ assign[=] tuple[[<ast.Name object at 0x7da2041db670>]]
call[name[base_cls].__init__, parameter[name[self], name[stream]]]
variable[new_class] assign[=] call[name[type], parameter[binary_operation[name[lookup_name] + constant[_lazy]], tuple[[<ast.Attribute object at 0x7da2041db250>]], dictionary[[<ast.Constant object at 0x7da2041d8580>, <ast.Constant object at 0x7da2041da9b0>, <ast.Constant object at 0x7da2041d9690>], [<ast.Name object at 0x7da2041db3a0>, <ast.Name object at 0x7da2041d8e50>, <ast.Name object at 0x7da2041db100>]]]]
return[name[new_class]] | keyword[def] identifier[LazyField] ( identifier[lookup_name] , identifier[scope] ):
literal[string]
keyword[def] identifier[__init__] ( identifier[self] , identifier[stream] = keyword[None] ):
identifier[base_cls] = identifier[self] . identifier[_pfp__scope] . identifier[get_id] ( identifier[self] . identifier[_pfp__lazy_name] )
identifier[self] . identifier[__class__] . identifier[__bases__] =( identifier[base_cls] ,)
identifier[base_cls] . identifier[__init__] ( identifier[self] , identifier[stream] )
identifier[new_class] = identifier[type] ( identifier[lookup_name] + literal[string] ,( identifier[fields] . identifier[Field] ,),{
literal[string] : identifier[__init__] ,
literal[string] : identifier[scope] ,
literal[string] : identifier[lookup_name]
})
keyword[return] identifier[new_class] | def LazyField(lookup_name, scope):
"""Super non-standard stuff here. Dynamically changing the base
class using the scope and the lazy name when the class is
instantiated. This works as long as the original base class is
not directly inheriting from object (which we're not, since
our original base class is fields.Field).
"""
def __init__(self, stream=None):
base_cls = self._pfp__scope.get_id(self._pfp__lazy_name)
self.__class__.__bases__ = (base_cls,)
base_cls.__init__(self, stream)
new_class = type(lookup_name + '_lazy', (fields.Field,), {'__init__': __init__, '_pfp__scope': scope, '_pfp__lazy_name': lookup_name})
return new_class |
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
""" Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
"""
tensor_datasets = []
for dataset in encoded_datasets:
n_batch = len(dataset)
input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64)
mc_labels = np.zeros((n_batch,), dtype=np.int64)
for i, (story, cont1, cont2, mc_label), in enumerate(dataset):
with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
input_ids[i, 0, :len(with_cont1)] = with_cont1
input_ids[i, 1, :len(with_cont2)] = with_cont2
mc_token_ids[i, 0] = len(with_cont1) - 1
mc_token_ids[i, 1] = len(with_cont2) - 1
lm_labels[i, 0, :len(with_cont1)-1] = with_cont1[1:]
lm_labels[i, 1, :len(with_cont2)-1] = with_cont2[1:]
mc_labels[i] = mc_label
all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
return tensor_datasets | def function[pre_process_datasets, parameter[encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token]]:
constant[ Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
]
variable[tensor_datasets] assign[=] list[[]]
for taget[name[dataset]] in starred[name[encoded_datasets]] begin[:]
variable[n_batch] assign[=] call[name[len], parameter[name[dataset]]]
variable[input_ids] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c990370>, <ast.Constant object at 0x7da20c9908e0>, <ast.Name object at 0x7da20c9923e0>]]]]
variable[mc_token_ids] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c990040>, <ast.Constant object at 0x7da20c993430>]]]]
variable[lm_labels] assign[=] call[name[np].full, parameter[tuple[[<ast.Name object at 0x7da1b2345a20>, <ast.Constant object at 0x7da1b2345270>, <ast.Name object at 0x7da1b23442e0>]]]]
variable[mc_labels] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b23455d0>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b2345900>, <ast.Tuple object at 0x7da1b23450c0>]]] in starred[call[name[enumerate], parameter[name[dataset]]]] begin[:]
variable[with_cont1] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[list[[<ast.Name object at 0x7da1b23459c0>]] + call[name[story]][<ast.Slice object at 0x7da1b2345fc0>]] + list[[<ast.Name object at 0x7da1b2345480>]]] + call[name[cont1]][<ast.Slice object at 0x7da1b23451e0>]] + list[[<ast.Name object at 0x7da1b23440a0>]]]
variable[with_cont2] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[list[[<ast.Name object at 0x7da1b2344bb0>]] + call[name[story]][<ast.Slice object at 0x7da1b2345060>]] + list[[<ast.Name object at 0x7da1b23441c0>]]] + call[name[cont2]][<ast.Slice object at 0x7da1b23461a0>]] + list[[<ast.Name object at 0x7da1b2346fe0>]]]
call[name[input_ids]][tuple[[<ast.Name object at 0x7da1b2347370>, <ast.Constant object at 0x7da1b2345390>, <ast.Slice object at 0x7da1b2346320>]]] assign[=] name[with_cont1]
call[name[input_ids]][tuple[[<ast.Name object at 0x7da1b2345d80>, <ast.Constant object at 0x7da1b2347790>, <ast.Slice object at 0x7da1b2344b20>]]] assign[=] name[with_cont2]
call[name[mc_token_ids]][tuple[[<ast.Name object at 0x7da1b2347cd0>, <ast.Constant object at 0x7da1b2347280>]]] assign[=] binary_operation[call[name[len], parameter[name[with_cont1]]] - constant[1]]
call[name[mc_token_ids]][tuple[[<ast.Name object at 0x7da1b2345810>, <ast.Constant object at 0x7da1b23475e0>]]] assign[=] binary_operation[call[name[len], parameter[name[with_cont2]]] - constant[1]]
call[name[lm_labels]][tuple[[<ast.Name object at 0x7da1b2347e50>, <ast.Constant object at 0x7da1b23476a0>, <ast.Slice object at 0x7da1b2346800>]]] assign[=] call[name[with_cont1]][<ast.Slice object at 0x7da1b2345d50>]
call[name[lm_labels]][tuple[[<ast.Name object at 0x7da1b23475b0>, <ast.Constant object at 0x7da1b2344ac0>, <ast.Slice object at 0x7da1b2344790>]]] assign[=] call[name[with_cont2]][<ast.Slice object at 0x7da1b2346f50>]
call[name[mc_labels]][name[i]] assign[=] name[mc_label]
variable[all_inputs] assign[=] tuple[[<ast.Name object at 0x7da1b2345b70>, <ast.Name object at 0x7da1b23478b0>, <ast.Name object at 0x7da1b23454e0>, <ast.Name object at 0x7da1b2347b80>]]
call[name[tensor_datasets].append, parameter[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b2345d20>]]]]
return[name[tensor_datasets]] | keyword[def] identifier[pre_process_datasets] ( identifier[encoded_datasets] , identifier[input_len] , identifier[cap_length] , identifier[start_token] , identifier[delimiter_token] , identifier[clf_token] ):
literal[string]
identifier[tensor_datasets] =[]
keyword[for] identifier[dataset] keyword[in] identifier[encoded_datasets] :
identifier[n_batch] = identifier[len] ( identifier[dataset] )
identifier[input_ids] = identifier[np] . identifier[zeros] (( identifier[n_batch] , literal[int] , identifier[input_len] ), identifier[dtype] = identifier[np] . identifier[int64] )
identifier[mc_token_ids] = identifier[np] . identifier[zeros] (( identifier[n_batch] , literal[int] ), identifier[dtype] = identifier[np] . identifier[int64] )
identifier[lm_labels] = identifier[np] . identifier[full] (( identifier[n_batch] , literal[int] , identifier[input_len] ), identifier[fill_value] =- literal[int] , identifier[dtype] = identifier[np] . identifier[int64] )
identifier[mc_labels] = identifier[np] . identifier[zeros] (( identifier[n_batch] ,), identifier[dtype] = identifier[np] . identifier[int64] )
keyword[for] identifier[i] ,( identifier[story] , identifier[cont1] , identifier[cont2] , identifier[mc_label] ), keyword[in] identifier[enumerate] ( identifier[dataset] ):
identifier[with_cont1] =[ identifier[start_token] ]+ identifier[story] [: identifier[cap_length] ]+[ identifier[delimiter_token] ]+ identifier[cont1] [: identifier[cap_length] ]+[ identifier[clf_token] ]
identifier[with_cont2] =[ identifier[start_token] ]+ identifier[story] [: identifier[cap_length] ]+[ identifier[delimiter_token] ]+ identifier[cont2] [: identifier[cap_length] ]+[ identifier[clf_token] ]
identifier[input_ids] [ identifier[i] , literal[int] ,: identifier[len] ( identifier[with_cont1] )]= identifier[with_cont1]
identifier[input_ids] [ identifier[i] , literal[int] ,: identifier[len] ( identifier[with_cont2] )]= identifier[with_cont2]
identifier[mc_token_ids] [ identifier[i] , literal[int] ]= identifier[len] ( identifier[with_cont1] )- literal[int]
identifier[mc_token_ids] [ identifier[i] , literal[int] ]= identifier[len] ( identifier[with_cont2] )- literal[int]
identifier[lm_labels] [ identifier[i] , literal[int] ,: identifier[len] ( identifier[with_cont1] )- literal[int] ]= identifier[with_cont1] [ literal[int] :]
identifier[lm_labels] [ identifier[i] , literal[int] ,: identifier[len] ( identifier[with_cont2] )- literal[int] ]= identifier[with_cont2] [ literal[int] :]
identifier[mc_labels] [ identifier[i] ]= identifier[mc_label]
identifier[all_inputs] =( identifier[input_ids] , identifier[mc_token_ids] , identifier[lm_labels] , identifier[mc_labels] )
identifier[tensor_datasets] . identifier[append] ( identifier[tuple] ( identifier[torch] . identifier[tensor] ( identifier[t] ) keyword[for] identifier[t] keyword[in] identifier[all_inputs] ))
keyword[return] identifier[tensor_datasets] | def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
""" Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
"""
tensor_datasets = []
for dataset in encoded_datasets:
n_batch = len(dataset)
input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64)
mc_labels = np.zeros((n_batch,), dtype=np.int64)
for (i, (story, cont1, cont2, mc_label)) in enumerate(dataset):
with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
input_ids[i, 0, :len(with_cont1)] = with_cont1
input_ids[i, 1, :len(with_cont2)] = with_cont2
mc_token_ids[i, 0] = len(with_cont1) - 1
mc_token_ids[i, 1] = len(with_cont2) - 1
lm_labels[i, 0, :len(with_cont1) - 1] = with_cont1[1:]
lm_labels[i, 1, :len(with_cont2) - 1] = with_cont2[1:]
mc_labels[i] = mc_label # depends on [control=['for'], data=[]]
all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple((torch.tensor(t) for t in all_inputs))) # depends on [control=['for'], data=['dataset']]
return tensor_datasets |
def clients(self):
    """Return a ``Client`` instance for every client known to the API."""
    response = self.get_request('clients/')
    result = []
    for entry in response:
        result.append(Client(self, entry['client']))
    return result
constant[Generates a list of all Clients.]
variable[clients_response] assign[=] call[name[self].get_request, parameter[constant[clients/]]]
return[<ast.ListComp object at 0x7da1b1fba380>] | keyword[def] identifier[clients] ( identifier[self] ):
literal[string]
identifier[clients_response] = identifier[self] . identifier[get_request] ( literal[string] )
keyword[return] [ identifier[Client] ( identifier[self] , identifier[cjson] [ literal[string] ]) keyword[for] identifier[cjson] keyword[in] identifier[clients_response] ] | def clients(self):
"""Generates a list of all Clients."""
clients_response = self.get_request('clients/')
return [Client(self, cjson['client']) for cjson in clients_response] |
def get_package_data():
    """
    Return the ``package_data`` mapping for setup().
    For example:
        {'': ['*.txt', '*.rst'],
         'hello': ['*.msg']}
    means:
        - If any package contains *.txt or *.rst files,
          include them
        - And include any *.msg files found in
          the 'hello' package, too.
    Here every sub-directory of the baseline-images test folder is included,
    plus the bundled CSV data files.
    """
    image_root = 'plotnine/tests/baseline_images'
    patterns = ['tests/baseline_images/%s/*' % name
                for name in os.listdir(image_root)]
    patterns = patterns + ['data/*.csv']
    return {'plotnine': patterns}
constant[
Return package data
For example:
{'': ['*.txt', '*.rst'],
'hello': ['*.msg']}
means:
- If any package contains *.txt or *.rst files,
include them
- And include any *.msg files found in
the 'hello' package, too:
]
variable[baseline_images] assign[=] <ast.ListComp object at 0x7da20c795600>
variable[csv_data] assign[=] list[[<ast.Constant object at 0x7da20c794310>]]
variable[package_data] assign[=] dictionary[[<ast.Constant object at 0x7da20c796350>], [<ast.BinOp object at 0x7da20c795030>]]
return[name[package_data]] | keyword[def] identifier[get_package_data] ():
literal[string]
identifier[baseline_images] =[
literal[string] % identifier[x]
keyword[for] identifier[x] keyword[in] identifier[os] . identifier[listdir] ( literal[string] )]
identifier[csv_data] =[ literal[string] ]
identifier[package_data] ={ literal[string] : identifier[baseline_images] + identifier[csv_data] }
keyword[return] identifier[package_data] | def get_package_data():
"""
Return package data
For example:
{'': ['*.txt', '*.rst'],
'hello': ['*.msg']}
means:
- If any package contains *.txt or *.rst files,
include them
- And include any *.msg files found in
the 'hello' package, too:
"""
baseline_images = ['tests/baseline_images/%s/*' % x for x in os.listdir('plotnine/tests/baseline_images')]
csv_data = ['data/*.csv']
package_data = {'plotnine': baseline_images + csv_data}
return package_data |
def _set_view(self):
    """Assign a view to current graph."""
    # ReverseView flips the y axis; plain View is the normal orientation.
    if self.inverse_y_axis:
        view_class = ReverseView
    else:
        view_class = View
    inner_width = self.width - self.margin_box.x
    inner_height = self.height - self.margin_box.y
    self.view = view_class(inner_width, inner_height, self._box)
constant[Assign a view to current graph]
variable[view_class] assign[=] <ast.IfExp object at 0x7da20c6c4e50>
name[self].view assign[=] call[name[view_class], parameter[binary_operation[name[self].width - name[self].margin_box.x], binary_operation[name[self].height - name[self].margin_box.y], name[self]._box]] | keyword[def] identifier[_set_view] ( identifier[self] ):
literal[string]
identifier[view_class] = identifier[ReverseView] keyword[if] identifier[self] . identifier[inverse_y_axis] keyword[else] identifier[View]
identifier[self] . identifier[view] = identifier[view_class] (
identifier[self] . identifier[width] - identifier[self] . identifier[margin_box] . identifier[x] , identifier[self] . identifier[height] - identifier[self] . identifier[margin_box] . identifier[y] ,
identifier[self] . identifier[_box]
) | def _set_view(self):
"""Assign a view to current graph"""
view_class = ReverseView if self.inverse_y_axis else View
self.view = view_class(self.width - self.margin_box.x, self.height - self.margin_box.y, self._box) |
def _sim_prediction_bayes(self, h, simulations):
    """ Simulates a h-step ahead mean prediction
    Parameters
    ----------
    h : int
        How many steps ahead for the prediction
    simulations : int
        How many simulations to perform
    Returns
    ----------
    Matrix of simulations
    """
    # One row per simulation path, one column per forecast step
    # (transposed to (h, simulations) on return).
    sim_vector = np.zeros([simulations,h])
    for n in range(0,simulations):
        # Draw a single posterior sample of the latent variables for this path.
        t_z = self.draw_latent_variables(nsims=1).T[0]
        # Run the model once to get the in-sample series:
        # presumably lmda = log-volatility, Y = data, scores = score terms
        # -- TODO confirm against self._model.
        lmda, Y, scores = self._model(t_z)
        # Map each latent variable back to its constrained space via its prior.
        t_z = np.array([self.latent_variables.z_list[k].prior.transform(t_z[k]) for k in range(t_z.shape[0])])
        # Create arrays to iterate over (copies so the originals stay intact).
        lmda_exp = lmda.copy()
        scores_exp = scores.copy()
        Y_exp = Y.copy()
        # Loop over h time periods, extending the arrays one step at a time.
        for t in range(0,h):
            # Constant term of the recursion.
            new_value = t_z[0]
            # Autoregressive terms on past lmda values (p lags).
            if self.p != 0:
                for j in range(1,self.p+1):
                    new_value += t_z[j]*lmda_exp[-j]
            # Score-driven terms on past scores (q lags).
            if self.q != 0:
                for k in range(1,self.q+1):
                    new_value += t_z[k+self.p]*scores_exp[-k]
            # Optional leverage effect: sign of the last standardized-looking
            # residual scales the score term -- NOTE(review): exact residual
            # definition depends on t_z[-2]/t_z[-1] semantics, confirm.
            if self.leverage is True:
                new_value += t_z[1+self.p+self.q]*np.sign(-(Y_exp[-1]-t_z[-2]-t_z[-1]*np.exp(lmda_exp[-1]/2.0)))*(scores_exp[-1]+1)
            lmda_exp = np.append(lmda_exp,[new_value]) # For indexing consistency
            scores_exp = np.append(scores_exp,scores[np.random.randint(scores.shape[0])]) # expectation of score is zero
            Y_exp = np.append(Y_exp,Y[np.random.randint(Y.shape[0])]) # bootstrap returns
        # Keep only the h newly simulated steps for this path.
        sim_vector[n] = lmda_exp[-h:]
    return np.transpose(sim_vector)
constant[ Simulates a h-step ahead mean prediction
Parameters
----------
h : int
How many steps ahead for the prediction
simulations : int
How many simulations to perform
Returns
----------
Matrix of simulations
]
variable[sim_vector] assign[=] call[name[np].zeros, parameter[list[[<ast.Name object at 0x7da20e9b31c0>, <ast.Name object at 0x7da20e9b3790>]]]]
for taget[name[n]] in starred[call[name[range], parameter[constant[0], name[simulations]]]] begin[:]
variable[t_z] assign[=] call[call[name[self].draw_latent_variables, parameter[]].T][constant[0]]
<ast.Tuple object at 0x7da20e9b1cf0> assign[=] call[name[self]._model, parameter[name[t_z]]]
variable[t_z] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da20e9b12a0>]]
variable[lmda_exp] assign[=] call[name[lmda].copy, parameter[]]
variable[scores_exp] assign[=] call[name[scores].copy, parameter[]]
variable[Y_exp] assign[=] call[name[Y].copy, parameter[]]
for taget[name[t]] in starred[call[name[range], parameter[constant[0], name[h]]]] begin[:]
variable[new_value] assign[=] call[name[t_z]][constant[0]]
if compare[name[self].p not_equal[!=] constant[0]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[constant[1], binary_operation[name[self].p + constant[1]]]]] begin[:]
<ast.AugAssign object at 0x7da20e9b0b50>
if compare[name[self].q not_equal[!=] constant[0]] begin[:]
for taget[name[k]] in starred[call[name[range], parameter[constant[1], binary_operation[name[self].q + constant[1]]]]] begin[:]
<ast.AugAssign object at 0x7da20e9b0fd0>
if compare[name[self].leverage is constant[True]] begin[:]
<ast.AugAssign object at 0x7da20e9b2380>
variable[lmda_exp] assign[=] call[name[np].append, parameter[name[lmda_exp], list[[<ast.Name object at 0x7da20e962680>]]]]
variable[scores_exp] assign[=] call[name[np].append, parameter[name[scores_exp], call[name[scores]][call[name[np].random.randint, parameter[call[name[scores].shape][constant[0]]]]]]]
variable[Y_exp] assign[=] call[name[np].append, parameter[name[Y_exp], call[name[Y]][call[name[np].random.randint, parameter[call[name[Y].shape][constant[0]]]]]]]
call[name[sim_vector]][name[n]] assign[=] call[name[lmda_exp]][<ast.Slice object at 0x7da2049629b0>]
return[call[name[np].transpose, parameter[name[sim_vector]]]] | keyword[def] identifier[_sim_prediction_bayes] ( identifier[self] , identifier[h] , identifier[simulations] ):
literal[string]
identifier[sim_vector] = identifier[np] . identifier[zeros] ([ identifier[simulations] , identifier[h] ])
keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] , identifier[simulations] ):
identifier[t_z] = identifier[self] . identifier[draw_latent_variables] ( identifier[nsims] = literal[int] ). identifier[T] [ literal[int] ]
identifier[lmda] , identifier[Y] , identifier[scores] = identifier[self] . identifier[_model] ( identifier[t_z] )
identifier[t_z] = identifier[np] . identifier[array] ([ identifier[self] . identifier[latent_variables] . identifier[z_list] [ identifier[k] ]. identifier[prior] . identifier[transform] ( identifier[t_z] [ identifier[k] ]) keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[t_z] . identifier[shape] [ literal[int] ])])
identifier[lmda_exp] = identifier[lmda] . identifier[copy] ()
identifier[scores_exp] = identifier[scores] . identifier[copy] ()
identifier[Y_exp] = identifier[Y] . identifier[copy] ()
keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , identifier[h] ):
identifier[new_value] = identifier[t_z] [ literal[int] ]
keyword[if] identifier[self] . identifier[p] != literal[int] :
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[p] + literal[int] ):
identifier[new_value] += identifier[t_z] [ identifier[j] ]* identifier[lmda_exp] [- identifier[j] ]
keyword[if] identifier[self] . identifier[q] != literal[int] :
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[q] + literal[int] ):
identifier[new_value] += identifier[t_z] [ identifier[k] + identifier[self] . identifier[p] ]* identifier[scores_exp] [- identifier[k] ]
keyword[if] identifier[self] . identifier[leverage] keyword[is] keyword[True] :
identifier[new_value] += identifier[t_z] [ literal[int] + identifier[self] . identifier[p] + identifier[self] . identifier[q] ]* identifier[np] . identifier[sign] (-( identifier[Y_exp] [- literal[int] ]- identifier[t_z] [- literal[int] ]- identifier[t_z] [- literal[int] ]* identifier[np] . identifier[exp] ( identifier[lmda_exp] [- literal[int] ]/ literal[int] )))*( identifier[scores_exp] [- literal[int] ]+ literal[int] )
identifier[lmda_exp] = identifier[np] . identifier[append] ( identifier[lmda_exp] ,[ identifier[new_value] ])
identifier[scores_exp] = identifier[np] . identifier[append] ( identifier[scores_exp] , identifier[scores] [ identifier[np] . identifier[random] . identifier[randint] ( identifier[scores] . identifier[shape] [ literal[int] ])])
identifier[Y_exp] = identifier[np] . identifier[append] ( identifier[Y_exp] , identifier[Y] [ identifier[np] . identifier[random] . identifier[randint] ( identifier[Y] . identifier[shape] [ literal[int] ])])
identifier[sim_vector] [ identifier[n] ]= identifier[lmda_exp] [- identifier[h] :]
keyword[return] identifier[np] . identifier[transpose] ( identifier[sim_vector] ) | def _sim_prediction_bayes(self, h, simulations):
""" Simulates a h-step ahead mean prediction
Parameters
----------
h : int
How many steps ahead for the prediction
simulations : int
How many simulations to perform
Returns
----------
Matrix of simulations
"""
sim_vector = np.zeros([simulations, h])
for n in range(0, simulations):
t_z = self.draw_latent_variables(nsims=1).T[0]
(lmda, Y, scores) = self._model(t_z)
t_z = np.array([self.latent_variables.z_list[k].prior.transform(t_z[k]) for k in range(t_z.shape[0])]) # Create arrays to iteratre over
lmda_exp = lmda.copy()
scores_exp = scores.copy()
Y_exp = Y.copy() # Loop over h time periods
for t in range(0, h):
new_value = t_z[0]
if self.p != 0:
for j in range(1, self.p + 1):
new_value += t_z[j] * lmda_exp[-j] # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]]
if self.q != 0:
for k in range(1, self.q + 1):
new_value += t_z[k + self.p] * scores_exp[-k] # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]]
if self.leverage is True:
new_value += t_z[1 + self.p + self.q] * np.sign(-(Y_exp[-1] - t_z[-2] - t_z[-1] * np.exp(lmda_exp[-1] / 2.0))) * (scores_exp[-1] + 1) # depends on [control=['if'], data=[]]
lmda_exp = np.append(lmda_exp, [new_value]) # For indexing consistency
scores_exp = np.append(scores_exp, scores[np.random.randint(scores.shape[0])]) # expectation of score is zero
Y_exp = np.append(Y_exp, Y[np.random.randint(Y.shape[0])]) # bootstrap returns # depends on [control=['for'], data=[]]
sim_vector[n] = lmda_exp[-h:] # depends on [control=['for'], data=['n']]
return np.transpose(sim_vector) |
def _serializeExtraData(self, extraDataDir):
    """ [virtual method override] This method is called during serialization
    with an external directory path that can be used to bypass pickle for saving
    large binary states.
    extraDataDir:
        Model's extra data directory path
    """
    # Ensure the target directory exists before writing into it.
    makeDirectoryFromAbsolutePath(extraDataDir)
    # Persist the network state under its dedicated sub-directory.
    networkDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
    self.__logger.debug("Serializing network...")
    self._netInfo.net.save(networkDir)
    self.__logger.debug("Finished serializing network")
constant[ [virtual method override] This method is called during serialization
with an external directory path that can be used to bypass pickle for saving
large binary states.
extraDataDir:
Model's extra data directory path
]
call[name[makeDirectoryFromAbsolutePath], parameter[name[extraDataDir]]]
variable[outputDir] assign[=] call[name[self].__getNetworkStateDirectory, parameter[]]
call[name[self].__logger.debug, parameter[constant[Serializing network...]]]
call[name[self]._netInfo.net.save, parameter[name[outputDir]]]
call[name[self].__logger.debug, parameter[constant[Finished serializing network]]]
return[None] | keyword[def] identifier[_serializeExtraData] ( identifier[self] , identifier[extraDataDir] ):
literal[string]
identifier[makeDirectoryFromAbsolutePath] ( identifier[extraDataDir] )
identifier[outputDir] = identifier[self] . identifier[__getNetworkStateDirectory] ( identifier[extraDataDir] = identifier[extraDataDir] )
identifier[self] . identifier[__logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_netInfo] . identifier[net] . identifier[save] ( identifier[outputDir] )
identifier[self] . identifier[__logger] . identifier[debug] ( literal[string] )
keyword[return] | def _serializeExtraData(self, extraDataDir):
""" [virtual method override] This method is called during serialization
with an external directory path that can be used to bypass pickle for saving
large binary states.
extraDataDir:
Model's extra data directory path
"""
makeDirectoryFromAbsolutePath(extraDataDir)
#--------------------------------------------------
# Save the network
outputDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
self.__logger.debug('Serializing network...')
self._netInfo.net.save(outputDir)
self.__logger.debug('Finished serializing network')
return |
def evaluate(self, name, variables=None):
    """
    Evaluate a named rule.
    :param name: The name of the rule to evaluate.
    :param variables: An optional dictionary of variables to make
                      available during evaluation of the rule.
    :returns: An instance of
              ``policies.authorization.Authorization`` with the
              result of the rule evaluation.  This will include
              any authorization attributes.
    """
    # Look up the rule and its predeclaration (attribute defaults)
    rule = self._rules.get(name)
    default = self._defaults.get(name)

    # Short-circuit if we don't have either: deny by default
    if rule is None and default is None:
        return authorization.Authorization(False)

    # Marry the attribute defaults; the rule's own attributes win
    attrs = {}
    if default:
        attrs.update(default.attrs)
    if rule:
        attrs.update(rule.attrs)

    # Fall back to the predeclaration when no full rule exists
    if rule is None:
        rule = default

    # Construct the evaluation context
    ctxt = self.context_class(self, attrs, variables or {})

    # Execute the rule; any error during evaluation fails closed
    # (deny, but still report the merged attributes).
    try:
        with ctxt.push_rule(name):
            rule.instructions(ctxt)
    except Exception:
        # Fail closed (was "except Exception as exc" with exc unused)
        return authorization.Authorization(False, attrs)

    # Return the authorization result
    return ctxt.authz
constant[
Evaluate a named rule.
:param name: The name of the rule to evaluate.
:param variables: An optional dictionary of variables to make
available during evaluation of the rule.
:returns: An instance of
``policies.authorization.Authorization`` with the
result of the rule evaluation. This will include
any authorization attributes.
]
variable[rule] assign[=] call[name[self]._rules.get, parameter[name[name]]]
variable[default] assign[=] call[name[self]._defaults.get, parameter[name[name]]]
if <ast.BoolOp object at 0x7da1b16a8400> begin[:]
return[call[name[authorization].Authorization, parameter[constant[False]]]]
variable[attrs] assign[=] dictionary[[], []]
if name[default] begin[:]
call[name[attrs].update, parameter[name[default].attrs]]
if name[rule] begin[:]
call[name[attrs].update, parameter[name[rule].attrs]]
if compare[name[rule] is constant[None]] begin[:]
variable[rule] assign[=] name[default]
variable[ctxt] assign[=] call[name[self].context_class, parameter[name[self], name[attrs], <ast.BoolOp object at 0x7da20e957be0>]]
<ast.Try object at 0x7da20e955b70>
return[name[ctxt].authz] | keyword[def] identifier[evaluate] ( identifier[self] , identifier[name] , identifier[variables] = keyword[None] ):
literal[string]
identifier[rule] = identifier[self] . identifier[_rules] . identifier[get] ( identifier[name] )
identifier[default] = identifier[self] . identifier[_defaults] . identifier[get] ( identifier[name] )
keyword[if] identifier[rule] keyword[is] keyword[None] keyword[and] identifier[default] keyword[is] keyword[None] :
keyword[return] identifier[authorization] . identifier[Authorization] ( keyword[False] )
identifier[attrs] ={}
keyword[if] identifier[default] :
identifier[attrs] . identifier[update] ( identifier[default] . identifier[attrs] )
keyword[if] identifier[rule] :
identifier[attrs] . identifier[update] ( identifier[rule] . identifier[attrs] )
keyword[if] identifier[rule] keyword[is] keyword[None] :
identifier[rule] = identifier[default]
identifier[ctxt] = identifier[self] . identifier[context_class] ( identifier[self] , identifier[attrs] , identifier[variables] keyword[or] {})
keyword[try] :
keyword[with] identifier[ctxt] . identifier[push_rule] ( identifier[name] ):
identifier[rule] . identifier[instructions] ( identifier[ctxt] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
keyword[return] identifier[authorization] . identifier[Authorization] ( keyword[False] , identifier[attrs] )
keyword[return] identifier[ctxt] . identifier[authz] | def evaluate(self, name, variables=None):
"""
Evaluate a named rule.
:param name: The name of the rule to evaluate.
:param variables: An optional dictionary of variables to make
available during evaluation of the rule.
:returns: An instance of
``policies.authorization.Authorization`` with the
result of the rule evaluation. This will include
any authorization attributes.
"""
# Get the rule and predeclaration
rule = self._rules.get(name)
default = self._defaults.get(name)
# Short-circuit if we don't have either
if rule is None and default is None:
return authorization.Authorization(False) # depends on [control=['if'], data=[]]
# Marry the attribute defaults
attrs = {}
if default:
attrs.update(default.attrs) # depends on [control=['if'], data=[]]
if rule:
attrs.update(rule.attrs) # depends on [control=['if'], data=[]]
# Select the rule we'll actually use
if rule is None:
rule = default # depends on [control=['if'], data=['rule']]
# Construct the context
ctxt = self.context_class(self, attrs, variables or {})
# Execute the rule
try:
with ctxt.push_rule(name):
rule.instructions(ctxt) # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except Exception as exc:
# Fail closed
return authorization.Authorization(False, attrs) # depends on [control=['except'], data=[]]
# Return the authorization result
return ctxt.authz |
def _is_in_directory(table, user_id, db_dirname):
    """
    Return a WHERE clause that matches entries in a directory.
    Parameterized on table because this clause is re-used between files and
    directories.
    """
    same_parent = table.c.parent_name == db_dirname
    same_owner = table.c.user_id == user_id
    return and_(same_parent, same_owner)
constant[
Return a WHERE clause that matches entries in a directory.
Parameterized on table because this clause is re-used between files and
directories.
]
return[call[name[and_], parameter[compare[name[table].c.parent_name equal[==] name[db_dirname]], compare[name[table].c.user_id equal[==] name[user_id]]]]] | keyword[def] identifier[_is_in_directory] ( identifier[table] , identifier[user_id] , identifier[db_dirname] ):
literal[string]
keyword[return] identifier[and_] (
identifier[table] . identifier[c] . identifier[parent_name] == identifier[db_dirname] ,
identifier[table] . identifier[c] . identifier[user_id] == identifier[user_id] ,
) | def _is_in_directory(table, user_id, db_dirname):
"""
Return a WHERE clause that matches entries in a directory.
Parameterized on table because this clause is re-used between files and
directories.
"""
return and_(table.c.parent_name == db_dirname, table.c.user_id == user_id) |
def parser(self):
    """A :class:`configparser.RawConfigParser` object with :attr:`available_files` loaded."""
    config = configparser.RawConfigParser()
    for pathname in self.available_files:
        pretty_name = format_path(pathname)
        logger.debug("Loading configuration file: %s", pretty_name)
        # RawConfigParser.read() returns the list of files it parsed;
        # an empty list means this file could not be loaded.
        if not config.read(pathname):
            self.report_issue("Failed to load configuration file! (%s)", pretty_name)
    logger.debug("Loaded %s from %s.",
                 pluralize(len(config.sections()), "section"),
                 pluralize(len(self.available_files), "configuration file"))
    return config
constant[A :class:`configparser.RawConfigParser` object with :attr:`available_files` loaded.]
variable[parser] assign[=] call[name[configparser].RawConfigParser, parameter[]]
for taget[name[filename]] in starred[name[self].available_files] begin[:]
variable[friendly_name] assign[=] call[name[format_path], parameter[name[filename]]]
call[name[logger].debug, parameter[constant[Loading configuration file: %s], name[friendly_name]]]
variable[loaded_files] assign[=] call[name[parser].read, parameter[name[filename]]]
if compare[call[name[len], parameter[name[loaded_files]]] equal[==] constant[0]] begin[:]
call[name[self].report_issue, parameter[constant[Failed to load configuration file! (%s)], name[friendly_name]]]
call[name[logger].debug, parameter[constant[Loaded %s from %s.], call[name[pluralize], parameter[call[name[len], parameter[call[name[parser].sections, parameter[]]]], constant[section]]], call[name[pluralize], parameter[call[name[len], parameter[name[self].available_files]], constant[configuration file]]]]]
return[name[parser]] | keyword[def] identifier[parser] ( identifier[self] ):
literal[string]
identifier[parser] = identifier[configparser] . identifier[RawConfigParser] ()
keyword[for] identifier[filename] keyword[in] identifier[self] . identifier[available_files] :
identifier[friendly_name] = identifier[format_path] ( identifier[filename] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[friendly_name] )
identifier[loaded_files] = identifier[parser] . identifier[read] ( identifier[filename] )
keyword[if] identifier[len] ( identifier[loaded_files] )== literal[int] :
identifier[self] . identifier[report_issue] ( literal[string] , identifier[friendly_name] )
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[pluralize] ( identifier[len] ( identifier[parser] . identifier[sections] ()), literal[string] ),
identifier[pluralize] ( identifier[len] ( identifier[self] . identifier[available_files] ), literal[string] ))
keyword[return] identifier[parser] | def parser(self):
"""A :class:`configparser.RawConfigParser` object with :attr:`available_files` loaded."""
parser = configparser.RawConfigParser()
for filename in self.available_files:
friendly_name = format_path(filename)
logger.debug('Loading configuration file: %s', friendly_name)
loaded_files = parser.read(filename)
if len(loaded_files) == 0:
self.report_issue('Failed to load configuration file! (%s)', friendly_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']]
logger.debug('Loaded %s from %s.', pluralize(len(parser.sections()), 'section'), pluralize(len(self.available_files), 'configuration file'))
return parser |
def delete(self, path):
    """Send a delete request to the given path of the CRUD API. This deletes the object. Or at least tries to."""
    target_url = urljoin(self.url + CRUD_PATH, path)
    response = self.r.delete(target_url)
    return self.handleresult(response)
constant[Send a delete request to the given path of the CRUD API. This deletes the object. Or at least tries to.]
return[call[name[self].handleresult, parameter[call[name[self].r.delete, parameter[call[name[urljoin], parameter[binary_operation[name[self].url + name[CRUD_PATH]], name[path]]]]]]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[path] ):
literal[string]
keyword[return] identifier[self] . identifier[handleresult] ( identifier[self] . identifier[r] . identifier[delete] ( identifier[urljoin] ( identifier[self] . identifier[url] + identifier[CRUD_PATH] ,
identifier[path] ))) | def delete(self, path):
"""Send a delete request to the given path of the CRUD API. This deletes the object. Or at least tries to."""
return self.handleresult(self.r.delete(urljoin(self.url + CRUD_PATH, path))) |
def turn_on_syncing(for_post_save=True, for_post_delete=True, for_m2m_changed=True, for_post_bulk_operation=False):
    """
    Enables all of the signals for syncing entities.  Every flag defaults to
    True except for the post_bulk_operation signal: a bulk operation on any
    mirrored entity model causes every entity to be synced again, which most
    users do not want, so it must be turned on explicitly.
    """
    # (flag, signal, handler, dispatch_uid) table; connections happen in the
    # same order as the original explicit if-blocks.
    wiring = (
        (for_post_save, post_save,
         save_entity_signal_handler, 'save_entity_signal_handler'),
        (for_post_delete, post_delete,
         delete_entity_signal_handler, 'delete_entity_signal_handler'),
        (for_m2m_changed, m2m_changed,
         m2m_changed_entity_signal_handler, 'm2m_changed_entity_signal_handler'),
        (for_post_bulk_operation, post_bulk_operation,
         bulk_operation_signal_handler, 'bulk_operation_signal_handler'),
    )
    for enabled, signal, handler, uid in wiring:
        if enabled:
            signal.connect(handler, dispatch_uid=uid)
constant[
Enables all of the signals for syncing entities. Everything is True by default, except for the post_bulk_operation
signal. The reason for this is because when any bulk operation occurs on any mirrored entity model, it will
result in every single entity being synced again. This is not a desired behavior by the majority of users, and
should only be turned on explicitly.
]
if name[for_post_save] begin[:]
call[name[post_save].connect, parameter[name[save_entity_signal_handler]]]
if name[for_post_delete] begin[:]
call[name[post_delete].connect, parameter[name[delete_entity_signal_handler]]]
if name[for_m2m_changed] begin[:]
call[name[m2m_changed].connect, parameter[name[m2m_changed_entity_signal_handler]]]
if name[for_post_bulk_operation] begin[:]
call[name[post_bulk_operation].connect, parameter[name[bulk_operation_signal_handler]]] | keyword[def] identifier[turn_on_syncing] ( identifier[for_post_save] = keyword[True] , identifier[for_post_delete] = keyword[True] , identifier[for_m2m_changed] = keyword[True] , identifier[for_post_bulk_operation] = keyword[False] ):
literal[string]
keyword[if] identifier[for_post_save] :
identifier[post_save] . identifier[connect] ( identifier[save_entity_signal_handler] , identifier[dispatch_uid] = literal[string] )
keyword[if] identifier[for_post_delete] :
identifier[post_delete] . identifier[connect] ( identifier[delete_entity_signal_handler] , identifier[dispatch_uid] = literal[string] )
keyword[if] identifier[for_m2m_changed] :
identifier[m2m_changed] . identifier[connect] ( identifier[m2m_changed_entity_signal_handler] , identifier[dispatch_uid] = literal[string] )
keyword[if] identifier[for_post_bulk_operation] :
identifier[post_bulk_operation] . identifier[connect] ( identifier[bulk_operation_signal_handler] , identifier[dispatch_uid] = literal[string] ) | def turn_on_syncing(for_post_save=True, for_post_delete=True, for_m2m_changed=True, for_post_bulk_operation=False):
"""
Enables all of the signals for syncing entities. Everything is True by default, except for the post_bulk_operation
signal. The reason for this is because when any bulk operation occurs on any mirrored entity model, it will
result in every single entity being synced again. This is not a desired behavior by the majority of users, and
should only be turned on explicitly.
"""
if for_post_save:
post_save.connect(save_entity_signal_handler, dispatch_uid='save_entity_signal_handler') # depends on [control=['if'], data=[]]
if for_post_delete:
post_delete.connect(delete_entity_signal_handler, dispatch_uid='delete_entity_signal_handler') # depends on [control=['if'], data=[]]
if for_m2m_changed:
m2m_changed.connect(m2m_changed_entity_signal_handler, dispatch_uid='m2m_changed_entity_signal_handler') # depends on [control=['if'], data=[]]
if for_post_bulk_operation:
post_bulk_operation.connect(bulk_operation_signal_handler, dispatch_uid='bulk_operation_signal_handler') # depends on [control=['if'], data=[]] |
def scale_to_control(x, axis_scale=350., min_v=-1.0, max_v=1.0):
    """Normalize a raw HID axis reading into the [min_v, max_v] interval.

    The reading is divided by ``axis_scale`` and then clamped so the
    result never leaves the target range.
    """
    scaled = x / axis_scale
    if scaled < min_v:
        return min_v
    if scaled > max_v:
        return max_v
    return scaled
constant[Normalize raw HID readings to target range.]
variable[x] assign[=] binary_operation[name[x] / name[axis_scale]]
variable[x] assign[=] call[name[min], parameter[call[name[max], parameter[name[x], name[min_v]]], name[max_v]]]
return[name[x]] | keyword[def] identifier[scale_to_control] ( identifier[x] , identifier[axis_scale] = literal[int] , identifier[min_v] =- literal[int] , identifier[max_v] = literal[int] ):
literal[string]
identifier[x] = identifier[x] / identifier[axis_scale]
identifier[x] = identifier[min] ( identifier[max] ( identifier[x] , identifier[min_v] ), identifier[max_v] )
keyword[return] identifier[x] | def scale_to_control(x, axis_scale=350.0, min_v=-1.0, max_v=1.0):
"""Normalize raw HID readings to target range."""
x = x / axis_scale
x = min(max(x, min_v), max_v)
return x |
def enable_chimera_inline():
    """
    Enable IPython magic commands to run some Chimera actions
    Currently supported:
    - %chimera_export_3D [<model>]:
        Depicts the Chimera 3D canvas in a WebGL iframe. Requires
        a headless Chimera build and a Notebook instance. SLOW.
    - %chimera_run <command>:
        Runs Chimera commands meant to be input in the GUI command line
    """
    # Deferred imports: IPython and Chimera are only required once the
    # magics are actually registered inside a live notebook session.
    from IPython.display import IFrame
    from IPython.core.magic import register_line_magic
    import chimera
    import Midas
    # The function name becomes the magic name (%chimera_export_3D), so
    # these nested defs must keep their exact names.
    @register_line_magic
    def chimera_export_3D(line):
        # WebGL export only works on headless builds (viewer class
        # 'NoGuiViewer'); bail out with a pointer to the download page.
        # NOTE(review): relies on a module-level `sys` import for stderr.
        if chimera.viewer.__class__.__name__ == 'NoGuiViewer':
            print('This magic requires a headless Chimera build. '
                  'Check http://www.cgl.ucsf.edu/chimera/download.html#unsupported.',
                  file=sys.stderr)
            return
        # The magic's argument line is evaluated as a Python expression,
        # e.g. `%chimera_export_3D [some_model]`; empty line -> all models.
        models = eval(line) if line else []
        def html(*models):
            # When specific models are requested, hide everything else,
            # select each requested model and refocus the camera on it.
            if models:
                for m in chimera.openModels.list():
                    m.display = False
                chimera.selection.clearCurrent()
                for model in models:
                    model.display = True
                    chimera.selection.addCurrent(model)
                    chimera.runCommand('focus sel')
            chimera.viewer.windowSize = 800, 600
            path = 'chimera_scene_export.html'
            Midas.export(filename=path, format='WebGL')
            # Pad the iframe by 20px per side so the WebGL canvas isn't clipped.
            return IFrame(path, *[x + 20 for x in chimera.viewer.windowSize])
        return html(*models)
    # register_line_magic has already recorded the function; drop the
    # local name so it does not linger in this scope (standard IPython idiom).
    del chimera_export_3D
    @register_line_magic
    def chimera_run(line):
        if not line:
            print("Usage: %chimera_run <chimera command>", file=sys.stderr)
            return
        chimera.runCommand(line)
    del chimera_run
constant[
Enable IPython magic commands to run some Chimera actions
Currently supported:
- %chimera_export_3D [<model>]:
Depicts the Chimera 3D canvas in a WebGL iframe. Requires
a headless Chimera build and a Notebook instance. SLOW.
- %chimera_run <command>:
Runs Chimera commands meant to be input in the GUI command line
]
from relative_module[IPython.display] import module[IFrame]
from relative_module[IPython.core.magic] import module[register_line_magic]
import module[chimera]
import module[Midas]
def function[chimera_export_3D, parameter[line]]:
if compare[name[chimera].viewer.__class__.__name__ equal[==] constant[NoGuiViewer]] begin[:]
call[name[print], parameter[constant[This magic requires a headless Chimera build. Check http://www.cgl.ucsf.edu/chimera/download.html#unsupported.]]]
return[None]
variable[models] assign[=] <ast.IfExp object at 0x7da18eb566e0>
def function[html, parameter[]]:
if name[models] begin[:]
for taget[name[m]] in starred[call[name[chimera].openModels.list, parameter[]]] begin[:]
name[m].display assign[=] constant[False]
call[name[chimera].selection.clearCurrent, parameter[]]
for taget[name[model]] in starred[name[models]] begin[:]
name[model].display assign[=] constant[True]
call[name[chimera].selection.addCurrent, parameter[name[model]]]
call[name[chimera].runCommand, parameter[constant[focus sel]]]
name[chimera].viewer.windowSize assign[=] tuple[[<ast.Constant object at 0x7da1b0c50eb0>, <ast.Constant object at 0x7da1b0c53b20>]]
variable[path] assign[=] constant[chimera_scene_export.html]
call[name[Midas].export, parameter[]]
return[call[name[IFrame], parameter[name[path], <ast.Starred object at 0x7da1b0c52cb0>]]]
return[call[name[html], parameter[<ast.Starred object at 0x7da1b0c53640>]]]
<ast.Delete object at 0x7da1b0c511e0>
def function[chimera_run, parameter[line]]:
if <ast.UnaryOp object at 0x7da1b0c50670> begin[:]
call[name[print], parameter[constant[Usage: %chimera_run <chimera command>]]]
return[None]
call[name[chimera].runCommand, parameter[name[line]]]
<ast.Delete object at 0x7da1b0c53ca0> | keyword[def] identifier[enable_chimera_inline] ():
literal[string]
keyword[from] identifier[IPython] . identifier[display] keyword[import] identifier[IFrame]
keyword[from] identifier[IPython] . identifier[core] . identifier[magic] keyword[import] identifier[register_line_magic]
keyword[import] identifier[chimera]
keyword[import] identifier[Midas]
@ identifier[register_line_magic]
keyword[def] identifier[chimera_export_3D] ( identifier[line] ):
keyword[if] identifier[chimera] . identifier[viewer] . identifier[__class__] . identifier[__name__] == literal[string] :
identifier[print] ( literal[string]
literal[string] ,
identifier[file] = identifier[sys] . identifier[stderr] )
keyword[return]
identifier[models] = identifier[eval] ( identifier[line] ) keyword[if] identifier[line] keyword[else] []
keyword[def] identifier[html] (* identifier[models] ):
keyword[if] identifier[models] :
keyword[for] identifier[m] keyword[in] identifier[chimera] . identifier[openModels] . identifier[list] ():
identifier[m] . identifier[display] = keyword[False]
identifier[chimera] . identifier[selection] . identifier[clearCurrent] ()
keyword[for] identifier[model] keyword[in] identifier[models] :
identifier[model] . identifier[display] = keyword[True]
identifier[chimera] . identifier[selection] . identifier[addCurrent] ( identifier[model] )
identifier[chimera] . identifier[runCommand] ( literal[string] )
identifier[chimera] . identifier[viewer] . identifier[windowSize] = literal[int] , literal[int]
identifier[path] = literal[string]
identifier[Midas] . identifier[export] ( identifier[filename] = identifier[path] , identifier[format] = literal[string] )
keyword[return] identifier[IFrame] ( identifier[path] ,*[ identifier[x] + literal[int] keyword[for] identifier[x] keyword[in] identifier[chimera] . identifier[viewer] . identifier[windowSize] ])
keyword[return] identifier[html] (* identifier[models] )
keyword[del] identifier[chimera_export_3D]
@ identifier[register_line_magic]
keyword[def] identifier[chimera_run] ( identifier[line] ):
keyword[if] keyword[not] identifier[line] :
identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[return]
identifier[chimera] . identifier[runCommand] ( identifier[line] )
keyword[del] identifier[chimera_run] | def enable_chimera_inline():
"""
Enable IPython magic commands to run some Chimera actions
Currently supported:
- %chimera_export_3D [<model>]:
Depicts the Chimera 3D canvas in a WebGL iframe. Requires
a headless Chimera build and a Notebook instance. SLOW.
- %chimera_run <command>:
Runs Chimera commands meant to be input in the GUI command line
"""
from IPython.display import IFrame
from IPython.core.magic import register_line_magic
import chimera
import Midas
@register_line_magic
def chimera_export_3D(line):
if chimera.viewer.__class__.__name__ == 'NoGuiViewer':
print('This magic requires a headless Chimera build. Check http://www.cgl.ucsf.edu/chimera/download.html#unsupported.', file=sys.stderr)
return # depends on [control=['if'], data=[]]
models = eval(line) if line else []
def html(*models):
if models:
for m in chimera.openModels.list():
m.display = False # depends on [control=['for'], data=['m']]
chimera.selection.clearCurrent()
for model in models:
model.display = True
chimera.selection.addCurrent(model)
chimera.runCommand('focus sel') # depends on [control=['for'], data=['model']] # depends on [control=['if'], data=[]]
chimera.viewer.windowSize = (800, 600)
path = 'chimera_scene_export.html'
Midas.export(filename=path, format='WebGL')
return IFrame(path, *[x + 20 for x in chimera.viewer.windowSize])
return html(*models)
del chimera_export_3D
@register_line_magic
def chimera_run(line):
if not line:
print('Usage: %chimera_run <chimera command>', file=sys.stderr)
return # depends on [control=['if'], data=[]]
chimera.runCommand(line)
del chimera_run |
def get(config, messages, freq, pidDir=None, reactor=None):
    """Build the directory-driven process-monitoring service.

    The returned service, once started, launches processes according to
    the files in the ``config`` directory, restarting them when file
    contents change and stopping them when files disappear. It also polls
    the ``messages`` directory for restart / restart-all requests.

    :param config: string, location of configuration directory
    :param messages: string, location of messages directory
    :param freq: number, polling frequency for both directories
    :param pidDir: {twisted.python.filepath.FilePath} or None,
                   location to keep pid files
    :param reactor: object providing
                    {twisted.internet.interfaces.IReactorTime} and
                    {twisted.internet.interfaces.IReactorProcess}
    :returns: service, {twisted.application.interfaces.IService}
    """
    top = taservice.MultiService()
    # Only pass the reactor through when one was supplied, so the
    # monitor's own default applies otherwise.
    if reactor is None:
        monitor = procmonlib.ProcessMonitor()
    else:
        monitor = procmonlib.ProcessMonitor(reactor)
    if pidDir is not None:
        monitor.protocols = TransportDirectoryDict(pidDir)
    monitor.setName('procmon')
    receiver = process_events.Receiver(monitor)
    # One timer per directory: configuration checks and message checks
    # both feed events into the same receiver.
    checks = (
        directory_monitor.checker(config, receiver),
        directory_monitor.messages(messages, receiver),
    )
    for check in checks:
        internet.TimerService(freq, check).setServiceParent(top)
    monitor.setServiceParent(top)
    return top
constant[Return a service which monitors processes based on directory contents
Construct and return a service that, when started, will run processes
based on the contents of the 'config' directory, restarting them
if file contents change and stopping them if the file is removed.
It also listens for restart and restart-all messages on the 'messages'
directory.
:param config: string, location of configuration directory
:param messages: string, location of messages directory
:param freq: number, frequency to check for new messages and configuration
updates
:param pidDir: {twisted.python.filepath.FilePath} or None,
location to keep pid files
:param reactor: something implementing the interfaces
{twisted.internet.interfaces.IReactorTime} and
{twisted.internet.interfaces.IReactorProcess} and
:returns: service, {twisted.application.interfaces.IService}
]
variable[ret] assign[=] call[name[taservice].MultiService, parameter[]]
variable[args] assign[=] tuple[[]]
if compare[name[reactor] is_not constant[None]] begin[:]
variable[args] assign[=] tuple[[<ast.Name object at 0x7da20c795030>]]
variable[procmon] assign[=] call[name[procmonlib].ProcessMonitor, parameter[<ast.Starred object at 0x7da20c796470>]]
if compare[name[pidDir] is_not constant[None]] begin[:]
variable[protocols] assign[=] call[name[TransportDirectoryDict], parameter[name[pidDir]]]
name[procmon].protocols assign[=] name[protocols]
call[name[procmon].setName, parameter[constant[procmon]]]
variable[receiver] assign[=] call[name[process_events].Receiver, parameter[name[procmon]]]
variable[confcheck] assign[=] call[name[directory_monitor].checker, parameter[name[config], name[receiver]]]
variable[confserv] assign[=] call[name[internet].TimerService, parameter[name[freq], name[confcheck]]]
call[name[confserv].setServiceParent, parameter[name[ret]]]
variable[messagecheck] assign[=] call[name[directory_monitor].messages, parameter[name[messages], name[receiver]]]
variable[messageserv] assign[=] call[name[internet].TimerService, parameter[name[freq], name[messagecheck]]]
call[name[messageserv].setServiceParent, parameter[name[ret]]]
call[name[procmon].setServiceParent, parameter[name[ret]]]
return[name[ret]] | keyword[def] identifier[get] ( identifier[config] , identifier[messages] , identifier[freq] , identifier[pidDir] = keyword[None] , identifier[reactor] = keyword[None] ):
literal[string]
identifier[ret] = identifier[taservice] . identifier[MultiService] ()
identifier[args] =()
keyword[if] identifier[reactor] keyword[is] keyword[not] keyword[None] :
identifier[args] = identifier[reactor] ,
identifier[procmon] = identifier[procmonlib] . identifier[ProcessMonitor] (* identifier[args] )
keyword[if] identifier[pidDir] keyword[is] keyword[not] keyword[None] :
identifier[protocols] = identifier[TransportDirectoryDict] ( identifier[pidDir] )
identifier[procmon] . identifier[protocols] = identifier[protocols]
identifier[procmon] . identifier[setName] ( literal[string] )
identifier[receiver] = identifier[process_events] . identifier[Receiver] ( identifier[procmon] )
identifier[confcheck] = identifier[directory_monitor] . identifier[checker] ( identifier[config] , identifier[receiver] )
identifier[confserv] = identifier[internet] . identifier[TimerService] ( identifier[freq] , identifier[confcheck] )
identifier[confserv] . identifier[setServiceParent] ( identifier[ret] )
identifier[messagecheck] = identifier[directory_monitor] . identifier[messages] ( identifier[messages] , identifier[receiver] )
identifier[messageserv] = identifier[internet] . identifier[TimerService] ( identifier[freq] , identifier[messagecheck] )
identifier[messageserv] . identifier[setServiceParent] ( identifier[ret] )
identifier[procmon] . identifier[setServiceParent] ( identifier[ret] )
keyword[return] identifier[ret] | def get(config, messages, freq, pidDir=None, reactor=None):
"""Return a service which monitors processes based on directory contents
Construct and return a service that, when started, will run processes
based on the contents of the 'config' directory, restarting them
if file contents change and stopping them if the file is removed.
It also listens for restart and restart-all messages on the 'messages'
directory.
:param config: string, location of configuration directory
:param messages: string, location of messages directory
:param freq: number, frequency to check for new messages and configuration
updates
:param pidDir: {twisted.python.filepath.FilePath} or None,
location to keep pid files
:param reactor: something implementing the interfaces
{twisted.internet.interfaces.IReactorTime} and
{twisted.internet.interfaces.IReactorProcess} and
:returns: service, {twisted.application.interfaces.IService}
"""
ret = taservice.MultiService()
args = ()
if reactor is not None:
args = (reactor,) # depends on [control=['if'], data=['reactor']]
procmon = procmonlib.ProcessMonitor(*args)
if pidDir is not None:
protocols = TransportDirectoryDict(pidDir)
procmon.protocols = protocols # depends on [control=['if'], data=['pidDir']]
procmon.setName('procmon')
receiver = process_events.Receiver(procmon)
confcheck = directory_monitor.checker(config, receiver)
confserv = internet.TimerService(freq, confcheck)
confserv.setServiceParent(ret)
messagecheck = directory_monitor.messages(messages, receiver)
messageserv = internet.TimerService(freq, messagecheck)
messageserv.setServiceParent(ret)
procmon.setServiceParent(ret)
return ret |
def _add_overlay(self, center):
        """
        Add a grid from a differently-centered stereonet. This is useful for
        making "polar stereonets" that still use the same coordinate system as
        a standard stereonet. (i.e. a plane/line/whatever will have the same
        representation on both, but the grid is displayed differently.)
        To display a polar grid on a stereonet, use ``kind="polar"``.
        It is also often useful to display a grid relative to an arbitrary
        measurement (e.g. a lineation axis). In that case, use the
        ``lon_center`` and ``lat_center`` arguments. Note that these are in
        radians in "stereonet coordinates". Therefore, you'll often want to
        use one of the functions in ``stereonet_math`` to convert a
        line/plane/rake into the longitude and latitude you'd input here. For
        example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``.
        If no parameters are specified, this is equivalent to turning on the
        standard grid.
        Parameters
        ----------
        center: 2-item tuple of numbers
            A tuple of (longitude, latitude) in radians that the overlay is
            centered on.
        """
        # Convert the stereonet-coordinate center into a plunge/bearing so
        # the overlay axes can be tilted (center_latitude) and rotated
        # (rotation) to match.  (The previous unused `lon0, lat0 = center`
        # unpack has been removed.)
        plunge, bearing = stereonet_math.geographic2plunge_bearing(*center)
        fig = self.get_figure()
        # A second frameless axes, stacked at this axes' position with the
        # same projection, draws the re-centered grid.
        self._overlay_axes = fig.add_axes(self.get_position(True),
                                          frameon=False, projection=self.name,
                                          center_longitude=0,
                                          center_latitude=np.radians(plunge),
                                          label='overlay',
                                          rotation=bearing)
        # NOTE(review): removes the overlay's own boundary artist so only the
        # base stereonet's outline is drawn — confirm against the projection
        # class's `_polar` attribute.
        self._overlay_axes._polar.remove()
        # Report cursor coordinates through the parent's overlay-aware
        # formatter rather than the overlay's own frame.
        self._overlay_axes.format_coord = self._overlay_format_coord
        self._overlay_axes.grid(True)
constant[
Add a grid from a differently-centered stereonet. This is useful for
making "polar stereonets" that still use the same coordinate system as
a standard stereonet. (i.e. a plane/line/whatever will have the same
representation on both, but the grid is displayed differently.)
To display a polar grid on a stereonet, use ``kind="polar"``.
It is also often useful to display a grid relative to an arbitrary
measurement (e.g. a lineation axis). In that case, use the
``lon_center`` and ``lat_center`` arguments. Note that these are in
radians in "stereonet coordinates". Therefore, you'll often want to
use one of the functions in ``stereonet_math`` to convert a
line/plane/rake into the longitude and latitude you'd input here. For
example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``.
If no parameters are specified, this is equivalent to turning on the
standard grid.
Parameters
----------
center: 2-item tuple of numbers
A tuple of (longitude, latitude) in radians that the overlay is
centered on.
]
<ast.Tuple object at 0x7da2054a4760> assign[=] call[name[stereonet_math].geographic2plunge_bearing, parameter[<ast.Starred object at 0x7da2054a5b40>]]
<ast.Tuple object at 0x7da2054a68c0> assign[=] name[center]
variable[fig] assign[=] call[name[self].get_figure, parameter[]]
name[self]._overlay_axes assign[=] call[name[fig].add_axes, parameter[call[name[self].get_position, parameter[constant[True]]]]]
call[name[self]._overlay_axes._polar.remove, parameter[]]
name[self]._overlay_axes.format_coord assign[=] name[self]._overlay_format_coord
call[name[self]._overlay_axes.grid, parameter[constant[True]]] | keyword[def] identifier[_add_overlay] ( identifier[self] , identifier[center] ):
literal[string]
identifier[plunge] , identifier[bearing] = identifier[stereonet_math] . identifier[geographic2plunge_bearing] (* identifier[center] )
identifier[lon0] , identifier[lat0] = identifier[center]
identifier[fig] = identifier[self] . identifier[get_figure] ()
identifier[self] . identifier[_overlay_axes] = identifier[fig] . identifier[add_axes] ( identifier[self] . identifier[get_position] ( keyword[True] ),
identifier[frameon] = keyword[False] , identifier[projection] = identifier[self] . identifier[name] ,
identifier[center_longitude] = literal[int] ,
identifier[center_latitude] = identifier[np] . identifier[radians] ( identifier[plunge] ),
identifier[label] = literal[string] ,
identifier[rotation] = identifier[bearing] )
identifier[self] . identifier[_overlay_axes] . identifier[_polar] . identifier[remove] ()
identifier[self] . identifier[_overlay_axes] . identifier[format_coord] = identifier[self] . identifier[_overlay_format_coord]
identifier[self] . identifier[_overlay_axes] . identifier[grid] ( keyword[True] ) | def _add_overlay(self, center):
"""
Add a grid from a differently-centered stereonet. This is useful for
making "polar stereonets" that still use the same coordinate system as
a standard stereonet. (i.e. a plane/line/whatever will have the same
representation on both, but the grid is displayed differently.)
To display a polar grid on a stereonet, use ``kind="polar"``.
It is also often useful to display a grid relative to an arbitrary
measurement (e.g. a lineation axis). In that case, use the
``lon_center`` and ``lat_center`` arguments. Note that these are in
radians in "stereonet coordinates". Therefore, you'll often want to
use one of the functions in ``stereonet_math`` to convert a
line/plane/rake into the longitude and latitude you'd input here. For
example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``.
If no parameters are specified, this is equivalent to turning on the
standard grid.
Parameters
----------
center: 2-item tuple of numbers
A tuple of (longitude, latitude) in radians that the overlay is
centered on.
"""
(plunge, bearing) = stereonet_math.geographic2plunge_bearing(*center)
(lon0, lat0) = center
fig = self.get_figure()
self._overlay_axes = fig.add_axes(self.get_position(True), frameon=False, projection=self.name, center_longitude=0, center_latitude=np.radians(plunge), label='overlay', rotation=bearing)
self._overlay_axes._polar.remove()
self._overlay_axes.format_coord = self._overlay_format_coord
self._overlay_axes.grid(True) |
def upload_backend(index='dev', user=None):
    """
    Build the backend and upload it to the remote server at the given index
    """
    # NOTE(review): `user` is accepted but never referenced here —
    # presumably kept for CLI-signature compatibility with the other fab
    # tasks; confirm before removing.
    get_vars()
    # Point devpi at the requested index before uploading.
    use_devpi(index=index)
    # The backend sources live in the sibling `application` directory; its
    # Makefile `upload` target performs the actual build and upload.
    with fab.lcd('../application'):
        fab.local('make upload')
constant[
Build the backend and upload it to the remote server at the given index
]
call[name[get_vars], parameter[]]
call[name[use_devpi], parameter[]]
with call[name[fab].lcd, parameter[constant[../application]]] begin[:]
call[name[fab].local, parameter[constant[make upload]]] | keyword[def] identifier[upload_backend] ( identifier[index] = literal[string] , identifier[user] = keyword[None] ):
literal[string]
identifier[get_vars] ()
identifier[use_devpi] ( identifier[index] = identifier[index] )
keyword[with] identifier[fab] . identifier[lcd] ( literal[string] ):
identifier[fab] . identifier[local] ( literal[string] ) | def upload_backend(index='dev', user=None):
"""
Build the backend and upload it to the remote server at the given index
"""
get_vars()
use_devpi(index=index)
with fab.lcd('../application'):
fab.local('make upload') # depends on [control=['with'], data=[]] |
def create_blog_pages(self, posts, blog_index, *args, **options):
        """Create or update BlogPage entries from wordpress post data.

        For each post dict in ``posts`` this creates (as a child of
        ``blog_index``) or updates a BlogPage, attaching the author, an
        optional header image, categories/tags, and — when
        ``self.should_import_comments`` is set — the post's comments.
        """
        for post in posts:
            post_id = post.get('ID')
            title = post.get('title')
            if title:
                title = self.convert_html_entities(title)
            slug = post.get('slug')
            description = post.get('description')
            if description:
                description = self.convert_html_entities(description)
            body = post.get('content')
            # Wordpress may deliver plain text; wrap it in <p> tags so the
            # rich-text field renders paragraphs.
            if "<p>" not in body:
                body = linebreaks(body)
            # get image info from content and create image objects
            body = self.create_images_from_urls_in_content(body)
            # author/user data
            author = post.get('author')
            user = self.create_user(author)
            categories = post.get('terms')
            # keep only the YYYY-MM-DD part of the wordpress timestamp
            date = post.get('date')[:10]
            try:
                new_entry = BlogPage.objects.get(slug=slug)
                new_entry.title = title
                new_entry.body = body
                new_entry.owner = user
                new_entry.save()
            except BlogPage.DoesNotExist:
                # BUG FIX: the literal string "description" used to be
                # stored; use the post's (entity-converted) description.
                new_entry = blog_index.add_child(instance=BlogPage(
                    title=title, slug=slug,
                    search_description=description or "",
                    date=date, body=body, owner=user))
            featured_image = post.get('featured_image')
            if featured_image is not None:
                # Use a separate name so the post title is not clobbered.
                image_title = post['featured_image']['title']
                source = post['featured_image']['source']
                path, file_ = os.path.split(source)
                source = source.replace('stage.swoon', 'swoon')
                try:
                    remote_image = urllib.request.urlretrieve(
                        self.prepare_url(source))
                    width = 640
                    height = 290
                    header_image = Image(title=image_title, width=width,
                                         height=height)
                    # Close the temp file handle instead of leaking it.
                    with open(remote_image[0], 'rb') as image_file:
                        header_image.file.save(file_, File(image_file))
                    header_image.save()
                except UnicodeEncodeError:
                    header_image = None
                    print('unable to set header image {}'.format(source))
            else:
                header_image = None
            new_entry.header_image = header_image
            new_entry.save()
            if categories:
                self.create_categories_and_tags(new_entry, categories)
            if self.should_import_comments:
                self.import_comments(post_id, slug)
constant[create Blog post entries from wordpress data]
for taget[name[post]] in starred[name[posts]] begin[:]
variable[post_id] assign[=] call[name[post].get, parameter[constant[ID]]]
variable[title] assign[=] call[name[post].get, parameter[constant[title]]]
if name[title] begin[:]
variable[new_title] assign[=] call[name[self].convert_html_entities, parameter[name[title]]]
variable[title] assign[=] name[new_title]
variable[slug] assign[=] call[name[post].get, parameter[constant[slug]]]
variable[description] assign[=] call[name[post].get, parameter[constant[description]]]
if name[description] begin[:]
variable[description] assign[=] call[name[self].convert_html_entities, parameter[name[description]]]
variable[body] assign[=] call[name[post].get, parameter[constant[content]]]
if <ast.UnaryOp object at 0x7da1b1933490> begin[:]
variable[body] assign[=] call[name[linebreaks], parameter[name[body]]]
variable[body] assign[=] call[name[self].create_images_from_urls_in_content, parameter[name[body]]]
variable[author] assign[=] call[name[post].get, parameter[constant[author]]]
variable[user] assign[=] call[name[self].create_user, parameter[name[author]]]
variable[categories] assign[=] call[name[post].get, parameter[constant[terms]]]
variable[date] assign[=] call[call[name[post].get, parameter[constant[date]]]][<ast.Slice object at 0x7da1b1933d00>]
<ast.Try object at 0x7da1b1933dc0>
variable[featured_image] assign[=] call[name[post].get, parameter[constant[featured_image]]]
if compare[name[featured_image] is_not constant[None]] begin[:]
variable[title] assign[=] call[call[name[post]][constant[featured_image]]][constant[title]]
variable[source] assign[=] call[call[name[post]][constant[featured_image]]][constant[source]]
<ast.Tuple object at 0x7da1b1952380> assign[=] call[name[os].path.split, parameter[name[source]]]
variable[source] assign[=] call[name[source].replace, parameter[constant[stage.swoon], constant[swoon]]]
<ast.Try object at 0x7da1b1952020>
name[new_entry].header_image assign[=] name[header_image]
call[name[new_entry].save, parameter[]]
if name[categories] begin[:]
call[name[self].create_categories_and_tags, parameter[name[new_entry], name[categories]]]
if name[self].should_import_comments begin[:]
call[name[self].import_comments, parameter[name[post_id], name[slug]]] | keyword[def] identifier[create_blog_pages] ( identifier[self] , identifier[posts] , identifier[blog_index] ,* identifier[args] ,** identifier[options] ):
literal[string]
keyword[for] identifier[post] keyword[in] identifier[posts] :
identifier[post_id] = identifier[post] . identifier[get] ( literal[string] )
identifier[title] = identifier[post] . identifier[get] ( literal[string] )
keyword[if] identifier[title] :
identifier[new_title] = identifier[self] . identifier[convert_html_entities] ( identifier[title] )
identifier[title] = identifier[new_title]
identifier[slug] = identifier[post] . identifier[get] ( literal[string] )
identifier[description] = identifier[post] . identifier[get] ( literal[string] )
keyword[if] identifier[description] :
identifier[description] = identifier[self] . identifier[convert_html_entities] ( identifier[description] )
identifier[body] = identifier[post] . identifier[get] ( literal[string] )
keyword[if] keyword[not] literal[string] keyword[in] identifier[body] :
identifier[body] = identifier[linebreaks] ( identifier[body] )
identifier[body] = identifier[self] . identifier[create_images_from_urls_in_content] ( identifier[body] )
identifier[author] = identifier[post] . identifier[get] ( literal[string] )
identifier[user] = identifier[self] . identifier[create_user] ( identifier[author] )
identifier[categories] = identifier[post] . identifier[get] ( literal[string] )
identifier[date] = identifier[post] . identifier[get] ( literal[string] )[: literal[int] ]
keyword[try] :
identifier[new_entry] = identifier[BlogPage] . identifier[objects] . identifier[get] ( identifier[slug] = identifier[slug] )
identifier[new_entry] . identifier[title] = identifier[title]
identifier[new_entry] . identifier[body] = identifier[body]
identifier[new_entry] . identifier[owner] = identifier[user]
identifier[new_entry] . identifier[save] ()
keyword[except] identifier[BlogPage] . identifier[DoesNotExist] :
identifier[new_entry] = identifier[blog_index] . identifier[add_child] ( identifier[instance] = identifier[BlogPage] (
identifier[title] = identifier[title] , identifier[slug] = identifier[slug] , identifier[search_description] = literal[string] ,
identifier[date] = identifier[date] , identifier[body] = identifier[body] , identifier[owner] = identifier[user] ))
identifier[featured_image] = identifier[post] . identifier[get] ( literal[string] )
keyword[if] identifier[featured_image] keyword[is] keyword[not] keyword[None] :
identifier[title] = identifier[post] [ literal[string] ][ literal[string] ]
identifier[source] = identifier[post] [ literal[string] ][ literal[string] ]
identifier[path] , identifier[file_] = identifier[os] . identifier[path] . identifier[split] ( identifier[source] )
identifier[source] = identifier[source] . identifier[replace] ( literal[string] , literal[string] )
keyword[try] :
identifier[remote_image] = identifier[urllib] . identifier[request] . identifier[urlretrieve] (
identifier[self] . identifier[prepare_url] ( identifier[source] ))
identifier[width] = literal[int]
identifier[height] = literal[int]
identifier[header_image] = identifier[Image] ( identifier[title] = identifier[title] , identifier[width] = identifier[width] , identifier[height] = identifier[height] )
identifier[header_image] . identifier[file] . identifier[save] (
identifier[file_] , identifier[File] ( identifier[open] ( identifier[remote_image] [ literal[int] ], literal[string] )))
identifier[header_image] . identifier[save] ()
keyword[except] identifier[UnicodeEncodeError] :
identifier[header_image] = keyword[None]
identifier[print] ( literal[string] . identifier[format] ( identifier[source] ))
keyword[else] :
identifier[header_image] = keyword[None]
identifier[new_entry] . identifier[header_image] = identifier[header_image]
identifier[new_entry] . identifier[save] ()
keyword[if] identifier[categories] :
identifier[self] . identifier[create_categories_and_tags] ( identifier[new_entry] , identifier[categories] )
keyword[if] identifier[self] . identifier[should_import_comments] :
identifier[self] . identifier[import_comments] ( identifier[post_id] , identifier[slug] ) | def create_blog_pages(self, posts, blog_index, *args, **options):
"""create Blog post entries from wordpress data"""
for post in posts:
post_id = post.get('ID')
title = post.get('title')
if title:
new_title = self.convert_html_entities(title)
title = new_title # depends on [control=['if'], data=[]]
slug = post.get('slug')
description = post.get('description')
if description:
description = self.convert_html_entities(description) # depends on [control=['if'], data=[]]
body = post.get('content')
if not '<p>' in body:
body = linebreaks(body) # depends on [control=['if'], data=[]]
# get image info from content and create image objects
body = self.create_images_from_urls_in_content(body)
# author/user data
author = post.get('author')
user = self.create_user(author)
categories = post.get('terms')
# format the date
date = post.get('date')[:10]
try:
new_entry = BlogPage.objects.get(slug=slug)
new_entry.title = title
new_entry.body = body
new_entry.owner = user
new_entry.save() # depends on [control=['try'], data=[]]
except BlogPage.DoesNotExist:
new_entry = blog_index.add_child(instance=BlogPage(title=title, slug=slug, search_description='description', date=date, body=body, owner=user)) # depends on [control=['except'], data=[]]
featured_image = post.get('featured_image')
if featured_image is not None:
title = post['featured_image']['title']
source = post['featured_image']['source']
(path, file_) = os.path.split(source)
source = source.replace('stage.swoon', 'swoon')
try:
remote_image = urllib.request.urlretrieve(self.prepare_url(source))
width = 640
height = 290
header_image = Image(title=title, width=width, height=height)
header_image.file.save(file_, File(open(remote_image[0], 'rb')))
header_image.save() # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
header_image = None
print('unable to set header image {}'.format(source)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
header_image = None
new_entry.header_image = header_image
new_entry.save()
if categories:
self.create_categories_and_tags(new_entry, categories) # depends on [control=['if'], data=[]]
if self.should_import_comments:
self.import_comments(post_id, slug) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['post']] |
def _interpolate(self, factor, minGlyph, maxGlyph,
                 round=True, suppressError=True):
    """
    Interpolate this glyph between *minGlyph* and *maxGlyph* at *factor*.

    Subclasses may override this method.

    If the interpolation fails and *suppressError* is false, a
    FontPartsError is raised; otherwise the failure is ignored. When
    *round* is true the interpolated result is rounded before being
    written back onto this glyph.
    """
    # Convert both endpoints to math glyphs so they support arithmetic.
    source = minGlyph._toMathGlyph()
    target = maxGlyph._toMathGlyph()
    try:
        blended = interpolate(source, target, factor)
    except IndexError:
        # Incompatible glyphs (mismatched contours/points) raise here.
        blended = None
    if blended is None:
        if not suppressError:
            raise FontPartsError(("Glyphs '%s' and '%s' could not be "
                                  "interpolated.")
                                 % (source.name, target.name))
    else:
        if round:
            blended = blended.round()
        # Write the interpolated result back onto this glyph.
        self._fromMathGlyph(blended, toThisGlyph=True)
constant[
Subclasses may override this method.
]
variable[minGlyph] assign[=] call[name[minGlyph]._toMathGlyph, parameter[]]
variable[maxGlyph] assign[=] call[name[maxGlyph]._toMathGlyph, parameter[]]
<ast.Try object at 0x7da20e74b2e0>
if <ast.BoolOp object at 0x7da20e74b070> begin[:]
<ast.Raise object at 0x7da20e74bb80>
if compare[name[result] is_not constant[None]] begin[:]
if name[round] begin[:]
variable[result] assign[=] call[name[result].round, parameter[]]
call[name[self]._fromMathGlyph, parameter[name[result]]] | keyword[def] identifier[_interpolate] ( identifier[self] , identifier[factor] , identifier[minGlyph] , identifier[maxGlyph] ,
identifier[round] = keyword[True] , identifier[suppressError] = keyword[True] ):
literal[string]
identifier[minGlyph] = identifier[minGlyph] . identifier[_toMathGlyph] ()
identifier[maxGlyph] = identifier[maxGlyph] . identifier[_toMathGlyph] ()
keyword[try] :
identifier[result] = identifier[interpolate] ( identifier[minGlyph] , identifier[maxGlyph] , identifier[factor] )
keyword[except] identifier[IndexError] :
identifier[result] = keyword[None]
keyword[if] identifier[result] keyword[is] keyword[None] keyword[and] keyword[not] identifier[suppressError] :
keyword[raise] identifier[FontPartsError] (( literal[string]
literal[string] )
%( identifier[minGlyph] . identifier[name] , identifier[maxGlyph] . identifier[name] ))
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[round] :
identifier[result] = identifier[result] . identifier[round] ()
identifier[self] . identifier[_fromMathGlyph] ( identifier[result] , identifier[toThisGlyph] = keyword[True] ) | def _interpolate(self, factor, minGlyph, maxGlyph, round=True, suppressError=True):
"""
Subclasses may override this method.
"""
minGlyph = minGlyph._toMathGlyph()
maxGlyph = maxGlyph._toMathGlyph()
try:
result = interpolate(minGlyph, maxGlyph, factor) # depends on [control=['try'], data=[]]
except IndexError:
result = None # depends on [control=['except'], data=[]]
if result is None and (not suppressError):
raise FontPartsError("Glyphs '%s' and '%s' could not be interpolated." % (minGlyph.name, maxGlyph.name)) # depends on [control=['if'], data=[]]
if result is not None:
if round:
result = result.round() # depends on [control=['if'], data=[]]
self._fromMathGlyph(result, toThisGlyph=True) # depends on [control=['if'], data=['result']] |
def get_settings(self, service_id, version_number):
    """Get the settings for a particular service and version."""
    # Fetch the raw settings payload, then wrap it in the API model.
    endpoint = "/service/%s/version/%d/settings" % (service_id, version_number)
    return FastlySettings(self, self._fetch(endpoint))
constant[Get the settings for a particular service and version.]
variable[content] assign[=] call[name[self]._fetch, parameter[binary_operation[constant[/service/%s/version/%d/settings] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0f10310>, <ast.Name object at 0x7da1b0f10c40>]]]]]
return[call[name[FastlySettings], parameter[name[self], name[content]]]] | keyword[def] identifier[get_settings] ( identifier[self] , identifier[service_id] , identifier[version_number] ):
literal[string]
identifier[content] = identifier[self] . identifier[_fetch] ( literal[string] %( identifier[service_id] , identifier[version_number] ))
keyword[return] identifier[FastlySettings] ( identifier[self] , identifier[content] ) | def get_settings(self, service_id, version_number):
"""Get the settings for a particular service and version."""
content = self._fetch('/service/%s/version/%d/settings' % (service_id, version_number))
return FastlySettings(self, content) |
def natsort_keygen(key=None, alg=ns.DEFAULT):
    """
    Generate a key function that sorts strings and numbers naturally.

    The returned callable is designed for use as the `key` argument to
    functions such as the `sorted` builtin or `list.sort`.

    Parameters
    ----------
    key : callable, optional
        A key used to manipulate the input value before parsing for
        numbers. It is **not** applied recursively.
        It should accept a single argument and return a single value.
    alg : ns enum, optional
        This option is used to control which algorithm `natsort`
        uses when sorting. For details into these options, please see
        the :class:`ns` class documentation. The default is `ns.INT`.

    Returns
    -------
    out : function
        A function that parses input for natural sorting that is
        suitable for passing as the `key` argument to functions
        such as `sorted`.

    See Also
    --------
    natsorted
    natsort_key
    """
    # Only valid ns members (or bitwise combinations of them) support
    # bitwise-or with ns.DEFAULT; anything else is rejected here.
    try:
        ns.DEFAULT | alg
    except TypeError:
        msg = "natsort_keygen: 'alg' argument must be from the enum 'ns'"
        raise ValueError(msg + ", got {}".format(py23_str(alg)))

    # Add the NS_DUMB option if the locale library is broken.
    if alg & ns.LOCALEALPHA and natsort.compat.locale.dumb_sort():
        alg |= NS_DUMB

    # Choose the null-string separators that the factory functions use.
    locale_support = natsort.compat.locale
    if alg & ns.NUMAFTER:
        pre_sep = locale_support.null_string_max
        if alg & ns.LOCALEALPHA:
            sep = locale_support.null_string_locale_max
        else:
            sep = locale_support.null_string_max
    else:
        pre_sep = locale_support.null_string
        if alg & ns.LOCALEALPHA:
            sep = locale_support.null_string_locale
        else:
            sep = locale_support.null_string
    regex = utils.regex_chooser(alg)

    # Build the helpers used to split and normalize input strings.
    input_transform = utils.input_string_transform_factory(alg)
    component_transform = utils.string_component_transform_factory(alg)
    final_transform = utils.final_data_transform_factory(alg, sep, pre_sep)

    # High-level parsing functions for strings, bytes, and numbers.
    string_func = utils.parse_string_factory(
        alg, sep, regex.split, input_transform, component_transform,
        final_transform
    )
    if alg & ns.PATH:
        string_func = utils.parse_path_factory(string_func)
    bytes_func = utils.parse_bytes_factory(alg)
    num_func = utils.parse_number_factory(alg, sep, pre_sep)

    # Return the natsort key with the parsing path pre-chosen.
    return partial(
        utils.natsort_key,
        key=key,
        string_func=string_func,
        bytes_func=bytes_func,
        num_func=num_func,
    )
constant[
Generate a key to sort strings and numbers naturally.
This key is designed for use as the `key` argument to
functions such as the `sorted` builtin.
The user may customize the generated function with the
arguments to `natsort_keygen`, including an optional
`key` function.
Parameters
----------
key : callable, optional
A key used to manipulate the input value before parsing for
numbers. It is **not** applied recursively.
It should accept a single argument and return a single value.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : function
A function that parses input for natural sorting that is
suitable for passing as the `key` argument to functions
such as `sorted`.
See Also
--------
natsorted
natsort_key
Examples
--------
`natsort_keygen` is a convenient way to create a custom key
to sort lists in-place (for example).::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> a.sort(key=natsort_keygen(alg=ns.REAL))
>>> a
[{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3']
]
<ast.Try object at 0x7da1b0bdb520>
if <ast.BoolOp object at 0x7da1b0bda140> begin[:]
<ast.AugAssign object at 0x7da1b0bda2c0>
if binary_operation[name[alg] <ast.BitAnd object at 0x7da2590d6b60> name[ns].NUMAFTER] begin[:]
if binary_operation[name[alg] <ast.BitAnd object at 0x7da2590d6b60> name[ns].LOCALEALPHA] begin[:]
variable[sep] assign[=] name[natsort].compat.locale.null_string_locale_max
variable[pre_sep] assign[=] name[natsort].compat.locale.null_string_max
variable[regex] assign[=] call[name[utils].regex_chooser, parameter[name[alg]]]
variable[input_transform] assign[=] call[name[utils].input_string_transform_factory, parameter[name[alg]]]
variable[component_transform] assign[=] call[name[utils].string_component_transform_factory, parameter[name[alg]]]
variable[final_transform] assign[=] call[name[utils].final_data_transform_factory, parameter[name[alg], name[sep], name[pre_sep]]]
variable[string_func] assign[=] call[name[utils].parse_string_factory, parameter[name[alg], name[sep], name[regex].split, name[input_transform], name[component_transform], name[final_transform]]]
if binary_operation[name[alg] <ast.BitAnd object at 0x7da2590d6b60> name[ns].PATH] begin[:]
variable[string_func] assign[=] call[name[utils].parse_path_factory, parameter[name[string_func]]]
variable[bytes_func] assign[=] call[name[utils].parse_bytes_factory, parameter[name[alg]]]
variable[num_func] assign[=] call[name[utils].parse_number_factory, parameter[name[alg], name[sep], name[pre_sep]]]
return[call[name[partial], parameter[name[utils].natsort_key]]] | keyword[def] identifier[natsort_keygen] ( identifier[key] = keyword[None] , identifier[alg] = identifier[ns] . identifier[DEFAULT] ):
literal[string]
keyword[try] :
identifier[ns] . identifier[DEFAULT] | identifier[alg]
keyword[except] identifier[TypeError] :
identifier[msg] = literal[string]
keyword[raise] identifier[ValueError] ( identifier[msg] + literal[string] . identifier[format] ( identifier[py23_str] ( identifier[alg] )))
keyword[if] identifier[alg] & identifier[ns] . identifier[LOCALEALPHA] keyword[and] identifier[natsort] . identifier[compat] . identifier[locale] . identifier[dumb_sort] ():
identifier[alg] |= identifier[NS_DUMB]
keyword[if] identifier[alg] & identifier[ns] . identifier[NUMAFTER] :
keyword[if] identifier[alg] & identifier[ns] . identifier[LOCALEALPHA] :
identifier[sep] = identifier[natsort] . identifier[compat] . identifier[locale] . identifier[null_string_locale_max]
keyword[else] :
identifier[sep] = identifier[natsort] . identifier[compat] . identifier[locale] . identifier[null_string_max]
identifier[pre_sep] = identifier[natsort] . identifier[compat] . identifier[locale] . identifier[null_string_max]
keyword[else] :
keyword[if] identifier[alg] & identifier[ns] . identifier[LOCALEALPHA] :
identifier[sep] = identifier[natsort] . identifier[compat] . identifier[locale] . identifier[null_string_locale]
keyword[else] :
identifier[sep] = identifier[natsort] . identifier[compat] . identifier[locale] . identifier[null_string]
identifier[pre_sep] = identifier[natsort] . identifier[compat] . identifier[locale] . identifier[null_string]
identifier[regex] = identifier[utils] . identifier[regex_chooser] ( identifier[alg] )
identifier[input_transform] = identifier[utils] . identifier[input_string_transform_factory] ( identifier[alg] )
identifier[component_transform] = identifier[utils] . identifier[string_component_transform_factory] ( identifier[alg] )
identifier[final_transform] = identifier[utils] . identifier[final_data_transform_factory] ( identifier[alg] , identifier[sep] , identifier[pre_sep] )
identifier[string_func] = identifier[utils] . identifier[parse_string_factory] (
identifier[alg] , identifier[sep] , identifier[regex] . identifier[split] , identifier[input_transform] , identifier[component_transform] , identifier[final_transform]
)
keyword[if] identifier[alg] & identifier[ns] . identifier[PATH] :
identifier[string_func] = identifier[utils] . identifier[parse_path_factory] ( identifier[string_func] )
identifier[bytes_func] = identifier[utils] . identifier[parse_bytes_factory] ( identifier[alg] )
identifier[num_func] = identifier[utils] . identifier[parse_number_factory] ( identifier[alg] , identifier[sep] , identifier[pre_sep] )
keyword[return] identifier[partial] (
identifier[utils] . identifier[natsort_key] ,
identifier[key] = identifier[key] ,
identifier[string_func] = identifier[string_func] ,
identifier[bytes_func] = identifier[bytes_func] ,
identifier[num_func] = identifier[num_func] ,
) | def natsort_keygen(key=None, alg=ns.DEFAULT):
"""
Generate a key to sort strings and numbers naturally.
This key is designed for use as the `key` argument to
functions such as the `sorted` builtin.
The user may customize the generated function with the
arguments to `natsort_keygen`, including an optional
`key` function.
Parameters
----------
key : callable, optional
A key used to manipulate the input value before parsing for
numbers. It is **not** applied recursively.
It should accept a single argument and return a single value.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : function
A function that parses input for natural sorting that is
suitable for passing as the `key` argument to functions
such as `sorted`.
See Also
--------
natsorted
natsort_key
Examples
--------
`natsort_keygen` is a convenient way to create a custom key
to sort lists in-place (for example).::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> a.sort(key=natsort_keygen(alg=ns.REAL))
>>> a
[{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3']
"""
try:
ns.DEFAULT | alg # depends on [control=['try'], data=[]]
except TypeError:
msg = "natsort_keygen: 'alg' argument must be from the enum 'ns'"
raise ValueError(msg + ', got {}'.format(py23_str(alg))) # depends on [control=['except'], data=[]]
# Add the NS_DUMB option if the locale library is broken.
if alg & ns.LOCALEALPHA and natsort.compat.locale.dumb_sort():
alg |= NS_DUMB # depends on [control=['if'], data=[]]
# Set some variables that will be passed to the factory functions
if alg & ns.NUMAFTER:
if alg & ns.LOCALEALPHA:
sep = natsort.compat.locale.null_string_locale_max # depends on [control=['if'], data=[]]
else:
sep = natsort.compat.locale.null_string_max
pre_sep = natsort.compat.locale.null_string_max # depends on [control=['if'], data=[]]
else:
if alg & ns.LOCALEALPHA:
sep = natsort.compat.locale.null_string_locale # depends on [control=['if'], data=[]]
else:
sep = natsort.compat.locale.null_string
pre_sep = natsort.compat.locale.null_string
regex = utils.regex_chooser(alg)
# Create the functions that will be used to split strings.
input_transform = utils.input_string_transform_factory(alg)
component_transform = utils.string_component_transform_factory(alg)
final_transform = utils.final_data_transform_factory(alg, sep, pre_sep)
# Create the high-level parsing functions for strings, bytes, and numbers.
string_func = utils.parse_string_factory(alg, sep, regex.split, input_transform, component_transform, final_transform)
if alg & ns.PATH:
string_func = utils.parse_path_factory(string_func) # depends on [control=['if'], data=[]]
bytes_func = utils.parse_bytes_factory(alg)
num_func = utils.parse_number_factory(alg, sep, pre_sep)
# Return the natsort key with the parsing path pre-chosen.
return partial(utils.natsort_key, key=key, string_func=string_func, bytes_func=bytes_func, num_func=num_func) |
def _draw_lines_internal(self, coords, colour, bg):
"""Helper to draw lines connecting a set of nodes that are scaled for the Screen."""
for i, (x, y) in enumerate(coords):
if i == 0:
self._screen.move(x, y)
else:
self._screen.draw(x, y, colour=colour, bg=bg, thin=True) | def function[_draw_lines_internal, parameter[self, coords, colour, bg]]:
constant[Helper to draw lines connecting a set of nodes that are scaled for the Screen.]
for taget[tuple[[<ast.Name object at 0x7da1b1d534f0>, <ast.Tuple object at 0x7da1b1d50af0>]]] in starred[call[name[enumerate], parameter[name[coords]]]] begin[:]
if compare[name[i] equal[==] constant[0]] begin[:]
call[name[self]._screen.move, parameter[name[x], name[y]]] | keyword[def] identifier[_draw_lines_internal] ( identifier[self] , identifier[coords] , identifier[colour] , identifier[bg] ):
literal[string]
keyword[for] identifier[i] ,( identifier[x] , identifier[y] ) keyword[in] identifier[enumerate] ( identifier[coords] ):
keyword[if] identifier[i] == literal[int] :
identifier[self] . identifier[_screen] . identifier[move] ( identifier[x] , identifier[y] )
keyword[else] :
identifier[self] . identifier[_screen] . identifier[draw] ( identifier[x] , identifier[y] , identifier[colour] = identifier[colour] , identifier[bg] = identifier[bg] , identifier[thin] = keyword[True] ) | def _draw_lines_internal(self, coords, colour, bg):
"""Helper to draw lines connecting a set of nodes that are scaled for the Screen."""
for (i, (x, y)) in enumerate(coords):
if i == 0:
self._screen.move(x, y) # depends on [control=['if'], data=[]]
else:
self._screen.draw(x, y, colour=colour, bg=bg, thin=True) # depends on [control=['for'], data=[]] |
def pre_save(self, model_instance, add):
    """
    Converts the value being saved based on `populate_from` and
    `time_override`

    :param model_instance: model instance this field is attached to; the
        converted value is also written back onto it.
    :param add: True when the instance is being inserted for the first
        time (passed through to the parent field's ``pre_save``).
    :return: the converted datetime value that will be persisted.
    """
    # pylint: disable=newstyle
    # Retrieve the currently entered datetime
    value = super(
        LinkedTZDateTimeField,
        self
    ).pre_save(
        model_instance=model_instance,
        add=add
    )
    # Convert the value to the correct time/timezone
    value = self._convert_value(
        value=value,
        model_instance=model_instance,
        add=add
    )
    # Keep the instance attribute in sync with the value being stored.
    setattr(model_instance, self.attname, value)
    return value
constant[
Converts the value being saved based on `populate_from` and
`time_override`
]
variable[value] assign[=] call[call[name[super], parameter[name[LinkedTZDateTimeField], name[self]]].pre_save, parameter[]]
variable[value] assign[=] call[name[self]._convert_value, parameter[]]
call[name[setattr], parameter[name[model_instance], name[self].attname, name[value]]]
return[name[value]] | keyword[def] identifier[pre_save] ( identifier[self] , identifier[model_instance] , identifier[add] ):
literal[string]
identifier[value] = identifier[super] (
identifier[LinkedTZDateTimeField] ,
identifier[self]
). identifier[pre_save] (
identifier[model_instance] = identifier[model_instance] ,
identifier[add] = identifier[add]
)
identifier[value] = identifier[self] . identifier[_convert_value] (
identifier[value] = identifier[value] ,
identifier[model_instance] = identifier[model_instance] ,
identifier[add] = identifier[add]
)
identifier[setattr] ( identifier[model_instance] , identifier[self] . identifier[attname] , identifier[value] )
keyword[return] identifier[value] | def pre_save(self, model_instance, add):
"""
Converts the value being saved based on `populate_from` and
`time_override`
"""
# pylint: disable=newstyle
# Retrieve the currently entered datetime
value = super(LinkedTZDateTimeField, self).pre_save(model_instance=model_instance, add=add)
# Convert the value to the correct time/timezone
value = self._convert_value(value=value, model_instance=model_instance, add=add)
setattr(model_instance, self.attname, value)
return value |
def tree(alias, title='', items=None, **kwargs):
    """Dynamically creates and returns a sitetree.

    :param str|unicode alias:
    :param str|unicode title:
    :param iterable items: dynamic sitetree items objects created by `item` function.
    :param kwargs: Additional arguments to pass to tree item initializer.
    :rtype: TreeBase
    """
    sitetree = get_tree_model()(alias=alias, title=title, **kwargs)
    sitetree.id = generate_id_for(sitetree)
    sitetree.is_dynamic = True

    if items is not None:
        sitetree.dynamic_items = []

        def register(children):
            # Depth-first, pre-order walk: each item is attached to the
            # tree and recorded before its own children are visited.
            for child in children:
                child.tree = sitetree
                sitetree.dynamic_items.append(child)
                if hasattr(child, 'dynamic_children'):
                    register(child.dynamic_children)

        register(items)

    return sitetree
constant[Dynamically creates and returns a sitetree.
:param str|unicode alias:
:param str|unicode title:
:param iterable items: dynamic sitetree items objects created by `item` function.
:param kwargs: Additional arguments to pass to tree item initializer.
:rtype: TreeBase
]
variable[tree_obj] assign[=] call[call[name[get_tree_model], parameter[]], parameter[]]
name[tree_obj].id assign[=] call[name[generate_id_for], parameter[name[tree_obj]]]
name[tree_obj].is_dynamic assign[=] constant[True]
if compare[name[items] is_not constant[None]] begin[:]
name[tree_obj].dynamic_items assign[=] list[[]]
def function[traverse, parameter[items]]:
for taget[name[item]] in starred[name[items]] begin[:]
name[item].tree assign[=] name[tree_obj]
call[name[tree_obj].dynamic_items.append, parameter[name[item]]]
if call[name[hasattr], parameter[name[item], constant[dynamic_children]]] begin[:]
call[name[traverse], parameter[name[item].dynamic_children]]
call[name[traverse], parameter[name[items]]]
return[name[tree_obj]] | keyword[def] identifier[tree] ( identifier[alias] , identifier[title] = literal[string] , identifier[items] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[tree_obj] = identifier[get_tree_model] ()( identifier[alias] = identifier[alias] , identifier[title] = identifier[title] ,** identifier[kwargs] )
identifier[tree_obj] . identifier[id] = identifier[generate_id_for] ( identifier[tree_obj] )
identifier[tree_obj] . identifier[is_dynamic] = keyword[True]
keyword[if] identifier[items] keyword[is] keyword[not] keyword[None] :
identifier[tree_obj] . identifier[dynamic_items] =[]
keyword[def] identifier[traverse] ( identifier[items] ):
keyword[for] identifier[item] keyword[in] identifier[items] :
identifier[item] . identifier[tree] = identifier[tree_obj]
identifier[tree_obj] . identifier[dynamic_items] . identifier[append] ( identifier[item] )
keyword[if] identifier[hasattr] ( identifier[item] , literal[string] ):
identifier[traverse] ( identifier[item] . identifier[dynamic_children] )
identifier[traverse] ( identifier[items] )
keyword[return] identifier[tree_obj] | def tree(alias, title='', items=None, **kwargs):
"""Dynamically creates and returns a sitetree.
:param str|unicode alias:
:param str|unicode title:
:param iterable items: dynamic sitetree items objects created by `item` function.
:param kwargs: Additional arguments to pass to tree item initializer.
:rtype: TreeBase
"""
tree_obj = get_tree_model()(alias=alias, title=title, **kwargs)
tree_obj.id = generate_id_for(tree_obj)
tree_obj.is_dynamic = True
if items is not None:
tree_obj.dynamic_items = []
def traverse(items):
for item in items:
item.tree = tree_obj
tree_obj.dynamic_items.append(item)
if hasattr(item, 'dynamic_children'):
traverse(item.dynamic_children) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
traverse(items) # depends on [control=['if'], data=['items']]
return tree_obj |
def adjust_version_as_of(version, relations_as_of):
    """
    Adjusts the passed version's as_of time to an appropriate value, and
    returns it.

    ``relations_as_of`` is used to fix the point in time for the version;
    this affects which related objects are returned when querying for
    object relations.

    Valid ``relations_as_of`` values and how this affects the returned
    version's as_of attribute:

    - 'start': version start date
    - 'end': version end date - 1 microsecond (no effect if version is
      current version)
    - datetime object: given datetime (raises ValueError if given datetime
      not valid for version)
    - None: unset (related object queries will not be restricted to a
      point in time)

    :param Versionable version: object whose as_of will be adjusted as
        requested.
    :param mixed relations_as_of: valid values are the strings 'start' or
        'end', or a datetime object.
    :raises ValueError: if a datetime is given that falls outside the
        version's validity period.
    :raises TypeError: if ``relations_as_of`` is none of the valid values.
    :return: Versionable
    """
    if not version:
        return version
    if relations_as_of == 'end':
        if version.is_current:
            # Ensure that version._querytime is active, in case it wasn't
            # before.
            version.as_of = None
        else:
            # Last instant at which this closed version was still valid.
            version.as_of = version.version_end_date - datetime.timedelta(
                microseconds=1)
    elif relations_as_of == 'start':
        version.as_of = version.version_start_date
    elif isinstance(relations_as_of, datetime.datetime):
        as_of = relations_as_of.astimezone(utc)
        if as_of < version.version_start_date:
            raise ValueError(
                "Provided as_of '{}' is earlier than version's start "
                "time '{}'".format(
                    as_of.isoformat(),
                    version.version_start_date.isoformat()
                )
            )
        if version.version_end_date is not None \
                and as_of >= version.version_end_date:
            # Bug fix: this message previously said "start time" while
            # actually reporting the version's end time.
            raise ValueError(
                "Provided as_of '{}' is later than version's end "
                "time '{}'".format(
                    as_of.isoformat(),
                    version.version_end_date.isoformat()
                )
            )
        version.as_of = as_of
    elif relations_as_of is None:
        # Deactivate the time restriction entirely.
        version._querytime = QueryTime(time=None, active=False)
    else:
        raise TypeError(
            "as_of parameter must be 'start', 'end', None, or datetime "
            "object")
    return version
constant[
Adjusts the passed version's as_of time to an appropriate value, and
returns it.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations.
Valid ``relations_as_of`` values and how this affects the returned
version's as_of attribute:
- 'start': version start date
- 'end': version end date - 1 microsecond (no effect if version is
current version)
- datetime object: given datetime (raises ValueError if given datetime
not valid for version)
- None: unset (related object queries will not be restricted to a
point in time)
:param Versionable object: object whose as_of will be adjusted as
requested.
:param mixed relations_as_of: valid values are the strings 'start' or
'end', or a datetime object.
:return: Versionable
]
if <ast.UnaryOp object at 0x7da1b1042350> begin[:]
return[name[version]]
if compare[name[relations_as_of] equal[==] constant[end]] begin[:]
if name[version].is_current begin[:]
name[version].as_of assign[=] constant[None]
return[name[version]] | keyword[def] identifier[adjust_version_as_of] ( identifier[version] , identifier[relations_as_of] ):
literal[string]
keyword[if] keyword[not] identifier[version] :
keyword[return] identifier[version]
keyword[if] identifier[relations_as_of] == literal[string] :
keyword[if] identifier[version] . identifier[is_current] :
identifier[version] . identifier[as_of] = keyword[None]
keyword[else] :
identifier[version] . identifier[as_of] = identifier[version] . identifier[version_end_date] - identifier[datetime] . identifier[timedelta] (
identifier[microseconds] = literal[int] )
keyword[elif] identifier[relations_as_of] == literal[string] :
identifier[version] . identifier[as_of] = identifier[version] . identifier[version_start_date]
keyword[elif] identifier[isinstance] ( identifier[relations_as_of] , identifier[datetime] . identifier[datetime] ):
identifier[as_of] = identifier[relations_as_of] . identifier[astimezone] ( identifier[utc] )
keyword[if] keyword[not] identifier[as_of] >= identifier[version] . identifier[version_start_date] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] (
identifier[as_of] . identifier[isoformat] (),
identifier[version] . identifier[version_start_date] . identifier[isoformat] ()
)
)
keyword[if] identifier[version] . identifier[version_end_date] keyword[is] keyword[not] keyword[None] keyword[and] identifier[as_of] >= identifier[version] . identifier[version_end_date] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] (
identifier[as_of] . identifier[isoformat] (),
identifier[version] . identifier[version_end_date] . identifier[isoformat] ()
)
)
identifier[version] . identifier[as_of] = identifier[as_of]
keyword[elif] identifier[relations_as_of] keyword[is] keyword[None] :
identifier[version] . identifier[_querytime] = identifier[QueryTime] ( identifier[time] = keyword[None] , identifier[active] = keyword[False] )
keyword[else] :
keyword[raise] identifier[TypeError] (
literal[string]
literal[string] )
keyword[return] identifier[version] | def adjust_version_as_of(version, relations_as_of):
"""
Adjusts the passed version's as_of time to an appropriate value, and
returns it.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations.
Valid ``relations_as_of`` values and how this affects the returned
version's as_of attribute:
- 'start': version start date
- 'end': version end date - 1 microsecond (no effect if version is
current version)
- datetime object: given datetime (raises ValueError if given datetime
not valid for version)
- None: unset (related object queries will not be restricted to a
point in time)
:param Versionable object: object whose as_of will be adjusted as
requested.
:param mixed relations_as_of: valid values are the strings 'start' or
'end', or a datetime object.
:return: Versionable
"""
if not version:
return version # depends on [control=['if'], data=[]]
if relations_as_of == 'end':
if version.is_current:
# Ensure that version._querytime is active, in case it wasn't
# before.
version.as_of = None # depends on [control=['if'], data=[]]
else:
version.as_of = version.version_end_date - datetime.timedelta(microseconds=1) # depends on [control=['if'], data=[]]
elif relations_as_of == 'start':
version.as_of = version.version_start_date # depends on [control=['if'], data=[]]
elif isinstance(relations_as_of, datetime.datetime):
as_of = relations_as_of.astimezone(utc)
if not as_of >= version.version_start_date:
raise ValueError("Provided as_of '{}' is earlier than version's start time '{}'".format(as_of.isoformat(), version.version_start_date.isoformat())) # depends on [control=['if'], data=[]]
if version.version_end_date is not None and as_of >= version.version_end_date:
raise ValueError("Provided as_of '{}' is later than version's start time '{}'".format(as_of.isoformat(), version.version_end_date.isoformat())) # depends on [control=['if'], data=[]]
version.as_of = as_of # depends on [control=['if'], data=[]]
elif relations_as_of is None:
version._querytime = QueryTime(time=None, active=False) # depends on [control=['if'], data=[]]
else:
raise TypeError("as_of parameter must be 'start', 'end', None, or datetime object")
return version |
def _shallow_copy_with_infer(self, values, **kwargs):
    """
    Create a new Index inferring the class with passed value, don't copy
    the data, use the same object attributes with passed in attributes
    taking precedence.
    *this is an internal non-public method*
    Parameters
    ----------
    values : the values to create the new Index, optional
    kwargs : updates the default attributes for this Index
    """
    # Start from this index's own attributes, let caller overrides win,
    # and never copy the underlying data.
    attrs = self._get_attributes_dict()
    attrs.update(kwargs)
    attrs['copy'] = False
    # An empty `values` carries no dtype information of its own, so keep
    # ours unless the caller supplied a dtype explicitly.
    if 'dtype' not in kwargs and not len(values):
        attrs['dtype'] = self.dtype
    if self._infer_as_myclass:
        try:
            return self._constructor(values, **attrs)
        except (TypeError, ValueError):
            # Values don't fit this class; fall through to a plain Index.
            pass
    return Index(values, **attrs)
constant[
Create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
]
variable[attributes] assign[=] call[name[self]._get_attributes_dict, parameter[]]
call[name[attributes].update, parameter[name[kwargs]]]
call[name[attributes]][constant[copy]] assign[=] constant[False]
if <ast.BoolOp object at 0x7da18fe90c40> begin[:]
call[name[attributes]][constant[dtype]] assign[=] name[self].dtype
if name[self]._infer_as_myclass begin[:]
<ast.Try object at 0x7da1b2346b30>
return[call[name[Index], parameter[name[values]]]] | keyword[def] identifier[_shallow_copy_with_infer] ( identifier[self] , identifier[values] ,** identifier[kwargs] ):
literal[string]
identifier[attributes] = identifier[self] . identifier[_get_attributes_dict] ()
identifier[attributes] . identifier[update] ( identifier[kwargs] )
identifier[attributes] [ literal[string] ]= keyword[False]
keyword[if] keyword[not] identifier[len] ( identifier[values] ) keyword[and] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[attributes] [ literal[string] ]= identifier[self] . identifier[dtype]
keyword[if] identifier[self] . identifier[_infer_as_myclass] :
keyword[try] :
keyword[return] identifier[self] . identifier[_constructor] ( identifier[values] ,** identifier[attributes] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[pass]
keyword[return] identifier[Index] ( identifier[values] ,** identifier[attributes] ) | def _shallow_copy_with_infer(self, values, **kwargs):
"""
Create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype # depends on [control=['if'], data=[]]
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return Index(values, **attributes) |
def remove(mod, persist=False, comment=True):
    '''
    Remove the specified kernel module
    mod
        Name of module to remove
    persist
        Also remove module from /boot/loader.conf
    comment
        If persist is set don't remove line from /boot/loader.conf but only
        comment it
    CLI Example:
    .. code-block:: bash
        salt '*' kmod.remove vmm
    '''
    loaded_before = lsmod()
    result = __salt__['cmd.run_all']('kldunload {0}'.format(mod),
                                     python_shell=False)
    if result['retcode'] != 0:
        # kldunload failed; surface its stderr to the caller.
        return 'Error removing module {0}: {1}'.format(mod, result['stderr'])
    loaded_after = lsmod()
    # Diff before/after to report exactly what was unloaded.
    removed = _rm_mods(loaded_before, loaded_after)
    persisted = _remove_persistent_module(mod, comment) if persist else set()
    return sorted(list(removed | persisted))
constant[
Remove the specified kernel module
mod
Name of module to remove
persist
Also remove module from /boot/loader.conf
comment
If persist is set don't remove line from /boot/loader.conf but only
comment it
CLI Example:
.. code-block:: bash
salt '*' kmod.remove vmm
]
variable[pre_mods] assign[=] call[name[lsmod], parameter[]]
variable[res] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[call[constant[kldunload {0}].format, parameter[name[mod]]]]]
if compare[call[name[res]][constant[retcode]] equal[==] constant[0]] begin[:]
variable[post_mods] assign[=] call[name[lsmod], parameter[]]
variable[mods] assign[=] call[name[_rm_mods], parameter[name[pre_mods], name[post_mods]]]
variable[persist_mods] assign[=] call[name[set], parameter[]]
if name[persist] begin[:]
variable[persist_mods] assign[=] call[name[_remove_persistent_module], parameter[name[mod], name[comment]]]
return[call[name[sorted], parameter[call[name[list], parameter[binary_operation[name[mods] <ast.BitOr object at 0x7da2590d6aa0> name[persist_mods]]]]]]] | keyword[def] identifier[remove] ( identifier[mod] , identifier[persist] = keyword[False] , identifier[comment] = keyword[True] ):
literal[string]
identifier[pre_mods] = identifier[lsmod] ()
identifier[res] = identifier[__salt__] [ literal[string] ]( literal[string] . identifier[format] ( identifier[mod] ),
identifier[python_shell] = keyword[False] )
keyword[if] identifier[res] [ literal[string] ]== literal[int] :
identifier[post_mods] = identifier[lsmod] ()
identifier[mods] = identifier[_rm_mods] ( identifier[pre_mods] , identifier[post_mods] )
identifier[persist_mods] = identifier[set] ()
keyword[if] identifier[persist] :
identifier[persist_mods] = identifier[_remove_persistent_module] ( identifier[mod] , identifier[comment] )
keyword[return] identifier[sorted] ( identifier[list] ( identifier[mods] | identifier[persist_mods] ))
keyword[else] :
keyword[return] literal[string] . identifier[format] ( identifier[mod] , identifier[res] [ literal[string] ]) | def remove(mod, persist=False, comment=True):
"""
Remove the specified kernel module
mod
Name of module to remove
persist
Also remove module from /boot/loader.conf
comment
If persist is set don't remove line from /boot/loader.conf but only
comment it
CLI Example:
.. code-block:: bash
salt '*' kmod.remove vmm
"""
pre_mods = lsmod()
res = __salt__['cmd.run_all']('kldunload {0}'.format(mod), python_shell=False)
if res['retcode'] == 0:
post_mods = lsmod()
mods = _rm_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _remove_persistent_module(mod, comment) # depends on [control=['if'], data=[]]
return sorted(list(mods | persist_mods)) # depends on [control=['if'], data=[]]
else:
return 'Error removing module {0}: {1}'.format(mod, res['stderr']) |
def candidates(word):
    "Generate possible spelling corrections for word."
    # Try progressively more distant candidate sets lazily, so the
    # expensive edit sets are only computed when the cheaper ones are
    # empty; fall back to the word itself.
    sources = (lambda: known([word]),
               lambda: known(edits1(word)),
               lambda: known(edits2(word)))
    for source in sources:
        found = source()
        if found:
            return found
    return [word]
constant[Generate possible spelling corrections for word.]
return[<ast.BoolOp object at 0x7da20c6aba30>] | keyword[def] identifier[candidates] ( identifier[word] ):
literal[string]
keyword[return] ( identifier[known] ([ identifier[word] ]) keyword[or] identifier[known] ( identifier[edits1] ( identifier[word] )) keyword[or] identifier[known] ( identifier[edits2] ( identifier[word] )) keyword[or] [ identifier[word] ]) | def candidates(word):
"""Generate possible spelling corrections for word."""
return known([word]) or known(edits1(word)) or known(edits2(word)) or [word] |
def tolist(obj, flat=True, split=True):
    '''
    Returns `obj` as a list: if it is falsy, returns an empty list; if
    it is a string and `split` is truthy, then it is split into
    substrings using Unix shell semantics; if it is sequence-like, a
    list is returned optionally flattened if `flat` is truthy (see
    :func:`flatten`).
    '''
    # todo: it would be "pretty awesome" if this could auto-detect
    # comma-separation rather than space-separation
    if not obj:
        return []
    if isseq(obj):
        if flat:
            return flatten(obj)
        return list(obj)
    if split and isstr(obj):
        # Unix shell word-splitting semantics.
        return shlex.split(obj)
    # Any other scalar becomes a one-element list.
    return [obj]
constant[
Returns `obj` as a list: if it is falsy, returns an empty list; if
it is a string and `split` is truthy, then it is split into
substrings using Unix shell semantics; if it is sequence-like, a
list is returned optionally flattened if `flat` is truthy (see
:func:`flatten`).
]
if <ast.UnaryOp object at 0x7da1b0aa3610> begin[:]
return[list[[]]]
if call[name[isseq], parameter[name[obj]]] begin[:]
return[<ast.IfExp object at 0x7da1b0aa35b0>]
if <ast.BoolOp object at 0x7da1b0aa3b50> begin[:]
return[call[name[shlex].split, parameter[name[obj]]]]
return[list[[<ast.Name object at 0x7da1b0aa2ec0>]]] | keyword[def] identifier[tolist] ( identifier[obj] , identifier[flat] = keyword[True] , identifier[split] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[obj] :
keyword[return] []
keyword[if] identifier[isseq] ( identifier[obj] ):
keyword[return] identifier[flatten] ( identifier[obj] ) keyword[if] identifier[flat] keyword[else] identifier[list] ( identifier[obj] )
keyword[if] identifier[isstr] ( identifier[obj] ) keyword[and] identifier[split] :
keyword[return] identifier[shlex] . identifier[split] ( identifier[obj] )
keyword[return] [ identifier[obj] ] | def tolist(obj, flat=True, split=True):
"""
Returns `obj` as a list: if it is falsy, returns an empty list; if
it is a string and `split` is truthy, then it is split into
substrings using Unix shell semantics; if it is sequence-like, a
list is returned optionally flattened if `flat` is truthy (see
:func:`flatten`).
"""
# todo: it would be "pretty awesome" if this could auto-detect
# comma-separation rather than space-separation
if not obj:
return [] # depends on [control=['if'], data=[]]
if isseq(obj):
return flatten(obj) if flat else list(obj) # depends on [control=['if'], data=[]]
if isstr(obj) and split:
return shlex.split(obj) # depends on [control=['if'], data=[]]
return [obj] |
def call(self, obj, method, *args, **selectors):
    """
    This keyword can use object method from original python uiautomator
    See more details from https://github.com/xiaocong/uiautomator
    Example:
    | ${accessibility_text} | Get Object | text=Accessibility | # Get the UI object |
    | Call | ${accessibility_text} | click | # Call the method of the UI object 'click' |

    Positional ``args`` and keyword ``selectors`` are forwarded to the
    resolved method.
    """
    func = getattr(obj, method)
    # Bug fix: ``*args`` was accepted by the signature but never passed
    # to the resolved method, so positional arguments were silently
    # dropped. Forward them along with the keyword selectors.
    return func(*args, **selectors)
constant[
This keyword can use object method from original python uiautomator
See more details from https://github.com/xiaocong/uiautomator
Example:
| ${accessibility_text} | Get Object | text=Accessibility | # Get the UI object |
| Call | ${accessibility_text} | click | # Call the method of the UI object 'click' |
]
variable[func] assign[=] call[name[getattr], parameter[name[obj], name[method]]]
return[call[name[func], parameter[]]] | keyword[def] identifier[call] ( identifier[self] , identifier[obj] , identifier[method] ,* identifier[args] ,** identifier[selectors] ):
literal[string]
identifier[func] = identifier[getattr] ( identifier[obj] , identifier[method] )
keyword[return] identifier[func] (** identifier[selectors] ) | def call(self, obj, method, *args, **selectors):
"""
This keyword can use object method from original python uiautomator
See more details from https://github.com/xiaocong/uiautomator
Example:
| ${accessibility_text} | Get Object | text=Accessibility | # Get the UI object |
| Call | ${accessibility_text} | click | # Call the method of the UI object 'click' |
"""
func = getattr(obj, method)
return func(**selectors) |
def get_file_mode_for_reading(context):
    """Get file mode for reading from tar['format'].
    This should return r:*, r:gz, r:bz2 or r:xz. If user specified something
    wacky in tar.Format, that's their business.
    In theory r:* will auto-deduce the correct format.
    """
    fmt = context['tar'].get('format', None)
    # A present format (including the empty string) is formatted into the
    # mode; otherwise fall back to auto-detection with 'r:*'.
    if fmt or fmt == '':
        return f"r:{context.get_formatted_string(fmt)}"
    return 'r:*'
constant[Get file mode for reading from tar['format'].
This should return r:*, r:gz, r:bz2 or r:xz. If user specified something
wacky in tar.Format, that's their business.
In theory r:* will auto-deduce the correct format.
]
variable[format] assign[=] call[call[name[context]][constant[tar]].get, parameter[constant[format], constant[None]]]
if <ast.BoolOp object at 0x7da18eb56fb0> begin[:]
variable[mode] assign[=] <ast.JoinedStr object at 0x7da18eb558d0>
return[name[mode]] | keyword[def] identifier[get_file_mode_for_reading] ( identifier[context] ):
literal[string]
identifier[format] = identifier[context] [ literal[string] ]. identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[format] keyword[or] identifier[format] == literal[string] :
identifier[mode] = literal[string]
keyword[else] :
identifier[mode] = literal[string]
keyword[return] identifier[mode] | def get_file_mode_for_reading(context):
"""Get file mode for reading from tar['format'].
This should return r:*, r:gz, r:bz2 or r:xz. If user specified something
wacky in tar.Format, that's their business.
In theory r:* will auto-deduce the correct format.
"""
format = context['tar'].get('format', None)
if format or format == '':
mode = f'r:{context.get_formatted_string(format)}' # depends on [control=['if'], data=[]]
else:
mode = 'r:*'
return mode |
def parse(self, input):
    """Passes input to each QueryLineHandler in use"""
    # Ask each handler in turn; a handler that raises is treated the same
    # as one that returns None, and the first non-None result wins.
    for handler in self._line_handlers:
        try:
            result = handler.handle(input)
        except Exception:
            result = None
        if result is not None:
            return result
    return None
constant[Passes input to each QueryLineHandler in use]
variable[query] assign[=] constant[None]
for taget[name[handler]] in starred[name[self]._line_handlers] begin[:]
<ast.Try object at 0x7da1b0479060>
return[constant[None]] | keyword[def] identifier[parse] ( identifier[self] , identifier[input] ):
literal[string]
identifier[query] = keyword[None]
keyword[for] identifier[handler] keyword[in] identifier[self] . identifier[_line_handlers] :
keyword[try] :
identifier[query] = identifier[handler] . identifier[handle] ( identifier[input] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[query] = keyword[None]
keyword[finally] :
keyword[if] identifier[query] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[query]
keyword[return] keyword[None] | def parse(self, input):
"""Passes input to each QueryLineHandler in use"""
query = None
for handler in self._line_handlers:
try:
query = handler.handle(input) # depends on [control=['try'], data=[]]
except Exception as e:
query = None # depends on [control=['except'], data=[]]
finally:
if query is not None:
return query # depends on [control=['if'], data=['query']] # depends on [control=['for'], data=['handler']]
return None |
def _insertSegment(self, index=None, type=None, points=None,
smooth=False, **kwargs):
"""
Subclasses may override this method.
"""
onCurve = points[-1]
offCurve = points[:-1]
segments = self.segments
ptCount = sum([len(segments[s].points) for s in range(index)]) + 1
self.insertPoint(ptCount, onCurve, type=type, smooth=smooth)
for offCurvePoint in reversed(offCurve):
self.insertPoint(ptCount, offCurvePoint, type="offcurve") | def function[_insertSegment, parameter[self, index, type, points, smooth]]:
constant[
Subclasses may override this method.
]
variable[onCurve] assign[=] call[name[points]][<ast.UnaryOp object at 0x7da20c76f6a0>]
variable[offCurve] assign[=] call[name[points]][<ast.Slice object at 0x7da20c76f730>]
variable[segments] assign[=] name[self].segments
variable[ptCount] assign[=] binary_operation[call[name[sum], parameter[<ast.ListComp object at 0x7da20c9925c0>]] + constant[1]]
call[name[self].insertPoint, parameter[name[ptCount], name[onCurve]]]
for taget[name[offCurvePoint]] in starred[call[name[reversed], parameter[name[offCurve]]]] begin[:]
call[name[self].insertPoint, parameter[name[ptCount], name[offCurvePoint]]] | keyword[def] identifier[_insertSegment] ( identifier[self] , identifier[index] = keyword[None] , identifier[type] = keyword[None] , identifier[points] = keyword[None] ,
identifier[smooth] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[onCurve] = identifier[points] [- literal[int] ]
identifier[offCurve] = identifier[points] [:- literal[int] ]
identifier[segments] = identifier[self] . identifier[segments]
identifier[ptCount] = identifier[sum] ([ identifier[len] ( identifier[segments] [ identifier[s] ]. identifier[points] ) keyword[for] identifier[s] keyword[in] identifier[range] ( identifier[index] )])+ literal[int]
identifier[self] . identifier[insertPoint] ( identifier[ptCount] , identifier[onCurve] , identifier[type] = identifier[type] , identifier[smooth] = identifier[smooth] )
keyword[for] identifier[offCurvePoint] keyword[in] identifier[reversed] ( identifier[offCurve] ):
identifier[self] . identifier[insertPoint] ( identifier[ptCount] , identifier[offCurvePoint] , identifier[type] = literal[string] ) | def _insertSegment(self, index=None, type=None, points=None, smooth=False, **kwargs):
"""
Subclasses may override this method.
"""
onCurve = points[-1]
offCurve = points[:-1]
segments = self.segments
ptCount = sum([len(segments[s].points) for s in range(index)]) + 1
self.insertPoint(ptCount, onCurve, type=type, smooth=smooth)
for offCurvePoint in reversed(offCurve):
self.insertPoint(ptCount, offCurvePoint, type='offcurve') # depends on [control=['for'], data=['offCurvePoint']] |
def create_label_map(self, inplace=True):
    """
    Creates mapping df based on ann_label_table and self.custom_labels.
    Table composed of entire WFDB standard annotation table, overwritten/appended
    with custom_labels if any. Sets __label_map__ attribute, or returns value.
    """
    # Start from the full standard annotation table, then let custom
    # labels override/extend it row by row.
    mapping = ann_label_table.copy()
    if self.custom_labels is not None:
        # NOTE: standardize_custom_labels() may rewrite self.custom_labels,
        # so re-read the attribute inside the loop rather than caching it.
        self.standardize_custom_labels()
        for row in self.custom_labels.index:
            mapping.loc[row] = self.custom_labels.loc[row]
    if not inplace:
        return mapping
    self.__label_map__ = mapping
constant[
Creates mapping df based on ann_label_table and self.custom_labels.
Table composed of entire WFDB standard annotation table, overwritten/appended
with custom_labels if any. Sets __label_map__ attribute, or returns value.
]
variable[label_map] assign[=] call[name[ann_label_table].copy, parameter[]]
if compare[name[self].custom_labels is_not constant[None]] begin[:]
call[name[self].standardize_custom_labels, parameter[]]
for taget[name[i]] in starred[name[self].custom_labels.index] begin[:]
call[name[label_map].loc][name[i]] assign[=] call[name[self].custom_labels.loc][name[i]]
if name[inplace] begin[:]
name[self].__label_map__ assign[=] name[label_map] | keyword[def] identifier[create_label_map] ( identifier[self] , identifier[inplace] = keyword[True] ):
literal[string]
identifier[label_map] = identifier[ann_label_table] . identifier[copy] ()
keyword[if] identifier[self] . identifier[custom_labels] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[standardize_custom_labels] ()
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[custom_labels] . identifier[index] :
identifier[label_map] . identifier[loc] [ identifier[i] ]= identifier[self] . identifier[custom_labels] . identifier[loc] [ identifier[i] ]
keyword[if] identifier[inplace] :
identifier[self] . identifier[__label_map__] = identifier[label_map]
keyword[else] :
keyword[return] identifier[label_map] | def create_label_map(self, inplace=True):
"""
Creates mapping df based on ann_label_table and self.custom_labels.
Table composed of entire WFDB standard annotation table, overwritten/appended
with custom_labels if any. Sets __label_map__ attribute, or returns value.
"""
label_map = ann_label_table.copy()
if self.custom_labels is not None:
self.standardize_custom_labels()
for i in self.custom_labels.index:
label_map.loc[i] = self.custom_labels.loc[i] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
if inplace:
self.__label_map__ = label_map # depends on [control=['if'], data=[]]
else:
return label_map |
def update_order(self, order_id, order_deets):
    """Updates an existing order transaction."""
    endpoint = "transactions/orders/" + str(order_id)
    response = self._put(endpoint, order_deets)
    return self.responder(response)
constant[Updates an existing order transaction.]
variable[request] assign[=] call[name[self]._put, parameter[binary_operation[constant[transactions/orders/] + call[name[str], parameter[name[order_id]]]], name[order_deets]]]
return[call[name[self].responder, parameter[name[request]]]] | keyword[def] identifier[update_order] ( identifier[self] , identifier[order_id] , identifier[order_deets] ):
literal[string]
identifier[request] = identifier[self] . identifier[_put] ( literal[string] + identifier[str] ( identifier[order_id] ), identifier[order_deets] )
keyword[return] identifier[self] . identifier[responder] ( identifier[request] ) | def update_order(self, order_id, order_deets):
"""Updates an existing order transaction."""
request = self._put('transactions/orders/' + str(order_id), order_deets)
return self.responder(request) |
def get_all_user_policies(user_name, marker=None, max_items=None, region=None, key=None, keyid=None, profile=None):
    '''
    Get all user policies.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt myminion boto_iam.get_all_user_policies myuser
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        response = conn.get_all_user_policies(user_name, marker, max_items)
        if not response:
            return False
        result = response.list_user_policies_response.list_user_policies_result
        return result.policy_names
    except boto.exception.BotoServerError as exc:
        # Log the failure and signal it with False, matching the module's
        # error convention.
        log.debug(exc)
        log.error('Failed to get policies for user %s.', user_name)
        return False
constant[
Get all user policies.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_all_user_policies myuser
]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
<ast.Try object at 0x7da1b21a1a50> | keyword[def] identifier[get_all_user_policies] ( identifier[user_name] , identifier[marker] = keyword[None] , identifier[max_items] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[try] :
identifier[info] = identifier[conn] . identifier[get_all_user_policies] ( identifier[user_name] , identifier[marker] , identifier[max_items] )
keyword[if] keyword[not] identifier[info] :
keyword[return] keyword[False]
identifier[_list] = identifier[info] . identifier[list_user_policies_response] . identifier[list_user_policies_result]
keyword[return] identifier[_list] . identifier[policy_names]
keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[e] :
identifier[log] . identifier[debug] ( identifier[e] )
identifier[log] . identifier[error] ( literal[string] , identifier[user_name] )
keyword[return] keyword[False] | def get_all_user_policies(user_name, marker=None, max_items=None, region=None, key=None, keyid=None, profile=None):
"""
Get all user policies.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_all_user_policies myuser
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_all_user_policies(user_name, marker, max_items)
if not info:
return False # depends on [control=['if'], data=[]]
_list = info.list_user_policies_response.list_user_policies_result
return _list.policy_names # depends on [control=['try'], data=[]]
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to get policies for user %s.', user_name)
return False # depends on [control=['except'], data=['e']] |
def sort_queryset(queryset, request, context=None):
    """ Returns a sorted queryset
    The context argument is only used in the template tag
    """
    sort_by = request.GET.get('sort_by')
    if not sort_by:
        return queryset
    if sort_by in [field.name for field in queryset.model._meta.fields]:
        # sort_by names a model field directly.
        return queryset.order_by(sort_by)
    if sort_by in request.session:
        # The session stores the real ordering expression under this key.
        # The former ``try: ... except: raise`` wrapper was a no-op that
        # only embedded a bare-except anti-pattern; it has been removed —
        # an invalid expression still propagates unchanged.
        return queryset.order_by(request.session[sort_by])
    if context is not None:
        # sort_by is a 1-based column index into the changelist's
        # list_display; a leading '-' means descending.
        if sort_by[0] != '-':
            sort_by = context['cl'].list_display[int(sort_by) - 1]
        else:
            sort_by = '-' + context['cl'].list_display[abs(int(sort_by)) - 1]
        return queryset.order_by(sort_by)
    # Unrecognized sort key with no context: leave the queryset as-is.
    return queryset
constant[ Returns a sorted queryset
The context argument is only used in the template tag
]
variable[sort_by] assign[=] call[name[request].GET.get, parameter[constant[sort_by]]]
if name[sort_by] begin[:]
if compare[name[sort_by] in <ast.ListComp object at 0x7da1b0cfee30>] begin[:]
variable[queryset] assign[=] call[name[queryset].order_by, parameter[name[sort_by]]]
return[name[queryset]] | keyword[def] identifier[sort_queryset] ( identifier[queryset] , identifier[request] , identifier[context] = keyword[None] ):
literal[string]
identifier[sort_by] = identifier[request] . identifier[GET] . identifier[get] ( literal[string] )
keyword[if] identifier[sort_by] :
keyword[if] identifier[sort_by] keyword[in] [ identifier[el] . identifier[name] keyword[for] identifier[el] keyword[in] identifier[queryset] . identifier[model] . identifier[_meta] . identifier[fields] ]:
identifier[queryset] = identifier[queryset] . identifier[order_by] ( identifier[sort_by] )
keyword[else] :
keyword[if] identifier[sort_by] keyword[in] identifier[request] . identifier[session] :
identifier[sort_by] = identifier[request] . identifier[session] [ identifier[sort_by] ]
keyword[try] :
identifier[queryset] = identifier[queryset] . identifier[order_by] ( identifier[sort_by] )
keyword[except] :
keyword[raise]
keyword[elif] identifier[context] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[sort_by] [ literal[int] ]!= literal[string] :
identifier[sort_by] = identifier[context] [ literal[string] ]. identifier[list_display] [ identifier[int] ( identifier[sort_by] )- literal[int] ]
keyword[else] :
identifier[sort_by] = literal[string] + identifier[context] [ literal[string] ]. identifier[list_display] [ identifier[abs] ( identifier[int] ( identifier[sort_by] ))- literal[int] ]
identifier[queryset] = identifier[queryset] . identifier[order_by] ( identifier[sort_by] )
keyword[return] identifier[queryset] | def sort_queryset(queryset, request, context=None):
""" Returns a sorted queryset
The context argument is only used in the template tag
"""
sort_by = request.GET.get('sort_by')
if sort_by:
if sort_by in [el.name for el in queryset.model._meta.fields]:
queryset = queryset.order_by(sort_by) # depends on [control=['if'], data=['sort_by']]
elif sort_by in request.session:
sort_by = request.session[sort_by]
try:
queryset = queryset.order_by(sort_by) # depends on [control=['try'], data=[]]
except:
raise # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['sort_by']]
# added else to fix a bug when using changelist
# TODO: use less ifs and more standard sorting
elif context is not None:
# sorted ascending
if sort_by[0] != '-':
sort_by = context['cl'].list_display[int(sort_by) - 1] # depends on [control=['if'], data=[]]
else:
# sorted descending
sort_by = '-' + context['cl'].list_display[abs(int(sort_by)) - 1]
queryset = queryset.order_by(sort_by) # depends on [control=['if'], data=['context']] # depends on [control=['if'], data=[]]
return queryset |
def clear_errors():
    """ Clears the errors register of all Herkulex servos
    Args:
        none
    """
    # RAM-write two zero bytes at STATUS_ERROR_RAM, broadcast to every
    # servo on the bus (packet length byte 0x0B, broadcast id).
    packet = [
        0x0B,
        BROADCAST_ID,
        RAM_WRITE_REQ,
        STATUS_ERROR_RAM,
        BYTE2,
        0x00,
        0x00,
    ]
    send_data(packet)
constant[ Clears the errors register of all Herkulex servos
Args:
none
]
variable[data] assign[=] list[[]]
call[name[data].append, parameter[constant[11]]]
call[name[data].append, parameter[name[BROADCAST_ID]]]
call[name[data].append, parameter[name[RAM_WRITE_REQ]]]
call[name[data].append, parameter[name[STATUS_ERROR_RAM]]]
call[name[data].append, parameter[name[BYTE2]]]
call[name[data].append, parameter[constant[0]]]
call[name[data].append, parameter[constant[0]]]
call[name[send_data], parameter[name[data]]] | keyword[def] identifier[clear_errors] ():
literal[string]
identifier[data] =[]
identifier[data] . identifier[append] ( literal[int] )
identifier[data] . identifier[append] ( identifier[BROADCAST_ID] )
identifier[data] . identifier[append] ( identifier[RAM_WRITE_REQ] )
identifier[data] . identifier[append] ( identifier[STATUS_ERROR_RAM] )
identifier[data] . identifier[append] ( identifier[BYTE2] )
identifier[data] . identifier[append] ( literal[int] )
identifier[data] . identifier[append] ( literal[int] )
identifier[send_data] ( identifier[data] ) | def clear_errors():
""" Clears the errors register of all Herkulex servos
Args:
none
"""
data = []
data.append(11)
data.append(BROADCAST_ID)
data.append(RAM_WRITE_REQ)
data.append(STATUS_ERROR_RAM)
data.append(BYTE2)
data.append(0)
data.append(0)
send_data(data) |
def iriref_to_shexj_iriref(self, ref: ShExDocParser.IRIREF) -> ShExJ.IRIREF:
    """ IRIREF: '<' (~[\u0000-\u0020=<>\"{}|^`\\] | UCHAR)* '>'
    IRI: (PN_CHARS | '!' | ''.' | ':' | '/' | '\\' | '#' | '@' | '%' | '&' | UCHAR)* """
    # Convert the parser token to its plain string form, then wrap it in
    # the ShExJ IRIREF type.
    iri_text = self.iriref_to_str(ref)
    return ShExJ.IRIREF(iri_text)
constant[ IRIREF: '<' (~[ - =<>"{}|^`\] | UCHAR)* '>'
IRI: (PN_CHARS | '!' | ''.' | ':' | '/' | '\' | '#' | '@' | '%' | '&' | UCHAR)* ]
return[call[name[ShExJ].IRIREF, parameter[call[name[self].iriref_to_str, parameter[name[ref]]]]]] | keyword[def] identifier[iriref_to_shexj_iriref] ( identifier[self] , identifier[ref] : identifier[ShExDocParser] . identifier[IRIREF] )-> identifier[ShExJ] . identifier[IRIREF] :
literal[string]
keyword[return] identifier[ShExJ] . identifier[IRIREF] ( identifier[self] . identifier[iriref_to_str] ( identifier[ref] )) | def iriref_to_shexj_iriref(self, ref: ShExDocParser.IRIREF) -> ShExJ.IRIREF:
""" IRIREF: '<' (~[\x00- =<>"{}|^`\\] | UCHAR)* '>'
IRI: (PN_CHARS | '!' | ''.' | ':' | '/' | '\\' | '#' | '@' | '%' | '&' | UCHAR)* """
return ShExJ.IRIREF(self.iriref_to_str(ref)) |
def get_option_parser(defaults):
    """Create and return an OptionParser with the given defaults.

    The parser exposes the global options plus four option groups:
    input/output, concept scheme & labelling, vocabulary structure,
    and cleanup.  ``defaults`` is a mapping of option destinations to
    default values, applied via ``set_defaults``.
    """
    # Recipe for combining a config file with command-line options:
    # http://stackoverflow.com/questions/1880404/using-a-file-to-store-optparse-arguments
    # Typical invocation: skosify yso.owl -o yso-skos.rdf
    usage = 'Usage: %prog [options] voc1 [voc2 ...]'
    parser = optparse.OptionParser(usage=usage)
    parser.set_defaults(**defaults)

    # Global options
    parser.add_option('-c', '--config', type='string',
                      help='Read default options and transformation definitions from the given configuration file.')
    parser.add_option('-o', '--output', type='string',
                      help='Output file name. Default is "-" (stdout).')
    parser.add_option('-D', '--debug', action='store_true',
                      help='Show debug output.')
    parser.add_option('-d', '--no-debug', dest='debug', action='store_false',
                      help='Hide debug output.')
    parser.add_option('-O', '--log', type='string',
                      help='Log file name. Default is to use standard error.')

    # Input and output handling
    group = optparse.OptionGroup(parser, 'Input and Output Options')
    group.add_option('-f', '--from-format', type='string',
                     help='Input format. Default is to detect format based on file extension. Possible values: xml, n3, turtle, nt...')
    group.add_option('-F', '--to-format', type='string',
                     help='Output format. Default is to detect format based on file extension. Possible values: xml, n3, turtle, nt...')
    group.add_option('--update-query', type='string',
                     help='SPARQL update query. This query is executed against the input data before processing it. The value can be either the actual query, or "@filename".')
    group.add_option('--construct-query', type='string',
                     help='SPARQL CONSTRUCT query. This query is executed against the input data and the result graph is used as the actual input. The value can be either the actual query, or "@filename".')
    group.add_option('-I', '--infer', action='store_true',
                     help='Perform RDFS subclass/subproperty inference before transforming input.')
    group.add_option('-i', '--no-infer', dest='infer', action='store_false',
                     help="Don't perform RDFS subclass/subproperty inference before transforming input.")
    parser.add_option_group(group)

    # Concept scheme and labelling
    group = optparse.OptionGroup(parser, 'Concept Scheme and Labelling Options')
    group.add_option('-s', '--namespace', type='string',
                     help='Namespace of vocabulary (usually optional; used to create a ConceptScheme)')
    group.add_option('-L', '--label', type='string',
                     help='Label/title for the vocabulary (usually optional; used to label a ConceptScheme)')
    group.add_option('-l', '--default-language', type='string',
                     help='Language tag to set for labels with no defined language.')
    group.add_option('-p', '--preflabel-policy', type='string',
                     help='Policy for handling multiple prefLabels with the same language tag. Possible values: shortest, longest, all.')
    group.add_option('--set-modified', dest='set_modified', action='store_true',
                     help='Set modification date on the ConceptScheme')
    group.add_option('--no-set-modified', dest='set_modified', action='store_false',
                     help="Don't set modification date on the ConceptScheme")
    parser.add_option_group(group)

    # Vocabulary structure (most flags come in enable/disable pairs)
    group = optparse.OptionGroup(parser, 'Vocabulary Structure Options')
    group.add_option('-E', '--mark-top-concepts', action='store_true',
                     help='Mark top-level concepts in the hierarchy as top concepts (entry points).')
    group.add_option('-e', '--no-mark-top-concepts', dest='mark_top_concepts', action='store_false',
                     help="Don't mark top-level concepts in the hierarchy as top concepts.")
    group.add_option('-N', '--narrower', action='store_true',
                     help='Include narrower/narrowerGeneric/narrowerPartitive relationships in the output vocabulary.')
    group.add_option('-n', '--no-narrower', dest='narrower', action='store_false',
                     help="Don't include narrower/narrowerGeneric/narrowerPartitive relationships in the output vocabulary.")
    group.add_option('-T', '--transitive', action='store_true',
                     help='Include transitive hierarchy relationships in the output vocabulary.')
    group.add_option('-t', '--no-transitive', dest='transitive', action='store_false',
                     help="Don't include transitive hierarchy relationships in the output vocabulary.")
    group.add_option('-M', '--enrich-mappings', action='store_true',
                     help='Perform SKOS enrichments on mapping relationships.')
    group.add_option('-m', '--no-enrich-mappings', dest='enrich_mappings', action='store_false',
                     help="Don't perform SKOS enrichments on mapping relationships.")
    group.add_option('-A', '--aggregates', action='store_true',
                     help='Keep AggregateConcepts completely in the output vocabulary.')
    group.add_option('-a', '--no-aggregates', dest='aggregates', action='store_false',
                     help='Remove AggregateConcepts completely from the output vocabulary.')
    group.add_option('-R', '--keep-related', action='store_true',
                     help='Keep skos:related relationships within the same hierarchy.')
    group.add_option('-r', '--no-keep-related', dest='keep_related', action='store_false',
                     help='Remove skos:related relationships within the same hierarchy.')
    group.add_option('-B', '--break-cycles', action='store_true',
                     help='Break any cycles in the skos:broader hierarchy.')
    group.add_option('-b', '--no-break-cycles', dest='break_cycles', action='store_false',
                     help="Don't break cycles in the skos:broader hierarchy.")
    group.add_option('--eliminate-redundancy', action='store_true',
                     help='Eliminate hierarchical redundancy in the skos:broader hierarchy.')
    group.add_option('--no-eliminate-redundancy', dest='eliminate_redundancy', action='store_false',
                     help="Don't eliminate hierarchical redundancy in the skos:broader hierarchy.")
    parser.add_option_group(group)

    # Cleanup of unused/unreachable definitions
    group = optparse.OptionGroup(parser, 'Cleanup Options')
    group.add_option('--cleanup-classes', action='store_true',
                     help='Remove definitions of classes with no instances.')
    group.add_option('--no-cleanup-classes', dest='cleanup_classes', action='store_false',
                     help="Don't remove definitions of classes with no instances.")
    group.add_option('--cleanup-properties', action='store_true',
                     help='Remove definitions of properties which have not been used.')
    group.add_option('--no-cleanup-properties', action='store_false', dest='cleanup_properties',
                     help="Don't remove definitions of properties which have not been used.")
    group.add_option('--cleanup-unreachable', action='store_true',
                     help='Remove triples which can not be reached by a traversal from the main vocabulary graph.')
    group.add_option('--no-cleanup-unreachable', action='store_false', dest='cleanup_unreachable',
                     help="Don't remove triples which can not be reached by a traversal from the main vocabulary graph.")
    parser.add_option_group(group)

    return parser
constant[Create and return an OptionParser with the given defaults.]
variable[usage] assign[=] constant[Usage: %prog [options] voc1 [voc2 ...]]
variable[parser] assign[=] call[name[optparse].OptionParser, parameter[]]
call[name[parser].set_defaults, parameter[]]
call[name[parser].add_option, parameter[constant[-c], constant[--config]]]
call[name[parser].add_option, parameter[constant[-o], constant[--output]]]
call[name[parser].add_option, parameter[constant[-D], constant[--debug]]]
call[name[parser].add_option, parameter[constant[-d], constant[--no-debug]]]
call[name[parser].add_option, parameter[constant[-O], constant[--log]]]
variable[group] assign[=] call[name[optparse].OptionGroup, parameter[name[parser], constant[Input and Output Options]]]
call[name[group].add_option, parameter[constant[-f], constant[--from-format]]]
call[name[group].add_option, parameter[constant[-F], constant[--to-format]]]
call[name[group].add_option, parameter[constant[--update-query]]]
call[name[group].add_option, parameter[constant[--construct-query]]]
call[name[group].add_option, parameter[constant[-I], constant[--infer]]]
call[name[group].add_option, parameter[constant[-i], constant[--no-infer]]]
call[name[parser].add_option_group, parameter[name[group]]]
variable[group] assign[=] call[name[optparse].OptionGroup, parameter[name[parser], constant[Concept Scheme and Labelling Options]]]
call[name[group].add_option, parameter[constant[-s], constant[--namespace]]]
call[name[group].add_option, parameter[constant[-L], constant[--label]]]
call[name[group].add_option, parameter[constant[-l], constant[--default-language]]]
call[name[group].add_option, parameter[constant[-p], constant[--preflabel-policy]]]
call[name[group].add_option, parameter[constant[--set-modified]]]
call[name[group].add_option, parameter[constant[--no-set-modified]]]
call[name[parser].add_option_group, parameter[name[group]]]
variable[group] assign[=] call[name[optparse].OptionGroup, parameter[name[parser], constant[Vocabulary Structure Options]]]
call[name[group].add_option, parameter[constant[-E], constant[--mark-top-concepts]]]
call[name[group].add_option, parameter[constant[-e], constant[--no-mark-top-concepts]]]
call[name[group].add_option, parameter[constant[-N], constant[--narrower]]]
call[name[group].add_option, parameter[constant[-n], constant[--no-narrower]]]
call[name[group].add_option, parameter[constant[-T], constant[--transitive]]]
call[name[group].add_option, parameter[constant[-t], constant[--no-transitive]]]
call[name[group].add_option, parameter[constant[-M], constant[--enrich-mappings]]]
call[name[group].add_option, parameter[constant[-m], constant[--no-enrich-mappings]]]
call[name[group].add_option, parameter[constant[-A], constant[--aggregates]]]
call[name[group].add_option, parameter[constant[-a], constant[--no-aggregates]]]
call[name[group].add_option, parameter[constant[-R], constant[--keep-related]]]
call[name[group].add_option, parameter[constant[-r], constant[--no-keep-related]]]
call[name[group].add_option, parameter[constant[-B], constant[--break-cycles]]]
call[name[group].add_option, parameter[constant[-b], constant[--no-break-cycles]]]
call[name[group].add_option, parameter[constant[--eliminate-redundancy]]]
call[name[group].add_option, parameter[constant[--no-eliminate-redundancy]]]
call[name[parser].add_option_group, parameter[name[group]]]
variable[group] assign[=] call[name[optparse].OptionGroup, parameter[name[parser], constant[Cleanup Options]]]
call[name[group].add_option, parameter[constant[--cleanup-classes]]]
call[name[group].add_option, parameter[constant[--no-cleanup-classes]]]
call[name[group].add_option, parameter[constant[--cleanup-properties]]]
call[name[group].add_option, parameter[constant[--no-cleanup-properties]]]
call[name[group].add_option, parameter[constant[--cleanup-unreachable]]]
call[name[group].add_option, parameter[constant[--no-cleanup-unreachable]]]
call[name[parser].add_option_group, parameter[name[group]]]
return[name[parser]] | keyword[def] identifier[get_option_parser] ( identifier[defaults] ):
literal[string]
identifier[usage] = literal[string]
identifier[parser] = identifier[optparse] . identifier[OptionParser] ( identifier[usage] = identifier[usage] )
identifier[parser] . identifier[set_defaults] (** identifier[defaults] )
identifier[parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[group] = identifier[optparse] . identifier[OptionGroup] ( identifier[parser] , literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[parser] . identifier[add_option_group] ( identifier[group] )
identifier[group] = identifier[optparse] . identifier[OptionGroup] (
identifier[parser] , literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[type] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option_group] ( identifier[group] )
identifier[group] = identifier[optparse] . identifier[OptionGroup] ( identifier[parser] , literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] ,
identifier[dest] = literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] ,
identifier[dest] = literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] ,
identifier[dest] = literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] ,
identifier[dest] = literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] ,
identifier[dest] = literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[parser] . identifier[add_option_group] ( identifier[group] )
identifier[group] = identifier[optparse] . identifier[OptionGroup] ( identifier[parser] , literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[group] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[parser] . identifier[add_option_group] ( identifier[group] )
keyword[return] identifier[parser] | def get_option_parser(defaults):
"""Create and return an OptionParser with the given defaults."""
# based on recipe from:
# http://stackoverflow.com/questions/1880404/using-a-file-to-store-optparse-arguments
# process command line parameters
# e.g. skosify yso.owl -o yso-skos.rdf
usage = 'Usage: %prog [options] voc1 [voc2 ...]'
parser = optparse.OptionParser(usage=usage)
parser.set_defaults(**defaults)
parser.add_option('-c', '--config', type='string', help='Read default options and transformation definitions from the given configuration file.')
parser.add_option('-o', '--output', type='string', help='Output file name. Default is "-" (stdout).')
parser.add_option('-D', '--debug', action='store_true', help='Show debug output.')
parser.add_option('-d', '--no-debug', dest='debug', action='store_false', help='Hide debug output.')
parser.add_option('-O', '--log', type='string', help='Log file name. Default is to use standard error.')
group = optparse.OptionGroup(parser, 'Input and Output Options')
group.add_option('-f', '--from-format', type='string', help='Input format. Default is to detect format based on file extension. Possible values: xml, n3, turtle, nt...')
group.add_option('-F', '--to-format', type='string', help='Output format. Default is to detect format based on file extension. Possible values: xml, n3, turtle, nt...')
group.add_option('--update-query', type='string', help='SPARQL update query. This query is executed against the input data before processing it. The value can be either the actual query, or "@filename".')
group.add_option('--construct-query', type='string', help='SPARQL CONSTRUCT query. This query is executed against the input data and the result graph is used as the actual input. The value can be either the actual query, or "@filename".')
group.add_option('-I', '--infer', action='store_true', help='Perform RDFS subclass/subproperty inference before transforming input.')
group.add_option('-i', '--no-infer', dest='infer', action='store_false', help="Don't perform RDFS subclass/subproperty inference before transforming input.")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Concept Scheme and Labelling Options')
group.add_option('-s', '--namespace', type='string', help='Namespace of vocabulary (usually optional; used to create a ConceptScheme)')
group.add_option('-L', '--label', type='string', help='Label/title for the vocabulary (usually optional; used to label a ConceptScheme)')
group.add_option('-l', '--default-language', type='string', help='Language tag to set for labels with no defined language.')
group.add_option('-p', '--preflabel-policy', type='string', help='Policy for handling multiple prefLabels with the same language tag. Possible values: shortest, longest, all.')
group.add_option('--set-modified', dest='set_modified', action='store_true', help='Set modification date on the ConceptScheme')
group.add_option('--no-set-modified', dest='set_modified', action='store_false', help="Don't set modification date on the ConceptScheme")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Vocabulary Structure Options')
group.add_option('-E', '--mark-top-concepts', action='store_true', help='Mark top-level concepts in the hierarchy as top concepts (entry points).')
group.add_option('-e', '--no-mark-top-concepts', dest='mark_top_concepts', action='store_false', help="Don't mark top-level concepts in the hierarchy as top concepts.")
group.add_option('-N', '--narrower', action='store_true', help='Include narrower/narrowerGeneric/narrowerPartitive relationships in the output vocabulary.')
group.add_option('-n', '--no-narrower', dest='narrower', action='store_false', help="Don't include narrower/narrowerGeneric/narrowerPartitive relationships in the output vocabulary.")
group.add_option('-T', '--transitive', action='store_true', help='Include transitive hierarchy relationships in the output vocabulary.')
group.add_option('-t', '--no-transitive', dest='transitive', action='store_false', help="Don't include transitive hierarchy relationships in the output vocabulary.")
group.add_option('-M', '--enrich-mappings', action='store_true', help='Perform SKOS enrichments on mapping relationships.')
group.add_option('-m', '--no-enrich-mappings', dest='enrich_mappings', action='store_false', help="Don't perform SKOS enrichments on mapping relationships.")
group.add_option('-A', '--aggregates', action='store_true', help='Keep AggregateConcepts completely in the output vocabulary.')
group.add_option('-a', '--no-aggregates', dest='aggregates', action='store_false', help='Remove AggregateConcepts completely from the output vocabulary.')
group.add_option('-R', '--keep-related', action='store_true', help='Keep skos:related relationships within the same hierarchy.')
group.add_option('-r', '--no-keep-related', dest='keep_related', action='store_false', help='Remove skos:related relationships within the same hierarchy.')
group.add_option('-B', '--break-cycles', action='store_true', help='Break any cycles in the skos:broader hierarchy.')
group.add_option('-b', '--no-break-cycles', dest='break_cycles', action='store_false', help="Don't break cycles in the skos:broader hierarchy.")
group.add_option('--eliminate-redundancy', action='store_true', help='Eliminate hierarchical redundancy in the skos:broader hierarchy.')
group.add_option('--no-eliminate-redundancy', dest='eliminate_redundancy', action='store_false', help="Don't eliminate hierarchical redundancy in the skos:broader hierarchy.")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Cleanup Options')
group.add_option('--cleanup-classes', action='store_true', help='Remove definitions of classes with no instances.')
group.add_option('--no-cleanup-classes', dest='cleanup_classes', action='store_false', help="Don't remove definitions of classes with no instances.")
group.add_option('--cleanup-properties', action='store_true', help='Remove definitions of properties which have not been used.')
group.add_option('--no-cleanup-properties', action='store_false', dest='cleanup_properties', help="Don't remove definitions of properties which have not been used.")
group.add_option('--cleanup-unreachable', action='store_true', help='Remove triples which can not be reached by a traversal from the main vocabulary graph.')
group.add_option('--no-cleanup-unreachable', action='store_false', dest='cleanup_unreachable', help="Don't remove triples which can not be reached by a traversal from the main vocabulary graph.")
parser.add_option_group(group)
return parser |
def add_nodes(network_id, nodes,**kwargs):
    """
    Add nodes to network

    Args:
        network_id: ID of the network the nodes are added to.
        nodes: iterable of node objects; each must carry a unique ``name``.
        **kwargs: must include ``user_id``, used for the write-permission check.

    Returns:
        All ``Node`` records of the network after the flush (pre-existing
        plus newly added).

    Raises:
        HydraError: if two supplied nodes share the same name.
        ResourceNotFoundError: if no network with ``network_id`` exists.
    """
    start_time = datetime.datetime.now()
    # Reject duplicate names up front, before touching the database.
    names=[] # used to check uniqueness of node name
    for n_i in nodes:
        if n_i.name in names:
            raise HydraError("Duplicate Node Name: %s"%(n_i.name))
        names.append(n_i.name)
    user_id = kwargs.get('user_id')
    try:
        net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
        net_i.check_write_permission(user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))
    _add_nodes_to_database(net_i, nodes)
    # NOTE(review): self-assignment looks like a no-op; presumably it marks
    # the ORM instance dirty so the flush touches the network row -- confirm.
    net_i.project_id=net_i.project_id
    db.DBSession.flush()
    # Re-query so newly inserted nodes carry their database-assigned ids.
    node_s = db.DBSession.query(Node).filter(Node.network_id==network_id).all()
    #Maps temporary node_ids to real node_ids
    node_id_map = dict()
    iface_nodes = dict()
    for n_i in node_s:
        iface_nodes[n_i.name] = n_i
    # NOTE(review): node_id_map is populated here but never read afterwards
    # in this function -- candidate for removal if no caller relies on it.
    for node in nodes:
        node_id_map[node.id] = iface_nodes[node.name]
    _bulk_add_resource_attrs(network_id, 'NODE', nodes, iface_nodes)
    log.info("Nodes added in %s", get_timing(start_time))
    return node_s
constant[
Add nodes to network
]
variable[start_time] assign[=] call[name[datetime].datetime.now, parameter[]]
variable[names] assign[=] list[[]]
for taget[name[n_i]] in starred[name[nodes]] begin[:]
if compare[name[n_i].name in name[names]] begin[:]
<ast.Raise object at 0x7da18f00da80>
call[name[names].append, parameter[name[n_i].name]]
variable[user_id] assign[=] call[name[kwargs].get, parameter[constant[user_id]]]
<ast.Try object at 0x7da18f00e7a0>
call[name[_add_nodes_to_database], parameter[name[net_i], name[nodes]]]
name[net_i].project_id assign[=] name[net_i].project_id
call[name[db].DBSession.flush, parameter[]]
variable[node_s] assign[=] call[call[call[name[db].DBSession.query, parameter[name[Node]]].filter, parameter[compare[name[Node].network_id equal[==] name[network_id]]]].all, parameter[]]
variable[node_id_map] assign[=] call[name[dict], parameter[]]
variable[iface_nodes] assign[=] call[name[dict], parameter[]]
for taget[name[n_i]] in starred[name[node_s]] begin[:]
call[name[iface_nodes]][name[n_i].name] assign[=] name[n_i]
for taget[name[node]] in starred[name[nodes]] begin[:]
call[name[node_id_map]][name[node].id] assign[=] call[name[iface_nodes]][name[node].name]
call[name[_bulk_add_resource_attrs], parameter[name[network_id], constant[NODE], name[nodes], name[iface_nodes]]]
call[name[log].info, parameter[constant[Nodes added in %s], call[name[get_timing], parameter[name[start_time]]]]]
return[name[node_s]] | keyword[def] identifier[add_nodes] ( identifier[network_id] , identifier[nodes] ,** identifier[kwargs] ):
literal[string]
identifier[start_time] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[names] =[]
keyword[for] identifier[n_i] keyword[in] identifier[nodes] :
keyword[if] identifier[n_i] . identifier[name] keyword[in] identifier[names] :
keyword[raise] identifier[HydraError] ( literal[string] %( identifier[n_i] . identifier[name] ))
identifier[names] . identifier[append] ( identifier[n_i] . identifier[name] )
identifier[user_id] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[try] :
identifier[net_i] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Network] ). identifier[filter] ( identifier[Network] . identifier[id] == identifier[network_id] ). identifier[one] ()
identifier[net_i] . identifier[check_write_permission] ( identifier[user_id] )
keyword[except] identifier[NoResultFound] :
keyword[raise] identifier[ResourceNotFoundError] ( literal[string] %( identifier[network_id] ))
identifier[_add_nodes_to_database] ( identifier[net_i] , identifier[nodes] )
identifier[net_i] . identifier[project_id] = identifier[net_i] . identifier[project_id]
identifier[db] . identifier[DBSession] . identifier[flush] ()
identifier[node_s] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Node] ). identifier[filter] ( identifier[Node] . identifier[network_id] == identifier[network_id] ). identifier[all] ()
identifier[node_id_map] = identifier[dict] ()
identifier[iface_nodes] = identifier[dict] ()
keyword[for] identifier[n_i] keyword[in] identifier[node_s] :
identifier[iface_nodes] [ identifier[n_i] . identifier[name] ]= identifier[n_i]
keyword[for] identifier[node] keyword[in] identifier[nodes] :
identifier[node_id_map] [ identifier[node] . identifier[id] ]= identifier[iface_nodes] [ identifier[node] . identifier[name] ]
identifier[_bulk_add_resource_attrs] ( identifier[network_id] , literal[string] , identifier[nodes] , identifier[iface_nodes] )
identifier[log] . identifier[info] ( literal[string] , identifier[get_timing] ( identifier[start_time] ))
keyword[return] identifier[node_s] | def add_nodes(network_id, nodes, **kwargs):
"""
Add nodes to network
"""
start_time = datetime.datetime.now()
names = [] # used to check uniqueness of node name
for n_i in nodes:
if n_i.name in names:
raise HydraError('Duplicate Node Name: %s' % n_i.name) # depends on [control=['if'], data=[]]
names.append(n_i.name) # depends on [control=['for'], data=['n_i']]
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id) # depends on [control=['try'], data=[]]
except NoResultFound:
raise ResourceNotFoundError('Network %s not found' % network_id) # depends on [control=['except'], data=[]]
_add_nodes_to_database(net_i, nodes)
net_i.project_id = net_i.project_id
db.DBSession.flush()
node_s = db.DBSession.query(Node).filter(Node.network_id == network_id).all()
#Maps temporary node_ids to real node_ids
node_id_map = dict()
iface_nodes = dict()
for n_i in node_s:
iface_nodes[n_i.name] = n_i # depends on [control=['for'], data=['n_i']]
for node in nodes:
node_id_map[node.id] = iface_nodes[node.name] # depends on [control=['for'], data=['node']]
_bulk_add_resource_attrs(network_id, 'NODE', nodes, iface_nodes)
log.info('Nodes added in %s', get_timing(start_time))
return node_s |
def hex_to_name(hex_value, spec=u'css3'):
    """
    Convert a hexadecimal color value to its corresponding normalized
    color name, if any such name exists.
    The optional keyword argument ``spec`` selects which
    specification's list of color names is consulted; valid values are
    ``html4``, ``css2``, ``css21`` and ``css3`` (the default).
    When the given specification defines no name for the value,
    ``ValueError`` is raised.
    """
    if spec not in SUPPORTED_SPECIFICATIONS:
        raise ValueError(SPECIFICATION_ERROR_TEMPLATE.format(spec=spec))
    # Map each supported specification to its hex -> name lookup table.
    spec_maps = {
        u'css2': CSS2_HEX_TO_NAMES,
        u'css21': CSS21_HEX_TO_NAMES,
        u'css3': CSS3_HEX_TO_NAMES,
        u'html4': HTML4_HEX_TO_NAMES,
    }
    name = spec_maps[spec].get(normalize_hex(hex_value))
    if name is None:
        raise ValueError(
            u"'{}' has no defined color name in {}".format(hex_value, spec)
        )
    return name
constant[
Convert a hexadecimal color value to its corresponding normalized
color name, if any such name exists.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
When no color name for the value is found in the given
specification, ``ValueError`` is raised.
]
if compare[name[spec] <ast.NotIn object at 0x7da2590d7190> name[SUPPORTED_SPECIFICATIONS]] begin[:]
<ast.Raise object at 0x7da1b0ca6710>
variable[normalized] assign[=] call[name[normalize_hex], parameter[name[hex_value]]]
variable[name] assign[=] call[call[dictionary[[<ast.Constant object at 0x7da1b0ca58d0>, <ast.Constant object at 0x7da1b0ca44c0>, <ast.Constant object at 0x7da1b0ca4550>, <ast.Constant object at 0x7da1b0ca44f0>], [<ast.Name object at 0x7da1b0ca5d80>, <ast.Name object at 0x7da1b0ca4be0>, <ast.Name object at 0x7da1b0ca4b80>, <ast.Name object at 0x7da1b0ca7340>]]][name[spec]].get, parameter[name[normalized]]]
if compare[name[name] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0e2c460>
return[name[name]] | keyword[def] identifier[hex_to_name] ( identifier[hex_value] , identifier[spec] = literal[string] ):
literal[string]
keyword[if] identifier[spec] keyword[not] keyword[in] identifier[SUPPORTED_SPECIFICATIONS] :
keyword[raise] identifier[ValueError] ( identifier[SPECIFICATION_ERROR_TEMPLATE] . identifier[format] ( identifier[spec] = identifier[spec] ))
identifier[normalized] = identifier[normalize_hex] ( identifier[hex_value] )
identifier[name] ={ literal[string] : identifier[CSS2_HEX_TO_NAMES] ,
literal[string] : identifier[CSS21_HEX_TO_NAMES] ,
literal[string] : identifier[CSS3_HEX_TO_NAMES] ,
literal[string] : identifier[HTML4_HEX_TO_NAMES] }[ identifier[spec] ]. identifier[get] ( identifier[normalized] )
keyword[if] identifier[name] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[hex_value] , identifier[spec] )
)
keyword[return] identifier[name] | def hex_to_name(hex_value, spec=u'css3'):
"""
Convert a hexadecimal color value to its corresponding normalized
color name, if any such name exists.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
When no color name for the value is found in the given
specification, ``ValueError`` is raised.
"""
if spec not in SUPPORTED_SPECIFICATIONS:
raise ValueError(SPECIFICATION_ERROR_TEMPLATE.format(spec=spec)) # depends on [control=['if'], data=['spec']]
normalized = normalize_hex(hex_value)
name = {u'css2': CSS2_HEX_TO_NAMES, u'css21': CSS21_HEX_TO_NAMES, u'css3': CSS3_HEX_TO_NAMES, u'html4': HTML4_HEX_TO_NAMES}[spec].get(normalized)
if name is None:
raise ValueError(u"'{}' has no defined color name in {}".format(hex_value, spec)) # depends on [control=['if'], data=[]]
return name |
def _bounds(component, glyph_set):
"""Return the (xmin, ymin) of the bounds of `component`."""
if hasattr(component, "bounds"): # e.g. defcon
return component.bounds[:2]
elif hasattr(component, "draw"): # e.g. ufoLib2
pen = fontTools.pens.boundsPen.BoundsPen(glyphSet=glyph_set)
component.draw(pen)
return pen.bounds[:2]
else:
raise ValueError(
"Don't know to to compute the bounds of component '{}' ".format(component)
) | def function[_bounds, parameter[component, glyph_set]]:
constant[Return the (xmin, ymin) of the bounds of `component`.]
if call[name[hasattr], parameter[name[component], constant[bounds]]] begin[:]
return[call[name[component].bounds][<ast.Slice object at 0x7da20c9924a0>]] | keyword[def] identifier[_bounds] ( identifier[component] , identifier[glyph_set] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[component] , literal[string] ):
keyword[return] identifier[component] . identifier[bounds] [: literal[int] ]
keyword[elif] identifier[hasattr] ( identifier[component] , literal[string] ):
identifier[pen] = identifier[fontTools] . identifier[pens] . identifier[boundsPen] . identifier[BoundsPen] ( identifier[glyphSet] = identifier[glyph_set] )
identifier[component] . identifier[draw] ( identifier[pen] )
keyword[return] identifier[pen] . identifier[bounds] [: literal[int] ]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[component] )
) | def _bounds(component, glyph_set):
"""Return the (xmin, ymin) of the bounds of `component`."""
if hasattr(component, 'bounds'): # e.g. defcon
return component.bounds[:2] # depends on [control=['if'], data=[]]
elif hasattr(component, 'draw'): # e.g. ufoLib2
pen = fontTools.pens.boundsPen.BoundsPen(glyphSet=glyph_set)
component.draw(pen)
return pen.bounds[:2] # depends on [control=['if'], data=[]]
else:
raise ValueError("Don't know to to compute the bounds of component '{}' ".format(component)) |
def parse(self, valstr):
    # type: (bytes) -> None
    '''
    Parse an El Torito section header out of a byte string.
    Parameters:
     valstr - The string to parse.
    Returns:
     Nothing.
    '''
    # Parsing twice would silently clobber already-populated fields,
    # so treat a second call as an internal error.
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Section Header already initialized')
    # Decode the fixed-layout header in one pass, then assign each
    # field individually.
    fields = struct.unpack_from(self.FMT, valstr, 0)
    self.header_indicator = fields[0]
    self.platform_id = fields[1]
    self.num_section_entries = fields[2]
    self.id_string = fields[3]
    self._initialized = True
constant[
Parse an El Torito section header from a string.
Parameters:
valstr - The string to parse.
Returns:
Nothing.
]
if name[self]._initialized begin[:]
<ast.Raise object at 0x7da1b0f60c40>
<ast.Tuple object at 0x7da1b0f61930> assign[=] call[name[struct].unpack_from, parameter[name[self].FMT, name[valstr], constant[0]]]
name[self]._initialized assign[=] constant[True] | keyword[def] identifier[parse] ( identifier[self] , identifier[valstr] ):
literal[string]
keyword[if] identifier[self] . identifier[_initialized] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
( identifier[self] . identifier[header_indicator] , identifier[self] . identifier[platform_id] , identifier[self] . identifier[num_section_entries] ,
identifier[self] . identifier[id_string] )= identifier[struct] . identifier[unpack_from] ( identifier[self] . identifier[FMT] , identifier[valstr] , literal[int] )
identifier[self] . identifier[_initialized] = keyword[True] | def parse(self, valstr):
# type: (bytes) -> None
'\n Parse an El Torito section header from a string.\n\n Parameters:\n valstr - The string to parse.\n Returns:\n Nothing.\n '
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Section Header already initialized') # depends on [control=['if'], data=[]]
(self.header_indicator, self.platform_id, self.num_section_entries, self.id_string) = struct.unpack_from(self.FMT, valstr, 0)
self._initialized = True |
def save(self):
    """Save this config to disk.
    If the charm is using the :mod:`Services Framework <services.base>`
    or :meth:'@hook <Hooks.hook>' decorator, this
    is called automatically at the end of successful hook execution.
    Otherwise, it should be called directly by user code.
    To disable automatic saves, set ``implicit_save=False`` on this
    instance.
    """
    with open(self.path, 'w') as config_file:
        # Tighten permissions to owner read/write before any data is
        # written, since the config may hold sensitive values.
        os.fchmod(config_file.fileno(), 0o600)
        json.dump(self, config_file)
constant[Save this config to disk.
If the charm is using the :mod:`Services Framework <services.base>`
or :meth:'@hook <Hooks.hook>' decorator, this
is called automatically at the end of successful hook execution.
Otherwise, it should be called directly by user code.
To disable automatic saves, set ``implicit_save=False`` on this
instance.
]
with call[name[open], parameter[name[self].path, constant[w]]] begin[:]
call[name[os].fchmod, parameter[call[name[f].fileno, parameter[]], constant[384]]]
call[name[json].dump, parameter[name[self], name[f]]] | keyword[def] identifier[save] ( identifier[self] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[path] , literal[string] ) keyword[as] identifier[f] :
identifier[os] . identifier[fchmod] ( identifier[f] . identifier[fileno] (), literal[int] )
identifier[json] . identifier[dump] ( identifier[self] , identifier[f] ) | def save(self):
"""Save this config to disk.
If the charm is using the :mod:`Services Framework <services.base>`
or :meth:'@hook <Hooks.hook>' decorator, this
is called automatically at the end of successful hook execution.
Otherwise, it should be called directly by user code.
To disable automatic saves, set ``implicit_save=False`` on this
instance.
"""
with open(self.path, 'w') as f:
os.fchmod(f.fileno(), 384)
json.dump(self, f) # depends on [control=['with'], data=['f']] |
def render(template='', data={}, partials_path='.', partials_ext='mustache',
           partials_dict={}, padding='', def_ldel='{{', def_rdel='}}',
           scopes=None):
    """Render a mustache template.

    Renders a mustache template with a data scope and partial capability.
    Given the file structure...
    ╷
    ├─╼ main.py
    ├─╼ main.ms
    └─┮ partials
      └── part.ms

    then main.py would make the following call:

        render(open('main.ms', 'r'), {...}, 'partials', 'ms')

    Arguments:

    template      -- A file-like object or a string containing the template
                     (an already-tokenized sequence of (tag, key) pairs is
                     also accepted, as used by the recursive calls below)

    data          -- A python dictionary with your data scope

    partials_path -- The path to where your partials are stored
                     (defaults to '.')

    partials_ext  -- The extension that you want the parser to look for
                     (defaults to 'mustache')

    partials_dict -- A python dictionary which will be searched for partials
                     before the filesystem is. {'include': 'foo'} is the same
                     as a file called include.mustache
                     (defaults to {})

    padding       -- This is for padding partials, and shouldn't be used
                     (but can be if you really want to)

    def_ldel      -- The default left delimiter
                     ("{{" by default, as in spec compliant mustache)

    def_rdel      -- The default right delimiter
                     ("}}" by default, as in spec compliant mustache)

    scopes        -- The list of scopes that get_key will look through

    Returns:

    A string containing the rendered template.

    NOTE(review): ``data={}`` and ``partials_dict={}`` are mutable default
    arguments; this looks safe only because neither dict is mutated here
    (``scopes`` wraps ``data`` in a fresh list) -- confirm before refactoring.
    """
    # If the template is a sequence but not derived from a string
    if isinstance(template, Sequence) and \
       not isinstance(template, string_type):
        # Then we don't need to tokenize it
        # But it does need to be a generator
        tokens = (token for token in template)
    else:
        if template in g_token_cache:
            # Reuse the token stream from a previous render of the same
            # template text (the cache is populated below when a lambda
            # section reconstructs template text from tokens).
            tokens = (token for token in g_token_cache[template])
        else:
            # Otherwise make a generator
            tokens = tokenize(template, def_ldel, def_rdel)
    output = unicode('', 'utf-8')
    if scopes is None:
        scopes = [data]
    # Run through the tokens
    for tag, key in tokens:
        # Set the current scope
        current_scope = scopes[0]
        # If we're an end tag
        if tag == 'end':
            # Pop out of the latest scope
            del scopes[0]
        # If the current scope is falsy and not the only scope
        elif not current_scope and len(scopes) != 1:
            if tag in ['section', 'inverted section']:
                # Set the most recent scope to a falsy value
                # (I heard False is a good one)
                scopes.insert(0, False)
        # If we're a literal tag
        elif tag == 'literal':
            # Add padding to the key and add it to the output
            if not isinstance(key, unicode_type):  # python 2
                key = unicode(key, 'utf-8')
            output += key.replace('\n', '\n' + padding)
        # If we're a variable tag
        elif tag == 'variable':
            # Add the html escaped key to the output
            thing = _get_key(key, scopes)
            if thing is True and key == '.':
                # if we've coerced into a boolean by accident
                # (inverted tags do this)
                # then get the un-coerced object (next in the stack)
                thing = scopes[1]
            if not isinstance(thing, unicode_type):
                thing = unicode(str(thing), 'utf-8')
            output += _html_escape(thing)
        # If we're a no html escape tag
        elif tag == 'no escape':
            # Just lookup the key and add it
            thing = _get_key(key, scopes)
            if not isinstance(thing, unicode_type):
                thing = unicode(str(thing), 'utf-8')
            output += thing
        # If we're a section tag
        elif tag == 'section':
            # Get the sections scope
            scope = _get_key(key, scopes)
            # If the scope is a callable (as described in
            # https://mustache.github.io/mustache.5.html)
            if isinstance(scope, Callable):
                # Generate template text from tags
                # NOTE: this inner loop consumes the shared ``tokens``
                # generator (and rebinds ``tag``) up to the section's
                # matching 'end' token, so the outer loop resumes after it.
                text = unicode('', 'utf-8')
                tags = []
                for tag in tokens:
                    if tag == ('end', key):
                        break
                    tags.append(tag)
                    tag_type, tag_key = tag
                    if tag_type == 'literal':
                        text += tag_key
                    elif tag_type == 'no escape':
                        text += "%s& %s %s" % (def_ldel, tag_key, def_rdel)
                    else:
                        # NOTE(review): the 'commment' key below looks like a
                        # typo for 'comment' -- it is a runtime dict key, so
                        # comment tags inside a lambda section would raise
                        # KeyError here; confirm against the tokenizer's tag
                        # names before changing it.
                        text += "%s%s %s%s" % (def_ldel, {
                            'commment': '!',
                            'section': '#',
                            'inverted section': '^',
                            'end': '/',
                            'partial': '>',
                            'set delimiter': '=',
                            'no escape': '&',
                            'variable': ''
                        }[tag_type], tag_key, def_rdel)
                # Cache the tokens so the lambda's render callback does not
                # need to re-tokenize the reconstructed template text.
                g_token_cache[text] = tags
                rend = scope(text, lambda template, data=None: render(template,
                                                                      data={},
                                                                      partials_path=partials_path,
                                                                      partials_ext=partials_ext,
                                                                      partials_dict=partials_dict,
                                                                      padding=padding,
                                                                      def_ldel=def_ldel, def_rdel=def_rdel,
                                                                      scopes=data and [data]+scopes or scopes))
                if python3:
                    output += rend
                else:  # python 2
                    output += rend.decode('utf-8')
            # If the scope is a sequence, an iterator or generator but not
            # derived from a string
            elif isinstance(scope, (Sequence, Iterator)) and \
                    not isinstance(scope, string_type):
                # Then we need to do some looping
                # Gather up all the tags inside the section
                # (And don't be tricked by nested end tags with the same key)
                # TODO: This feels like it still has edge cases, no?
                tags = []
                tags_with_same_key = 0
                for tag in tokens:
                    if tag == ('section', key):
                        # A nested section with the same key opens here
                        tags_with_same_key += 1
                    if tag == ('end', key):
                        tags_with_same_key -= 1
                        if tags_with_same_key < 0:
                            # This 'end' closes *our* section, not a nested one
                            break
                    tags.append(tag)
                # For every item in the scope
                for thing in scope:
                    # Append it as the most recent scope and render
                    new_scope = [thing] + scopes
                    rend = render(template=tags, scopes=new_scope,
                                  partials_path=partials_path,
                                  partials_ext=partials_ext,
                                  partials_dict=partials_dict,
                                  def_ldel=def_ldel, def_rdel=def_rdel)
                    if python3:
                        output += rend
                    else:  # python 2
                        output += rend.decode('utf-8')
            else:
                # Otherwise we're just a scope section
                scopes.insert(0, scope)
        # If we're an inverted section
        elif tag == 'inverted section':
            # Add the flipped scope to the scopes
            scope = _get_key(key, scopes)
            scopes.insert(0, not scope)
        # If we're a partial
        elif tag == 'partial':
            # Load the partial
            partial = _get_partial(key, partials_dict,
                                   partials_path, partials_ext)
            # Find what to pad the partial with
            left = output.split('\n')[-1]
            part_padding = padding
            if left.isspace():
                part_padding += left
            # Render the partial
            part_out = render(template=partial, partials_path=partials_path,
                              partials_ext=partials_ext,
                              partials_dict=partials_dict,
                              def_ldel=def_ldel, def_rdel=def_rdel,
                              padding=part_padding, scopes=scopes)
            # If the partial was indented
            if left.isspace():
                # then remove the spaces from the end
                part_out = part_out.rstrip(' \t')
            # Add the partials output to the ouput
            if python3:
                output += part_out
            else:  # python 2
                output += part_out.decode('utf-8')
    if python3:
        return output
    else:  # python 2
        return output.encode('utf-8')
constant[Render a mustache template.
Renders a mustache template with a data scope and partial capability.
Given the file structure...
╷
├─╼ main.py
├─╼ main.ms
└─┮ partials
└── part.ms
then main.py would make the following call:
render(open('main.ms', 'r'), {...}, 'partials', 'ms')
Arguments:
template -- A file-like object or a string containing the template
data -- A python dictionary with your data scope
partials_path -- The path to where your partials are stored
(defaults to '.')
partials_ext -- The extension that you want the parser to look for
(defaults to 'mustache')
partials_dict -- A python dictionary which will be search for partials
before the filesystem is. {'include': 'foo'} is the same
as a file called include.mustache
(defaults to {})
padding -- This is for padding partials, and shouldn't be used
(but can be if you really want to)
def_ldel -- The default left delimiter
("{{" by default, as in spec compliant mustache)
def_rdel -- The default right delimiter
("}}" by default, as in spec compliant mustache)
scopes -- The list of scopes that get_key will look through
Returns:
A string containing the rendered template.
]
if <ast.BoolOp object at 0x7da1b18aafb0> begin[:]
variable[tokens] assign[=] <ast.GeneratorExp object at 0x7da1b18a9db0>
variable[output] assign[=] call[name[unicode], parameter[constant[], constant[utf-8]]]
if compare[name[scopes] is constant[None]] begin[:]
variable[scopes] assign[=] list[[<ast.Name object at 0x7da1b18ab4c0>]]
for taget[tuple[[<ast.Name object at 0x7da1b18abeb0>, <ast.Name object at 0x7da1b18abf40>]]] in starred[name[tokens]] begin[:]
variable[current_scope] assign[=] call[name[scopes]][constant[0]]
if compare[name[tag] equal[==] constant[end]] begin[:]
<ast.Delete object at 0x7da1b18ab820>
if name[python3] begin[:]
return[name[output]] | keyword[def] identifier[render] ( identifier[template] = literal[string] , identifier[data] ={}, identifier[partials_path] = literal[string] , identifier[partials_ext] = literal[string] ,
identifier[partials_dict] ={}, identifier[padding] = literal[string] , identifier[def_ldel] = literal[string] , identifier[def_rdel] = literal[string] ,
identifier[scopes] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[template] , identifier[Sequence] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[template] , identifier[string_type] ):
identifier[tokens] =( identifier[token] keyword[for] identifier[token] keyword[in] identifier[template] )
keyword[else] :
keyword[if] identifier[template] keyword[in] identifier[g_token_cache] :
identifier[tokens] =( identifier[token] keyword[for] identifier[token] keyword[in] identifier[g_token_cache] [ identifier[template] ])
keyword[else] :
identifier[tokens] = identifier[tokenize] ( identifier[template] , identifier[def_ldel] , identifier[def_rdel] )
identifier[output] = identifier[unicode] ( literal[string] , literal[string] )
keyword[if] identifier[scopes] keyword[is] keyword[None] :
identifier[scopes] =[ identifier[data] ]
keyword[for] identifier[tag] , identifier[key] keyword[in] identifier[tokens] :
identifier[current_scope] = identifier[scopes] [ literal[int] ]
keyword[if] identifier[tag] == literal[string] :
keyword[del] identifier[scopes] [ literal[int] ]
keyword[elif] keyword[not] identifier[current_scope] keyword[and] identifier[len] ( identifier[scopes] )!= literal[int] :
keyword[if] identifier[tag] keyword[in] [ literal[string] , literal[string] ]:
identifier[scopes] . identifier[insert] ( literal[int] , keyword[False] )
keyword[elif] identifier[tag] == literal[string] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[key] , identifier[unicode_type] ):
identifier[key] = identifier[unicode] ( identifier[key] , literal[string] )
identifier[output] += identifier[key] . identifier[replace] ( literal[string] , literal[string] + identifier[padding] )
keyword[elif] identifier[tag] == literal[string] :
identifier[thing] = identifier[_get_key] ( identifier[key] , identifier[scopes] )
keyword[if] identifier[thing] keyword[is] keyword[True] keyword[and] identifier[key] == literal[string] :
identifier[thing] = identifier[scopes] [ literal[int] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[thing] , identifier[unicode_type] ):
identifier[thing] = identifier[unicode] ( identifier[str] ( identifier[thing] ), literal[string] )
identifier[output] += identifier[_html_escape] ( identifier[thing] )
keyword[elif] identifier[tag] == literal[string] :
identifier[thing] = identifier[_get_key] ( identifier[key] , identifier[scopes] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[thing] , identifier[unicode_type] ):
identifier[thing] = identifier[unicode] ( identifier[str] ( identifier[thing] ), literal[string] )
identifier[output] += identifier[thing]
keyword[elif] identifier[tag] == literal[string] :
identifier[scope] = identifier[_get_key] ( identifier[key] , identifier[scopes] )
keyword[if] identifier[isinstance] ( identifier[scope] , identifier[Callable] ):
identifier[text] = identifier[unicode] ( literal[string] , literal[string] )
identifier[tags] =[]
keyword[for] identifier[tag] keyword[in] identifier[tokens] :
keyword[if] identifier[tag] ==( literal[string] , identifier[key] ):
keyword[break]
identifier[tags] . identifier[append] ( identifier[tag] )
identifier[tag_type] , identifier[tag_key] = identifier[tag]
keyword[if] identifier[tag_type] == literal[string] :
identifier[text] += identifier[tag_key]
keyword[elif] identifier[tag_type] == literal[string] :
identifier[text] += literal[string] %( identifier[def_ldel] , identifier[tag_key] , identifier[def_rdel] )
keyword[else] :
identifier[text] += literal[string] %( identifier[def_ldel] ,{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}[ identifier[tag_type] ], identifier[tag_key] , identifier[def_rdel] )
identifier[g_token_cache] [ identifier[text] ]= identifier[tags]
identifier[rend] = identifier[scope] ( identifier[text] , keyword[lambda] identifier[template] , identifier[data] = keyword[None] : identifier[render] ( identifier[template] ,
identifier[data] ={},
identifier[partials_path] = identifier[partials_path] ,
identifier[partials_ext] = identifier[partials_ext] ,
identifier[partials_dict] = identifier[partials_dict] ,
identifier[padding] = identifier[padding] ,
identifier[def_ldel] = identifier[def_ldel] , identifier[def_rdel] = identifier[def_rdel] ,
identifier[scopes] = identifier[data] keyword[and] [ identifier[data] ]+ identifier[scopes] keyword[or] identifier[scopes] ))
keyword[if] identifier[python3] :
identifier[output] += identifier[rend]
keyword[else] :
identifier[output] += identifier[rend] . identifier[decode] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[scope] ,( identifier[Sequence] , identifier[Iterator] )) keyword[and] keyword[not] identifier[isinstance] ( identifier[scope] , identifier[string_type] ):
identifier[tags] =[]
identifier[tags_with_same_key] = literal[int]
keyword[for] identifier[tag] keyword[in] identifier[tokens] :
keyword[if] identifier[tag] ==( literal[string] , identifier[key] ):
identifier[tags_with_same_key] += literal[int]
keyword[if] identifier[tag] ==( literal[string] , identifier[key] ):
identifier[tags_with_same_key] -= literal[int]
keyword[if] identifier[tags_with_same_key] < literal[int] :
keyword[break]
identifier[tags] . identifier[append] ( identifier[tag] )
keyword[for] identifier[thing] keyword[in] identifier[scope] :
identifier[new_scope] =[ identifier[thing] ]+ identifier[scopes]
identifier[rend] = identifier[render] ( identifier[template] = identifier[tags] , identifier[scopes] = identifier[new_scope] ,
identifier[partials_path] = identifier[partials_path] ,
identifier[partials_ext] = identifier[partials_ext] ,
identifier[partials_dict] = identifier[partials_dict] ,
identifier[def_ldel] = identifier[def_ldel] , identifier[def_rdel] = identifier[def_rdel] )
keyword[if] identifier[python3] :
identifier[output] += identifier[rend]
keyword[else] :
identifier[output] += identifier[rend] . identifier[decode] ( literal[string] )
keyword[else] :
identifier[scopes] . identifier[insert] ( literal[int] , identifier[scope] )
keyword[elif] identifier[tag] == literal[string] :
identifier[scope] = identifier[_get_key] ( identifier[key] , identifier[scopes] )
identifier[scopes] . identifier[insert] ( literal[int] , keyword[not] identifier[scope] )
keyword[elif] identifier[tag] == literal[string] :
identifier[partial] = identifier[_get_partial] ( identifier[key] , identifier[partials_dict] ,
identifier[partials_path] , identifier[partials_ext] )
identifier[left] = identifier[output] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[part_padding] = identifier[padding]
keyword[if] identifier[left] . identifier[isspace] ():
identifier[part_padding] += identifier[left]
identifier[part_out] = identifier[render] ( identifier[template] = identifier[partial] , identifier[partials_path] = identifier[partials_path] ,
identifier[partials_ext] = identifier[partials_ext] ,
identifier[partials_dict] = identifier[partials_dict] ,
identifier[def_ldel] = identifier[def_ldel] , identifier[def_rdel] = identifier[def_rdel] ,
identifier[padding] = identifier[part_padding] , identifier[scopes] = identifier[scopes] )
keyword[if] identifier[left] . identifier[isspace] ():
identifier[part_out] = identifier[part_out] . identifier[rstrip] ( literal[string] )
keyword[if] identifier[python3] :
identifier[output] += identifier[part_out]
keyword[else] :
identifier[output] += identifier[part_out] . identifier[decode] ( literal[string] )
keyword[if] identifier[python3] :
keyword[return] identifier[output]
keyword[else] :
keyword[return] identifier[output] . identifier[encode] ( literal[string] ) | def render(template='', data={}, partials_path='.', partials_ext='mustache', partials_dict={}, padding='', def_ldel='{{', def_rdel='}}', scopes=None):
"""Render a mustache template.
Renders a mustache template with a data scope and partial capability.
Given the file structure...
╷
├─╼ main.py
├─╼ main.ms
└─┮ partials
└── part.ms
then main.py would make the following call:
render(open('main.ms', 'r'), {...}, 'partials', 'ms')
Arguments:
template -- A file-like object or a string containing the template
data -- A python dictionary with your data scope
partials_path -- The path to where your partials are stored
(defaults to '.')
partials_ext -- The extension that you want the parser to look for
(defaults to 'mustache')
partials_dict -- A python dictionary which will be search for partials
before the filesystem is. {'include': 'foo'} is the same
as a file called include.mustache
(defaults to {})
padding -- This is for padding partials, and shouldn't be used
(but can be if you really want to)
def_ldel -- The default left delimiter
("{{" by default, as in spec compliant mustache)
def_rdel -- The default right delimiter
("}}" by default, as in spec compliant mustache)
scopes -- The list of scopes that get_key will look through
Returns:
A string containing the rendered template.
"""
# If the template is a seqeuence but not derived from a string
if isinstance(template, Sequence) and (not isinstance(template, string_type)):
# Then we don't need to tokenize it
# But it does need to be a generator
tokens = (token for token in template) # depends on [control=['if'], data=[]]
elif template in g_token_cache:
tokens = (token for token in g_token_cache[template]) # depends on [control=['if'], data=['template', 'g_token_cache']]
else:
# Otherwise make a generator
tokens = tokenize(template, def_ldel, def_rdel)
output = unicode('', 'utf-8')
if scopes is None:
scopes = [data] # depends on [control=['if'], data=['scopes']]
# Run through the tokens
for (tag, key) in tokens:
# Set the current scope
current_scope = scopes[0]
# If we're an end tag
if tag == 'end':
# Pop out of the latest scope
del scopes[0] # depends on [control=['if'], data=[]]
# If the current scope is falsy and not the only scope
elif not current_scope and len(scopes) != 1:
if tag in ['section', 'inverted section']:
# Set the most recent scope to a falsy value
# (I heard False is a good one)
scopes.insert(0, False) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# If we're a literal tag
elif tag == 'literal':
# Add padding to the key and add it to the output
if not isinstance(key, unicode_type): # python 2
key = unicode(key, 'utf-8') # depends on [control=['if'], data=[]]
output += key.replace('\n', '\n' + padding) # depends on [control=['if'], data=[]]
# If we're a variable tag
elif tag == 'variable':
# Add the html escaped key to the output
thing = _get_key(key, scopes)
if thing is True and key == '.':
# if we've coerced into a boolean by accident
# (inverted tags do this)
# then get the un-coerced object (next in the stack)
thing = scopes[1] # depends on [control=['if'], data=[]]
if not isinstance(thing, unicode_type):
thing = unicode(str(thing), 'utf-8') # depends on [control=['if'], data=[]]
output += _html_escape(thing) # depends on [control=['if'], data=[]]
# If we're a no html escape tag
elif tag == 'no escape':
# Just lookup the key and add it
thing = _get_key(key, scopes)
if not isinstance(thing, unicode_type):
thing = unicode(str(thing), 'utf-8') # depends on [control=['if'], data=[]]
output += thing # depends on [control=['if'], data=[]]
# If we're a section tag
elif tag == 'section':
# Get the sections scope
scope = _get_key(key, scopes)
# If the scope is a callable (as described in
# https://mustache.github.io/mustache.5.html)
if isinstance(scope, Callable):
# Generate template text from tags
text = unicode('', 'utf-8')
tags = []
for tag in tokens:
if tag == ('end', key):
break # depends on [control=['if'], data=[]]
tags.append(tag)
(tag_type, tag_key) = tag
if tag_type == 'literal':
text += tag_key # depends on [control=['if'], data=[]]
elif tag_type == 'no escape':
text += '%s& %s %s' % (def_ldel, tag_key, def_rdel) # depends on [control=['if'], data=[]]
else:
text += '%s%s %s%s' % (def_ldel, {'commment': '!', 'section': '#', 'inverted section': '^', 'end': '/', 'partial': '>', 'set delimiter': '=', 'no escape': '&', 'variable': ''}[tag_type], tag_key, def_rdel) # depends on [control=['for'], data=['tag']]
g_token_cache[text] = tags
rend = scope(text, lambda template, data=None: render(template, data={}, partials_path=partials_path, partials_ext=partials_ext, partials_dict=partials_dict, padding=padding, def_ldel=def_ldel, def_rdel=def_rdel, scopes=data and [data] + scopes or scopes))
if python3:
output += rend # depends on [control=['if'], data=[]]
else: # python 2
output += rend.decode('utf-8') # depends on [control=['if'], data=[]]
# If the scope is a sequence, an iterator or generator but not
# derived from a string
elif isinstance(scope, (Sequence, Iterator)) and (not isinstance(scope, string_type)):
# Then we need to do some looping
# Gather up all the tags inside the section
# (And don't be tricked by nested end tags with the same key)
# TODO: This feels like it still has edge cases, no?
tags = []
tags_with_same_key = 0
for tag in tokens:
if tag == ('section', key):
tags_with_same_key += 1 # depends on [control=['if'], data=[]]
if tag == ('end', key):
tags_with_same_key -= 1
if tags_with_same_key < 0:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
tags.append(tag) # depends on [control=['for'], data=['tag']]
# For every item in the scope
for thing in scope:
# Append it as the most recent scope and render
new_scope = [thing] + scopes
rend = render(template=tags, scopes=new_scope, partials_path=partials_path, partials_ext=partials_ext, partials_dict=partials_dict, def_ldel=def_ldel, def_rdel=def_rdel)
if python3:
output += rend # depends on [control=['if'], data=[]]
else: # python 2
output += rend.decode('utf-8') # depends on [control=['for'], data=['thing']] # depends on [control=['if'], data=[]]
else:
# Otherwise we're just a scope section
scopes.insert(0, scope) # depends on [control=['if'], data=['tag']]
# If we're an inverted section
elif tag == 'inverted section':
# Add the flipped scope to the scopes
scope = _get_key(key, scopes)
scopes.insert(0, not scope) # depends on [control=['if'], data=[]]
# If we're a partial
elif tag == 'partial':
# Load the partial
partial = _get_partial(key, partials_dict, partials_path, partials_ext)
# Find what to pad the partial with
left = output.split('\n')[-1]
part_padding = padding
if left.isspace():
part_padding += left # depends on [control=['if'], data=[]]
# Render the partial
part_out = render(template=partial, partials_path=partials_path, partials_ext=partials_ext, partials_dict=partials_dict, def_ldel=def_ldel, def_rdel=def_rdel, padding=part_padding, scopes=scopes)
# If the partial was indented
if left.isspace():
# then remove the spaces from the end
part_out = part_out.rstrip(' \t') # depends on [control=['if'], data=[]]
# Add the partials output to the ouput
if python3:
output += part_out # depends on [control=['if'], data=[]]
else: # python 2
output += part_out.decode('utf-8') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if python3:
return output # depends on [control=['if'], data=[]]
else: # python 2
return output.encode('utf-8') |
def liquid_precipitation_quantity(self, value=99.0):
    """Set IDD Field `liquid_precipitation_quantity`.

    Args:
        value (float): value for IDD Field `liquid_precipitation_quantity`
            Unit: hr
            Missing value: 99.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None marks a missing value and is stored as-is, uncoerced.
        self._liquid_precipitation_quantity = None
        return
    try:
        coerced = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `liquid_precipitation_quantity`'.format(value))
    self._liquid_precipitation_quantity = coerced
constant[Corresponds to IDD Field `liquid_precipitation_quantity`
Args:
value (float): value for IDD Field `liquid_precipitation_quantity`
Unit: hr
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b0fb08e0>
name[self]._liquid_precipitation_quantity assign[=] name[value] | keyword[def] identifier[liquid_precipitation_quantity] ( identifier[self] , identifier[value] = literal[int] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] ))
identifier[self] . identifier[_liquid_precipitation_quantity] = identifier[value] | def liquid_precipitation_quantity(self, value=99.0):
"""Corresponds to IDD Field `liquid_precipitation_quantity`
Args:
value (float): value for IDD Field `liquid_precipitation_quantity`
Unit: hr
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} need to be of type float for field `liquid_precipitation_quantity`'.format(value)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['value']]
self._liquid_precipitation_quantity = value |
def submit(self, command='sleep 1', blocksize=1, tasks_per_node=1, job_name="parsl.auto"):
    """Submit the command onto a freshly instantiated AWS EC2 instance.

    Submit returns an ID that corresponds to the task that was just submitted.

    Parameters
    ----------
    command : str
        Command to be invoked on the remote side.
    blocksize : int
        Number of blocks requested. Currently unused; retained for
        interface compatibility with other providers.
    tasks_per_node : int (default=1)
        Number of command invocations to be launched per node.
    job_name : str
        Prefix for the job name.

    Returns
    -------
    None or str
        If at capacity, None will be returned. Otherwise, the job identifier
        will be returned.
    """
    # Honor the caller-supplied prefix (previously the parameter was
    # discarded and "parsl.auto" was always used); the timestamp keeps
    # the generated name unique per submission.
    job_name = "{0}.{1}".format(job_name, time.time())
    wrapped_cmd = self.launcher(command,
                                tasks_per_node,
                                self.nodes_per_block)
    # spin_up_instance returns a list; only the first instance is tracked.
    [instance, *rest] = self.spin_up_instance(command=wrapped_cmd, job_name=job_name)
    if not instance:
        logger.error("Failed to submit request to EC2")
        return None
    logger.debug("Started instance_id: {0}".format(instance.instance_id))
    # Map the provider-specific state name onto parsl's job states,
    # defaulting to PENDING for anything unrecognized.
    state = translate_table.get(instance.state['Name'], "PENDING")
    self.resources[instance.instance_id] = {
        "job_id": instance.instance_id,
        "instance": instance,
        "status": state
    }
    return instance.instance_id
constant[Submit the command onto a freshly instantiated AWS EC2 instance.
Submit returns an ID that corresponds to the task that was just submitted.
Parameters
----------
command : str
Command to be invoked on the remote side.
blocksize : int
Number of blocks requested.
tasks_per_node : int (default=1)
Number of command invocations to be launched per node
job_name : str
Prefix for the job name.
Returns
-------
None or str
If at capacity, None will be returned. Otherwise, the job identifier will be returned.
]
variable[job_name] assign[=] call[constant[parsl.auto.{0}].format, parameter[call[name[time].time, parameter[]]]]
variable[wrapped_cmd] assign[=] call[name[self].launcher, parameter[name[command], name[tasks_per_node], name[self].nodes_per_block]]
<ast.List object at 0x7da1b01d8cd0> assign[=] call[name[self].spin_up_instance, parameter[]]
if <ast.UnaryOp object at 0x7da1b01d9660> begin[:]
call[name[logger].error, parameter[constant[Failed to submit request to EC2]]]
return[constant[None]]
call[name[logger].debug, parameter[call[constant[Started instance_id: {0}].format, parameter[name[instance].instance_id]]]]
variable[state] assign[=] call[name[translate_table].get, parameter[call[name[instance].state][constant[Name]], constant[PENDING]]]
call[name[self].resources][name[instance].instance_id] assign[=] dictionary[[<ast.Constant object at 0x7da1b01db640>, <ast.Constant object at 0x7da1b01daec0>, <ast.Constant object at 0x7da1b01d9360>], [<ast.Attribute object at 0x7da1b01d9540>, <ast.Name object at 0x7da1b01d8e50>, <ast.Name object at 0x7da1b01daf20>]]
return[name[instance].instance_id] | keyword[def] identifier[submit] ( identifier[self] , identifier[command] = literal[string] , identifier[blocksize] = literal[int] , identifier[tasks_per_node] = literal[int] , identifier[job_name] = literal[string] ):
literal[string]
identifier[job_name] = literal[string] . identifier[format] ( identifier[time] . identifier[time] ())
identifier[wrapped_cmd] = identifier[self] . identifier[launcher] ( identifier[command] ,
identifier[tasks_per_node] ,
identifier[self] . identifier[nodes_per_block] )
[ identifier[instance] ,* identifier[rest] ]= identifier[self] . identifier[spin_up_instance] ( identifier[command] = identifier[wrapped_cmd] , identifier[job_name] = identifier[job_name] )
keyword[if] keyword[not] identifier[instance] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] keyword[None]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[instance] . identifier[instance_id] ))
identifier[state] = identifier[translate_table] . identifier[get] ( identifier[instance] . identifier[state] [ literal[string] ], literal[string] )
identifier[self] . identifier[resources] [ identifier[instance] . identifier[instance_id] ]={
literal[string] : identifier[instance] . identifier[instance_id] ,
literal[string] : identifier[instance] ,
literal[string] : identifier[state]
}
keyword[return] identifier[instance] . identifier[instance_id] | def submit(self, command='sleep 1', blocksize=1, tasks_per_node=1, job_name='parsl.auto'):
"""Submit the command onto a freshly instantiated AWS EC2 instance.
Submit returns an ID that corresponds to the task that was just submitted.
Parameters
----------
command : str
Command to be invoked on the remote side.
blocksize : int
Number of blocks requested.
tasks_per_node : int (default=1)
Number of command invocations to be launched per node
job_name : str
Prefix for the job name.
Returns
-------
None or str
If at capacity, None will be returned. Otherwise, the job identifier will be returned.
"""
job_name = 'parsl.auto.{0}'.format(time.time())
wrapped_cmd = self.launcher(command, tasks_per_node, self.nodes_per_block)
[instance, *rest] = self.spin_up_instance(command=wrapped_cmd, job_name=job_name)
if not instance:
logger.error('Failed to submit request to EC2')
return None # depends on [control=['if'], data=[]]
logger.debug('Started instance_id: {0}'.format(instance.instance_id))
state = translate_table.get(instance.state['Name'], 'PENDING')
self.resources[instance.instance_id] = {'job_id': instance.instance_id, 'instance': instance, 'status': state}
return instance.instance_id |
def get_default_target_names(estimator, num_targets=None):
    """
    Return a vector of target names: "y" if there is only one target,
    and "y0", "y1", ... if there are multiple targets.
    """
    if num_targets is None:
        # Infer the target count from the estimator's coefficient matrix:
        # a 1-D coef_ means a single target, otherwise the first dimension
        # counts the targets.
        coef_shape = estimator.coef_.shape
        num_targets = 1 if len(coef_shape) <= 1 else coef_shape[0]
    if num_targets == 1:
        names = ['y']
    else:
        names = ['y%d' % idx for idx in range(num_targets)]
    return np.array(names)
constant[
Return a vector of target names: "y" if there is only one target,
and "y0", "y1", ... if there are multiple targets.
]
if compare[name[num_targets] is constant[None]] begin[:]
if compare[call[name[len], parameter[name[estimator].coef_.shape]] less_or_equal[<=] constant[1]] begin[:]
variable[num_targets] assign[=] constant[1]
if compare[name[num_targets] equal[==] constant[1]] begin[:]
variable[target_names] assign[=] list[[<ast.Constant object at 0x7da1b1f39e40>]]
return[call[name[np].array, parameter[name[target_names]]]] | keyword[def] identifier[get_default_target_names] ( identifier[estimator] , identifier[num_targets] = keyword[None] ):
literal[string]
keyword[if] identifier[num_targets] keyword[is] keyword[None] :
keyword[if] identifier[len] ( identifier[estimator] . identifier[coef_] . identifier[shape] )<= literal[int] :
identifier[num_targets] = literal[int]
keyword[else] :
identifier[num_targets] , identifier[_] = identifier[estimator] . identifier[coef_] . identifier[shape]
keyword[if] identifier[num_targets] == literal[int] :
identifier[target_names] =[ literal[string] ]
keyword[else] :
identifier[target_names] =[ literal[string] % identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_targets] )]
keyword[return] identifier[np] . identifier[array] ( identifier[target_names] ) | def get_default_target_names(estimator, num_targets=None):
"""
Return a vector of target names: "y" if there is only one target,
and "y0", "y1", ... if there are multiple targets.
"""
if num_targets is None:
if len(estimator.coef_.shape) <= 1:
num_targets = 1 # depends on [control=['if'], data=[]]
else:
(num_targets, _) = estimator.coef_.shape # depends on [control=['if'], data=['num_targets']]
if num_targets == 1:
target_names = ['y'] # depends on [control=['if'], data=[]]
else:
target_names = ['y%d' % i for i in range(num_targets)]
return np.array(target_names) |
def xml_entity_escape(data):
    """
    replace special characters with their XML entity versions
    """
    # Single-pass translation is equivalent to the sequential
    # replace("&"), replace(">"), replace("<") chain (ampersand-first
    # ordering), since each character is rewritten exactly once.
    entity_map = str.maketrans({"&": "&amp;", ">": "&gt;", "<": "&lt;"})
    return data.translate(entity_map)
constant[
replace special characters with their XML entity versions
]
variable[data] assign[=] call[name[data].replace, parameter[constant[&], constant[&]]]
variable[data] assign[=] call[name[data].replace, parameter[constant[>], constant[>]]]
variable[data] assign[=] call[name[data].replace, parameter[constant[<], constant[<]]]
return[name[data]] | keyword[def] identifier[xml_entity_escape] ( identifier[data] ):
literal[string]
identifier[data] = identifier[data] . identifier[replace] ( literal[string] , literal[string] )
identifier[data] = identifier[data] . identifier[replace] ( literal[string] , literal[string] )
identifier[data] = identifier[data] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[data] | def xml_entity_escape(data):
"""
replace special characters with their XML entity versions
"""
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data |
def clean_text(self, name, **kwargs):
    """Basic clean-up: drop surrounding quotes, then squeeze whitespace."""
    unquoted = strip_quotes(name)
    return collapse_spaces(unquoted)
constant[Basic clean-up.]
variable[name] assign[=] call[name[strip_quotes], parameter[name[name]]]
variable[name] assign[=] call[name[collapse_spaces], parameter[name[name]]]
return[name[name]] | keyword[def] identifier[clean_text] ( identifier[self] , identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[name] = identifier[strip_quotes] ( identifier[name] )
identifier[name] = identifier[collapse_spaces] ( identifier[name] )
keyword[return] identifier[name] | def clean_text(self, name, **kwargs):
"""Basic clean-up."""
name = strip_quotes(name)
name = collapse_spaces(name)
return name |
def _set_event_approval(request, event_id, approve):
    """Mark one event approved or rejected, record the acting user, and flash a message."""
    event = get_object_or_404(Event, id=event_id)
    event.approved = approve
    event.rejected = not approve
    if approve:
        event.approved_by = request.user
    else:
        event.rejected_by = request.user
    event.save()
    verb = "Approved" if approve else "Rejected"
    messages.success(request, "{} event {}".format(verb, event))


def events_view(request):
    """Events homepage.

    Shows a list of events occurring in the next week, month, and
    future. Events admins additionally see events awaiting approval,
    can approve/reject them via POST, and can include past events with
    the ``show_all`` query parameter.
    """
    is_events_admin = request.user.has_admin_permission('events')
    if request.method == "POST":
        # The two buttons are checked independently; a single request may
        # in principle carry both an approval and a rejection.
        if "approve" in request.POST and is_events_admin:
            _set_event_approval(request, request.POST.get('approve'), approve=True)
        if "reject" in request.POST and is_events_admin:
            _set_event_approval(request, request.POST.get('reject'), approve=False)

    if is_events_admin and "show_all" in request.GET:
        viewable_events = (Event.objects.all().this_year().prefetch_related("groups"))
    else:
        viewable_events = (Event.objects.visible_to_user(request.user).this_year().prefetch_related("groups"))

    # Date windows: "this week" starts on Monday of the current week;
    # "this month" is the 31 days following that week.
    today = datetime.date.today()
    delta = today - datetime.timedelta(days=today.weekday())
    this_week = (delta, delta + datetime.timedelta(days=7))
    this_month = (this_week[1], this_week[1] + datetime.timedelta(days=31))
    events_categories = [{
        "title": "This week",
        "events": viewable_events.filter(time__gte=this_week[0], time__lt=this_week[1])
    }, {
        "title": "This month",
        "events": viewable_events.filter(time__gte=this_month[0], time__lt=this_month[1])
    }, {
        "title": "Future",
        "events": viewable_events.filter(time__gte=this_month[1])
    }]
    if is_events_admin:
        unapproved_events = (Event.objects.filter(approved=False, rejected=False).prefetch_related("groups"))
        events_categories = [{"title": "Awaiting Approval", "events": unapproved_events}] + events_categories
        if "show_all" in request.GET:
            events_categories.append({"title": "Past", "events": viewable_events.filter(time__lt=this_week[0])})
    context = {
        "events": events_categories,
        "num_events": sum(category["events"].count() for category in events_categories),
        "is_events_admin": is_events_admin,
        "events_admin": is_events_admin,
        "show_attend": True,
        "show_icon": True
    }
    return render(request, "events/home.html", context)
constant[Events homepage.
Shows a list of events occurring in the next week, month, and
future.
]
variable[is_events_admin] assign[=] call[name[request].user.has_admin_permission, parameter[constant[events]]]
if compare[name[request].method equal[==] constant[POST]] begin[:]
if <ast.BoolOp object at 0x7da20c6ab6a0> begin[:]
variable[event_id] assign[=] call[name[request].POST.get, parameter[constant[approve]]]
variable[event] assign[=] call[name[get_object_or_404], parameter[name[Event]]]
name[event].rejected assign[=] constant[False]
name[event].approved assign[=] constant[True]
name[event].approved_by assign[=] name[request].user
call[name[event].save, parameter[]]
call[name[messages].success, parameter[name[request], call[constant[Approved event {}].format, parameter[name[event]]]]]
if <ast.BoolOp object at 0x7da20c6aac50> begin[:]
variable[event_id] assign[=] call[name[request].POST.get, parameter[constant[reject]]]
variable[event] assign[=] call[name[get_object_or_404], parameter[name[Event]]]
name[event].approved assign[=] constant[False]
name[event].rejected assign[=] constant[True]
name[event].rejected_by assign[=] name[request].user
call[name[event].save, parameter[]]
call[name[messages].success, parameter[name[request], call[constant[Rejected event {}].format, parameter[name[event]]]]]
if <ast.BoolOp object at 0x7da20c6ab610> begin[:]
variable[viewable_events] assign[=] call[call[call[name[Event].objects.all, parameter[]].this_year, parameter[]].prefetch_related, parameter[constant[groups]]]
variable[today] assign[=] call[name[datetime].date.today, parameter[]]
variable[delta] assign[=] binary_operation[name[today] - call[name[datetime].timedelta, parameter[]]]
variable[this_week] assign[=] tuple[[<ast.Name object at 0x7da20c6a98d0>, <ast.BinOp object at 0x7da20c6a9090>]]
variable[this_month] assign[=] tuple[[<ast.Subscript object at 0x7da20c6a92a0>, <ast.BinOp object at 0x7da1b04d5990>]]
variable[events_categories] assign[=] list[[<ast.Dict object at 0x7da1b04d6e00>, <ast.Dict object at 0x7da1b04d77c0>, <ast.Dict object at 0x7da1b04d68c0>]]
if name[is_events_admin] begin[:]
variable[unapproved_events] assign[=] call[call[name[Event].objects.filter, parameter[]].prefetch_related, parameter[constant[groups]]]
variable[events_categories] assign[=] binary_operation[list[[<ast.Dict object at 0x7da1b04d78e0>]] + name[events_categories]]
if <ast.BoolOp object at 0x7da1b04d62f0> begin[:]
call[name[events_categories].append, parameter[dictionary[[<ast.Constant object at 0x7da1b04d6980>, <ast.Constant object at 0x7da1b04d5600>], [<ast.Constant object at 0x7da1b04d7610>, <ast.Call object at 0x7da1b04d6e60>]]]]
variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da1b04d5480>, <ast.Constant object at 0x7da1b04d7e50>, <ast.Constant object at 0x7da1b04d7040>, <ast.Constant object at 0x7da1b04d55d0>, <ast.Constant object at 0x7da1b04d6140>, <ast.Constant object at 0x7da1b04d6b00>], [<ast.Name object at 0x7da1b04d4a00>, <ast.Call object at 0x7da1b04d6770>, <ast.Name object at 0x7da1b04d73d0>, <ast.Name object at 0x7da1b04d63b0>, <ast.Constant object at 0x7da1b04d5a80>, <ast.Constant object at 0x7da1b04d6c20>]]
return[call[name[render], parameter[name[request], constant[events/home.html], name[context]]]] | keyword[def] identifier[events_view] ( identifier[request] ):
literal[string]
identifier[is_events_admin] = identifier[request] . identifier[user] . identifier[has_admin_permission] ( literal[string] )
keyword[if] identifier[request] . identifier[method] == literal[string] :
keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] keyword[and] identifier[is_events_admin] :
identifier[event_id] = identifier[request] . identifier[POST] . identifier[get] ( literal[string] )
identifier[event] = identifier[get_object_or_404] ( identifier[Event] , identifier[id] = identifier[event_id] )
identifier[event] . identifier[rejected] = keyword[False]
identifier[event] . identifier[approved] = keyword[True]
identifier[event] . identifier[approved_by] = identifier[request] . identifier[user]
identifier[event] . identifier[save] ()
identifier[messages] . identifier[success] ( identifier[request] , literal[string] . identifier[format] ( identifier[event] ))
keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] keyword[and] identifier[is_events_admin] :
identifier[event_id] = identifier[request] . identifier[POST] . identifier[get] ( literal[string] )
identifier[event] = identifier[get_object_or_404] ( identifier[Event] , identifier[id] = identifier[event_id] )
identifier[event] . identifier[approved] = keyword[False]
identifier[event] . identifier[rejected] = keyword[True]
identifier[event] . identifier[rejected_by] = identifier[request] . identifier[user]
identifier[event] . identifier[save] ()
identifier[messages] . identifier[success] ( identifier[request] , literal[string] . identifier[format] ( identifier[event] ))
keyword[if] identifier[is_events_admin] keyword[and] literal[string] keyword[in] identifier[request] . identifier[GET] :
identifier[viewable_events] =( identifier[Event] . identifier[objects] . identifier[all] (). identifier[this_year] (). identifier[prefetch_related] ( literal[string] ))
keyword[else] :
identifier[viewable_events] =( identifier[Event] . identifier[objects] . identifier[visible_to_user] ( identifier[request] . identifier[user] ). identifier[this_year] (). identifier[prefetch_related] ( literal[string] ))
identifier[today] = identifier[datetime] . identifier[date] . identifier[today] ()
identifier[delta] = identifier[today] - identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[today] . identifier[weekday] ())
identifier[this_week] =( identifier[delta] , identifier[delta] + identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] ))
identifier[this_month] =( identifier[this_week] [ literal[int] ], identifier[this_week] [ literal[int] ]+ identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] ))
identifier[events_categories] =[{
literal[string] : literal[string] ,
literal[string] : identifier[viewable_events] . identifier[filter] ( identifier[time__gte] = identifier[this_week] [ literal[int] ], identifier[time__lt] = identifier[this_week] [ literal[int] ])
},{
literal[string] : literal[string] ,
literal[string] : identifier[viewable_events] . identifier[filter] ( identifier[time__gte] = identifier[this_month] [ literal[int] ], identifier[time__lt] = identifier[this_month] [ literal[int] ])
},{
literal[string] : literal[string] ,
literal[string] : identifier[viewable_events] . identifier[filter] ( identifier[time__gte] = identifier[this_month] [ literal[int] ])
}]
keyword[if] identifier[is_events_admin] :
identifier[unapproved_events] =( identifier[Event] . identifier[objects] . identifier[filter] ( identifier[approved] = keyword[False] , identifier[rejected] = keyword[False] ). identifier[prefetch_related] ( literal[string] ))
identifier[events_categories] =[{ literal[string] : literal[string] , literal[string] : identifier[unapproved_events] }]+ identifier[events_categories]
keyword[if] identifier[is_events_admin] keyword[and] literal[string] keyword[in] identifier[request] . identifier[GET] :
identifier[events_categories] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[viewable_events] . identifier[filter] ( identifier[time__lt] = identifier[this_week] [ literal[int] ])})
identifier[context] ={
literal[string] : identifier[events_categories] ,
literal[string] : identifier[sum] ([ identifier[x] [ literal[string] ]. identifier[count] () keyword[for] identifier[x] keyword[in] identifier[events_categories] ]),
literal[string] : identifier[is_events_admin] ,
literal[string] : identifier[is_events_admin] ,
literal[string] : keyword[True] ,
literal[string] : keyword[True]
}
keyword[return] identifier[render] ( identifier[request] , literal[string] , identifier[context] ) | def events_view(request):
"""Events homepage.
Shows a list of events occurring in the next week, month, and
future.
"""
is_events_admin = request.user.has_admin_permission('events')
if request.method == 'POST':
if 'approve' in request.POST and is_events_admin:
event_id = request.POST.get('approve')
event = get_object_or_404(Event, id=event_id)
event.rejected = False
event.approved = True
event.approved_by = request.user
event.save()
messages.success(request, 'Approved event {}'.format(event)) # depends on [control=['if'], data=[]]
if 'reject' in request.POST and is_events_admin:
event_id = request.POST.get('reject')
event = get_object_or_404(Event, id=event_id)
event.approved = False
event.rejected = True
event.rejected_by = request.user
event.save()
messages.success(request, 'Rejected event {}'.format(event)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if is_events_admin and 'show_all' in request.GET:
viewable_events = Event.objects.all().this_year().prefetch_related('groups') # depends on [control=['if'], data=[]]
else:
viewable_events = Event.objects.visible_to_user(request.user).this_year().prefetch_related('groups')
# get date objects for week and month
today = datetime.date.today()
delta = today - datetime.timedelta(days=today.weekday())
this_week = (delta, delta + datetime.timedelta(days=7))
this_month = (this_week[1], this_week[1] + datetime.timedelta(days=31))
events_categories = [{'title': 'This week', 'events': viewable_events.filter(time__gte=this_week[0], time__lt=this_week[1])}, {'title': 'This month', 'events': viewable_events.filter(time__gte=this_month[0], time__lt=this_month[1])}, {'title': 'Future', 'events': viewable_events.filter(time__gte=this_month[1])}]
if is_events_admin:
unapproved_events = Event.objects.filter(approved=False, rejected=False).prefetch_related('groups')
events_categories = [{'title': 'Awaiting Approval', 'events': unapproved_events}] + events_categories # depends on [control=['if'], data=[]]
if is_events_admin and 'show_all' in request.GET:
events_categories.append({'title': 'Past', 'events': viewable_events.filter(time__lt=this_week[0])}) # depends on [control=['if'], data=[]]
context = {'events': events_categories, 'num_events': sum([x['events'].count() for x in events_categories]), 'is_events_admin': is_events_admin, 'events_admin': is_events_admin, 'show_attend': True, 'show_icon': True}
return render(request, 'events/home.html', context) |
def remove_uid(self, uid):
    """
    Remove all references to a specific User ID
    :param uid: A User ID
    """
    # Drop the uid from every session that maps back to it, then
    # delete the uid's own session list.
    for session_id in self.get('uid2sid', uid):
        self.remove('sid2uid', session_id, uid)
    self.delete('uid2sid', uid)
self.delete('uid2sid', uid) | def function[remove_uid, parameter[self, uid]]:
constant[
Remove all references to a specific User ID
:param uid: A User ID
]
for taget[name[sid]] in starred[call[name[self].get, parameter[constant[uid2sid], name[uid]]]] begin[:]
call[name[self].remove, parameter[constant[sid2uid], name[sid], name[uid]]]
call[name[self].delete, parameter[constant[uid2sid], name[uid]]] | keyword[def] identifier[remove_uid] ( identifier[self] , identifier[uid] ):
literal[string]
keyword[for] identifier[sid] keyword[in] identifier[self] . identifier[get] ( literal[string] , identifier[uid] ):
identifier[self] . identifier[remove] ( literal[string] , identifier[sid] , identifier[uid] )
identifier[self] . identifier[delete] ( literal[string] , identifier[uid] ) | def remove_uid(self, uid):
"""
Remove all references to a specific User ID
:param uid: A User ID
"""
for sid in self.get('uid2sid', uid):
self.remove('sid2uid', sid, uid) # depends on [control=['for'], data=['sid']]
self.delete('uid2sid', uid) |
def cf_number_from_integer(integer):
    """
    Creates a CFNumber object from an integer
    :param integer:
        The integer to create the CFNumber for
    :return:
        A CFNumber
    """
    # Box the Python int into a C long so CoreFoundation can read it.
    boxed = ffi.new('long *', integer)
    return CoreFoundation.CFNumberCreate(
        CoreFoundation.kCFAllocatorDefault,
        kCFNumberCFIndexType,
        boxed)
constant[
Creates a CFNumber object from an integer
:param integer:
The integer to create the CFNumber for
:return:
A CFNumber
]
variable[integer_as_long] assign[=] call[name[ffi].new, parameter[constant[long *], name[integer]]]
return[call[name[CoreFoundation].CFNumberCreate, parameter[name[CoreFoundation].kCFAllocatorDefault, name[kCFNumberCFIndexType], name[integer_as_long]]]] | keyword[def] identifier[cf_number_from_integer] ( identifier[integer] ):
literal[string]
identifier[integer_as_long] = identifier[ffi] . identifier[new] ( literal[string] , identifier[integer] )
keyword[return] identifier[CoreFoundation] . identifier[CFNumberCreate] (
identifier[CoreFoundation] . identifier[kCFAllocatorDefault] ,
identifier[kCFNumberCFIndexType] ,
identifier[integer_as_long]
) | def cf_number_from_integer(integer):
"""
Creates a CFNumber object from an integer
:param integer:
The integer to create the CFNumber for
:return:
A CFNumber
"""
integer_as_long = ffi.new('long *', integer)
return CoreFoundation.CFNumberCreate(CoreFoundation.kCFAllocatorDefault, kCFNumberCFIndexType, integer_as_long) |
def get_connection(self, *args, **kwargs):
    """
    Create a new connection, or return an existing one from the cache. Uses Fabric's current ``env.host_string``
    and the URL to the Docker service.
    :param args: Additional arguments for the client constructor, if a new client has to be instantiated.
    :param kwargs: Additional keyword args for the client constructor, if a new client has to be instantiated.
    """
    # Connections are cached per (host, docker URL) pair.
    cache_key = (env.get('host_string'),
                 kwargs.get('base_url', env.get('docker_base_url')))
    default_config = _get_default_config(None)
    if not default_config:
        config = self.get_or_create_connection(cache_key, self.configuration_class, *args, **kwargs)
        return config.get_client()
    if cache_key not in self:
        self[cache_key] = default_config
    return default_config.get_client()
constant[
Create a new connection, or return an existing one from the cache. Uses Fabric's current ``env.host_string``
and the URL to the Docker service.
:param args: Additional arguments for the client constructor, if a new client has to be instantiated.
:param kwargs: Additional keyword args for the client constructor, if a new client has to be instantiated.
]
variable[key] assign[=] tuple[[<ast.Call object at 0x7da20c76c400>, <ast.Call object at 0x7da20c76c1f0>]]
variable[default_config] assign[=] call[name[_get_default_config], parameter[constant[None]]]
if name[default_config] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self]] begin[:]
call[name[self]][name[key]] assign[=] name[default_config]
return[call[name[default_config].get_client, parameter[]]]
variable[config] assign[=] call[name[self].get_or_create_connection, parameter[name[key], name[self].configuration_class, <ast.Starred object at 0x7da20c76f1c0>]]
return[call[name[config].get_client, parameter[]]] | keyword[def] identifier[get_connection] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[key] = identifier[env] . identifier[get] ( literal[string] ), identifier[kwargs] . identifier[get] ( literal[string] , identifier[env] . identifier[get] ( literal[string] ))
identifier[default_config] = identifier[_get_default_config] ( keyword[None] )
keyword[if] identifier[default_config] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] :
identifier[self] [ identifier[key] ]= identifier[default_config]
keyword[return] identifier[default_config] . identifier[get_client] ()
identifier[config] = identifier[self] . identifier[get_or_create_connection] ( identifier[key] , identifier[self] . identifier[configuration_class] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[config] . identifier[get_client] () | def get_connection(self, *args, **kwargs):
"""
Create a new connection, or return an existing one from the cache. Uses Fabric's current ``env.host_string``
and the URL to the Docker service.
:param args: Additional arguments for the client constructor, if a new client has to be instantiated.
:param kwargs: Additional keyword args for the client constructor, if a new client has to be instantiated.
"""
key = (env.get('host_string'), kwargs.get('base_url', env.get('docker_base_url')))
default_config = _get_default_config(None)
if default_config:
if key not in self:
self[key] = default_config # depends on [control=['if'], data=['key', 'self']]
return default_config.get_client() # depends on [control=['if'], data=[]]
config = self.get_or_create_connection(key, self.configuration_class, *args, **kwargs)
return config.get_client() |
def ingest(self, **kwargs):
'''
a core method to ingest and validate arbitrary keyword data
**NOTE: data is always returned with this method**
for each key in the model, a value is returned according
to the following priority:
1. value in kwargs if field passes validation test
2. default value declared for the key in the model
3. empty value appropriate to datatype of key in the model
**NOTE: as long as a default value is provided for each key-
value, returned data will be model valid
**NOTE: if 'extra_fields' is True for a dictionary, the key-
value pair of all fields in kwargs which are not declared in
the model will also be added to the corresponding dictionary
data
**NOTE: if 'max_size' is declared for a list, method will
stop adding input to the list once it reaches max size
:param kwargs: key, value pairs
:return: dictionary with keys and value
'''
__name__ = '%s.ingest' % self.__class__.__name__
schema_dict = self.schema
path_to_root = '.'
valid_data = self._ingest_dict(kwargs, schema_dict, path_to_root)
return valid_data | def function[ingest, parameter[self]]:
constant[
a core method to ingest and validate arbitrary keyword data
**NOTE: data is always returned with this method**
for each key in the model, a value is returned according
to the following priority:
1. value in kwargs if field passes validation test
2. default value declared for the key in the model
3. empty value appropriate to datatype of key in the model
**NOTE: as long as a default value is provided for each key-
value, returned data will be model valid
**NOTE: if 'extra_fields' is True for a dictionary, the key-
value pair of all fields in kwargs which are not declared in
the model will also be added to the corresponding dictionary
data
**NOTE: if 'max_size' is declared for a list, method will
stop adding input to the list once it reaches max size
:param kwargs: key, value pairs
:return: dictionary with keys and value
]
variable[__name__] assign[=] binary_operation[constant[%s.ingest] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__]
variable[schema_dict] assign[=] name[self].schema
variable[path_to_root] assign[=] constant[.]
variable[valid_data] assign[=] call[name[self]._ingest_dict, parameter[name[kwargs], name[schema_dict], name[path_to_root]]]
return[name[valid_data]] | keyword[def] identifier[ingest] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[__name__] = literal[string] % identifier[self] . identifier[__class__] . identifier[__name__]
identifier[schema_dict] = identifier[self] . identifier[schema]
identifier[path_to_root] = literal[string]
identifier[valid_data] = identifier[self] . identifier[_ingest_dict] ( identifier[kwargs] , identifier[schema_dict] , identifier[path_to_root] )
keyword[return] identifier[valid_data] | def ingest(self, **kwargs):
"""
a core method to ingest and validate arbitrary keyword data
**NOTE: data is always returned with this method**
for each key in the model, a value is returned according
to the following priority:
1. value in kwargs if field passes validation test
2. default value declared for the key in the model
3. empty value appropriate to datatype of key in the model
**NOTE: as long as a default value is provided for each key-
value, returned data will be model valid
**NOTE: if 'extra_fields' is True for a dictionary, the key-
value pair of all fields in kwargs which are not declared in
the model will also be added to the corresponding dictionary
data
**NOTE: if 'max_size' is declared for a list, method will
stop adding input to the list once it reaches max size
:param kwargs: key, value pairs
:return: dictionary with keys and value
"""
__name__ = '%s.ingest' % self.__class__.__name__
schema_dict = self.schema
path_to_root = '.'
valid_data = self._ingest_dict(kwargs, schema_dict, path_to_root)
return valid_data |
def generate(self, request, **kwargs):
""" proxy for the tileset.generate method """
# method check to avoid bad requests
self.method_check(request, allowed=['get'])
# create a basic bundle object for self.get_cached_obj_get.
basic_bundle = self.build_bundle(request=request)
# using the primary key defined in the url, obtain the tileset
tileset = self.cached_obj_get(
bundle=basic_bundle,
**self.remove_api_resource_names(kwargs))
# Return what the method output, tastypie will handle the serialization
return self.create_response(request, tileset.generate()) | def function[generate, parameter[self, request]]:
constant[ proxy for the tileset.generate method ]
call[name[self].method_check, parameter[name[request]]]
variable[basic_bundle] assign[=] call[name[self].build_bundle, parameter[]]
variable[tileset] assign[=] call[name[self].cached_obj_get, parameter[]]
return[call[name[self].create_response, parameter[name[request], call[name[tileset].generate, parameter[]]]]] | keyword[def] identifier[generate] ( identifier[self] , identifier[request] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[method_check] ( identifier[request] , identifier[allowed] =[ literal[string] ])
identifier[basic_bundle] = identifier[self] . identifier[build_bundle] ( identifier[request] = identifier[request] )
identifier[tileset] = identifier[self] . identifier[cached_obj_get] (
identifier[bundle] = identifier[basic_bundle] ,
** identifier[self] . identifier[remove_api_resource_names] ( identifier[kwargs] ))
keyword[return] identifier[self] . identifier[create_response] ( identifier[request] , identifier[tileset] . identifier[generate] ()) | def generate(self, request, **kwargs):
""" proxy for the tileset.generate method """
# method check to avoid bad requests
self.method_check(request, allowed=['get'])
# create a basic bundle object for self.get_cached_obj_get.
basic_bundle = self.build_bundle(request=request)
# using the primary key defined in the url, obtain the tileset
tileset = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
# Return what the method output, tastypie will handle the serialization
return self.create_response(request, tileset.generate()) |
def bind(self, container, attr_name):
""" Get an instance of this Dependency to bind to `container` with
`attr_name`.
"""
instance = super(DependencyProvider, self).bind(container)
instance.attr_name = attr_name
self.attr_name = attr_name
return instance | def function[bind, parameter[self, container, attr_name]]:
constant[ Get an instance of this Dependency to bind to `container` with
`attr_name`.
]
variable[instance] assign[=] call[call[name[super], parameter[name[DependencyProvider], name[self]]].bind, parameter[name[container]]]
name[instance].attr_name assign[=] name[attr_name]
name[self].attr_name assign[=] name[attr_name]
return[name[instance]] | keyword[def] identifier[bind] ( identifier[self] , identifier[container] , identifier[attr_name] ):
literal[string]
identifier[instance] = identifier[super] ( identifier[DependencyProvider] , identifier[self] ). identifier[bind] ( identifier[container] )
identifier[instance] . identifier[attr_name] = identifier[attr_name]
identifier[self] . identifier[attr_name] = identifier[attr_name]
keyword[return] identifier[instance] | def bind(self, container, attr_name):
""" Get an instance of this Dependency to bind to `container` with
`attr_name`.
"""
instance = super(DependencyProvider, self).bind(container)
instance.attr_name = attr_name
self.attr_name = attr_name
return instance |
def append(self, row):
'''
The idea is, we recognize when we have a new testcase by
checking the first cell. If it's not empty and not a comment,
we have a new test case.
'''
if len(row) == 0:
# blank line. Should we throw it away, or append a BlankLine object?
return
if (row[0] != "" and
(not row[0].lstrip().startswith("#"))):
# we have a new child table
self._children.append(self._childClass(self.parent, row.linenumber, row[0]))
if len(row.cells) > 1:
# It appears the first row -- which contains the test case or
# keyword name -- also has the first logical row of cells.
# We'll create a Row, but we'll make the first cell empty instead
# of leaving the name in it, since other code always assumes the
# first cell is empty.
#
# To be honest, I'm not sure this is the Right Thing To Do, but
# I'm too lazy to audit the code to see if it matters if we keep
# the first cell intact. Sorry if this ends up causing you grief
# some day...
row[0] = ""
self._children[-1].append(row.linenumber, row.raw_text, row.cells)
elif len(self._children) == 0:
# something before the first test case
# For now, append it to self.comments; eventually we should flag
# an error if it's NOT a comment
self.comments.append(row)
else:
# another row for the testcase
if len(row.cells) > 0:
self._children[-1].append(row.linenumber, row.raw_text, row.cells) | def function[append, parameter[self, row]]:
constant[
The idea is, we recognize when we have a new testcase by
checking the first cell. If it's not empty and not a comment,
we have a new test case.
]
if compare[call[name[len], parameter[name[row]]] equal[==] constant[0]] begin[:]
return[None]
if <ast.BoolOp object at 0x7da18ede4ac0> begin[:]
call[name[self]._children.append, parameter[call[name[self]._childClass, parameter[name[self].parent, name[row].linenumber, call[name[row]][constant[0]]]]]]
if compare[call[name[len], parameter[name[row].cells]] greater[>] constant[1]] begin[:]
call[name[row]][constant[0]] assign[=] constant[]
call[call[name[self]._children][<ast.UnaryOp object at 0x7da18ede64d0>].append, parameter[name[row].linenumber, name[row].raw_text, name[row].cells]] | keyword[def] identifier[append] ( identifier[self] , identifier[row] ):
literal[string]
keyword[if] identifier[len] ( identifier[row] )== literal[int] :
keyword[return]
keyword[if] ( identifier[row] [ literal[int] ]!= literal[string] keyword[and]
( keyword[not] identifier[row] [ literal[int] ]. identifier[lstrip] (). identifier[startswith] ( literal[string] ))):
identifier[self] . identifier[_children] . identifier[append] ( identifier[self] . identifier[_childClass] ( identifier[self] . identifier[parent] , identifier[row] . identifier[linenumber] , identifier[row] [ literal[int] ]))
keyword[if] identifier[len] ( identifier[row] . identifier[cells] )> literal[int] :
identifier[row] [ literal[int] ]= literal[string]
identifier[self] . identifier[_children] [- literal[int] ]. identifier[append] ( identifier[row] . identifier[linenumber] , identifier[row] . identifier[raw_text] , identifier[row] . identifier[cells] )
keyword[elif] identifier[len] ( identifier[self] . identifier[_children] )== literal[int] :
identifier[self] . identifier[comments] . identifier[append] ( identifier[row] )
keyword[else] :
keyword[if] identifier[len] ( identifier[row] . identifier[cells] )> literal[int] :
identifier[self] . identifier[_children] [- literal[int] ]. identifier[append] ( identifier[row] . identifier[linenumber] , identifier[row] . identifier[raw_text] , identifier[row] . identifier[cells] ) | def append(self, row):
"""
The idea is, we recognize when we have a new testcase by
checking the first cell. If it's not empty and not a comment,
we have a new test case.
"""
if len(row) == 0:
# blank line. Should we throw it away, or append a BlankLine object?
return # depends on [control=['if'], data=[]]
if row[0] != '' and (not row[0].lstrip().startswith('#')):
# we have a new child table
self._children.append(self._childClass(self.parent, row.linenumber, row[0]))
if len(row.cells) > 1:
# It appears the first row -- which contains the test case or
# keyword name -- also has the first logical row of cells.
# We'll create a Row, but we'll make the first cell empty instead
# of leaving the name in it, since other code always assumes the
# first cell is empty.
#
# To be honest, I'm not sure this is the Right Thing To Do, but
# I'm too lazy to audit the code to see if it matters if we keep
# the first cell intact. Sorry if this ends up causing you grief
# some day...
row[0] = ''
self._children[-1].append(row.linenumber, row.raw_text, row.cells) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif len(self._children) == 0:
# something before the first test case
# For now, append it to self.comments; eventually we should flag
# an error if it's NOT a comment
self.comments.append(row) # depends on [control=['if'], data=[]]
# another row for the testcase
elif len(row.cells) > 0:
self._children[-1].append(row.linenumber, row.raw_text, row.cells) # depends on [control=['if'], data=[]] |
def prettyPrint(self, scope=0):
"""Return an object representation string.
Returns
-------
: :class:`str`
Human-friendly object representation.
"""
scope += 1
representation = self.__class__.__name__ + ':\n'
for idx, componentValue in enumerate(self._componentValues):
if componentValue is not noValue and componentValue.isValue:
representation += ' ' * scope
if self.componentType:
representation += self.componentType.getNameByPosition(idx)
else:
representation += self._dynamicNames.getNameByPosition(idx)
representation = '%s=%s\n' % (
representation, componentValue.prettyPrint(scope)
)
return representation | def function[prettyPrint, parameter[self, scope]]:
constant[Return an object representation string.
Returns
-------
: :class:`str`
Human-friendly object representation.
]
<ast.AugAssign object at 0x7da2047e81f0>
variable[representation] assign[=] binary_operation[name[self].__class__.__name__ + constant[:
]]
for taget[tuple[[<ast.Name object at 0x7da2047e9270>, <ast.Name object at 0x7da2047ea4d0>]]] in starred[call[name[enumerate], parameter[name[self]._componentValues]]] begin[:]
if <ast.BoolOp object at 0x7da2047e8b20> begin[:]
<ast.AugAssign object at 0x7da2047e8c40>
if name[self].componentType begin[:]
<ast.AugAssign object at 0x7da2047ead40>
variable[representation] assign[=] binary_operation[constant[%s=%s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2047e8df0>, <ast.Call object at 0x7da2047e9120>]]]
return[name[representation]] | keyword[def] identifier[prettyPrint] ( identifier[self] , identifier[scope] = literal[int] ):
literal[string]
identifier[scope] += literal[int]
identifier[representation] = identifier[self] . identifier[__class__] . identifier[__name__] + literal[string]
keyword[for] identifier[idx] , identifier[componentValue] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_componentValues] ):
keyword[if] identifier[componentValue] keyword[is] keyword[not] identifier[noValue] keyword[and] identifier[componentValue] . identifier[isValue] :
identifier[representation] += literal[string] * identifier[scope]
keyword[if] identifier[self] . identifier[componentType] :
identifier[representation] += identifier[self] . identifier[componentType] . identifier[getNameByPosition] ( identifier[idx] )
keyword[else] :
identifier[representation] += identifier[self] . identifier[_dynamicNames] . identifier[getNameByPosition] ( identifier[idx] )
identifier[representation] = literal[string] %(
identifier[representation] , identifier[componentValue] . identifier[prettyPrint] ( identifier[scope] )
)
keyword[return] identifier[representation] | def prettyPrint(self, scope=0):
"""Return an object representation string.
Returns
-------
: :class:`str`
Human-friendly object representation.
"""
scope += 1
representation = self.__class__.__name__ + ':\n'
for (idx, componentValue) in enumerate(self._componentValues):
if componentValue is not noValue and componentValue.isValue:
representation += ' ' * scope
if self.componentType:
representation += self.componentType.getNameByPosition(idx) # depends on [control=['if'], data=[]]
else:
representation += self._dynamicNames.getNameByPosition(idx)
representation = '%s=%s\n' % (representation, componentValue.prettyPrint(scope)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return representation |
def holds(snapshot, **kwargs):
'''
Lists all existing user references for the given snapshot or snapshots.
snapshot : string
name of snapshot
recursive : boolean
lists the holds that are set on the named descendent snapshots also.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.holds myzpool/mydataset@baseline
'''
## Configure command
# NOTE: initialize the defaults
flags = ['-H']
target = []
# NOTE: set extra config from kwargs
if kwargs.get('recursive', False):
flags.append('-r')
# NOTE: update target
target.append(snapshot)
## Lookup holds
res = __salt__['cmd.run_all'](
__utils__['zfs.zfs_command'](
command='holds',
flags=flags,
target=target,
),
python_shell=False,
)
ret = __utils__['zfs.parse_command_result'](res)
if res['retcode'] == 0:
for hold in res['stdout'].splitlines():
hold_data = OrderedDict(list(zip(
['name', 'tag', 'timestamp'],
hold.split("\t"),
)))
ret[hold_data['tag'].strip()] = hold_data['timestamp']
return ret | def function[holds, parameter[snapshot]]:
constant[
Lists all existing user references for the given snapshot or snapshots.
snapshot : string
name of snapshot
recursive : boolean
lists the holds that are set on the named descendent snapshots also.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.holds myzpool/mydataset@baseline
]
variable[flags] assign[=] list[[<ast.Constant object at 0x7da18dc9bfa0>]]
variable[target] assign[=] list[[]]
if call[name[kwargs].get, parameter[constant[recursive], constant[False]]] begin[:]
call[name[flags].append, parameter[constant[-r]]]
call[name[target].append, parameter[name[snapshot]]]
variable[res] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[call[call[name[__utils__]][constant[zfs.zfs_command]], parameter[]]]]
variable[ret] assign[=] call[call[name[__utils__]][constant[zfs.parse_command_result]], parameter[name[res]]]
if compare[call[name[res]][constant[retcode]] equal[==] constant[0]] begin[:]
for taget[name[hold]] in starred[call[call[name[res]][constant[stdout]].splitlines, parameter[]]] begin[:]
variable[hold_data] assign[=] call[name[OrderedDict], parameter[call[name[list], parameter[call[name[zip], parameter[list[[<ast.Constant object at 0x7da1b21e0550>, <ast.Constant object at 0x7da1b21e2440>, <ast.Constant object at 0x7da1b21e18a0>]], call[name[hold].split, parameter[constant[ ]]]]]]]]]
call[name[ret]][call[call[name[hold_data]][constant[tag]].strip, parameter[]]] assign[=] call[name[hold_data]][constant[timestamp]]
return[name[ret]] | keyword[def] identifier[holds] ( identifier[snapshot] ,** identifier[kwargs] ):
literal[string]
identifier[flags] =[ literal[string] ]
identifier[target] =[]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[flags] . identifier[append] ( literal[string] )
identifier[target] . identifier[append] ( identifier[snapshot] )
identifier[res] = identifier[__salt__] [ literal[string] ](
identifier[__utils__] [ literal[string] ](
identifier[command] = literal[string] ,
identifier[flags] = identifier[flags] ,
identifier[target] = identifier[target] ,
),
identifier[python_shell] = keyword[False] ,
)
identifier[ret] = identifier[__utils__] [ literal[string] ]( identifier[res] )
keyword[if] identifier[res] [ literal[string] ]== literal[int] :
keyword[for] identifier[hold] keyword[in] identifier[res] [ literal[string] ]. identifier[splitlines] ():
identifier[hold_data] = identifier[OrderedDict] ( identifier[list] ( identifier[zip] (
[ literal[string] , literal[string] , literal[string] ],
identifier[hold] . identifier[split] ( literal[string] ),
)))
identifier[ret] [ identifier[hold_data] [ literal[string] ]. identifier[strip] ()]= identifier[hold_data] [ literal[string] ]
keyword[return] identifier[ret] | def holds(snapshot, **kwargs):
"""
Lists all existing user references for the given snapshot or snapshots.
snapshot : string
name of snapshot
recursive : boolean
lists the holds that are set on the named descendent snapshots also.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.holds myzpool/mydataset@baseline
"""
## Configure command
# NOTE: initialize the defaults
flags = ['-H']
target = []
# NOTE: set extra config from kwargs
if kwargs.get('recursive', False):
flags.append('-r') # depends on [control=['if'], data=[]]
# NOTE: update target
target.append(snapshot)
## Lookup holds
res = __salt__['cmd.run_all'](__utils__['zfs.zfs_command'](command='holds', flags=flags, target=target), python_shell=False)
ret = __utils__['zfs.parse_command_result'](res)
if res['retcode'] == 0:
for hold in res['stdout'].splitlines():
hold_data = OrderedDict(list(zip(['name', 'tag', 'timestamp'], hold.split('\t'))))
ret[hold_data['tag'].strip()] = hold_data['timestamp'] # depends on [control=['for'], data=['hold']] # depends on [control=['if'], data=[]]
return ret |
def all_selectors(Class, fn):
"""return a sorted list of selectors that occur in the stylesheet"""
selectors = []
cssparser = cssutils.CSSParser(validate=False)
css = cssparser.parseFile(fn)
for rule in [r for r in css.cssRules if type(r)==cssutils.css.CSSStyleRule]:
selectors += [sel.selectorText for sel in rule.selectorList]
selectors = sorted(list(set(selectors)))
return selectors | def function[all_selectors, parameter[Class, fn]]:
constant[return a sorted list of selectors that occur in the stylesheet]
variable[selectors] assign[=] list[[]]
variable[cssparser] assign[=] call[name[cssutils].CSSParser, parameter[]]
variable[css] assign[=] call[name[cssparser].parseFile, parameter[name[fn]]]
for taget[name[rule]] in starred[<ast.ListComp object at 0x7da1b26aead0>] begin[:]
<ast.AugAssign object at 0x7da1b26ae350>
variable[selectors] assign[=] call[name[sorted], parameter[call[name[list], parameter[call[name[set], parameter[name[selectors]]]]]]]
return[name[selectors]] | keyword[def] identifier[all_selectors] ( identifier[Class] , identifier[fn] ):
literal[string]
identifier[selectors] =[]
identifier[cssparser] = identifier[cssutils] . identifier[CSSParser] ( identifier[validate] = keyword[False] )
identifier[css] = identifier[cssparser] . identifier[parseFile] ( identifier[fn] )
keyword[for] identifier[rule] keyword[in] [ identifier[r] keyword[for] identifier[r] keyword[in] identifier[css] . identifier[cssRules] keyword[if] identifier[type] ( identifier[r] )== identifier[cssutils] . identifier[css] . identifier[CSSStyleRule] ]:
identifier[selectors] +=[ identifier[sel] . identifier[selectorText] keyword[for] identifier[sel] keyword[in] identifier[rule] . identifier[selectorList] ]
identifier[selectors] = identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[selectors] )))
keyword[return] identifier[selectors] | def all_selectors(Class, fn):
"""return a sorted list of selectors that occur in the stylesheet"""
selectors = []
cssparser = cssutils.CSSParser(validate=False)
css = cssparser.parseFile(fn)
for rule in [r for r in css.cssRules if type(r) == cssutils.css.CSSStyleRule]:
selectors += [sel.selectorText for sel in rule.selectorList] # depends on [control=['for'], data=['rule']]
selectors = sorted(list(set(selectors)))
return selectors |
def _pad_bytes_new(name, length):
"""
Takes a bytes instance and pads it with null bytes until it's length chars.
"""
if isinstance(name, str):
name = bytes(name, 'utf-8')
return name + b'\x00' * (length - len(name)) | def function[_pad_bytes_new, parameter[name, length]]:
constant[
Takes a bytes instance and pads it with null bytes until it's length chars.
]
if call[name[isinstance], parameter[name[name], name[str]]] begin[:]
variable[name] assign[=] call[name[bytes], parameter[name[name], constant[utf-8]]]
return[binary_operation[name[name] + binary_operation[constant[b'\x00'] * binary_operation[name[length] - call[name[len], parameter[name[name]]]]]]] | keyword[def] identifier[_pad_bytes_new] ( identifier[name] , identifier[length] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[name] , identifier[str] ):
identifier[name] = identifier[bytes] ( identifier[name] , literal[string] )
keyword[return] identifier[name] + literal[string] *( identifier[length] - identifier[len] ( identifier[name] )) | def _pad_bytes_new(name, length):
"""
Takes a bytes instance and pads it with null bytes until it's length chars.
"""
if isinstance(name, str):
name = bytes(name, 'utf-8') # depends on [control=['if'], data=[]]
return name + b'\x00' * (length - len(name)) |
def _ncc_c_2dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 1 dimensional
y vector
Returns a 2 dimensional array of normalized fourier transforms
"""
den = np.array(norm(x, axis=1) * norm(y))
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
cc = np.concatenate((cc[:,-(x_len-1):], cc[:,:x_len]), axis=1)
return np.real(cc) / den[:, np.newaxis] | def function[_ncc_c_2dim, parameter[x, y]]:
constant[
Variant of NCCc that operates with 2 dimensional X arrays and 1 dimensional
y vector
Returns a 2 dimensional array of normalized fourier transforms
]
variable[den] assign[=] call[name[np].array, parameter[binary_operation[call[name[norm], parameter[name[x]]] * call[name[norm], parameter[name[y]]]]]]
call[name[den]][compare[name[den] equal[==] constant[0]]] assign[=] name[np].Inf
variable[x_len] assign[=] call[name[x].shape][<ast.UnaryOp object at 0x7da1b1912a40>]
variable[fft_size] assign[=] binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> call[binary_operation[binary_operation[constant[2] * name[x_len]] - constant[1]].bit_length, parameter[]]]
variable[cc] assign[=] call[name[ifft], parameter[binary_operation[call[name[fft], parameter[name[x], name[fft_size]]] * call[name[np].conj, parameter[call[name[fft], parameter[name[y], name[fft_size]]]]]]]]
variable[cc] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Subscript object at 0x7da1b1913400>, <ast.Subscript object at 0x7da1b1911180>]]]]
return[binary_operation[call[name[np].real, parameter[name[cc]]] / call[name[den]][tuple[[<ast.Slice object at 0x7da1b1910850>, <ast.Attribute object at 0x7da1b1912110>]]]]] | keyword[def] identifier[_ncc_c_2dim] ( identifier[x] , identifier[y] ):
literal[string]
identifier[den] = identifier[np] . identifier[array] ( identifier[norm] ( identifier[x] , identifier[axis] = literal[int] )* identifier[norm] ( identifier[y] ))
identifier[den] [ identifier[den] == literal[int] ]= identifier[np] . identifier[Inf]
identifier[x_len] = identifier[x] . identifier[shape] [- literal[int] ]
identifier[fft_size] = literal[int] <<( literal[int] * identifier[x_len] - literal[int] ). identifier[bit_length] ()
identifier[cc] = identifier[ifft] ( identifier[fft] ( identifier[x] , identifier[fft_size] )* identifier[np] . identifier[conj] ( identifier[fft] ( identifier[y] , identifier[fft_size] )))
identifier[cc] = identifier[np] . identifier[concatenate] (( identifier[cc] [:,-( identifier[x_len] - literal[int] ):], identifier[cc] [:,: identifier[x_len] ]), identifier[axis] = literal[int] )
keyword[return] identifier[np] . identifier[real] ( identifier[cc] )/ identifier[den] [:, identifier[np] . identifier[newaxis] ] | def _ncc_c_2dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 1 dimensional
y vector
Returns a 2 dimensional array of normalized fourier transforms
"""
den = np.array(norm(x, axis=1) * norm(y))
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2 * x_len - 1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
cc = np.concatenate((cc[:, -(x_len - 1):], cc[:, :x_len]), axis=1)
return np.real(cc) / den[:, np.newaxis] |
def transform(self, Y):
"""Transform input data `Y` to reduced data space defined by `self.data`
Takes data in the same ambient space as `self.data` and transforms it
to be in the same reduced space as `self.data_nu`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_features]
n_features must be the same as `self.data`.
Returns
-------
Transformed data, shape=[n_samples_y, n_pca]
Raises
------
ValueError : if Y.shape[1] != self.data.shape[1]
"""
try:
# try PCA first
return self.data_pca.transform(Y)
except AttributeError: # no pca, try to return data
try:
if Y.shape[1] != self.data.shape[1]:
# shape is wrong
raise ValueError
return Y
except IndexError:
# len(Y.shape) < 2
raise ValueError
except ValueError:
# more informative error
raise ValueError("data of shape {} cannot be transformed"
" to graph built on data of shape {}".format(
Y.shape, self.data.shape)) | def function[transform, parameter[self, Y]]:
constant[Transform input data `Y` to reduced data space defined by `self.data`
Takes data in the same ambient space as `self.data` and transforms it
to be in the same reduced space as `self.data_nu`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_features]
n_features must be the same as `self.data`.
Returns
-------
Transformed data, shape=[n_samples_y, n_pca]
Raises
------
ValueError : if Y.shape[1] != self.data.shape[1]
]
<ast.Try object at 0x7da1b0b058d0> | keyword[def] identifier[transform] ( identifier[self] , identifier[Y] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[data_pca] . identifier[transform] ( identifier[Y] )
keyword[except] identifier[AttributeError] :
keyword[try] :
keyword[if] identifier[Y] . identifier[shape] [ literal[int] ]!= identifier[self] . identifier[data] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError]
keyword[return] identifier[Y]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[ValueError]
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] (
identifier[Y] . identifier[shape] , identifier[self] . identifier[data] . identifier[shape] )) | def transform(self, Y):
"""Transform input data `Y` to reduced data space defined by `self.data`
Takes data in the same ambient space as `self.data` and transforms it
to be in the same reduced space as `self.data_nu`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_features]
n_features must be the same as `self.data`.
Returns
-------
Transformed data, shape=[n_samples_y, n_pca]
Raises
------
ValueError : if Y.shape[1] != self.data.shape[1]
"""
try:
# try PCA first
return self.data_pca.transform(Y) # depends on [control=['try'], data=[]]
except AttributeError: # no pca, try to return data
try:
if Y.shape[1] != self.data.shape[1]:
# shape is wrong
raise ValueError # depends on [control=['if'], data=[]]
return Y # depends on [control=['try'], data=[]]
except IndexError:
# len(Y.shape) < 2
raise ValueError # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
except ValueError:
# more informative error
raise ValueError('data of shape {} cannot be transformed to graph built on data of shape {}'.format(Y.shape, self.data.shape)) # depends on [control=['except'], data=[]] |
def _load_fallback_states(self, pg):
"""
Load the last N deprioritized states will be extracted from the "deprioritized" stash and put to "active" stash.
N is controlled by 'num_fallback_states'.
:param SimulationManager pg: The simulation manager.
:return: None
"""
# take back some of the deprioritized states
l.debug("No more active states. Load some deprioritized states to 'active' stash.")
if 'deprioritized' in pg.stashes and pg.deprioritized:
pg.active.extend(pg.deprioritized[-self._num_fallback_states : ])
pg.stashes['deprioritized'] = pg.deprioritized[ : -self._num_fallback_states] | def function[_load_fallback_states, parameter[self, pg]]:
constant[
Load the last N deprioritized states will be extracted from the "deprioritized" stash and put to "active" stash.
N is controlled by 'num_fallback_states'.
:param SimulationManager pg: The simulation manager.
:return: None
]
call[name[l].debug, parameter[constant[No more active states. Load some deprioritized states to 'active' stash.]]]
if <ast.BoolOp object at 0x7da20c7c8e80> begin[:]
call[name[pg].active.extend, parameter[call[name[pg].deprioritized][<ast.Slice object at 0x7da204564400>]]]
call[name[pg].stashes][constant[deprioritized]] assign[=] call[name[pg].deprioritized][<ast.Slice object at 0x7da2045655a0>] | keyword[def] identifier[_load_fallback_states] ( identifier[self] , identifier[pg] ):
literal[string]
identifier[l] . identifier[debug] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[pg] . identifier[stashes] keyword[and] identifier[pg] . identifier[deprioritized] :
identifier[pg] . identifier[active] . identifier[extend] ( identifier[pg] . identifier[deprioritized] [- identifier[self] . identifier[_num_fallback_states] :])
identifier[pg] . identifier[stashes] [ literal[string] ]= identifier[pg] . identifier[deprioritized] [:- identifier[self] . identifier[_num_fallback_states] ] | def _load_fallback_states(self, pg):
"""
Load the last N deprioritized states will be extracted from the "deprioritized" stash and put to "active" stash.
N is controlled by 'num_fallback_states'.
:param SimulationManager pg: The simulation manager.
:return: None
"""
# take back some of the deprioritized states
l.debug("No more active states. Load some deprioritized states to 'active' stash.")
if 'deprioritized' in pg.stashes and pg.deprioritized:
pg.active.extend(pg.deprioritized[-self._num_fallback_states:])
pg.stashes['deprioritized'] = pg.deprioritized[:-self._num_fallback_states] # depends on [control=['if'], data=[]] |
def from_pysam_pileup_element(
        cls,
        pileup_element,
        base0_position_before_variant,
        base0_position_after_variant,
        use_secondary_alignments,
        use_duplicate_reads,
        min_mapping_quality,
        use_soft_clipped_bases=False):
    """Build a LocusRead from a pysam pileup element, or return None.

    A read is rejected (None is returned) when it is unmapped, is a
    reference skip or deletion at the locus, is a filtered secondary or
    duplicate alignment, fails the MAPQ threshold, is missing a required
    field (name, sequence, base qualities), or is not aligned to both
    reference positions flanking the variant.

    Parameters
    ----------
    pileup_element : pysam.PileupRead
    base0_position_before_variant : int
    base0_position_after_variant : int
    use_secondary_alignments : bool
    use_duplicate_reads : bool
    min_mapping_quality : int
    use_soft_clipped_bases : bool. Default false; set to true to keep soft-clipped bases
    Returns LocusRead or None
    """
    read = pileup_element.alignment
    # For future reference, may get overlapping reads
    # which can be identified by having the same name
    name = read.query_name
    if name is None:
        logger.warn(
            "Read missing name at position %d",
            base0_position_before_variant + 1)
        return None
    if read.is_unmapped:
        logger.warn(
            "How did we get unmapped read '%s' in a pileup?", name)
        return None
    if pileup_element.is_refskip:
        # if read sequence doesn't actually align to the reference
        # base before a variant, skip it
        logger.debug("Skipping pileup element with CIGAR alignment N (intron)")
        return None
    elif pileup_element.is_del:
        logger.debug(
            "Skipping deletion at position %d (read name = %s)",
            base0_position_before_variant + 1,
            name)
        return None
    if read.is_secondary and not use_secondary_alignments:
        logger.debug("Skipping secondary alignment of read '%s'", name)
        return None
    if read.is_duplicate and not use_duplicate_reads:
        logger.debug("Skipping duplicate read '%s'", name)
        return None
    mapping_quality = read.mapping_quality
    if mapping_quality is None:
        # BUGFIX: previously a missing MAPQ with min_mapping_quality == 0
        # fell through to the `<` comparison below and raised a TypeError
        # (None < int). A missing MAPQ is only a reason to skip when a
        # positive threshold was requested.
        if min_mapping_quality > 0:
            logger.debug("Skipping read '%s' due to missing MAPQ", name)
            return None
    elif mapping_quality < min_mapping_quality:
        # BUGFIX: the read name was missing from the log arguments, so the
        # '%s'/'%d' placeholders were filled with shifted (wrong) values.
        logger.debug(
            "Skipping read '%s' due to low MAPQ: %d < %d",
            name,
            mapping_quality,
            min_mapping_quality)
        return None
    sequence = read.query_sequence
    if sequence is None:
        logger.warn("Read '%s' missing sequence", name)
        return None
    base_qualities = read.query_qualities
    if base_qualities is None:
        logger.warn("Read '%s' missing base qualities", name)
        return None
    # Documentation for pysam.AlignedSegment.get_reference_positions:
    # ------------------------------------------------------------------
    # By default, this method only returns positions in the reference
    # that are within the alignment. If full_length is set, None values
    # will be included for any soft-clipped or unaligned positions
    # within the read. The returned list will thus be of the same length
    # as the read.
    #
    # Source:
    # http://pysam.readthedocs.org/en/latest/
    # api.html#pysam.AlignedSegment.get_reference_positions
    #
    # We want a None value for every read position that does not have a
    # corresponding reference position.
    reference_positions = read.get_reference_positions(
        full_length=True)
    # pysam uses base-0 positions everywhere except region strings
    # Source:
    # http://pysam.readthedocs.org/en/latest/faq.html#pysam-coordinates-are-wrong
    if base0_position_before_variant not in reference_positions:
        logger.debug(
            "Skipping read '%s' because first position %d not mapped",
            name,
            base0_position_before_variant)
        return None
    else:
        base0_read_position_before_variant = reference_positions.index(
            base0_position_before_variant)
    if base0_position_after_variant not in reference_positions:
        logger.debug(
            "Skipping read '%s' because last position %d not mapped",
            name,
            base0_position_after_variant)
        return None
    else:
        base0_read_position_after_variant = reference_positions.index(
            base0_position_after_variant)
    if isinstance(sequence, bytes):
        sequence = sequence.decode('ascii')
    if not use_soft_clipped_bases:
        # Trim to the aligned portion of the read and re-base the variant
        # offsets so they stay valid indexes into the trimmed sequence.
        start = read.query_alignment_start
        end = read.query_alignment_end
        sequence = sequence[start:end]
        reference_positions = reference_positions[start:end]
        base_qualities = base_qualities[start:end]
        base0_read_position_before_variant -= start
        base0_read_position_after_variant -= start
    return cls(
        name=name,
        sequence=sequence,
        reference_positions=reference_positions,
        quality_scores=base_qualities,
        base0_read_position_before_variant=base0_read_position_before_variant,
        base0_read_position_after_variant=base0_read_position_after_variant)
constant[
Parameters
----------
pileup_element : pysam.PileupRead
base0_position_before_variant : int
base0_position_after_variant : int
use_secondary_alignments : bool
use_duplicate_reads : bool
min_mapping_quality : int
use_soft_clipped_bases : bool. Default false; set to true to keep soft-clipped bases
Returns LocusRead or None
]
variable[read] assign[=] name[pileup_element].alignment
variable[name] assign[=] name[read].query_name
if compare[name[name] is constant[None]] begin[:]
call[name[logger].warn, parameter[constant[Read missing name at position %d], binary_operation[name[base0_position_before_variant] + constant[1]]]]
return[constant[None]]
if name[read].is_unmapped begin[:]
call[name[logger].warn, parameter[constant[How did we get unmapped read '%s' in a pileup?], name[name]]]
return[constant[None]]
if name[pileup_element].is_refskip begin[:]
call[name[logger].debug, parameter[constant[Skipping pileup element with CIGAR alignment N (intron)]]]
return[constant[None]]
if <ast.BoolOp object at 0x7da1b253b670> begin[:]
call[name[logger].debug, parameter[constant[Skipping secondary alignment of read '%s'], name[name]]]
return[constant[None]]
if <ast.BoolOp object at 0x7da1b25d9db0> begin[:]
call[name[logger].debug, parameter[constant[Skipping duplicate read '%s'], name[name]]]
return[constant[None]]
variable[mapping_quality] assign[=] name[read].mapping_quality
variable[missing_mapping_quality] assign[=] compare[name[mapping_quality] is constant[None]]
if <ast.BoolOp object at 0x7da1b25d9180> begin[:]
call[name[logger].debug, parameter[constant[Skipping read '%s' due to missing MAPQ], name[name]]]
return[constant[None]]
variable[sequence] assign[=] name[read].query_sequence
if compare[name[sequence] is constant[None]] begin[:]
call[name[logger].warn, parameter[constant[Read '%s' missing sequence], name[name]]]
return[constant[None]]
variable[base_qualities] assign[=] name[read].query_qualities
if compare[name[base_qualities] is constant[None]] begin[:]
call[name[logger].warn, parameter[constant[Read '%s' missing base qualities], name[name]]]
return[constant[None]]
variable[reference_positions] assign[=] call[name[read].get_reference_positions, parameter[]]
if compare[name[base0_position_before_variant] <ast.NotIn object at 0x7da2590d7190> name[reference_positions]] begin[:]
call[name[logger].debug, parameter[constant[Skipping read '%s' because first position %d not mapped], name[name], name[base0_position_before_variant]]]
return[constant[None]]
if compare[name[base0_position_after_variant] <ast.NotIn object at 0x7da2590d7190> name[reference_positions]] begin[:]
call[name[logger].debug, parameter[constant[Skipping read '%s' because last position %d not mapped], name[name], name[base0_position_after_variant]]]
return[constant[None]]
if call[name[isinstance], parameter[name[sequence], name[bytes]]] begin[:]
variable[sequence] assign[=] call[name[sequence].decode, parameter[constant[ascii]]]
if <ast.UnaryOp object at 0x7da1b25db1c0> begin[:]
variable[start] assign[=] name[read].query_alignment_start
variable[end] assign[=] name[read].query_alignment_end
variable[sequence] assign[=] call[name[sequence]][<ast.Slice object at 0x7da1b25589d0>]
variable[reference_positions] assign[=] call[name[reference_positions]][<ast.Slice object at 0x7da1b255a2f0>]
variable[base_qualities] assign[=] call[name[base_qualities]][<ast.Slice object at 0x7da1b2559db0>]
<ast.AugAssign object at 0x7da1b2558b80>
<ast.AugAssign object at 0x7da1b255a140>
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_pysam_pileup_element] (
identifier[cls] ,
identifier[pileup_element] ,
identifier[base0_position_before_variant] ,
identifier[base0_position_after_variant] ,
identifier[use_secondary_alignments] ,
identifier[use_duplicate_reads] ,
identifier[min_mapping_quality] ,
identifier[use_soft_clipped_bases] = keyword[False] ):
literal[string]
identifier[read] = identifier[pileup_element] . identifier[alignment]
identifier[name] = identifier[read] . identifier[query_name]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[logger] . identifier[warn] (
literal[string] ,
identifier[base0_position_before_variant] + literal[int] )
keyword[return] keyword[None]
keyword[if] identifier[read] . identifier[is_unmapped] :
identifier[logger] . identifier[warn] (
literal[string] , identifier[name] )
keyword[return] keyword[None]
keyword[if] identifier[pileup_element] . identifier[is_refskip] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] keyword[None]
keyword[elif] identifier[pileup_element] . identifier[is_del] :
identifier[logger] . identifier[debug] (
literal[string] ,
identifier[base0_position_before_variant] + literal[int] ,
identifier[name] )
keyword[return] keyword[None]
keyword[if] identifier[read] . identifier[is_secondary] keyword[and] keyword[not] identifier[use_secondary_alignments] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] keyword[None]
keyword[if] identifier[read] . identifier[is_duplicate] keyword[and] keyword[not] identifier[use_duplicate_reads] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] keyword[None]
identifier[mapping_quality] = identifier[read] . identifier[mapping_quality]
identifier[missing_mapping_quality] = identifier[mapping_quality] keyword[is] keyword[None]
keyword[if] identifier[min_mapping_quality] > literal[int] keyword[and] identifier[missing_mapping_quality] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] keyword[None]
keyword[elif] identifier[mapping_quality] < identifier[min_mapping_quality] :
identifier[logger] . identifier[debug] (
literal[string] ,
identifier[read] . identifier[mapping_quality] ,
identifier[mapping_quality] ,
identifier[min_mapping_quality] )
keyword[return] keyword[None]
identifier[sequence] = identifier[read] . identifier[query_sequence]
keyword[if] identifier[sequence] keyword[is] keyword[None] :
identifier[logger] . identifier[warn] ( literal[string] , identifier[name] )
keyword[return] keyword[None]
identifier[base_qualities] = identifier[read] . identifier[query_qualities]
keyword[if] identifier[base_qualities] keyword[is] keyword[None] :
identifier[logger] . identifier[warn] ( literal[string] , identifier[name] )
keyword[return] keyword[None]
identifier[reference_positions] = identifier[read] . identifier[get_reference_positions] (
identifier[full_length] = keyword[True] )
keyword[if] identifier[base0_position_before_variant] keyword[not] keyword[in] identifier[reference_positions] :
identifier[logger] . identifier[debug] (
literal[string] ,
identifier[name] ,
identifier[base0_position_before_variant] )
keyword[return] keyword[None]
keyword[else] :
identifier[base0_read_position_before_variant] = identifier[reference_positions] . identifier[index] (
identifier[base0_position_before_variant] )
keyword[if] identifier[base0_position_after_variant] keyword[not] keyword[in] identifier[reference_positions] :
identifier[logger] . identifier[debug] (
literal[string] ,
identifier[name] ,
identifier[base0_position_after_variant] )
keyword[return] keyword[None]
keyword[else] :
identifier[base0_read_position_after_variant] = identifier[reference_positions] . identifier[index] (
identifier[base0_position_after_variant] )
keyword[if] identifier[isinstance] ( identifier[sequence] , identifier[bytes] ):
identifier[sequence] = identifier[sequence] . identifier[decode] ( literal[string] )
keyword[if] keyword[not] identifier[use_soft_clipped_bases] :
identifier[start] = identifier[read] . identifier[query_alignment_start]
identifier[end] = identifier[read] . identifier[query_alignment_end]
identifier[sequence] = identifier[sequence] [ identifier[start] : identifier[end] ]
identifier[reference_positions] = identifier[reference_positions] [ identifier[start] : identifier[end] ]
identifier[base_qualities] = identifier[base_qualities] [ identifier[start] : identifier[end] ]
identifier[base0_read_position_before_variant] -= identifier[start]
identifier[base0_read_position_after_variant] -= identifier[start]
keyword[return] identifier[cls] (
identifier[name] = identifier[name] ,
identifier[sequence] = identifier[sequence] ,
identifier[reference_positions] = identifier[reference_positions] ,
identifier[quality_scores] = identifier[base_qualities] ,
identifier[base0_read_position_before_variant] = identifier[base0_read_position_before_variant] ,
identifier[base0_read_position_after_variant] = identifier[base0_read_position_after_variant] ) | def from_pysam_pileup_element(cls, pileup_element, base0_position_before_variant, base0_position_after_variant, use_secondary_alignments, use_duplicate_reads, min_mapping_quality, use_soft_clipped_bases=False):
"""
Parameters
----------
pileup_element : pysam.PileupRead
base0_position_before_variant : int
base0_position_after_variant : int
use_secondary_alignments : bool
use_duplicate_reads : bool
min_mapping_quality : int
use_soft_clipped_bases : bool. Default false; set to true to keep soft-clipped bases
Returns LocusRead or None
"""
read = pileup_element.alignment
# For future reference, may get overlapping reads
# which can be identified by having the same name
name = read.query_name
if name is None:
logger.warn('Read missing name at position %d', base0_position_before_variant + 1)
return None # depends on [control=['if'], data=[]]
if read.is_unmapped:
logger.warn("How did we get unmapped read '%s' in a pileup?", name)
return None # depends on [control=['if'], data=[]]
if pileup_element.is_refskip:
# if read sequence doesn't actually align to the reference
# base before a variant, skip it
logger.debug('Skipping pileup element with CIGAR alignment N (intron)')
return None # depends on [control=['if'], data=[]]
elif pileup_element.is_del:
logger.debug('Skipping deletion at position %d (read name = %s)', base0_position_before_variant + 1, name)
return None # depends on [control=['if'], data=[]]
if read.is_secondary and (not use_secondary_alignments):
logger.debug("Skipping secondary alignment of read '%s'", name)
return None # depends on [control=['if'], data=[]]
if read.is_duplicate and (not use_duplicate_reads):
logger.debug("Skipping duplicate read '%s'", name)
return None # depends on [control=['if'], data=[]]
mapping_quality = read.mapping_quality
missing_mapping_quality = mapping_quality is None
if min_mapping_quality > 0 and missing_mapping_quality:
logger.debug("Skipping read '%s' due to missing MAPQ", name)
return None # depends on [control=['if'], data=[]]
elif mapping_quality < min_mapping_quality:
logger.debug("Skipping read '%s' due to low MAPQ: %d < %d", read.mapping_quality, mapping_quality, min_mapping_quality)
return None # depends on [control=['if'], data=['mapping_quality', 'min_mapping_quality']]
sequence = read.query_sequence
if sequence is None:
logger.warn("Read '%s' missing sequence", name)
return None # depends on [control=['if'], data=[]]
base_qualities = read.query_qualities
if base_qualities is None:
logger.warn("Read '%s' missing base qualities", name)
return None # depends on [control=['if'], data=[]]
# Documentation for pysam.AlignedSegment.get_reference_positions:
# ------------------------------------------------------------------
# By default, this method only returns positions in the reference
# that are within the alignment. If full_length is set, None values
# will be included for any soft-clipped or unaligned positions
# within the read. The returned list will thus be of the same length
# as the read.
#
# Source:
# http://pysam.readthedocs.org/en/latest/
# api.html#pysam.AlignedSegment.get_reference_positions
#
# We want a None value for every read position that does not have a
# corresponding reference position.
reference_positions = read.get_reference_positions(full_length=True)
# pysam uses base-0 positions everywhere except region strings
# Source:
# http://pysam.readthedocs.org/en/latest/faq.html#pysam-coordinates-are-wrong
if base0_position_before_variant not in reference_positions:
logger.debug("Skipping read '%s' because first position %d not mapped", name, base0_position_before_variant)
return None # depends on [control=['if'], data=['base0_position_before_variant']]
else:
base0_read_position_before_variant = reference_positions.index(base0_position_before_variant)
if base0_position_after_variant not in reference_positions:
logger.debug("Skipping read '%s' because last position %d not mapped", name, base0_position_after_variant)
return None # depends on [control=['if'], data=['base0_position_after_variant']]
else:
base0_read_position_after_variant = reference_positions.index(base0_position_after_variant)
if isinstance(sequence, bytes):
sequence = sequence.decode('ascii') # depends on [control=['if'], data=[]]
if not use_soft_clipped_bases:
start = read.query_alignment_start
end = read.query_alignment_end
sequence = sequence[start:end]
reference_positions = reference_positions[start:end]
base_qualities = base_qualities[start:end]
base0_read_position_before_variant -= start
base0_read_position_after_variant -= start # depends on [control=['if'], data=[]]
return cls(name=name, sequence=sequence, reference_positions=reference_positions, quality_scores=base_qualities, base0_read_position_before_variant=base0_read_position_before_variant, base0_read_position_after_variant=base0_read_position_after_variant) |
def mouse_release_event(self, event):
    """
    Forward a mouse release event to the example.

    Only left (1) and right (2) button releases are forwarded; releases of
    any other button are dropped.
    """
    if event.button() in (1, 2):
        self.example.mouse_release_event(event.x(), event.y(), event.button())
constant[
Forward mouse release events to the example
]
if compare[call[name[event].button, parameter[]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da20c6e6740>, <ast.Constant object at 0x7da20c6e7e50>]]] begin[:]
return[None]
call[name[self].example.mouse_release_event, parameter[call[name[event].x, parameter[]], call[name[event].y, parameter[]], call[name[event].button, parameter[]]]] | keyword[def] identifier[mouse_release_event] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[event] . identifier[button] () keyword[not] keyword[in] [ literal[int] , literal[int] ]:
keyword[return]
identifier[self] . identifier[example] . identifier[mouse_release_event] ( identifier[event] . identifier[x] (), identifier[event] . identifier[y] (), identifier[event] . identifier[button] ()) | def mouse_release_event(self, event):
"""
Forward mouse release events to the example
""" # Support left and right mouse button for now
if event.button() not in [1, 2]:
return # depends on [control=['if'], data=[]]
self.example.mouse_release_event(event.x(), event.y(), event.button()) |
def _get_evidence(self, evidence_dict, time_slice, shift):
"""
Getting the evidence belonging to a particular timeslice.
Parameters:
----------
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
time: int
the evidence corresponding to the time slice
shift: int
shifting the evidence corresponding to the given time slice.
"""
if evidence_dict:
return {(node[0], shift): evidence_dict[node] for node in evidence_dict if node[1] == time_slice} | def function[_get_evidence, parameter[self, evidence_dict, time_slice, shift]]:
constant[
Getting the evidence belonging to a particular timeslice.
Parameters:
----------
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
time: int
the evidence corresponding to the time slice
shift: int
shifting the evidence corresponding to the given time slice.
]
if name[evidence_dict] begin[:]
return[<ast.DictComp object at 0x7da18ede6bc0>] | keyword[def] identifier[_get_evidence] ( identifier[self] , identifier[evidence_dict] , identifier[time_slice] , identifier[shift] ):
literal[string]
keyword[if] identifier[evidence_dict] :
keyword[return] {( identifier[node] [ literal[int] ], identifier[shift] ): identifier[evidence_dict] [ identifier[node] ] keyword[for] identifier[node] keyword[in] identifier[evidence_dict] keyword[if] identifier[node] [ literal[int] ]== identifier[time_slice] } | def _get_evidence(self, evidence_dict, time_slice, shift):
"""
Getting the evidence belonging to a particular timeslice.
Parameters:
----------
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
time: int
the evidence corresponding to the time slice
shift: int
shifting the evidence corresponding to the given time slice.
"""
if evidence_dict:
return {(node[0], shift): evidence_dict[node] for node in evidence_dict if node[1] == time_slice} # depends on [control=['if'], data=[]] |
def Delete(self, queue, tasks, mutation_pool=None):
    """Removes the given tasks from ``queue``.

    Deleting a task that was already removed is a no-op, so double deletes
    are safe.

    Args:
      queue: The queue to delete from; a None queue is silently ignored.
      tasks: Task() instances or integer task ids to remove.
      mutation_pool: The MutationPool the deletions are scheduled on
        (required whenever a queue is given).

    Raises:
      ValueError: Mutation pool was not passed in.
    """
    if queue is not None:
        if mutation_pool is None:
            raise ValueError("Mutation pool can't be none.")
        mutation_pool.QueueDeleteTasks(queue, tasks)
constant[Removes the tasks from the queue.
Note that tasks can already have been removed. It is not an error
to re-delete an already deleted task.
Args:
queue: A queue to clear.
tasks: A list of tasks to remove. Tasks may be Task() instances or integers
representing the task_id.
mutation_pool: A MutationPool object to schedule deletions on.
Raises:
ValueError: Mutation pool was not passed in.
]
if compare[name[queue] is constant[None]] begin[:]
return[None]
if compare[name[mutation_pool] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1c24880>
call[name[mutation_pool].QueueDeleteTasks, parameter[name[queue], name[tasks]]] | keyword[def] identifier[Delete] ( identifier[self] , identifier[queue] , identifier[tasks] , identifier[mutation_pool] = keyword[None] ):
literal[string]
keyword[if] identifier[queue] keyword[is] keyword[None] :
keyword[return]
keyword[if] identifier[mutation_pool] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[mutation_pool] . identifier[QueueDeleteTasks] ( identifier[queue] , identifier[tasks] ) | def Delete(self, queue, tasks, mutation_pool=None):
"""Removes the tasks from the queue.
Note that tasks can already have been removed. It is not an error
to re-delete an already deleted task.
Args:
queue: A queue to clear.
tasks: A list of tasks to remove. Tasks may be Task() instances or integers
representing the task_id.
mutation_pool: A MutationPool object to schedule deletions on.
Raises:
ValueError: Mutation pool was not passed in.
"""
if queue is None:
return # depends on [control=['if'], data=[]]
if mutation_pool is None:
raise ValueError("Mutation pool can't be none.") # depends on [control=['if'], data=[]]
mutation_pool.QueueDeleteTasks(queue, tasks) |
def _ApplySudsJurkoAppenderPatch(self):
    """Appends a Monkey Patch to the suds.mx.appender module.

    This resolves an issue where empty objects are ignored and stripped from the
    request output. More details can be found on the suds-jurko issue tracker:
    https://goo.gl/uyYw0C
    """
    def AppendKeepingEmptyObjects(self, parent, content):
        # Attach the node for this object unconditionally (even when the
        # object has no children), then append each child individually.
        child = self.node(content)
        parent.append(child)
        for item in content.value:
            suds.mx.appender.Appender.append(
                self, child, suds.mx.Content(tag=item[0], value=item[1]))
    suds.mx.appender.ObjectAppender.append = AppendKeepingEmptyObjects
constant[Appends a Monkey Patch to the suds.mx.appender module.
This resolves an issue where empty objects are ignored and stripped from the
request output. More details can be found on the suds-jurko issue tracker:
https://goo.gl/uyYw0C
]
def function[PatchedAppend, parameter[self, parent, content]]:
variable[obj] assign[=] name[content].value
variable[child] assign[=] call[name[self].node, parameter[name[content]]]
call[name[parent].append, parameter[name[child]]]
for taget[name[item]] in starred[name[obj]] begin[:]
variable[cont] assign[=] call[name[suds].mx.Content, parameter[]]
call[name[suds].mx.appender.Appender.append, parameter[name[self], name[child], name[cont]]]
name[suds].mx.appender.ObjectAppender.append assign[=] name[PatchedAppend] | keyword[def] identifier[_ApplySudsJurkoAppenderPatch] ( identifier[self] ):
literal[string]
keyword[def] identifier[PatchedAppend] ( identifier[self] , identifier[parent] , identifier[content] ):
identifier[obj] = identifier[content] . identifier[value]
identifier[child] = identifier[self] . identifier[node] ( identifier[content] )
identifier[parent] . identifier[append] ( identifier[child] )
keyword[for] identifier[item] keyword[in] identifier[obj] :
identifier[cont] = identifier[suds] . identifier[mx] . identifier[Content] ( identifier[tag] = identifier[item] [ literal[int] ], identifier[value] = identifier[item] [ literal[int] ])
identifier[suds] . identifier[mx] . identifier[appender] . identifier[Appender] . identifier[append] ( identifier[self] , identifier[child] , identifier[cont] )
identifier[suds] . identifier[mx] . identifier[appender] . identifier[ObjectAppender] . identifier[append] = identifier[PatchedAppend] | def _ApplySudsJurkoAppenderPatch(self):
"""Appends a Monkey Patch to the suds.mx.appender module.
This resolves an issue where empty objects are ignored and stripped from the
request output. More details can be found on the suds-jurko issue tracker:
https://goo.gl/uyYw0C
"""
def PatchedAppend(self, parent, content):
obj = content.value
child = self.node(content)
parent.append(child)
for item in obj:
cont = suds.mx.Content(tag=item[0], value=item[1])
suds.mx.appender.Appender.append(self, child, cont) # depends on [control=['for'], data=['item']]
suds.mx.appender.ObjectAppender.append = PatchedAppend |
def sendUserLeft(self, context=None):
    """
    Sent when user leaves organization.

    Notifies both the departing user and the organization owner,
    re-initializing the mailer for each recipient's email address and
    locale before sending.

    Args:
        context: dict expected to contain 'organization' (exposing
            ``.owner``) and 'user'; each recipient exposes ``.email``
            and ``.locale``. Defaults to an empty dict.
    """
    # BUGFIX (idiom): avoid a mutable default argument ({}), which is
    # shared across all calls of the function.
    if context is None:
        context = {}
    user = context['user']
    organization = context['organization']
    # Notify the user who left.
    self.__init__(organization, async_mail=self.async_mail,
                  override_receiver=user.email, locale=user.locale)
    self.sendEmail('userLeft-toUser', 'You have left an organization', context)
    # Notify the owner of the organization that was left.
    self.__init__(organization, async_mail=self.async_mail,
                  override_receiver=organization.owner.email,
                  locale=organization.owner.locale)
    self.sendEmail('userLeft-toOwner', 'An user has left an organization you own', context)
self.sendEmail('userLeft-toOwner', 'An user has left an organization you own', context) | def function[sendUserLeft, parameter[self, context]]:
constant[
Sent when user leaves organization
]
call[name[self].__init__, parameter[call[name[context]][constant[organization]]]]
call[name[self].sendEmail, parameter[constant[userLeft-toUser], constant[You have left an organization], name[context]]]
call[name[self].__init__, parameter[call[name[context]][constant[organization]]]]
call[name[self].sendEmail, parameter[constant[userLeft-toOwner], constant[An user has left an organization you own], name[context]]] | keyword[def] identifier[sendUserLeft] ( identifier[self] , identifier[context] ={}):
literal[string]
identifier[self] . identifier[__init__] ( identifier[context] [ literal[string] ], identifier[async_mail] = identifier[self] . identifier[async_mail] , identifier[override_receiver] = identifier[context] [ literal[string] ]. identifier[email] , identifier[locale] = identifier[context] [ literal[string] ]. identifier[locale] )
identifier[self] . identifier[sendEmail] ( literal[string] , literal[string] , identifier[context] )
identifier[self] . identifier[__init__] ( identifier[context] [ literal[string] ], identifier[async_mail] = identifier[self] . identifier[async_mail] , identifier[override_receiver] = identifier[context] [ literal[string] ]. identifier[owner] . identifier[email] , identifier[locale] = identifier[context] [ literal[string] ]. identifier[owner] . identifier[locale] )
identifier[self] . identifier[sendEmail] ( literal[string] , literal[string] , identifier[context] ) | def sendUserLeft(self, context={}):
"""
Sent when user leaves organization
"""
self.__init__(context['organization'], async_mail=self.async_mail, override_receiver=context['user'].email, locale=context['user'].locale)
self.sendEmail('userLeft-toUser', 'You have left an organization', context)
self.__init__(context['organization'], async_mail=self.async_mail, override_receiver=context['organization'].owner.email, locale=context['organization'].owner.locale)
self.sendEmail('userLeft-toOwner', 'An user has left an organization you own', context) |
def list_branch(self, repo_name):
    """
    Lists the active Branch objects on a Repo.

    Params:
    * repo_name: The name of the repo.
    """
    request = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
    response = self.stub.ListBranch(request, metadata=self.metadata)
    # Some proto versions may omit the field entirely; fall back to [].
    return getattr(response, 'branch_info', [])
constant[
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
]
variable[req] assign[=] call[name[proto].ListBranchRequest, parameter[]]
variable[res] assign[=] call[name[self].stub.ListBranch, parameter[name[req]]]
if call[name[hasattr], parameter[name[res], constant[branch_info]]] begin[:]
return[name[res].branch_info]
return[list[[]]] | keyword[def] identifier[list_branch] ( identifier[self] , identifier[repo_name] ):
literal[string]
identifier[req] = identifier[proto] . identifier[ListBranchRequest] ( identifier[repo] = identifier[proto] . identifier[Repo] ( identifier[name] = identifier[repo_name] ))
identifier[res] = identifier[self] . identifier[stub] . identifier[ListBranch] ( identifier[req] , identifier[metadata] = identifier[self] . identifier[metadata] )
keyword[if] identifier[hasattr] ( identifier[res] , literal[string] ):
keyword[return] identifier[res] . identifier[branch_info]
keyword[return] [] | def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info # depends on [control=['if'], data=[]]
return [] |
def find_expectation_indexes(self,
expectation_type=None,
column=None,
expectation_kwargs=None
):
"""Find matching expectations within _expectation_config.
Args:
expectation_type=None : The name of the expectation type to be matched.
column=None : The name of the column to be matched.
expectation_kwargs=None : A dictionary of kwargs to match against.
Returns:
A list of indexes for matching expectation objects.
If there are no matches, the list will be empty.
"""
if expectation_kwargs == None:
expectation_kwargs = {}
if "column" in expectation_kwargs and column != None and column != expectation_kwargs["column"]:
raise ValueError("Conflicting column names in remove_expectation: %s and %s" % (
column, expectation_kwargs["column"]))
if column != None:
expectation_kwargs["column"] = column
match_indexes = []
for i, exp in enumerate(self._expectations_config.expectations):
if expectation_type == None or (expectation_type == exp['expectation_type']):
# if column == None or ('column' not in exp['kwargs']) or (exp['kwargs']['column'] == column) or (exp['kwargs']['column']==:
match = True
for k, v in expectation_kwargs.items():
if k in exp['kwargs'] and exp['kwargs'][k] == v:
continue
else:
match = False
if match:
match_indexes.append(i)
return match_indexes | def function[find_expectation_indexes, parameter[self, expectation_type, column, expectation_kwargs]]:
constant[Find matching expectations within _expectation_config.
Args:
expectation_type=None : The name of the expectation type to be matched.
column=None : The name of the column to be matched.
expectation_kwargs=None : A dictionary of kwargs to match against.
Returns:
A list of indexes for matching expectation objects.
If there are no matches, the list will be empty.
]
if compare[name[expectation_kwargs] equal[==] constant[None]] begin[:]
variable[expectation_kwargs] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b178c6a0> begin[:]
<ast.Raise object at 0x7da1b178cee0>
if compare[name[column] not_equal[!=] constant[None]] begin[:]
call[name[expectation_kwargs]][constant[column]] assign[=] name[column]
variable[match_indexes] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b178e770>, <ast.Name object at 0x7da1b178cac0>]]] in starred[call[name[enumerate], parameter[name[self]._expectations_config.expectations]]] begin[:]
if <ast.BoolOp object at 0x7da1b178f610> begin[:]
variable[match] assign[=] constant[True]
for taget[tuple[[<ast.Name object at 0x7da1b178c340>, <ast.Name object at 0x7da1b178d660>]]] in starred[call[name[expectation_kwargs].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b178ebf0> begin[:]
continue
if name[match] begin[:]
call[name[match_indexes].append, parameter[name[i]]]
return[name[match_indexes]] | keyword[def] identifier[find_expectation_indexes] ( identifier[self] ,
identifier[expectation_type] = keyword[None] ,
identifier[column] = keyword[None] ,
identifier[expectation_kwargs] = keyword[None]
):
literal[string]
keyword[if] identifier[expectation_kwargs] == keyword[None] :
identifier[expectation_kwargs] ={}
keyword[if] literal[string] keyword[in] identifier[expectation_kwargs] keyword[and] identifier[column] != keyword[None] keyword[and] identifier[column] != identifier[expectation_kwargs] [ literal[string] ]:
keyword[raise] identifier[ValueError] ( literal[string] %(
identifier[column] , identifier[expectation_kwargs] [ literal[string] ]))
keyword[if] identifier[column] != keyword[None] :
identifier[expectation_kwargs] [ literal[string] ]= identifier[column]
identifier[match_indexes] =[]
keyword[for] identifier[i] , identifier[exp] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_expectations_config] . identifier[expectations] ):
keyword[if] identifier[expectation_type] == keyword[None] keyword[or] ( identifier[expectation_type] == identifier[exp] [ literal[string] ]):
identifier[match] = keyword[True]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[expectation_kwargs] . identifier[items] ():
keyword[if] identifier[k] keyword[in] identifier[exp] [ literal[string] ] keyword[and] identifier[exp] [ literal[string] ][ identifier[k] ]== identifier[v] :
keyword[continue]
keyword[else] :
identifier[match] = keyword[False]
keyword[if] identifier[match] :
identifier[match_indexes] . identifier[append] ( identifier[i] )
keyword[return] identifier[match_indexes] | def find_expectation_indexes(self, expectation_type=None, column=None, expectation_kwargs=None):
"""Find matching expectations within _expectation_config.
Args:
expectation_type=None : The name of the expectation type to be matched.
column=None : The name of the column to be matched.
expectation_kwargs=None : A dictionary of kwargs to match against.
Returns:
A list of indexes for matching expectation objects.
If there are no matches, the list will be empty.
"""
if expectation_kwargs == None:
expectation_kwargs = {} # depends on [control=['if'], data=['expectation_kwargs']]
if 'column' in expectation_kwargs and column != None and (column != expectation_kwargs['column']):
raise ValueError('Conflicting column names in remove_expectation: %s and %s' % (column, expectation_kwargs['column'])) # depends on [control=['if'], data=[]]
if column != None:
expectation_kwargs['column'] = column # depends on [control=['if'], data=['column']]
match_indexes = []
for (i, exp) in enumerate(self._expectations_config.expectations):
if expectation_type == None or expectation_type == exp['expectation_type']:
# if column == None or ('column' not in exp['kwargs']) or (exp['kwargs']['column'] == column) or (exp['kwargs']['column']==:
match = True
for (k, v) in expectation_kwargs.items():
if k in exp['kwargs'] and exp['kwargs'][k] == v:
continue # depends on [control=['if'], data=[]]
else:
match = False # depends on [control=['for'], data=[]]
if match:
match_indexes.append(i) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return match_indexes |
def simplify_data_representations(config: DictLike) -> DictLike:
""" Convert one entry lists to the scalar value
This step is necessary because anchors are not kept for scalar values - just for lists and dictionaries.
Now that we are done with all of our anchor references, we can convert these single entry lists to
just the scalar entry, which is more usable.
Some notes on anchors in ruamel.yaml are here: https://stackoverflow.com/a/48559644
Args:
config: The dict-like configuration from ruamel.yaml which should be simplified.
Returns:
The updated configuration.
"""
for k, v in config.items():
if v and isinstance(v, list) and len(v) == 1:
logger.debug("v: {}".format(v))
config[k] = v[0]
return config | def function[simplify_data_representations, parameter[config]]:
constant[ Convert one entry lists to the scalar value
This step is necessary because anchors are not kept for scalar values - just for lists and dictionaries.
Now that we are done with all of our anchor references, we can convert these single entry lists to
just the scalar entry, which is more usable.
Some notes on anchors in ruamel.yaml are here: https://stackoverflow.com/a/48559644
Args:
config: The dict-like configuration from ruamel.yaml which should be simplified.
Returns:
The updated configuration.
]
for taget[tuple[[<ast.Name object at 0x7da1b1d4cd00>, <ast.Name object at 0x7da1b1d4df30>]]] in starred[call[name[config].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b1d4de10> begin[:]
call[name[logger].debug, parameter[call[constant[v: {}].format, parameter[name[v]]]]]
call[name[config]][name[k]] assign[=] call[name[v]][constant[0]]
return[name[config]] | keyword[def] identifier[simplify_data_representations] ( identifier[config] : identifier[DictLike] )-> identifier[DictLike] :
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[config] . identifier[items] ():
keyword[if] identifier[v] keyword[and] identifier[isinstance] ( identifier[v] , identifier[list] ) keyword[and] identifier[len] ( identifier[v] )== literal[int] :
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[v] ))
identifier[config] [ identifier[k] ]= identifier[v] [ literal[int] ]
keyword[return] identifier[config] | def simplify_data_representations(config: DictLike) -> DictLike:
""" Convert one entry lists to the scalar value
This step is necessary because anchors are not kept for scalar values - just for lists and dictionaries.
Now that we are done with all of our anchor references, we can convert these single entry lists to
just the scalar entry, which is more usable.
Some notes on anchors in ruamel.yaml are here: https://stackoverflow.com/a/48559644
Args:
config: The dict-like configuration from ruamel.yaml which should be simplified.
Returns:
The updated configuration.
"""
for (k, v) in config.items():
if v and isinstance(v, list) and (len(v) == 1):
logger.debug('v: {}'.format(v))
config[k] = v[0] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return config |
def _resolve_jars_info(self, targets, classpath_products):
"""Consults ivy_jar_products to export the external libraries.
:return: mapping of jar_id -> { 'default' : <jar_file>,
'sources' : <jar_file>,
'javadoc' : <jar_file>,
<other_confs> : <jar_file>,
}
"""
mapping = defaultdict(dict)
jar_products = classpath_products.get_artifact_classpath_entries_for_targets(
targets, respect_excludes=False)
for conf, jar_entry in jar_products:
conf = jar_entry.coordinate.classifier or 'default'
mapping[self._jar_id(jar_entry.coordinate)][conf] = jar_entry.cache_path
return mapping | def function[_resolve_jars_info, parameter[self, targets, classpath_products]]:
constant[Consults ivy_jar_products to export the external libraries.
:return: mapping of jar_id -> { 'default' : <jar_file>,
'sources' : <jar_file>,
'javadoc' : <jar_file>,
<other_confs> : <jar_file>,
}
]
variable[mapping] assign[=] call[name[defaultdict], parameter[name[dict]]]
variable[jar_products] assign[=] call[name[classpath_products].get_artifact_classpath_entries_for_targets, parameter[name[targets]]]
for taget[tuple[[<ast.Name object at 0x7da1b1eaf820>, <ast.Name object at 0x7da1b1eaf7f0>]]] in starred[name[jar_products]] begin[:]
variable[conf] assign[=] <ast.BoolOp object at 0x7da1b1eaf730>
call[call[name[mapping]][call[name[self]._jar_id, parameter[name[jar_entry].coordinate]]]][name[conf]] assign[=] name[jar_entry].cache_path
return[name[mapping]] | keyword[def] identifier[_resolve_jars_info] ( identifier[self] , identifier[targets] , identifier[classpath_products] ):
literal[string]
identifier[mapping] = identifier[defaultdict] ( identifier[dict] )
identifier[jar_products] = identifier[classpath_products] . identifier[get_artifact_classpath_entries_for_targets] (
identifier[targets] , identifier[respect_excludes] = keyword[False] )
keyword[for] identifier[conf] , identifier[jar_entry] keyword[in] identifier[jar_products] :
identifier[conf] = identifier[jar_entry] . identifier[coordinate] . identifier[classifier] keyword[or] literal[string]
identifier[mapping] [ identifier[self] . identifier[_jar_id] ( identifier[jar_entry] . identifier[coordinate] )][ identifier[conf] ]= identifier[jar_entry] . identifier[cache_path]
keyword[return] identifier[mapping] | def _resolve_jars_info(self, targets, classpath_products):
"""Consults ivy_jar_products to export the external libraries.
:return: mapping of jar_id -> { 'default' : <jar_file>,
'sources' : <jar_file>,
'javadoc' : <jar_file>,
<other_confs> : <jar_file>,
}
"""
mapping = defaultdict(dict)
jar_products = classpath_products.get_artifact_classpath_entries_for_targets(targets, respect_excludes=False)
for (conf, jar_entry) in jar_products:
conf = jar_entry.coordinate.classifier or 'default'
mapping[self._jar_id(jar_entry.coordinate)][conf] = jar_entry.cache_path # depends on [control=['for'], data=[]]
return mapping |
def add_datasets(self, datasets, datasets_to_check=None):
# type: (List[Union[hdx.data.dataset.Dataset,Dict,str]], List[hdx.data.dataset.Dataset]) -> bool
"""Add multiple datasets
Args:
datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries
datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.
Returns:
bool: True if all datasets added or False if any already present
"""
if datasets_to_check is None:
datasets_to_check = self.get_datasets()
alldatasetsadded = True
for dataset in datasets:
if not self.add_dataset(dataset, datasets_to_check=datasets_to_check):
alldatasetsadded = False
return alldatasetsadded | def function[add_datasets, parameter[self, datasets, datasets_to_check]]:
constant[Add multiple datasets
Args:
datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries
datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.
Returns:
bool: True if all datasets added or False if any already present
]
if compare[name[datasets_to_check] is constant[None]] begin[:]
variable[datasets_to_check] assign[=] call[name[self].get_datasets, parameter[]]
variable[alldatasetsadded] assign[=] constant[True]
for taget[name[dataset]] in starred[name[datasets]] begin[:]
if <ast.UnaryOp object at 0x7da20cabf9a0> begin[:]
variable[alldatasetsadded] assign[=] constant[False]
return[name[alldatasetsadded]] | keyword[def] identifier[add_datasets] ( identifier[self] , identifier[datasets] , identifier[datasets_to_check] = keyword[None] ):
literal[string]
keyword[if] identifier[datasets_to_check] keyword[is] keyword[None] :
identifier[datasets_to_check] = identifier[self] . identifier[get_datasets] ()
identifier[alldatasetsadded] = keyword[True]
keyword[for] identifier[dataset] keyword[in] identifier[datasets] :
keyword[if] keyword[not] identifier[self] . identifier[add_dataset] ( identifier[dataset] , identifier[datasets_to_check] = identifier[datasets_to_check] ):
identifier[alldatasetsadded] = keyword[False]
keyword[return] identifier[alldatasetsadded] | def add_datasets(self, datasets, datasets_to_check=None):
# type: (List[Union[hdx.data.dataset.Dataset,Dict,str]], List[hdx.data.dataset.Dataset]) -> bool
'Add multiple datasets\n\n Args:\n datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries\n datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.\n\n Returns:\n bool: True if all datasets added or False if any already present\n '
if datasets_to_check is None:
datasets_to_check = self.get_datasets() # depends on [control=['if'], data=['datasets_to_check']]
alldatasetsadded = True
for dataset in datasets:
if not self.add_dataset(dataset, datasets_to_check=datasets_to_check):
alldatasetsadded = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dataset']]
return alldatasetsadded |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.