code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def get_grading_agent_id(self):
"""Gets the ``Id`` of the ``Agent`` that created this entry.
return: (osid.id.Id) - the ``Id`` of the ``Agent``
raise: IllegalState - ``is_graded()`` is ``false`` or
``is_derived()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if not self.is_graded or self.is_derived():
raise errors.IllegalState()
return Id(self._my_map['gradingAgentId']) | def function[get_grading_agent_id, parameter[self]]:
constant[Gets the ``Id`` of the ``Agent`` that created this entry.
return: (osid.id.Id) - the ``Id`` of the ``Agent``
raise: IllegalState - ``is_graded()`` is ``false`` or
``is_derived()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
]
if <ast.BoolOp object at 0x7da20c7c85b0> begin[:]
<ast.Raise object at 0x7da1b26ad150>
return[call[name[Id], parameter[call[name[self]._my_map][constant[gradingAgentId]]]]] | keyword[def] identifier[get_grading_agent_id] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_graded] keyword[or] identifier[self] . identifier[is_derived] ():
keyword[raise] identifier[errors] . identifier[IllegalState] ()
keyword[return] identifier[Id] ( identifier[self] . identifier[_my_map] [ literal[string] ]) | def get_grading_agent_id(self):
"""Gets the ``Id`` of the ``Agent`` that created this entry.
return: (osid.id.Id) - the ``Id`` of the ``Agent``
raise: IllegalState - ``is_graded()`` is ``false`` or
``is_derived()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if not self.is_graded or self.is_derived():
raise errors.IllegalState() # depends on [control=['if'], data=[]]
return Id(self._my_map['gradingAgentId']) |
def removeStage(self, personID, nextStageIndex):
"""removeStage(string, int)
Removes the nth next stage
nextStageIndex must be lower then value of getRemainingStages(personID)
nextStageIndex 0 immediately aborts the current stage and proceeds to the next stage
"""
self._connection._beginMessage(
tc.CMD_SET_PERSON_VARIABLE, tc.REMOVE_STAGE, personID, 1 + 4)
self._connection._string += struct.pack("!Bi",
tc.TYPE_INTEGER, nextStageIndex)
self._connection._sendExact() | def function[removeStage, parameter[self, personID, nextStageIndex]]:
constant[removeStage(string, int)
Removes the nth next stage
nextStageIndex must be lower then value of getRemainingStages(personID)
nextStageIndex 0 immediately aborts the current stage and proceeds to the next stage
]
call[name[self]._connection._beginMessage, parameter[name[tc].CMD_SET_PERSON_VARIABLE, name[tc].REMOVE_STAGE, name[personID], binary_operation[constant[1] + constant[4]]]]
<ast.AugAssign object at 0x7da1b09bbd90>
call[name[self]._connection._sendExact, parameter[]] | keyword[def] identifier[removeStage] ( identifier[self] , identifier[personID] , identifier[nextStageIndex] ):
literal[string]
identifier[self] . identifier[_connection] . identifier[_beginMessage] (
identifier[tc] . identifier[CMD_SET_PERSON_VARIABLE] , identifier[tc] . identifier[REMOVE_STAGE] , identifier[personID] , literal[int] + literal[int] )
identifier[self] . identifier[_connection] . identifier[_string] += identifier[struct] . identifier[pack] ( literal[string] ,
identifier[tc] . identifier[TYPE_INTEGER] , identifier[nextStageIndex] )
identifier[self] . identifier[_connection] . identifier[_sendExact] () | def removeStage(self, personID, nextStageIndex):
"""removeStage(string, int)
Removes the nth next stage
nextStageIndex must be lower then value of getRemainingStages(personID)
nextStageIndex 0 immediately aborts the current stage and proceeds to the next stage
"""
self._connection._beginMessage(tc.CMD_SET_PERSON_VARIABLE, tc.REMOVE_STAGE, personID, 1 + 4)
self._connection._string += struct.pack('!Bi', tc.TYPE_INTEGER, nextStageIndex)
self._connection._sendExact() |
def _handle_message(self, room, event) -> bool:
""" Handle text messages sent to listening rooms """
if (
event['type'] != 'm.room.message' or
event['content']['msgtype'] != 'm.text' or
self._stop_event.ready()
):
# Ignore non-messages and non-text messages
return False
sender_id = event['sender']
if sender_id == self._user_id:
# Ignore our own messages
return False
user = self._get_user(sender_id)
peer_address = validate_userid_signature(user)
if not peer_address:
self.log.debug(
'Message from invalid user displayName signature',
peer_user=user.user_id,
room=room,
)
return False
# don't proceed if user isn't whitelisted (yet)
if not self._address_mgr.is_address_known(peer_address):
# user not start_health_check'ed
self.log.debug(
'Message from non-whitelisted peer - ignoring',
sender=user,
sender_address=pex(peer_address),
room=room,
)
return False
# rooms we created and invited user, or were invited specifically by them
room_ids = self._get_room_ids_for_address(peer_address)
# TODO: Remove clause after `and` and check if things still don't hang
if room.room_id not in room_ids and (self._private_rooms and not room.invite_only):
# this should not happen, but is not fatal, as we may not know user yet
if self._private_rooms and not room.invite_only:
reason = 'required private room, but received message in a public'
else:
reason = 'unknown room for user'
self.log.debug(
'Ignoring invalid message',
peer_user=user.user_id,
peer_address=pex(peer_address),
room=room,
expected_room_ids=room_ids,
reason=reason,
)
return False
# TODO: With the condition in the TODO above restored this one won't have an effect, check
# if it can be removed after the above is solved
if not room_ids or room.room_id != room_ids[0]:
self.log.debug(
'Received message triggered new comms room for peer',
peer_user=user.user_id,
peer_address=pex(peer_address),
known_user_rooms=room_ids,
room=room,
)
self._set_room_id_for_address(peer_address, room.room_id)
is_peer_reachable = self._address_mgr.get_address_reachability(peer_address) is (
AddressReachability.REACHABLE
)
if not is_peer_reachable:
self.log.debug('Forcing presence update', peer_address=peer_address, user_id=sender_id)
self._address_mgr.force_user_presence(user, UserPresence.ONLINE)
self._address_mgr.refresh_address_presence(peer_address)
data = event['content']['body']
if not isinstance(data, str):
self.log.warning(
'Received message body not a string',
peer_user=user.user_id,
peer_address=to_checksum_address(peer_address),
room=room,
)
return False
messages: List[Message] = list()
if data.startswith('0x'):
try:
message = message_from_bytes(decode_hex(data))
if not message:
raise InvalidProtocolMessage
except (DecodeError, AssertionError) as ex:
self.log.warning(
"Can't parse message binary data",
message_data=data,
peer_address=pex(peer_address),
_exc=ex,
)
return False
except InvalidProtocolMessage as ex:
self.log.warning(
'Received message binary data is not a valid message',
message_data=data,
peer_address=pex(peer_address),
_exc=ex,
)
return False
else:
messages.append(message)
else:
for line in data.splitlines():
line = line.strip()
if not line:
continue
try:
message_dict = json.loads(line)
message = message_from_dict(message_dict)
except (UnicodeDecodeError, json.JSONDecodeError) as ex:
self.log.warning(
"Can't parse message data JSON",
message_data=line,
peer_address=pex(peer_address),
_exc=ex,
)
continue
except InvalidProtocolMessage as ex:
self.log.warning(
"Message data JSON are not a valid message",
message_data=line,
peer_address=pex(peer_address),
_exc=ex,
)
continue
if not isinstance(message, (SignedRetrieableMessage, SignedMessage)):
self.log.warning(
'Received invalid message',
message=message,
)
continue
elif message.sender != peer_address:
self.log.warning(
'Message not signed by sender!',
message=message,
signer=message.sender,
peer_address=peer_address,
)
continue
messages.append(message)
if not messages:
return False
self.log.debug(
'Incoming messages',
messages=messages,
sender=pex(peer_address),
sender_user=user,
room=room,
)
for message in messages:
if isinstance(message, Delivered):
self._receive_delivered(message)
elif isinstance(message, Processed):
self._receive_message(message)
else:
assert isinstance(message, SignedRetrieableMessage)
self._receive_message(message)
return True | def function[_handle_message, parameter[self, room, event]]:
constant[ Handle text messages sent to listening rooms ]
if <ast.BoolOp object at 0x7da1b170c130> begin[:]
return[constant[False]]
variable[sender_id] assign[=] call[name[event]][constant[sender]]
if compare[name[sender_id] equal[==] name[self]._user_id] begin[:]
return[constant[False]]
variable[user] assign[=] call[name[self]._get_user, parameter[name[sender_id]]]
variable[peer_address] assign[=] call[name[validate_userid_signature], parameter[name[user]]]
if <ast.UnaryOp object at 0x7da1b19f2680> begin[:]
call[name[self].log.debug, parameter[constant[Message from invalid user displayName signature]]]
return[constant[False]]
if <ast.UnaryOp object at 0x7da1b1950b50> begin[:]
call[name[self].log.debug, parameter[constant[Message from non-whitelisted peer - ignoring]]]
return[constant[False]]
variable[room_ids] assign[=] call[name[self]._get_room_ids_for_address, parameter[name[peer_address]]]
if <ast.BoolOp object at 0x7da1b1951090> begin[:]
if <ast.BoolOp object at 0x7da1b1950fd0> begin[:]
variable[reason] assign[=] constant[required private room, but received message in a public]
call[name[self].log.debug, parameter[constant[Ignoring invalid message]]]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b1708250> begin[:]
call[name[self].log.debug, parameter[constant[Received message triggered new comms room for peer]]]
call[name[self]._set_room_id_for_address, parameter[name[peer_address], name[room].room_id]]
variable[is_peer_reachable] assign[=] compare[call[name[self]._address_mgr.get_address_reachability, parameter[name[peer_address]]] is name[AddressReachability].REACHABLE]
if <ast.UnaryOp object at 0x7da1b1709bd0> begin[:]
call[name[self].log.debug, parameter[constant[Forcing presence update]]]
call[name[self]._address_mgr.force_user_presence, parameter[name[user], name[UserPresence].ONLINE]]
call[name[self]._address_mgr.refresh_address_presence, parameter[name[peer_address]]]
variable[data] assign[=] call[call[name[event]][constant[content]]][constant[body]]
if <ast.UnaryOp object at 0x7da1b170bb50> begin[:]
call[name[self].log.warning, parameter[constant[Received message body not a string]]]
return[constant[False]]
<ast.AnnAssign object at 0x7da1b1709780>
if call[name[data].startswith, parameter[constant[0x]]] begin[:]
<ast.Try object at 0x7da1b170b880>
if <ast.UnaryOp object at 0x7da1b175ee30> begin[:]
return[constant[False]]
call[name[self].log.debug, parameter[constant[Incoming messages]]]
for taget[name[message]] in starred[name[messages]] begin[:]
if call[name[isinstance], parameter[name[message], name[Delivered]]] begin[:]
call[name[self]._receive_delivered, parameter[name[message]]]
return[constant[True]] | keyword[def] identifier[_handle_message] ( identifier[self] , identifier[room] , identifier[event] )-> identifier[bool] :
literal[string]
keyword[if] (
identifier[event] [ literal[string] ]!= literal[string] keyword[or]
identifier[event] [ literal[string] ][ literal[string] ]!= literal[string] keyword[or]
identifier[self] . identifier[_stop_event] . identifier[ready] ()
):
keyword[return] keyword[False]
identifier[sender_id] = identifier[event] [ literal[string] ]
keyword[if] identifier[sender_id] == identifier[self] . identifier[_user_id] :
keyword[return] keyword[False]
identifier[user] = identifier[self] . identifier[_get_user] ( identifier[sender_id] )
identifier[peer_address] = identifier[validate_userid_signature] ( identifier[user] )
keyword[if] keyword[not] identifier[peer_address] :
identifier[self] . identifier[log] . identifier[debug] (
literal[string] ,
identifier[peer_user] = identifier[user] . identifier[user_id] ,
identifier[room] = identifier[room] ,
)
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[self] . identifier[_address_mgr] . identifier[is_address_known] ( identifier[peer_address] ):
identifier[self] . identifier[log] . identifier[debug] (
literal[string] ,
identifier[sender] = identifier[user] ,
identifier[sender_address] = identifier[pex] ( identifier[peer_address] ),
identifier[room] = identifier[room] ,
)
keyword[return] keyword[False]
identifier[room_ids] = identifier[self] . identifier[_get_room_ids_for_address] ( identifier[peer_address] )
keyword[if] identifier[room] . identifier[room_id] keyword[not] keyword[in] identifier[room_ids] keyword[and] ( identifier[self] . identifier[_private_rooms] keyword[and] keyword[not] identifier[room] . identifier[invite_only] ):
keyword[if] identifier[self] . identifier[_private_rooms] keyword[and] keyword[not] identifier[room] . identifier[invite_only] :
identifier[reason] = literal[string]
keyword[else] :
identifier[reason] = literal[string]
identifier[self] . identifier[log] . identifier[debug] (
literal[string] ,
identifier[peer_user] = identifier[user] . identifier[user_id] ,
identifier[peer_address] = identifier[pex] ( identifier[peer_address] ),
identifier[room] = identifier[room] ,
identifier[expected_room_ids] = identifier[room_ids] ,
identifier[reason] = identifier[reason] ,
)
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[room_ids] keyword[or] identifier[room] . identifier[room_id] != identifier[room_ids] [ literal[int] ]:
identifier[self] . identifier[log] . identifier[debug] (
literal[string] ,
identifier[peer_user] = identifier[user] . identifier[user_id] ,
identifier[peer_address] = identifier[pex] ( identifier[peer_address] ),
identifier[known_user_rooms] = identifier[room_ids] ,
identifier[room] = identifier[room] ,
)
identifier[self] . identifier[_set_room_id_for_address] ( identifier[peer_address] , identifier[room] . identifier[room_id] )
identifier[is_peer_reachable] = identifier[self] . identifier[_address_mgr] . identifier[get_address_reachability] ( identifier[peer_address] ) keyword[is] (
identifier[AddressReachability] . identifier[REACHABLE]
)
keyword[if] keyword[not] identifier[is_peer_reachable] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[peer_address] = identifier[peer_address] , identifier[user_id] = identifier[sender_id] )
identifier[self] . identifier[_address_mgr] . identifier[force_user_presence] ( identifier[user] , identifier[UserPresence] . identifier[ONLINE] )
identifier[self] . identifier[_address_mgr] . identifier[refresh_address_presence] ( identifier[peer_address] )
identifier[data] = identifier[event] [ literal[string] ][ literal[string] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[str] ):
identifier[self] . identifier[log] . identifier[warning] (
literal[string] ,
identifier[peer_user] = identifier[user] . identifier[user_id] ,
identifier[peer_address] = identifier[to_checksum_address] ( identifier[peer_address] ),
identifier[room] = identifier[room] ,
)
keyword[return] keyword[False]
identifier[messages] : identifier[List] [ identifier[Message] ]= identifier[list] ()
keyword[if] identifier[data] . identifier[startswith] ( literal[string] ):
keyword[try] :
identifier[message] = identifier[message_from_bytes] ( identifier[decode_hex] ( identifier[data] ))
keyword[if] keyword[not] identifier[message] :
keyword[raise] identifier[InvalidProtocolMessage]
keyword[except] ( identifier[DecodeError] , identifier[AssertionError] ) keyword[as] identifier[ex] :
identifier[self] . identifier[log] . identifier[warning] (
literal[string] ,
identifier[message_data] = identifier[data] ,
identifier[peer_address] = identifier[pex] ( identifier[peer_address] ),
identifier[_exc] = identifier[ex] ,
)
keyword[return] keyword[False]
keyword[except] identifier[InvalidProtocolMessage] keyword[as] identifier[ex] :
identifier[self] . identifier[log] . identifier[warning] (
literal[string] ,
identifier[message_data] = identifier[data] ,
identifier[peer_address] = identifier[pex] ( identifier[peer_address] ),
identifier[_exc] = identifier[ex] ,
)
keyword[return] keyword[False]
keyword[else] :
identifier[messages] . identifier[append] ( identifier[message] )
keyword[else] :
keyword[for] identifier[line] keyword[in] identifier[data] . identifier[splitlines] ():
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] keyword[not] identifier[line] :
keyword[continue]
keyword[try] :
identifier[message_dict] = identifier[json] . identifier[loads] ( identifier[line] )
identifier[message] = identifier[message_from_dict] ( identifier[message_dict] )
keyword[except] ( identifier[UnicodeDecodeError] , identifier[json] . identifier[JSONDecodeError] ) keyword[as] identifier[ex] :
identifier[self] . identifier[log] . identifier[warning] (
literal[string] ,
identifier[message_data] = identifier[line] ,
identifier[peer_address] = identifier[pex] ( identifier[peer_address] ),
identifier[_exc] = identifier[ex] ,
)
keyword[continue]
keyword[except] identifier[InvalidProtocolMessage] keyword[as] identifier[ex] :
identifier[self] . identifier[log] . identifier[warning] (
literal[string] ,
identifier[message_data] = identifier[line] ,
identifier[peer_address] = identifier[pex] ( identifier[peer_address] ),
identifier[_exc] = identifier[ex] ,
)
keyword[continue]
keyword[if] keyword[not] identifier[isinstance] ( identifier[message] ,( identifier[SignedRetrieableMessage] , identifier[SignedMessage] )):
identifier[self] . identifier[log] . identifier[warning] (
literal[string] ,
identifier[message] = identifier[message] ,
)
keyword[continue]
keyword[elif] identifier[message] . identifier[sender] != identifier[peer_address] :
identifier[self] . identifier[log] . identifier[warning] (
literal[string] ,
identifier[message] = identifier[message] ,
identifier[signer] = identifier[message] . identifier[sender] ,
identifier[peer_address] = identifier[peer_address] ,
)
keyword[continue]
identifier[messages] . identifier[append] ( identifier[message] )
keyword[if] keyword[not] identifier[messages] :
keyword[return] keyword[False]
identifier[self] . identifier[log] . identifier[debug] (
literal[string] ,
identifier[messages] = identifier[messages] ,
identifier[sender] = identifier[pex] ( identifier[peer_address] ),
identifier[sender_user] = identifier[user] ,
identifier[room] = identifier[room] ,
)
keyword[for] identifier[message] keyword[in] identifier[messages] :
keyword[if] identifier[isinstance] ( identifier[message] , identifier[Delivered] ):
identifier[self] . identifier[_receive_delivered] ( identifier[message] )
keyword[elif] identifier[isinstance] ( identifier[message] , identifier[Processed] ):
identifier[self] . identifier[_receive_message] ( identifier[message] )
keyword[else] :
keyword[assert] identifier[isinstance] ( identifier[message] , identifier[SignedRetrieableMessage] )
identifier[self] . identifier[_receive_message] ( identifier[message] )
keyword[return] keyword[True] | def _handle_message(self, room, event) -> bool:
""" Handle text messages sent to listening rooms """
if event['type'] != 'm.room.message' or event['content']['msgtype'] != 'm.text' or self._stop_event.ready():
# Ignore non-messages and non-text messages
return False # depends on [control=['if'], data=[]]
sender_id = event['sender']
if sender_id == self._user_id:
# Ignore our own messages
return False # depends on [control=['if'], data=[]]
user = self._get_user(sender_id)
peer_address = validate_userid_signature(user)
if not peer_address:
self.log.debug('Message from invalid user displayName signature', peer_user=user.user_id, room=room)
return False # depends on [control=['if'], data=[]]
# don't proceed if user isn't whitelisted (yet)
if not self._address_mgr.is_address_known(peer_address):
# user not start_health_check'ed
self.log.debug('Message from non-whitelisted peer - ignoring', sender=user, sender_address=pex(peer_address), room=room)
return False # depends on [control=['if'], data=[]]
# rooms we created and invited user, or were invited specifically by them
room_ids = self._get_room_ids_for_address(peer_address)
# TODO: Remove clause after `and` and check if things still don't hang
if room.room_id not in room_ids and (self._private_rooms and (not room.invite_only)):
# this should not happen, but is not fatal, as we may not know user yet
if self._private_rooms and (not room.invite_only):
reason = 'required private room, but received message in a public' # depends on [control=['if'], data=[]]
else:
reason = 'unknown room for user'
self.log.debug('Ignoring invalid message', peer_user=user.user_id, peer_address=pex(peer_address), room=room, expected_room_ids=room_ids, reason=reason)
return False # depends on [control=['if'], data=[]]
# TODO: With the condition in the TODO above restored this one won't have an effect, check
# if it can be removed after the above is solved
if not room_ids or room.room_id != room_ids[0]:
self.log.debug('Received message triggered new comms room for peer', peer_user=user.user_id, peer_address=pex(peer_address), known_user_rooms=room_ids, room=room)
self._set_room_id_for_address(peer_address, room.room_id) # depends on [control=['if'], data=[]]
is_peer_reachable = self._address_mgr.get_address_reachability(peer_address) is AddressReachability.REACHABLE
if not is_peer_reachable:
self.log.debug('Forcing presence update', peer_address=peer_address, user_id=sender_id)
self._address_mgr.force_user_presence(user, UserPresence.ONLINE)
self._address_mgr.refresh_address_presence(peer_address) # depends on [control=['if'], data=[]]
data = event['content']['body']
if not isinstance(data, str):
self.log.warning('Received message body not a string', peer_user=user.user_id, peer_address=to_checksum_address(peer_address), room=room)
return False # depends on [control=['if'], data=[]]
messages: List[Message] = list()
if data.startswith('0x'):
try:
message = message_from_bytes(decode_hex(data))
if not message:
raise InvalidProtocolMessage # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (DecodeError, AssertionError) as ex:
self.log.warning("Can't parse message binary data", message_data=data, peer_address=pex(peer_address), _exc=ex)
return False # depends on [control=['except'], data=['ex']]
except InvalidProtocolMessage as ex:
self.log.warning('Received message binary data is not a valid message', message_data=data, peer_address=pex(peer_address), _exc=ex)
return False # depends on [control=['except'], data=['ex']]
else:
messages.append(message) # depends on [control=['if'], data=[]]
else:
for line in data.splitlines():
line = line.strip()
if not line:
continue # depends on [control=['if'], data=[]]
try:
message_dict = json.loads(line)
message = message_from_dict(message_dict) # depends on [control=['try'], data=[]]
except (UnicodeDecodeError, json.JSONDecodeError) as ex:
self.log.warning("Can't parse message data JSON", message_data=line, peer_address=pex(peer_address), _exc=ex)
continue # depends on [control=['except'], data=['ex']]
except InvalidProtocolMessage as ex:
self.log.warning('Message data JSON are not a valid message', message_data=line, peer_address=pex(peer_address), _exc=ex)
continue # depends on [control=['except'], data=['ex']]
if not isinstance(message, (SignedRetrieableMessage, SignedMessage)):
self.log.warning('Received invalid message', message=message)
continue # depends on [control=['if'], data=[]]
elif message.sender != peer_address:
self.log.warning('Message not signed by sender!', message=message, signer=message.sender, peer_address=peer_address)
continue # depends on [control=['if'], data=['peer_address']]
messages.append(message) # depends on [control=['for'], data=['line']]
if not messages:
return False # depends on [control=['if'], data=[]]
self.log.debug('Incoming messages', messages=messages, sender=pex(peer_address), sender_user=user, room=room)
for message in messages:
if isinstance(message, Delivered):
self._receive_delivered(message) # depends on [control=['if'], data=[]]
elif isinstance(message, Processed):
self._receive_message(message) # depends on [control=['if'], data=[]]
else:
assert isinstance(message, SignedRetrieableMessage)
self._receive_message(message) # depends on [control=['for'], data=['message']]
return True |
def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required,
app_id=Required, clear=False):
"""Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid.
"""
# Perform the malloc
start_address = self.sdram_alloc(size, tag, x, y, app_id, clear)
return MemoryIO(self, x, y, start_address, start_address + size) | def function[sdram_alloc_as_filelike, parameter[self, size, tag, x, y, app_id, clear]]:
constant[Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid.
]
variable[start_address] assign[=] call[name[self].sdram_alloc, parameter[name[size], name[tag], name[x], name[y], name[app_id], name[clear]]]
return[call[name[MemoryIO], parameter[name[self], name[x], name[y], name[start_address], binary_operation[name[start_address] + name[size]]]]] | keyword[def] identifier[sdram_alloc_as_filelike] ( identifier[self] , identifier[size] , identifier[tag] = literal[int] , identifier[x] = identifier[Required] , identifier[y] = identifier[Required] ,
identifier[app_id] = identifier[Required] , identifier[clear] = keyword[False] ):
literal[string]
identifier[start_address] = identifier[self] . identifier[sdram_alloc] ( identifier[size] , identifier[tag] , identifier[x] , identifier[y] , identifier[app_id] , identifier[clear] )
keyword[return] identifier[MemoryIO] ( identifier[self] , identifier[x] , identifier[y] , identifier[start_address] , identifier[start_address] + identifier[size] ) | def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required, app_id=Required, clear=False):
"""Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid.
"""
# Perform the malloc
start_address = self.sdram_alloc(size, tag, x, y, app_id, clear)
return MemoryIO(self, x, y, start_address, start_address + size) |
def _parse_wikilink(self):
"""Parse an internal wikilink at the head of the wikicode string."""
reset = self._head + 1
self._head += 2
try:
# If the wikilink looks like an external link, parse it as such:
link, extra, delta = self._really_parse_external_link(True)
except BadRoute:
self._head = reset + 1
try:
# Otherwise, actually parse it as a wikilink:
wikilink = self._parse(contexts.WIKILINK_TITLE)
except BadRoute:
self._head = reset
self._emit_text("[[")
else:
self._emit(tokens.WikilinkOpen())
self._emit_all(wikilink)
self._emit(tokens.WikilinkClose())
else:
if self._context & contexts.EXT_LINK_TITLE:
# In this exceptional case, an external link that looks like a
# wikilink inside of an external link is parsed as text:
self._head = reset
self._emit_text("[[")
return
self._emit_text("[")
self._emit(tokens.ExternalLinkOpen(brackets=True))
self._emit_all(link)
self._emit(tokens.ExternalLinkClose()) | def function[_parse_wikilink, parameter[self]]:
constant[Parse an internal wikilink at the head of the wikicode string.]
variable[reset] assign[=] binary_operation[name[self]._head + constant[1]]
<ast.AugAssign object at 0x7da18c4cea10>
<ast.Try object at 0x7da18c4ceef0> | keyword[def] identifier[_parse_wikilink] ( identifier[self] ):
literal[string]
identifier[reset] = identifier[self] . identifier[_head] + literal[int]
identifier[self] . identifier[_head] += literal[int]
keyword[try] :
identifier[link] , identifier[extra] , identifier[delta] = identifier[self] . identifier[_really_parse_external_link] ( keyword[True] )
keyword[except] identifier[BadRoute] :
identifier[self] . identifier[_head] = identifier[reset] + literal[int]
keyword[try] :
identifier[wikilink] = identifier[self] . identifier[_parse] ( identifier[contexts] . identifier[WIKILINK_TITLE] )
keyword[except] identifier[BadRoute] :
identifier[self] . identifier[_head] = identifier[reset]
identifier[self] . identifier[_emit_text] ( literal[string] )
keyword[else] :
identifier[self] . identifier[_emit] ( identifier[tokens] . identifier[WikilinkOpen] ())
identifier[self] . identifier[_emit_all] ( identifier[wikilink] )
identifier[self] . identifier[_emit] ( identifier[tokens] . identifier[WikilinkClose] ())
keyword[else] :
keyword[if] identifier[self] . identifier[_context] & identifier[contexts] . identifier[EXT_LINK_TITLE] :
identifier[self] . identifier[_head] = identifier[reset]
identifier[self] . identifier[_emit_text] ( literal[string] )
keyword[return]
identifier[self] . identifier[_emit_text] ( literal[string] )
identifier[self] . identifier[_emit] ( identifier[tokens] . identifier[ExternalLinkOpen] ( identifier[brackets] = keyword[True] ))
identifier[self] . identifier[_emit_all] ( identifier[link] )
identifier[self] . identifier[_emit] ( identifier[tokens] . identifier[ExternalLinkClose] ()) | def _parse_wikilink(self):
"""Parse an internal wikilink at the head of the wikicode string."""
reset = self._head + 1
self._head += 2
try:
# If the wikilink looks like an external link, parse it as such:
(link, extra, delta) = self._really_parse_external_link(True) # depends on [control=['try'], data=[]]
except BadRoute:
self._head = reset + 1
try:
# Otherwise, actually parse it as a wikilink:
wikilink = self._parse(contexts.WIKILINK_TITLE) # depends on [control=['try'], data=[]]
except BadRoute:
self._head = reset
self._emit_text('[[') # depends on [control=['except'], data=[]]
else:
self._emit(tokens.WikilinkOpen())
self._emit_all(wikilink)
self._emit(tokens.WikilinkClose()) # depends on [control=['except'], data=[]]
else:
if self._context & contexts.EXT_LINK_TITLE:
# In this exceptional case, an external link that looks like a
# wikilink inside of an external link is parsed as text:
self._head = reset
self._emit_text('[[')
return # depends on [control=['if'], data=[]]
self._emit_text('[')
self._emit(tokens.ExternalLinkOpen(brackets=True))
self._emit_all(link)
self._emit(tokens.ExternalLinkClose()) |
def get_organizations(self, permission='read'):
    # type: (str) -> List['Organization']
    """Get organizations in HDX that this user is a member of.

    Args:
        permission (str): Permission to check for. Defaults to 'read'.

    Returns:
        List[Organization]: Organizations in HDX that this user is a member of
    """
    success, result = self._read_from_hdx(
        'user', self.data['name'], 'id', self.actions()['listorgs'],
        permission=permission)
    if not success:
        return []
    # Resolve every organization id returned by HDX into a full object.
    return [hdx.data.organization.Organization.read_from_hdx(org['id'])
            for org in result]
constant[Get organizations in HDX that this user is a member of.
Args:
permission (str): Permission to check for. Defaults to 'read'.
Returns:
List[Organization]: List of organizations in HDX that this user is a member of
]
<ast.Tuple object at 0x7da1b0e306d0> assign[=] call[name[self]._read_from_hdx, parameter[constant[user], call[name[self].data][constant[name]], constant[id], call[call[name[self].actions, parameter[]]][constant[listorgs]]]]
variable[organizations] assign[=] call[name[list], parameter[]]
if name[success] begin[:]
for taget[name[organizationdict]] in starred[name[result]] begin[:]
variable[organization] assign[=] call[name[hdx].data.organization.Organization.read_from_hdx, parameter[call[name[organizationdict]][constant[id]]]]
call[name[organizations].append, parameter[name[organization]]]
return[name[organizations]] | keyword[def] identifier[get_organizations] ( identifier[self] , identifier[permission] = literal[string] ):
literal[string]
identifier[success] , identifier[result] = identifier[self] . identifier[_read_from_hdx] ( literal[string] , identifier[self] . identifier[data] [ literal[string] ], literal[string] , identifier[self] . identifier[actions] ()[ literal[string] ],
identifier[permission] = identifier[permission] )
identifier[organizations] = identifier[list] ()
keyword[if] identifier[success] :
keyword[for] identifier[organizationdict] keyword[in] identifier[result] :
identifier[organization] = identifier[hdx] . identifier[data] . identifier[organization] . identifier[Organization] . identifier[read_from_hdx] ( identifier[organizationdict] [ literal[string] ])
identifier[organizations] . identifier[append] ( identifier[organization] )
keyword[return] identifier[organizations] | def get_organizations(self, permission='read'):
# type: (str) -> List['Organization']
"Get organizations in HDX that this user is a member of.\n\n Args:\n permission (str): Permission to check for. Defaults to 'read'.\n\n Returns:\n List[Organization]: List of organizations in HDX that this user is a member of\n "
(success, result) = self._read_from_hdx('user', self.data['name'], 'id', self.actions()['listorgs'], permission=permission)
organizations = list()
if success:
for organizationdict in result:
organization = hdx.data.organization.Organization.read_from_hdx(organizationdict['id'])
organizations.append(organization) # depends on [control=['for'], data=['organizationdict']] # depends on [control=['if'], data=[]]
return organizations |
def console_print_rect_ex(
    con: tcod.console.Console,
    x: int,
    y: int,
    w: int,
    h: int,
    flag: int,
    alignment: int,
    fmt: str,
) -> int:
    """Print a string constrained to a rectangle with blend and alignment.

    Returns:
        int: The number of lines of text once word-wrapped.

    .. deprecated:: 8.5
        Use :any:`Console.print_rect` instead.
    """
    line_count = lib.TCOD_console_printf_rect_ex(
        _console(con), x, y, w, h, flag, alignment, _fmt(fmt)
    )
    # The C call reports how many lines the word-wrapped text occupied.
    return int(line_count)
constant[Print a string constrained to a rectangle with blend and alignment.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Use :any:`Console.print_rect` instead.
]
return[call[name[int], parameter[call[name[lib].TCOD_console_printf_rect_ex, parameter[call[name[_console], parameter[name[con]]], name[x], name[y], name[w], name[h], name[flag], name[alignment], call[name[_fmt], parameter[name[fmt]]]]]]]] | keyword[def] identifier[console_print_rect_ex] (
identifier[con] : identifier[tcod] . identifier[console] . identifier[Console] ,
identifier[x] : identifier[int] ,
identifier[y] : identifier[int] ,
identifier[w] : identifier[int] ,
identifier[h] : identifier[int] ,
identifier[flag] : identifier[int] ,
identifier[alignment] : identifier[int] ,
identifier[fmt] : identifier[str] ,
)-> identifier[int] :
literal[string]
keyword[return] identifier[int] (
identifier[lib] . identifier[TCOD_console_printf_rect_ex] (
identifier[_console] ( identifier[con] ), identifier[x] , identifier[y] , identifier[w] , identifier[h] , identifier[flag] , identifier[alignment] , identifier[_fmt] ( identifier[fmt] )
)
) | def console_print_rect_ex(con: tcod.console.Console, x: int, y: int, w: int, h: int, flag: int, alignment: int, fmt: str) -> int:
"""Print a string constrained to a rectangle with blend and alignment.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Use :any:`Console.print_rect` instead.
"""
return int(lib.TCOD_console_printf_rect_ex(_console(con), x, y, w, h, flag, alignment, _fmt(fmt))) |
def valid(self, individuals=None, F=None):
    """returns the sublist of individuals with valid fitness."""
    # Fall back to the population's own fitness array / individuals when
    # the caller does not supply them (same truthiness test as before).
    fitness = F if F else self.F
    keep = self.valid_loc(fitness)
    pool = individuals if individuals else self.pop.individuals
    return [ind for position, ind in enumerate(pool) if position in keep]
constant[returns the sublist of individuals with valid fitness.]
if name[F] begin[:]
variable[valid_locs] assign[=] call[name[self].valid_loc, parameter[name[F]]]
if name[individuals] begin[:]
return[<ast.ListComp object at 0x7da1b1970190>] | keyword[def] identifier[valid] ( identifier[self] , identifier[individuals] = keyword[None] , identifier[F] = keyword[None] ):
literal[string]
keyword[if] identifier[F] :
identifier[valid_locs] = identifier[self] . identifier[valid_loc] ( identifier[F] )
keyword[else] :
identifier[valid_locs] = identifier[self] . identifier[valid_loc] ( identifier[self] . identifier[F] )
keyword[if] identifier[individuals] :
keyword[return] [ identifier[ind] keyword[for] identifier[i] , identifier[ind] keyword[in] identifier[enumerate] ( identifier[individuals] ) keyword[if] identifier[i] keyword[in] identifier[valid_locs] ]
keyword[else] :
keyword[return] [ identifier[ind] keyword[for] identifier[i] , identifier[ind] keyword[in] identifier[enumerate] ( identifier[self] . identifier[pop] . identifier[individuals] ) keyword[if] identifier[i] keyword[in] identifier[valid_locs] ] | def valid(self, individuals=None, F=None):
"""returns the sublist of individuals with valid fitness."""
if F:
valid_locs = self.valid_loc(F) # depends on [control=['if'], data=[]]
else:
valid_locs = self.valid_loc(self.F)
if individuals:
return [ind for (i, ind) in enumerate(individuals) if i in valid_locs] # depends on [control=['if'], data=[]]
else:
return [ind for (i, ind) in enumerate(self.pop.individuals) if i in valid_locs] |
def parse_signature(cls, function):
    '''Parses the signature of a method and its annotations to swagger.

    Each annotation is expected to map a parameter name to a
    ``(param_type, param_obj)`` pair where ``param_obj.describe()``
    yields extra swagger fields.

    Return a list of parameter-description dictionaries.
    '''
    annotations = function.__annotations__.copy()
    # The return annotation is not a parameter; drop it if present
    # instead of raising KeyError for annotation-less return types.
    annotations.pop('return', None)
    result = []
    for param_name, (param_type, param_obj) in annotations.items():
        sig_param = function.signature.parameters[param_name]
        param_description = {
            'paramType': param_type,
            'name': param_name,
            # Required exactly when the parameter has no default value.
            'required': sig_param.default is inspect.Parameter.empty}
        param_description.update(param_obj.describe())
        result.append(param_description)
    return result
constant[Parses the signature of a method and its annotations to swagger.
Return a dictionary {arg_name: info}.
]
variable[annotations] assign[=] call[name[function].__annotations__.copy, parameter[]]
<ast.Delete object at 0x7da1b1528460>
variable[result] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b15c1210>, <ast.Tuple object at 0x7da1b15c1930>]]] in starred[call[name[annotations].items, parameter[]]] begin[:]
variable[sig_param] assign[=] call[name[function].signature.parameters][name[param_name]]
variable[param_description] assign[=] dictionary[[<ast.Constant object at 0x7da1b15f2470>, <ast.Constant object at 0x7da1b15f3d60>, <ast.Constant object at 0x7da1b15f3730>], [<ast.Name object at 0x7da1b15f37c0>, <ast.Name object at 0x7da1b15f3b80>, <ast.Compare object at 0x7da1b15f3d30>]]
call[name[param_description].update, parameter[call[name[param_obj].describe, parameter[]]]]
call[name[result].append, parameter[name[param_description]]]
return[name[result]] | keyword[def] identifier[parse_signature] ( identifier[cls] , identifier[function] ):
literal[string]
identifier[annotations] = identifier[function] . identifier[__annotations__] . identifier[copy] ()
keyword[del] identifier[annotations] [ literal[string] ]
identifier[result] =[]
keyword[for] identifier[param_name] ,( identifier[param_type] , identifier[param_obj] ) keyword[in] identifier[annotations] . identifier[items] ():
identifier[sig_param] = identifier[function] . identifier[signature] . identifier[parameters] [ identifier[param_name] ]
identifier[param_description] ={
literal[string] : identifier[param_type] ,
literal[string] : identifier[param_name] ,
literal[string] : identifier[sig_param] . identifier[default] keyword[is] identifier[inspect] . identifier[Parameter] . identifier[empty] }
identifier[param_description] . identifier[update] ( identifier[param_obj] . identifier[describe] ())
identifier[result] . identifier[append] ( identifier[param_description] )
keyword[return] identifier[result] | def parse_signature(cls, function):
"""Parses the signature of a method and its annotations to swagger.
Return a dictionary {arg_name: info}.
"""
annotations = function.__annotations__.copy()
del annotations['return']
result = []
for (param_name, (param_type, param_obj)) in annotations.items():
sig_param = function.signature.parameters[param_name]
param_description = {'paramType': param_type, 'name': param_name, 'required': sig_param.default is inspect.Parameter.empty}
param_description.update(param_obj.describe())
result.append(param_description) # depends on [control=['for'], data=[]]
return result |
def get(self, session, fields=None, **kwargs):
    '''taobao.items.get - search item information.

    Gets the list of items matching the supplied search criteria
    (similar to the item search on the Taobao website; only the matched
    item list is returned, without each item's ItemCategory list). Only
    partial item information is available here; fetch full details via
    taobao.item.get. Supplying only ``fields`` with no other criteria
    makes the API report insufficient search conditions. Querying with
    cid=0 is not supported.
    '''
    request = TOPRequest('taobao.items.get')
    if not fields:
        # Default to every known Item field.
        fields = Item().fields
    request['fields'] = fields
    known_keys = ('q', 'nicks', 'cid', 'props', 'product_id', 'page_no',
                  'order_by', 'ww_status', 'post_free', 'location_state',
                  'location_city', 'is_3D', 'start_score', 'end_score',
                  'start_volume', 'end_volume', 'one_station', 'is_cod',
                  'is_mall', 'is_prepay', 'genuine_security',
                  'stuff_status', 'start_price', 'end_price', 'page_size',
                  'promoted_service', 'is_xinpin')
    for k, v in kwargs.iteritems():
        # Skip unrecognised keyword arguments whose value is None.
        if k not in known_keys and v is None:
            continue
        # The API expects dotted names for the location filters.
        if k == 'location_state':
            k = 'location.state'
        if k == 'location_city':
            k = 'location.city'
        request[k] = v
    self.create(self.execute(request, session))
    return self.items
constant[taobao.items.get 搜索商品信息
根据传入的搜索条件,获取商品列表(类似于淘宝页面上的商品搜索功能,但是只有搜索到的商品列表,不包含商品的ItemCategory列表) 只能获得商品的部分信息,商品的详细信息请通过taobao.item.get获取 如果只输入fields其他条件都不输入,系统会因为搜索条件不足而报错。 不能通过设置cid=0来查询。]
variable[request] assign[=] call[name[TOPRequest], parameter[constant[taobao.items.get]]]
if <ast.UnaryOp object at 0x7da1b253b7c0> begin[:]
variable[item] assign[=] call[name[Item], parameter[]]
variable[fields] assign[=] name[item].fields
call[name[request]][constant[fields]] assign[=] name[fields]
for taget[tuple[[<ast.Name object at 0x7da1b25385e0>, <ast.Name object at 0x7da1b2539570>]]] in starred[call[name[kwargs].iteritems, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b2538670> begin[:]
continue
if compare[name[k] equal[==] constant[location_state]] begin[:]
variable[k] assign[=] constant[location.state]
if compare[name[k] equal[==] constant[location_city]] begin[:]
variable[k] assign[=] constant[location.city]
call[name[request]][name[k]] assign[=] name[v]
call[name[self].create, parameter[call[name[self].execute, parameter[name[request], name[session]]]]]
return[name[self].items] | keyword[def] identifier[get] ( identifier[self] , identifier[session] , identifier[fields] =[],** identifier[kwargs] ):
literal[string]
identifier[request] = identifier[TOPRequest] ( literal[string] )
keyword[if] keyword[not] identifier[fields] :
identifier[item] = identifier[Item] ()
identifier[fields] = identifier[item] . identifier[fields]
identifier[request] [ literal[string] ]= identifier[fields]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[iteritems] ():
keyword[if] identifier[k] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) keyword[and] identifier[v] == keyword[None] : keyword[continue]
keyword[if] identifier[k] == literal[string] : identifier[k] = literal[string]
keyword[if] identifier[k] == literal[string] : identifier[k] = literal[string]
identifier[request] [ identifier[k] ]= identifier[v]
identifier[self] . identifier[create] ( identifier[self] . identifier[execute] ( identifier[request] , identifier[session] ))
keyword[return] identifier[self] . identifier[items] | def get(self, session, fields=[], **kwargs):
"""taobao.items.get 搜索商品信息
根据传入的搜索条件,获取商品列表(类似于淘宝页面上的商品搜索功能,但是只有搜索到的商品列表,不包含商品的ItemCategory列表) 只能获得商品的部分信息,商品的详细信息请通过taobao.item.get获取 如果只输入fields其他条件都不输入,系统会因为搜索条件不足而报错。 不能通过设置cid=0来查询。"""
request = TOPRequest('taobao.items.get')
if not fields:
item = Item()
fields = item.fields # depends on [control=['if'], data=[]]
request['fields'] = fields
for (k, v) in kwargs.iteritems():
if k not in ('q', 'nicks', 'cid', 'props', 'product_id', 'page_no', 'order_by', 'ww_status', 'post_free', 'location_state', 'location_city', 'is_3D', 'start_score', 'end_score', 'start_volume', 'end_volume', 'one_station', 'is_cod', 'is_mall', 'is_prepay', 'genuine_security', 'stuff_status', 'start_price', 'end_price', 'page_size', 'promoted_service', 'is_xinpin') and v == None:
continue # depends on [control=['if'], data=[]]
if k == 'location_state':
k = 'location.state' # depends on [control=['if'], data=['k']]
if k == 'location_city':
k = 'location.city' # depends on [control=['if'], data=['k']]
request[k] = v # depends on [control=['for'], data=[]]
self.create(self.execute(request, session))
return self.items |
def get_rdf_es_idx_map(cls, idx_obj):
    """
    Returns an elasticsearch mapping for the specified index based off
    of the mapping defined by rdf class definitions

    args:
        idx_obj: Dictionary of the index and a list of rdfclasses
            included in the mapping
    """
    index_name = list(idx_obj)[0]
    # One document-type mapping per rdf class registered for the index.
    doc_mappings = {}
    for rdf_class in idx_obj[index_name]:
        doc_type = rdf_class.es_defs['kds_esDocType'][0]
        doc_mappings[doc_type] = {'properties': rdf_class.es_mapping(rdf_class)}
    analyzer_settings = {
        "analyzer": {
            "keylower": {
                "tokenizer": "keyword",
                "type": "custom",
                "filter": "lowercase",
                "ignore_above": 256
            }
        }
    }
    return {
        "index": index_name,
        "body": {
            "mappings": doc_mappings,
            "settings": {
                "index": {
                    "analysis": analyzer_settings
                }
            }
        }
    }
constant[
Returns an elasticsearch mapping for the specified index based off
of the mapping defined by rdf class definitions
args:
idx_obj: Dictionary of the index and a list of rdfclasses
included in the mapping
]
variable[idx_name] assign[=] call[call[name[list], parameter[name[idx_obj]]]][constant[0]]
variable[es_map] assign[=] dictionary[[<ast.Constant object at 0x7da1b138df00>, <ast.Constant object at 0x7da1b138e4d0>], [<ast.Name object at 0x7da1b138e740>, <ast.Dict object at 0x7da1b138e770>]]
for taget[name[idx_cls]] in starred[call[name[idx_obj]][name[idx_name]]] begin[:]
call[call[call[name[es_map]][constant[body]]][constant[mappings]]][call[call[name[idx_cls].es_defs][constant[kds_esDocType]]][constant[0]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b138e410>], [<ast.Call object at 0x7da1b138da50>]]
return[name[es_map]] | keyword[def] identifier[get_rdf_es_idx_map] ( identifier[cls] , identifier[idx_obj] ):
literal[string]
identifier[idx_name] = identifier[list] ( identifier[idx_obj] )[ literal[int] ]
identifier[es_map] ={
literal[string] : identifier[idx_name] ,
literal[string] :{
literal[string] :{},
literal[string] :{
literal[string] :{
literal[string] :{
literal[string] :{
literal[string] :{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int]
}
}
}
}
}
}
}
keyword[for] identifier[idx_cls] keyword[in] identifier[idx_obj] [ identifier[idx_name] ]:
identifier[es_map] [ literal[string] ][ literal[string] ][ identifier[idx_cls] . identifier[es_defs] [ literal[string] ][ literal[int] ]]={ literal[string] : identifier[idx_cls] . identifier[es_mapping] ( identifier[idx_cls] )}
keyword[return] identifier[es_map] | def get_rdf_es_idx_map(cls, idx_obj):
"""
Returns an elasticsearch mapping for the specified index based off
of the mapping defined by rdf class definitions
args:
idx_obj: Dictionary of the index and a list of rdfclasses
included in the mapping
"""
idx_name = list(idx_obj)[0] # "read_only_allow_delete": False,
# "blocks" : {
# "read_only_allow_delete" : "false"
# },
es_map = {'index': idx_name, 'body': {'mappings': {}, 'settings': {'index': {'analysis': {'analyzer': {'keylower': {'tokenizer': 'keyword', 'type': 'custom', 'filter': 'lowercase', 'ignore_above': 256}}}}}}}
for idx_cls in idx_obj[idx_name]: # pdb.set_trace()
es_map['body']['mappings'][idx_cls.es_defs['kds_esDocType'][0]] = {'properties': idx_cls.es_mapping(idx_cls)} # depends on [control=['for'], data=['idx_cls']]
return es_map |
def update(self, webhook_method=values.unset, webhook_filters=values.unset,
           pre_webhook_url=values.unset, post_webhook_url=values.unset,
           pre_webhook_retry_count=values.unset,
           post_webhook_retry_count=values.unset, target=values.unset):
    """
    Update the WebhookInstance

    :param unicode webhook_method: The HTTP method to be used when sending a webhook request.
    :param unicode webhook_filters: The list of webhook event triggers that are enabled for this Service.
    :param unicode pre_webhook_url: The absolute url the pre-event webhook request should be sent to.
    :param unicode post_webhook_url: The absolute url the post-event webhook request should be sent to.
    :param unicode pre_webhook_retry_count: The number of retries in case of pre-event webhook request failures.
    :param unicode post_webhook_retry_count: The number of retries in case of post-event webhook request failures.
    :param WebhookInstance.Target target: The routing target of the webhook.

    :returns: Updated WebhookInstance
    :rtype: twilio.rest.messaging.v1.webhook.WebhookInstance
    """
    # Assemble the POST parameter map, then issue the update request.
    field_map = {
        'WebhookMethod': webhook_method,
        'WebhookFilters': serialize.map(webhook_filters, lambda e: e),
        'PreWebhookUrl': pre_webhook_url,
        'PostWebhookUrl': post_webhook_url,
        'PreWebhookRetryCount': pre_webhook_retry_count,
        'PostWebhookRetryCount': post_webhook_retry_count,
        'Target': target,
    }
    payload = self._version.update('POST', self._uri, data=values.of(field_map))
    return WebhookInstance(self._version, payload)
constant[
Update the WebhookInstance
:param unicode webhook_method: The HTTP method to be used when sending a webhook request.
:param unicode webhook_filters: The list of webhook event triggers that are enabled for this Service.
:param unicode pre_webhook_url: The absolute url the pre-event webhook request should be sent to.
:param unicode post_webhook_url: The absolute url the post-event webhook request should be sent to.
:param unicode pre_webhook_retry_count: The number of retries in case of pre-event webhook request failures.
:param unicode post_webhook_retry_count: The number of retries in case of post-event webhook request failures.
:param WebhookInstance.Target target: The routing target of the webhook.
:returns: Updated WebhookInstance
:rtype: twilio.rest.messaging.v1.webhook.WebhookInstance
]
variable[data] assign[=] call[name[values].of, parameter[dictionary[[<ast.Constant object at 0x7da18dc9a230>, <ast.Constant object at 0x7da18dc9b730>, <ast.Constant object at 0x7da18dc9b130>, <ast.Constant object at 0x7da18dc9add0>, <ast.Constant object at 0x7da18dc99540>, <ast.Constant object at 0x7da18dc993f0>, <ast.Constant object at 0x7da18dc9a0b0>], [<ast.Name object at 0x7da18dc98730>, <ast.Call object at 0x7da18dc99510>, <ast.Name object at 0x7da18dc9bbe0>, <ast.Name object at 0x7da18dc9b9a0>, <ast.Name object at 0x7da18dc9b850>, <ast.Name object at 0x7da18dc99480>, <ast.Name object at 0x7da18dc98100>]]]]
variable[payload] assign[=] call[name[self]._version.update, parameter[constant[POST], name[self]._uri]]
return[call[name[WebhookInstance], parameter[name[self]._version, name[payload]]]] | keyword[def] identifier[update] ( identifier[self] , identifier[webhook_method] = identifier[values] . identifier[unset] , identifier[webhook_filters] = identifier[values] . identifier[unset] ,
identifier[pre_webhook_url] = identifier[values] . identifier[unset] , identifier[post_webhook_url] = identifier[values] . identifier[unset] ,
identifier[pre_webhook_retry_count] = identifier[values] . identifier[unset] ,
identifier[post_webhook_retry_count] = identifier[values] . identifier[unset] , identifier[target] = identifier[values] . identifier[unset] ):
literal[string]
identifier[data] = identifier[values] . identifier[of] ({
literal[string] : identifier[webhook_method] ,
literal[string] : identifier[serialize] . identifier[map] ( identifier[webhook_filters] , keyword[lambda] identifier[e] : identifier[e] ),
literal[string] : identifier[pre_webhook_url] ,
literal[string] : identifier[post_webhook_url] ,
literal[string] : identifier[pre_webhook_retry_count] ,
literal[string] : identifier[post_webhook_retry_count] ,
literal[string] : identifier[target] ,
})
identifier[payload] = identifier[self] . identifier[_version] . identifier[update] (
literal[string] ,
identifier[self] . identifier[_uri] ,
identifier[data] = identifier[data] ,
)
keyword[return] identifier[WebhookInstance] ( identifier[self] . identifier[_version] , identifier[payload] ,) | def update(self, webhook_method=values.unset, webhook_filters=values.unset, pre_webhook_url=values.unset, post_webhook_url=values.unset, pre_webhook_retry_count=values.unset, post_webhook_retry_count=values.unset, target=values.unset):
"""
Update the WebhookInstance
:param unicode webhook_method: The HTTP method to be used when sending a webhook request.
:param unicode webhook_filters: The list of webhook event triggers that are enabled for this Service.
:param unicode pre_webhook_url: The absolute url the pre-event webhook request should be sent to.
:param unicode post_webhook_url: The absolute url the post-event webhook request should be sent to.
:param unicode pre_webhook_retry_count: The number of retries in case of pre-event webhook request failures.
:param unicode post_webhook_retry_count: The number of retries in case of post-event webhook request failures.
:param WebhookInstance.Target target: The routing target of the webhook.
:returns: Updated WebhookInstance
:rtype: twilio.rest.messaging.v1.webhook.WebhookInstance
"""
data = values.of({'WebhookMethod': webhook_method, 'WebhookFilters': serialize.map(webhook_filters, lambda e: e), 'PreWebhookUrl': pre_webhook_url, 'PostWebhookUrl': post_webhook_url, 'PreWebhookRetryCount': pre_webhook_retry_count, 'PostWebhookRetryCount': post_webhook_retry_count, 'Target': target})
payload = self._version.update('POST', self._uri, data=data)
return WebhookInstance(self._version, payload) |
def _init_config(self):
    """Build the command's configuration from command-line arguments.

    Reads the options parsed by ``self.parser`` and either loads a
    configuration file or assembles a ``ConfigObject`` from the
    individual flags, then validates the result and stores it on
    ``self.config``.

    Prints help and exits with status 2 when neither a config file nor
    an input location is given.
    """
    options = self.parser.parse_args()
    if options.config is None and options.input is None:
        self.parser.print_help()
        sys.exit(2)
    if options.config is not None:
        configFactory = ConfigFactory()
        config = configFactory.load_from_file(options.config)
    else:
        config = ConfigObject()
    if options.input is not None:
        config["input"]["locations"] = [str(x) for x in options.input]
    if options.arguments is not None:
        # Each argument is "key=value"; split on the first "=" only and
        # partition each argument once instead of twice.
        parts = (x.partition("=") for x in options.arguments)
        config["input"]["arguments"] = {key: value for key, _, value in parts}
    if options.output is not None:
        config["output"]["location"] = options.output
    if options.no_validate is not None:
        config["input"]["validate"] = not options.no_validate
    if options.dry_run is not None:
        self.dry_run = options.dry_run
    if options.watch is not None:
        self.watch = options.watch
    if options.traceback is not None:
        self.traceback = options.traceback
    if options.quiet is not None:
        # NOTE(review): the flags below are compared against None, so they
        # are presumably declared with default=None — confirm in the parser.
        self.logger.setLevel(logging.WARNING)
    if options.silence is not None:
        logging.disable(logging.CRITICAL)
    configService = ConfigService()
    configService.validate(config)
    self.config = config
constant[return command's configuration from call's arguments
]
variable[options] assign[=] call[name[self].parser.parse_args, parameter[]]
if <ast.BoolOp object at 0x7da207f02d40> begin[:]
call[name[self].parser.print_help, parameter[]]
call[name[sys].exit, parameter[constant[2]]]
if compare[name[options].config is_not constant[None]] begin[:]
variable[configFactory] assign[=] call[name[ConfigFactory], parameter[]]
variable[config] assign[=] call[name[configFactory].load_from_file, parameter[name[options].config]]
if compare[name[options].input is_not constant[None]] begin[:]
call[call[name[config]][constant[input]]][constant[locations]] assign[=] <ast.ListComp object at 0x7da207f00100>
if compare[name[options].arguments is_not constant[None]] begin[:]
call[call[name[config]][constant[input]]][constant[arguments]] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da207f02b90>]]
if compare[name[options].output is_not constant[None]] begin[:]
call[call[name[config]][constant[output]]][constant[location]] assign[=] name[options].output
if compare[name[options].no_validate is_not constant[None]] begin[:]
call[call[name[config]][constant[input]]][constant[validate]] assign[=] <ast.UnaryOp object at 0x7da207f03d60>
if compare[name[options].dry_run is_not constant[None]] begin[:]
name[self].dry_run assign[=] name[options].dry_run
if compare[name[options].watch is_not constant[None]] begin[:]
name[self].watch assign[=] name[options].watch
if compare[name[options].traceback is_not constant[None]] begin[:]
name[self].traceback assign[=] name[options].traceback
if compare[name[options].quiet is_not constant[None]] begin[:]
call[name[self].logger.setLevel, parameter[name[logging].WARNING]]
if compare[name[options].silence is_not constant[None]] begin[:]
call[name[logging].disable, parameter[name[logging].CRITICAL]]
variable[configService] assign[=] call[name[ConfigService], parameter[]]
call[name[configService].validate, parameter[name[config]]]
name[self].config assign[=] name[config] | keyword[def] identifier[_init_config] ( identifier[self] ):
literal[string]
identifier[options] = identifier[self] . identifier[parser] . identifier[parse_args] ()
keyword[if] identifier[options] . identifier[config] keyword[is] keyword[None] keyword[and] identifier[options] . identifier[input] keyword[is] keyword[None] :
identifier[self] . identifier[parser] . identifier[print_help] ()
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[options] . identifier[config] keyword[is] keyword[not] keyword[None] :
identifier[configFactory] = identifier[ConfigFactory] ()
identifier[config] = identifier[configFactory] . identifier[load_from_file] ( identifier[options] . identifier[config] )
keyword[else] :
identifier[config] = identifier[ConfigObject] ()
keyword[if] identifier[options] . identifier[input] keyword[is] keyword[not] keyword[None] :
identifier[config] [ literal[string] ][ literal[string] ]=[ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[options] . identifier[input] ]
keyword[if] identifier[options] . identifier[arguments] keyword[is] keyword[not] keyword[None] :
identifier[config] [ literal[string] ][ literal[string] ]= identifier[dict] (( identifier[x] . identifier[partition] ( literal[string] )[ literal[int] ], identifier[x] . identifier[partition] ( literal[string] )[ literal[int] ]) keyword[for] identifier[x] keyword[in] identifier[options] . identifier[arguments] )
keyword[if] identifier[options] . identifier[output] keyword[is] keyword[not] keyword[None] :
identifier[config] [ literal[string] ][ literal[string] ]= identifier[options] . identifier[output]
keyword[if] identifier[options] . identifier[no_validate] keyword[is] keyword[not] keyword[None] :
identifier[config] [ literal[string] ][ literal[string] ]= keyword[not] identifier[options] . identifier[no_validate]
keyword[if] identifier[options] . identifier[dry_run] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[dry_run] = identifier[options] . identifier[dry_run]
keyword[if] identifier[options] . identifier[watch] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[watch] = identifier[options] . identifier[watch]
keyword[if] identifier[options] . identifier[traceback] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[traceback] = identifier[options] . identifier[traceback]
keyword[if] identifier[options] . identifier[quiet] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[logger] . identifier[setLevel] ( identifier[logging] . identifier[WARNING] )
keyword[if] identifier[options] . identifier[silence] keyword[is] keyword[not] keyword[None] :
identifier[logging] . identifier[disable] ( identifier[logging] . identifier[CRITICAL] )
identifier[configService] = identifier[ConfigService] ()
identifier[configService] . identifier[validate] ( identifier[config] )
identifier[self] . identifier[config] = identifier[config] | def _init_config(self):
"""return command's configuration from call's arguments
"""
options = self.parser.parse_args()
if options.config is None and options.input is None:
self.parser.print_help()
sys.exit(2) # depends on [control=['if'], data=[]]
if options.config is not None:
configFactory = ConfigFactory()
config = configFactory.load_from_file(options.config) # depends on [control=['if'], data=[]]
else:
config = ConfigObject()
if options.input is not None:
config['input']['locations'] = [str(x) for x in options.input] # depends on [control=['if'], data=[]]
if options.arguments is not None:
config['input']['arguments'] = dict(((x.partition('=')[0], x.partition('=')[2]) for x in options.arguments)) # depends on [control=['if'], data=[]]
if options.output is not None:
config['output']['location'] = options.output # depends on [control=['if'], data=[]]
if options.no_validate is not None:
config['input']['validate'] = not options.no_validate # depends on [control=['if'], data=[]]
if options.dry_run is not None:
self.dry_run = options.dry_run # depends on [control=['if'], data=[]]
if options.watch is not None:
self.watch = options.watch # depends on [control=['if'], data=[]]
if options.traceback is not None:
self.traceback = options.traceback # depends on [control=['if'], data=[]]
if options.quiet is not None:
self.logger.setLevel(logging.WARNING) # depends on [control=['if'], data=[]]
if options.silence is not None:
logging.disable(logging.CRITICAL) # depends on [control=['if'], data=[]]
configService = ConfigService()
configService.validate(config)
self.config = config |
def hicexplorer_create_plot(self, pKeyList, pTitle, pId):
    """Create the graphics containing information about the read quality.

    Builds a MultiQC bar graph with one category per key in ``pKeyList``
    (coloured from ``self.colors`` in list order) and one bar per sample
    found in ``self.mod_data``.
    """
    # One plot category per requested key, coloured in list order.
    keys = OrderedDict(
        (key, {'color': self.colors[idx]})
        for idx, key in enumerate(pKeyList)
    )

    # Collect the first value of each requested key for every sample,
    # keyed by the sample's file name.
    data = {}
    for sample in self.mod_data:
        entry = self.mod_data[sample]
        label = '{}'.format(entry['File'][0])
        data[label] = {key: entry[key][0] for key in pKeyList}

    config = {
        'id': 'hicexplorer_' + pId + '_plot',
        'title': pTitle,
        'ylab': 'Number of Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    return bargraph.plot(data, keys, config)
constant[Create the graphics containing information about the read quality.]
variable[keys] assign[=] call[name[OrderedDict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da207f00a90>, <ast.Name object at 0x7da207f039a0>]]] in starred[call[name[enumerate], parameter[name[pKeyList]]]] begin[:]
call[name[keys]][name[key_]] assign[=] dictionary[[<ast.Constant object at 0x7da207f02ec0>], [<ast.Subscript object at 0x7da207f03250>]]
variable[data] assign[=] dictionary[[], []]
for taget[name[data_]] in starred[name[self].mod_data] begin[:]
call[name[data]][call[constant[{}].format, parameter[call[call[call[name[self].mod_data][name[data_]]][constant[File]]][constant[0]]]]] assign[=] dictionary[[], []]
for taget[name[key_]] in starred[name[pKeyList]] begin[:]
call[call[name[data]][call[constant[{}].format, parameter[call[call[call[name[self].mod_data][name[data_]]][constant[File]]][constant[0]]]]]][name[key_]] assign[=] call[call[call[name[self].mod_data][name[data_]]][name[key_]]][constant[0]]
variable[config] assign[=] dictionary[[<ast.Constant object at 0x7da207f034f0>, <ast.Constant object at 0x7da207f02bf0>, <ast.Constant object at 0x7da207f02c80>, <ast.Constant object at 0x7da207f028c0>], [<ast.BinOp object at 0x7da207f03640>, <ast.Name object at 0x7da207f00790>, <ast.Constant object at 0x7da207f00f70>, <ast.Constant object at 0x7da207f02290>]]
return[call[name[bargraph].plot, parameter[name[data], name[keys], name[config]]]] | keyword[def] identifier[hicexplorer_create_plot] ( identifier[self] , identifier[pKeyList] , identifier[pTitle] , identifier[pId] ):
literal[string]
identifier[keys] = identifier[OrderedDict] ()
keyword[for] identifier[i] , identifier[key_] keyword[in] identifier[enumerate] ( identifier[pKeyList] ):
identifier[keys] [ identifier[key_] ]={ literal[string] : identifier[self] . identifier[colors] [ identifier[i] ]}
identifier[data] ={}
keyword[for] identifier[data_] keyword[in] identifier[self] . identifier[mod_data] :
identifier[data] [ literal[string] . identifier[format] ( identifier[self] . identifier[mod_data] [ identifier[data_] ][ literal[string] ][ literal[int] ])]={}
keyword[for] identifier[key_] keyword[in] identifier[pKeyList] :
identifier[data] [ literal[string] . identifier[format] ( identifier[self] . identifier[mod_data] [ identifier[data_] ][ literal[string] ][ literal[int] ])][ identifier[key_] ]= identifier[self] . identifier[mod_data] [ identifier[data_] ][ identifier[key_] ][ literal[int] ]
identifier[config] ={
literal[string] : literal[string] + identifier[pId] + literal[string] ,
literal[string] : identifier[pTitle] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[return] identifier[bargraph] . identifier[plot] ( identifier[data] , identifier[keys] , identifier[config] ) | def hicexplorer_create_plot(self, pKeyList, pTitle, pId):
"""Create the graphics containing information about the read quality."""
keys = OrderedDict()
for (i, key_) in enumerate(pKeyList):
keys[key_] = {'color': self.colors[i]} # depends on [control=['for'], data=[]]
data = {}
for data_ in self.mod_data:
data['{}'.format(self.mod_data[data_]['File'][0])] = {}
for key_ in pKeyList:
data['{}'.format(self.mod_data[data_]['File'][0])][key_] = self.mod_data[data_][key_][0] # depends on [control=['for'], data=['key_']] # depends on [control=['for'], data=['data_']]
config = {'id': 'hicexplorer_' + pId + '_plot', 'title': pTitle, 'ylab': 'Number of Reads', 'cpswitch_counts_label': 'Number of Reads'}
return bargraph.plot(data, keys, config) |
def parse(self, stream, media_type=None, parser_context=None):
    """
    Parses the incoming bytestream as a URL encoded form,
    and returns the resulting QueryDict.
    """
    context = parser_context or {}
    # Fall back to the project's default charset when no encoding is given.
    charset = context.get('encoding', settings.DEFAULT_CHARSET)
    return QueryDict(stream.read(), encoding=charset)
constant[
Parses the incoming bytestream as a URL encoded form,
and returns the resulting QueryDict.
]
variable[parser_context] assign[=] <ast.BoolOp object at 0x7da18fe92c80>
variable[encoding] assign[=] call[name[parser_context].get, parameter[constant[encoding], name[settings].DEFAULT_CHARSET]]
variable[data] assign[=] call[name[QueryDict], parameter[call[name[stream].read, parameter[]]]]
return[name[data]] | keyword[def] identifier[parse] ( identifier[self] , identifier[stream] , identifier[media_type] = keyword[None] , identifier[parser_context] = keyword[None] ):
literal[string]
identifier[parser_context] = identifier[parser_context] keyword[or] {}
identifier[encoding] = identifier[parser_context] . identifier[get] ( literal[string] , identifier[settings] . identifier[DEFAULT_CHARSET] )
identifier[data] = identifier[QueryDict] ( identifier[stream] . identifier[read] (), identifier[encoding] = identifier[encoding] )
keyword[return] identifier[data] | def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as a URL encoded form,
and returns the resulting QueryDict.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
data = QueryDict(stream.read(), encoding=encoding)
return data |
def iterator(plugins, context):
    """An iterator for plug-in and instance pairs.

    Yields ``(plugin, instance)`` pairs for instance-enabled plug-ins,
    or ``(plugin, None)`` otherwise. Iteration ends early as soon as
    the registered logic test reports a message for the current order.
    """
    test = pyblish.logic.registered_test()
    state = {
        "nextOrder": None,
        "ordersWithError": set()
    }

    for plugin in plugins:
        state["nextOrder"] = plugin.order

        message = test(**state)
        if message:
            # Bug fix: raising StopIteration inside a generator is
            # converted to RuntimeError under PEP 479 (Python 3.7+).
            # A plain return ends the generator cleanly instead.
            return

        instances = pyblish.api.instances_by_plugin(context, plugin)
        if plugin.__instanceEnabled__:
            for instance in instances:
                yield plugin, instance
        else:
            yield plugin, None
constant[An iterator for plug-in and instance pairs]
variable[test] assign[=] call[name[pyblish].logic.registered_test, parameter[]]
variable[state] assign[=] dictionary[[<ast.Constant object at 0x7da1b0882290>, <ast.Constant object at 0x7da1b08827a0>], [<ast.Constant object at 0x7da1b0883070>, <ast.Call object at 0x7da1b08822c0>]]
for taget[name[plugin]] in starred[name[plugins]] begin[:]
call[name[state]][constant[nextOrder]] assign[=] name[plugin].order
variable[message] assign[=] call[name[test], parameter[]]
if name[message] begin[:]
<ast.Raise object at 0x7da1b0882f20>
variable[instances] assign[=] call[name[pyblish].api.instances_by_plugin, parameter[name[context], name[plugin]]]
if name[plugin].__instanceEnabled__ begin[:]
for taget[name[instance]] in starred[name[instances]] begin[:]
<ast.Yield object at 0x7da1b0881390> | keyword[def] identifier[iterator] ( identifier[plugins] , identifier[context] ):
literal[string]
identifier[test] = identifier[pyblish] . identifier[logic] . identifier[registered_test] ()
identifier[state] ={
literal[string] : keyword[None] ,
literal[string] : identifier[set] ()
}
keyword[for] identifier[plugin] keyword[in] identifier[plugins] :
identifier[state] [ literal[string] ]= identifier[plugin] . identifier[order]
identifier[message] = identifier[test] (** identifier[state] )
keyword[if] identifier[message] :
keyword[raise] identifier[StopIteration] ( literal[string] % identifier[message] )
identifier[instances] = identifier[pyblish] . identifier[api] . identifier[instances_by_plugin] ( identifier[context] , identifier[plugin] )
keyword[if] identifier[plugin] . identifier[__instanceEnabled__] :
keyword[for] identifier[instance] keyword[in] identifier[instances] :
keyword[yield] identifier[plugin] , identifier[instance]
keyword[else] :
keyword[yield] identifier[plugin] , keyword[None] | def iterator(plugins, context):
"""An iterator for plug-in and instance pairs"""
test = pyblish.logic.registered_test()
state = {'nextOrder': None, 'ordersWithError': set()}
for plugin in plugins:
state['nextOrder'] = plugin.order
message = test(**state)
if message:
raise StopIteration('Stopped due to %s' % message) # depends on [control=['if'], data=[]]
instances = pyblish.api.instances_by_plugin(context, plugin)
if plugin.__instanceEnabled__:
for instance in instances:
yield (plugin, instance) # depends on [control=['for'], data=['instance']] # depends on [control=['if'], data=[]]
else:
yield (plugin, None) # depends on [control=['for'], data=['plugin']] |
def get_physical_drive_by_id(self, id):
    """Get a PhysicalDrive object for given id.

    This method examines both assigned and unassigned physical
    drives of the controller and returns the physical drive.

    :param id: id of physical drive, for example '5I:1:1'.
    :returns: PhysicalDrive object having the id, or None if
        physical drive is not found.
    """
    # Search unassigned drives first, then every drive inside each
    # RAID array, preserving the original lookup order.
    drive_groups = [self.unassigned_physical_drives]
    for array in self.raid_arrays:
        drive_groups.append(array.physical_drives)

    for group in drive_groups:
        for drive in group:
            if drive.id == id:
                return drive
    return None
constant[Get a PhysicalDrive object for given id.
This method examines both assigned and unassigned physical
drives of the controller and returns the physical drive.
:param id: id of physical drive, for example '5I:1:1'.
:returns: PhysicalDrive object having the id, or None if
physical drive is not found.
]
for taget[name[phy_drive]] in starred[name[self].unassigned_physical_drives] begin[:]
if compare[name[phy_drive].id equal[==] name[id]] begin[:]
return[name[phy_drive]]
for taget[name[array]] in starred[name[self].raid_arrays] begin[:]
for taget[name[phy_drive]] in starred[name[array].physical_drives] begin[:]
if compare[name[phy_drive].id equal[==] name[id]] begin[:]
return[name[phy_drive]]
return[constant[None]] | keyword[def] identifier[get_physical_drive_by_id] ( identifier[self] , identifier[id] ):
literal[string]
keyword[for] identifier[phy_drive] keyword[in] identifier[self] . identifier[unassigned_physical_drives] :
keyword[if] identifier[phy_drive] . identifier[id] == identifier[id] :
keyword[return] identifier[phy_drive]
keyword[for] identifier[array] keyword[in] identifier[self] . identifier[raid_arrays] :
keyword[for] identifier[phy_drive] keyword[in] identifier[array] . identifier[physical_drives] :
keyword[if] identifier[phy_drive] . identifier[id] == identifier[id] :
keyword[return] identifier[phy_drive]
keyword[return] keyword[None] | def get_physical_drive_by_id(self, id):
"""Get a PhysicalDrive object for given id.
This method examines both assigned and unassigned physical
drives of the controller and returns the physical drive.
:param id: id of physical drive, for example '5I:1:1'.
:returns: PhysicalDrive object having the id, or None if
physical drive is not found.
"""
for phy_drive in self.unassigned_physical_drives:
if phy_drive.id == id:
return phy_drive # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['phy_drive']]
for array in self.raid_arrays:
for phy_drive in array.physical_drives:
if phy_drive.id == id:
return phy_drive # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['phy_drive']] # depends on [control=['for'], data=['array']]
return None |
def local_method(f):
    '''Decorator to be used in conjunction with :class:`LocalMixin` methods.
    '''
    name = f.__name__

    def cached(self, *args):
        store = self.local
        # Compute the value at most once per local storage, then reuse it.
        if name not in store:
            setattr(store, name, f(self, *args))
        return getattr(store, name)

    return cached
constant[Decorator to be used in conjunction with :class:`LocalMixin` methods.
]
variable[name] assign[=] name[f].__name__
def function[_, parameter[self]]:
variable[local] assign[=] name[self].local
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[local]] begin[:]
call[name[setattr], parameter[name[local], name[name], call[name[f], parameter[name[self], <ast.Starred object at 0x7da18eb572b0>]]]]
return[call[name[getattr], parameter[name[local], name[name]]]]
return[name[_]] | keyword[def] identifier[local_method] ( identifier[f] ):
literal[string]
identifier[name] = identifier[f] . identifier[__name__]
keyword[def] identifier[_] ( identifier[self] ,* identifier[args] ):
identifier[local] = identifier[self] . identifier[local]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[local] :
identifier[setattr] ( identifier[local] , identifier[name] , identifier[f] ( identifier[self] ,* identifier[args] ))
keyword[return] identifier[getattr] ( identifier[local] , identifier[name] )
keyword[return] identifier[_] | def local_method(f):
"""Decorator to be used in conjunction with :class:`LocalMixin` methods.
"""
name = f.__name__
def _(self, *args):
local = self.local
if name not in local:
setattr(local, name, f(self, *args)) # depends on [control=['if'], data=['name', 'local']]
return getattr(local, name)
return _ |
def bar(h: Histogram2D, *,
        barmode: str = DEFAULT_BARMODE,
        alpha: float = DEFAULT_ALPHA,
        **kwargs):
    """Bar plot.

    Parameters
    ----------
    alpha: Opacity (0.0 - 1.0)
    barmode : "overlay" | "group" | "stack"
    """
    # Split off the data-extraction options; the rest is forwarded to go.Bar.
    get_data_kwargs = pop_many(kwargs, "density", "cumulative", "flatten")

    traces = []
    for histogram in h:
        trace = go.Bar(
            x=histogram.bin_centers,
            y=get_data(histogram, **get_data_kwargs),
            width=histogram.bin_widths,
            name=histogram.name,
            opacity=alpha,
            **kwargs
        )
        traces.append(trace)

    layout = go.Layout(barmode=barmode)
    # Axis ticks are derived from the first histogram's binning.
    _add_ticks(layout.xaxis, h[0], kwargs)
    return go.Figure(data=traces, layout=layout)
constant[Bar plot.
Parameters
----------
alpha: Opacity (0.0 - 1.0)
barmode : "overlay" | "group" | "stack"
]
variable[get_data_kwargs] assign[=] call[name[pop_many], parameter[name[kwargs], constant[density], constant[cumulative], constant[flatten]]]
variable[data] assign[=] <ast.ListComp object at 0x7da207f9a7d0>
variable[layout] assign[=] call[name[go].Layout, parameter[]]
call[name[_add_ticks], parameter[name[layout].xaxis, call[name[h]][constant[0]], name[kwargs]]]
variable[figure] assign[=] call[name[go].Figure, parameter[]]
return[name[figure]] | keyword[def] identifier[bar] ( identifier[h] : identifier[Histogram2D] ,*,
identifier[barmode] : identifier[str] = identifier[DEFAULT_BARMODE] ,
identifier[alpha] : identifier[float] = identifier[DEFAULT_ALPHA] ,
** identifier[kwargs] ):
literal[string]
identifier[get_data_kwargs] = identifier[pop_many] ( identifier[kwargs] , literal[string] , literal[string] , literal[string] )
identifier[data] =[ identifier[go] . identifier[Bar] (
identifier[x] = identifier[histogram] . identifier[bin_centers] ,
identifier[y] = identifier[get_data] ( identifier[histogram] ,** identifier[get_data_kwargs] ),
identifier[width] = identifier[histogram] . identifier[bin_widths] ,
identifier[name] = identifier[histogram] . identifier[name] ,
identifier[opacity] = identifier[alpha] ,
** identifier[kwargs]
) keyword[for] identifier[histogram] keyword[in] identifier[h] ]
identifier[layout] = identifier[go] . identifier[Layout] ( identifier[barmode] = identifier[barmode] )
identifier[_add_ticks] ( identifier[layout] . identifier[xaxis] , identifier[h] [ literal[int] ], identifier[kwargs] )
identifier[figure] = identifier[go] . identifier[Figure] ( identifier[data] = identifier[data] , identifier[layout] = identifier[layout] )
keyword[return] identifier[figure] | def bar(h: Histogram2D, *, barmode: str=DEFAULT_BARMODE, alpha: float=DEFAULT_ALPHA, **kwargs):
"""Bar plot.
Parameters
----------
alpha: Opacity (0.0 - 1.0)
barmode : "overlay" | "group" | "stack"
"""
get_data_kwargs = pop_many(kwargs, 'density', 'cumulative', 'flatten')
data = [go.Bar(x=histogram.bin_centers, y=get_data(histogram, **get_data_kwargs), width=histogram.bin_widths, name=histogram.name, opacity=alpha, **kwargs) for histogram in h]
layout = go.Layout(barmode=barmode)
_add_ticks(layout.xaxis, h[0], kwargs)
figure = go.Figure(data=data, layout=layout)
return figure |
def set_value(self, value):
    """
    Sets the user value (mode) of the choice. Like for Symbol.set_value(),
    the visibility might truncate the value. Choices without the 'optional'
    attribute (is_optional) can never be in n mode, but 0/"n" is still
    accepted since it's not a malformed value (though it will have no
    effect).

    Returns True if the value is valid for the type of the choice, and
    False otherwise. This only looks at the form of the value. Check the
    Choice.assignable attribute to see what values are currently in range
    and would actually be reflected in the mode of the choice.
    """
    if value == self.user_value:
        # A value that was set successfully before must still be valid
        self._was_set = True
        return True

    valid = \
        (self.orig_type is BOOL and value in (2, 0, "y", "n")) or \
        (self.orig_type is TRISTATE and value in (2, 1, 0, "y", "m", "n"))

    if not valid:
        # Display tristate values as n, m, y in the warning
        shown = TRI_TO_STR[value] if value in (0, 1, 2) else \
            "'{}'".format(value)
        self.kconfig._warn(
            "the value {} is invalid for {}, which has type {} -- "
            "assignment ignored"
            .format(shown, _name_and_loc(self),
                    TYPE_TO_STR[self.orig_type]))
        return False

    # Normalize string values to their tristate equivalents
    if value in ("y", "m", "n"):
        value = STR_TO_TRI[value]

    self.user_value = value
    self._was_set = True
    self._rec_invalidate()
    return True
constant[
Sets the user value (mode) of the choice. Like for Symbol.set_value(),
the visibility might truncate the value. Choices without the 'optional'
attribute (is_optional) can never be in n mode, but 0/"n" is still
accepted since it's not a malformed value (though it will have no
effect).
Returns True if the value is valid for the type of the choice, and
False otherwise. This only looks at the form of the value. Check the
Choice.assignable attribute to see what values are currently in range
and would actually be reflected in the mode of the choice.
]
if compare[name[value] equal[==] name[self].user_value] begin[:]
name[self]._was_set assign[=] constant[True]
return[constant[True]]
if <ast.UnaryOp object at 0x7da1b20b57b0> begin[:]
call[name[self].kconfig._warn, parameter[call[constant[the value {} is invalid for {}, which has type {} -- assignment ignored].format, parameter[<ast.IfExp object at 0x7da1b20b4a00>, call[name[_name_and_loc], parameter[name[self]]], call[name[TYPE_TO_STR]][name[self].orig_type]]]]]
return[constant[False]]
if compare[name[value] in tuple[[<ast.Constant object at 0x7da1b20b4070>, <ast.Constant object at 0x7da1b20b6620>, <ast.Constant object at 0x7da1b20b60b0>]]] begin[:]
variable[value] assign[=] call[name[STR_TO_TRI]][name[value]]
name[self].user_value assign[=] name[value]
name[self]._was_set assign[=] constant[True]
call[name[self]._rec_invalidate, parameter[]]
return[constant[True]] | keyword[def] identifier[set_value] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] == identifier[self] . identifier[user_value] :
identifier[self] . identifier[_was_set] = keyword[True]
keyword[return] keyword[True]
keyword[if] keyword[not] (( identifier[self] . identifier[orig_type] keyword[is] identifier[BOOL] keyword[and] identifier[value] keyword[in] ( literal[int] , literal[int] , literal[string] , literal[string] )) keyword[or]
( identifier[self] . identifier[orig_type] keyword[is] identifier[TRISTATE] keyword[and] identifier[value] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[string] , literal[string] , literal[string] ))):
identifier[self] . identifier[kconfig] . identifier[_warn] (
literal[string]
literal[string]
. identifier[format] ( identifier[TRI_TO_STR] [ identifier[value] ] keyword[if] identifier[value] keyword[in] ( literal[int] , literal[int] , literal[int] ) keyword[else]
literal[string] . identifier[format] ( identifier[value] ),
identifier[_name_and_loc] ( identifier[self] ),
identifier[TYPE_TO_STR] [ identifier[self] . identifier[orig_type] ]))
keyword[return] keyword[False]
keyword[if] identifier[value] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[value] = identifier[STR_TO_TRI] [ identifier[value] ]
identifier[self] . identifier[user_value] = identifier[value]
identifier[self] . identifier[_was_set] = keyword[True]
identifier[self] . identifier[_rec_invalidate] ()
keyword[return] keyword[True] | def set_value(self, value):
"""
Sets the user value (mode) of the choice. Like for Symbol.set_value(),
the visibility might truncate the value. Choices without the 'optional'
attribute (is_optional) can never be in n mode, but 0/"n" is still
accepted since it's not a malformed value (though it will have no
effect).
Returns True if the value is valid for the type of the choice, and
False otherwise. This only looks at the form of the value. Check the
Choice.assignable attribute to see what values are currently in range
and would actually be reflected in the mode of the choice.
"""
if value == self.user_value:
# We know the value must be valid if it was successfully set
# previously
self._was_set = True
return True # depends on [control=['if'], data=[]]
if not (self.orig_type is BOOL and value in (2, 0, 'y', 'n') or (self.orig_type is TRISTATE and value in (2, 1, 0, 'y', 'm', 'n'))):
# Display tristate values as n, m, y in the warning
self.kconfig._warn('the value {} is invalid for {}, which has type {} -- assignment ignored'.format(TRI_TO_STR[value] if value in (0, 1, 2) else "'{}'".format(value), _name_and_loc(self), TYPE_TO_STR[self.orig_type]))
return False # depends on [control=['if'], data=[]]
if value in ('y', 'm', 'n'):
value = STR_TO_TRI[value] # depends on [control=['if'], data=['value']]
self.user_value = value
self._was_set = True
self._rec_invalidate()
return True |
def get_help_datapacks(module_name, server_prefix):
    """
    Get the help datapacks for a module

    Args:
        module_name (str): The module to get help data for
        server_prefix (str): The command prefix for this server

    Returns:
        datapacks (list): The help datapacks for the module
    """
    _dir = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))
    # Modules live one directory above this file. (Bug fix: a stray
    # third "_help.json" argument was previously passed to a
    # two-placeholder format string and silently ignored.)
    module_dir = "{}/../{}".format(_dir, module_name)
    if os.path.isdir(module_dir):
        module_help_path = "{}/{}".format(module_dir, "_help.json")
        if os.path.isfile(module_help_path):
            return helptools.get_help_datapacks(module_help_path, server_prefix)
        return [("Help", "{} does not have a help.json file".format(module_name), False)]
    return [("Help", "No module found called {}".format(module_name), False)]
constant[
Get the help datapacks for a module
Args:
module_name (str): The module to get help data for
server_prefix (str): The command prefix for this server
Returns:
datapacks (list): The help datapacks for the module
]
variable[_dir] assign[=] call[name[os].path.realpath, parameter[call[name[os].path.join, parameter[call[name[os].getcwd, parameter[]], call[name[os].path.dirname, parameter[name[__file__]]]]]]]
variable[module_dir] assign[=] call[constant[{}/../{}].format, parameter[name[_dir], name[module_name], constant[_help.json]]]
if call[name[os].path.isdir, parameter[name[module_dir]]] begin[:]
variable[module_help_path] assign[=] call[constant[{}/{}].format, parameter[name[module_dir], constant[_help.json]]]
if call[name[os].path.isfile, parameter[name[module_help_path]]] begin[:]
return[call[name[helptools].get_help_datapacks, parameter[name[module_help_path], name[server_prefix]]]] | keyword[def] identifier[get_help_datapacks] ( identifier[module_name] , identifier[server_prefix] ):
literal[string]
identifier[_dir] = identifier[os] . identifier[path] . identifier[realpath] (
identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] )))
identifier[module_dir] = literal[string] . identifier[format] ( identifier[_dir] , identifier[module_name] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[module_dir] ):
identifier[module_help_path] = literal[string] . identifier[format] ( identifier[module_dir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[module_help_path] ):
keyword[return] identifier[helptools] . identifier[get_help_datapacks] ( identifier[module_help_path] , identifier[server_prefix] )
keyword[else] :
keyword[return] [( literal[string] , literal[string] . identifier[format] ( identifier[module_name] ), keyword[False] )]
keyword[else] :
keyword[return] [( literal[string] , literal[string] . identifier[format] ( identifier[module_name] ), keyword[False] )] | def get_help_datapacks(module_name, server_prefix):
"""
Get the help datapacks for a module
Args:
module_name (str): The module to get help data for
server_prefix (str): The command prefix for this server
Returns:
datapacks (list): The help datapacks for the module
"""
_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
module_dir = '{}/../{}'.format(_dir, module_name, '_help.json')
if os.path.isdir(module_dir):
module_help_path = '{}/{}'.format(module_dir, '_help.json')
if os.path.isfile(module_help_path):
return helptools.get_help_datapacks(module_help_path, server_prefix) # depends on [control=['if'], data=[]]
else:
return [('Help', '{} does not have a help.json file'.format(module_name), False)] # depends on [control=['if'], data=[]]
else:
return [('Help', 'No module found called {}'.format(module_name), False)] |
def generate_usgs_avg_daily_flows_opt(self,
                                      reach_id_gage_id_file,
                                      start_datetime,
                                      end_datetime,
                                      out_streamflow_file,
                                      out_stream_id_file):
    """
    Generate daily streamflow file and stream id file required for
    calibration or for substituting flows based on USGS gage ids
    associated with stream ids.

    Parameters
    ----------
    reach_id_gage_id_file: str
        Path to reach_id_gage_id file.
    start_datetime: datetime
        A datetime object with the start date to download data.
    end_datetime: datetime
        A datetime object with the end date to download data.
    out_streamflow_file: str
        The path to output the streamflow file for RAPID.
    out_stream_id_file: str
        The path to output the stream ID file associated with the
        streamflow file for RAPID.

    Example *reach_id_gage_id_file*::

        COMID, USGS_GAGE_ID
        2000, 503944
        ...

    .. warning:: Overuse will get you blocked from downloading data from
                 USGS.

    .. warning:: This code does not clean the data in any way. Thus, you
                 are likely to run into issues if you simply use the raw
                 data.

    .. warning:: The code skips gages that do not have data
                 for the entire time period.

    Simple Example:

    .. code:: python

        import datetime
        from os.path import join
        from RAPIDpy import RAPID

        main_path = "/home/username/data"

        rapid_manager = RAPID()
        rapid_manager.generate_usgs_avg_daily_flows_opt(
            reach_id_gage_id_file=join(main_path, "usgsgage_id_comid.csv"),
            start_datetime=datetime.datetime(2000,1,1),
            end_datetime=datetime.datetime(2014,12,31),
            out_streamflow_file=join(main_path,"streamflow_2000_2014.csv"),
            out_stream_id_file=join(main_path,"streamid_2000_2014.csv")
        )

    Complex Example:

    .. code:: python

        import datetime
        from os.path import join
        from RAPIDpy import RAPID

        main_path = "/home/username/data"

        rapid_manager = RAPID(
            rapid_executable_location='~/work/rapid/run/rapid'
            use_all_processors=True,
            ZS_TauR=24*3600,
            ZS_dtR=15*60,
            ZS_TauM=365*24*3600,
            ZS_dtM=24*3600
        )

        rapid_manager.update_parameters(
            rapid_connect_file='../rapid-io/input/rapid_connect.csv',
            Vlat_file='../rapid-io/input/m3_riv.nc',
            riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
            k_file='../rapid-io/input/k.csv',
            x_file='../rapid-io/input/x.csv',
            Qout_file='../rapid-io/output/Qout.nc',
        )

        rapid_manager.update_reach_number_data()
        rapid_manager.update_simulation_runtime()
        rapid_manager.generate_usgs_avg_daily_flows_opt(
            reach_id_gage_id_file=join(main_path, "usgsgage_id_comid.csv"),
            start_datetime=datetime.datetime(2000,1,1),
            end_datetime=datetime.datetime(2014,12,31),
            out_streamflow_file=join(main_path,"streamflow_2000_2014.csv"),
            out_stream_id_file=join(main_path,"streamid_2000_2014.csv")
        )
        rapid_manager.run()
    """
    # Bug fix: the same startup message was previously logged twice.
    log("Generating avg streamflow file and stream id file "
        "required for calibration ...",
        "INFO")

    reach_id_gage_id_list = csv_to_list(reach_id_gage_id_file)
    gage_data_matrix = []
    valid_comid_list = []

    # add extra day as it includes the start date
    # (e.g. 7-5 is 2 days, but have data for 5,6,7, so +1)
    num_days_needed = (end_datetime - start_datetime).days + 1

    # USGS station ids are zero-padded to 8 characters.
    gage_id_list = []
    for row in reach_id_gage_id_list[1:]:
        station_id = row[1]
        if len(row[1]) == 7:
            station_id = '0' + row[1]
        gage_id_list.append(station_id)

    num_gage_id_list = np.array(gage_id_list, dtype=np.int32)

    log("Querying Server for Data ...",
        "INFO")
    query_params = {
        'format': 'json',
        'sites': ",".join(gage_id_list),
        'startDT': start_datetime.strftime("%Y-%m-%d"),
        'endDT': end_datetime.strftime("%Y-%m-%d"),
        'parameterCd': '00060',  # streamflow
        'statCd': '00003'  # average
    }
    response = get("http://waterservices.usgs.gov/nwis/dv",
                   params=query_params)

    if not response.ok:
        log("USGS query error ...",
            "WARNING")
        return

    requested_data = None
    try:
        requested_data = response.json()['value']['timeSeries']
    except (IndexError, KeyError):
        # Bug fix: dict access raises KeyError, which the original
        # `except IndexError` could never catch; treat an empty or
        # malformed payload as "no data".
        pass

    if requested_data is not None:
        # Bug fix: the original iterated `enumerate(requested_data)` and
        # then indexed `time_series[1]` everywhere; iterate directly.
        for time_series in requested_data:
            usgs_station_full_name = time_series['name']
            usgs_station_id = usgs_station_full_name.split(":")[1]
            gage_data = []
            for time_step in time_series['values'][0]['value']:
                local_datetime = parse(time_step['dateTime'])
                if local_datetime > end_datetime:
                    break

                if local_datetime >= start_datetime:
                    if not time_step['value']:
                        log("MISSING DATA for USGS Station {0} {1} {2}"
                            .format(usgs_station_id,
                                    local_datetime,
                                    time_step['value']),
                            "WARNING")
                    # convert cfs to cms
                    gage_data.append(
                        float(time_step['value']) / 35.3146667)

            try:
                # get where streamids associated with USGS station ID
                streamid_index = \
                    np.where(num_gage_id_list ==
                             int(float(usgs_station_id)))[0][0] + 1
            except (IndexError, ValueError):
                log("USGS Station {0} not found in list ..."
                    .format(usgs_station_id),
                    "WARNING")
                raise

            if len(gage_data) == num_days_needed:
                gage_data_matrix.append(gage_data)
                valid_comid_list.append(
                    reach_id_gage_id_list[streamid_index][0])
            else:
                log("StreamID {0} USGS Station {1} MISSING {2} "
                    "DATA VALUES".format(
                        reach_id_gage_id_list[streamid_index][0],
                        usgs_station_id,
                        num_days_needed - len(gage_data)),
                    "WARNING")

    if gage_data_matrix and valid_comid_list:
        log("Writing Output ...",
            "INFO")
        # Transpose so each output row is one day across all stations.
        np_array = np.array(gage_data_matrix).transpose()
        with open_csv(out_streamflow_file, 'w') as gage_data:
            wgd = csvwriter(gage_data)
            for row in np_array:
                wgd.writerow(row)

        with open_csv(out_stream_id_file, 'w') as comid_data:
            wcd = csvwriter(comid_data)
            for row in valid_comid_list:
                wcd.writerow([int(float(row))])

        # set parameters for RAPID run
        self.IS_obs_tot = len(valid_comid_list)
        self.obs_tot_id_file = out_stream_id_file
        self.Qobs_file = out_streamflow_file
        self.IS_obs_use = len(valid_comid_list)
        self.obs_use_id_file = out_stream_id_file
    else:
        log("No valid data returned ...",
            "WARNING")
constant[
Generate daily streamflow file and stream id file required for
calibration or for substituting flows based on USGS gage ids
associated with stream ids.
Parameters
----------
reach_id_gage_id_file: str
Path to reach_id_gage_id file.
start_datetime: datetime
A datetime object with the start date to download data.
end_datetime: datetime
A datetime object with the end date to download data.
out_streamflow_file: str
The path to output the streamflow file for RAPID.
out_stream_id_file: str
The path to output the stream ID file associated with the
streamflow file for RAPID.
Example *reach_id_gage_id_file*::
COMID, USGS_GAGE_ID
2000, 503944
...
.. warning:: Overuse will get you blocked from downloading data from
USGS.
.. warning:: This code does not clean the data in any way. Thus, you
are likely to run into issues if you simply use the raw
data.
.. warning:: The code skips gages that do not have data
for the entire time period.
Simple Example:
.. code:: python
import datetime
from os.path import join
from RAPIDpy import RAPID
main_path = "/home/username/data"
rapid_manager = RAPID()
rapid_manager.generate_usgs_avg_daily_flows_opt(
reach_id_gage_id_file=join(main_path, "usgsgage_id_comid.csv"),
start_datetime=datetime.datetime(2000,1,1),
end_datetime=datetime.datetime(2014,12,31),
out_streamflow_file=join(main_path,"streamflow_2000_2014.csv"),
out_stream_id_file=join(main_path,"streamid_2000_2014.csv")
)
Complex Example:
.. code:: python
import datetime
from os.path import join
from RAPIDpy import RAPID
main_path = "/home/username/data"
rapid_manager = RAPID(
rapid_executable_location='~/work/rapid/run/rapid'
use_all_processors=True,
ZS_TauR=24*3600,
ZS_dtR=15*60,
ZS_TauM=365*24*3600,
ZS_dtM=24*3600
)
rapid_manager.update_parameters(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
Vlat_file='../rapid-io/input/m3_riv.nc',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
k_file='../rapid-io/input/k.csv',
x_file='../rapid-io/input/x.csv',
Qout_file='../rapid-io/output/Qout.nc',
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.generate_usgs_avg_daily_flows_opt(
reach_id_gage_id_file=join(main_path, "usgsgage_id_comid.csv"),
start_datetime=datetime.datetime(2000,1,1),
end_datetime=datetime.datetime(2014,12,31),
out_streamflow_file=join(main_path,"streamflow_2000_2014.csv"),
out_stream_id_file=join(main_path,"streamid_2000_2014.csv")
)
rapid_manager.run()
]
call[name[log], parameter[constant[Generating avg streamflow file and stream id file required for calibration ...], constant[INFO]]]
call[name[log], parameter[constant[Generating avg streamflow file and stream id file required for calibration ...], constant[INFO]]]
variable[reach_id_gage_id_list] assign[=] call[name[csv_to_list], parameter[name[reach_id_gage_id_file]]]
variable[gage_data_matrix] assign[=] list[[]]
variable[valid_comid_list] assign[=] list[[]]
variable[num_days_needed] assign[=] binary_operation[binary_operation[name[end_datetime] - name[start_datetime]].days + constant[1]]
variable[gage_id_list] assign[=] list[[]]
for taget[name[row]] in starred[call[name[reach_id_gage_id_list]][<ast.Slice object at 0x7da18f720520>]] begin[:]
variable[station_id] assign[=] call[name[row]][constant[1]]
if compare[call[name[len], parameter[call[name[row]][constant[1]]]] equal[==] constant[7]] begin[:]
variable[station_id] assign[=] binary_operation[constant[0] + call[name[row]][constant[1]]]
call[name[gage_id_list].append, parameter[name[station_id]]]
variable[num_gage_id_list] assign[=] call[name[np].array, parameter[name[gage_id_list]]]
call[name[log], parameter[constant[Querying Server for Data ...], constant[INFO]]]
variable[query_params] assign[=] dictionary[[<ast.Constant object at 0x7da18f722710>, <ast.Constant object at 0x7da18f7231c0>, <ast.Constant object at 0x7da18f7232b0>, <ast.Constant object at 0x7da18f7201c0>, <ast.Constant object at 0x7da18f720400>, <ast.Constant object at 0x7da18f721750>], [<ast.Constant object at 0x7da18f720370>, <ast.Call object at 0x7da18f721c90>, <ast.Call object at 0x7da18f720790>, <ast.Call object at 0x7da18f720490>, <ast.Constant object at 0x7da18f7232e0>, <ast.Constant object at 0x7da18f720220>]]
variable[response] assign[=] call[name[get], parameter[constant[http://waterservices.usgs.gov/nwis/dv]]]
if <ast.UnaryOp object at 0x7da18f722860> begin[:]
call[name[log], parameter[constant[USGS query error ...], constant[WARNING]]]
return[None]
variable[requested_data] assign[=] constant[None]
<ast.Try object at 0x7da18f723e80>
if compare[name[requested_data] is_not constant[None]] begin[:]
for taget[name[time_series]] in starred[call[name[enumerate], parameter[name[requested_data]]]] begin[:]
variable[usgs_station_full_name] assign[=] call[call[name[time_series]][constant[1]]][constant[name]]
variable[usgs_station_id] assign[=] call[call[name[usgs_station_full_name].split, parameter[constant[:]]]][constant[1]]
variable[gage_data] assign[=] list[[]]
for taget[name[time_step]] in starred[call[call[call[call[name[time_series]][constant[1]]][constant[values]]][constant[0]]][constant[value]]] begin[:]
variable[local_datetime] assign[=] call[name[parse], parameter[call[name[time_step]][constant[dateTime]]]]
if compare[name[local_datetime] greater[>] name[end_datetime]] begin[:]
break
if compare[name[local_datetime] greater_or_equal[>=] name[start_datetime]] begin[:]
if <ast.UnaryOp object at 0x7da1b0ba63b0> begin[:]
call[name[log], parameter[call[constant[MISSING DATA for USGS Station {0} {1} {2}].format, parameter[name[usgs_station_id], name[local_datetime], call[name[time_step]][constant[value]]]], constant[WARNING]]]
call[name[gage_data].append, parameter[binary_operation[call[name[float], parameter[call[name[time_step]][constant[value]]]] / constant[35.3146667]]]]
<ast.Try object at 0x7da1b0ba7f10>
if compare[call[name[len], parameter[name[gage_data]]] equal[==] name[num_days_needed]] begin[:]
call[name[gage_data_matrix].append, parameter[name[gage_data]]]
call[name[valid_comid_list].append, parameter[call[call[name[reach_id_gage_id_list]][name[streamid_index]]][constant[0]]]]
if <ast.BoolOp object at 0x7da204567790> begin[:]
call[name[log], parameter[constant[Writing Output ...], constant[INFO]]]
variable[np_array] assign[=] call[call[name[np].array, parameter[name[gage_data_matrix]]].transpose, parameter[]]
with call[name[open_csv], parameter[name[out_streamflow_file], constant[w]]] begin[:]
variable[wgd] assign[=] call[name[csvwriter], parameter[name[gage_data]]]
for taget[name[row]] in starred[name[np_array]] begin[:]
call[name[wgd].writerow, parameter[name[row]]]
with call[name[open_csv], parameter[name[out_stream_id_file], constant[w]]] begin[:]
variable[wcd] assign[=] call[name[csvwriter], parameter[name[comid_data]]]
for taget[name[row]] in starred[name[valid_comid_list]] begin[:]
call[name[wcd].writerow, parameter[list[[<ast.Call object at 0x7da204564fd0>]]]]
name[self].IS_obs_tot assign[=] call[name[len], parameter[name[valid_comid_list]]]
name[self].obs_tot_id_file assign[=] name[out_stream_id_file]
name[self].Qobs_file assign[=] name[out_streamflow_file]
name[self].IS_obs_use assign[=] call[name[len], parameter[name[valid_comid_list]]]
name[self].obs_use_id_file assign[=] name[out_stream_id_file] | keyword[def] identifier[generate_usgs_avg_daily_flows_opt] ( identifier[self] ,
identifier[reach_id_gage_id_file] ,
identifier[start_datetime] ,
identifier[end_datetime] ,
identifier[out_streamflow_file] ,
identifier[out_stream_id_file] ):
literal[string]
identifier[log] ( literal[string]
literal[string] ,
literal[string] )
identifier[log] ( literal[string]
literal[string] ,
literal[string] )
identifier[reach_id_gage_id_list] = identifier[csv_to_list] ( identifier[reach_id_gage_id_file] )
identifier[gage_data_matrix] =[]
identifier[valid_comid_list] =[]
identifier[num_days_needed] =( identifier[end_datetime] - identifier[start_datetime] ). identifier[days] + literal[int]
identifier[gage_id_list] =[]
keyword[for] identifier[row] keyword[in] identifier[reach_id_gage_id_list] [ literal[int] :]:
identifier[station_id] = identifier[row] [ literal[int] ]
keyword[if] identifier[len] ( identifier[row] [ literal[int] ])== literal[int] :
identifier[station_id] = literal[string] + identifier[row] [ literal[int] ]
identifier[gage_id_list] . identifier[append] ( identifier[station_id] )
identifier[num_gage_id_list] = identifier[np] . identifier[array] ( identifier[gage_id_list] , identifier[dtype] = identifier[np] . identifier[int32] )
identifier[log] ( literal[string] ,
literal[string] )
identifier[query_params] ={
literal[string] : literal[string] ,
literal[string] : literal[string] . identifier[join] ( identifier[gage_id_list] ),
literal[string] : identifier[start_datetime] . identifier[strftime] ( literal[string] ),
literal[string] : identifier[end_datetime] . identifier[strftime] ( literal[string] ),
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[response] = identifier[get] ( literal[string] ,
identifier[params] = identifier[query_params] )
keyword[if] keyword[not] identifier[response] . identifier[ok] :
identifier[log] ( literal[string] ,
literal[string] )
keyword[return]
identifier[requested_data] = keyword[None]
keyword[try] :
identifier[requested_data] = identifier[response] . identifier[json] ()[ literal[string] ][ literal[string] ]
keyword[except] identifier[IndexError] :
keyword[pass]
keyword[if] identifier[requested_data] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[time_series] keyword[in] identifier[enumerate] ( identifier[requested_data] ):
identifier[usgs_station_full_name] = identifier[time_series] [ literal[int] ][ literal[string] ]
identifier[usgs_station_id] = identifier[usgs_station_full_name] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[gage_data] =[]
keyword[for] identifier[time_step] keyword[in] identifier[time_series] [ literal[int] ][ literal[string] ][ literal[int] ][ literal[string] ]:
identifier[local_datetime] = identifier[parse] ( identifier[time_step] [ literal[string] ])
keyword[if] identifier[local_datetime] > identifier[end_datetime] :
keyword[break]
keyword[if] identifier[local_datetime] >= identifier[start_datetime] :
keyword[if] keyword[not] identifier[time_step] [ literal[string] ]:
identifier[log] ( literal[string]
. identifier[format] ( identifier[usgs_station_id] ,
identifier[local_datetime] ,
identifier[time_step] [ literal[string] ]),
literal[string] )
identifier[gage_data] . identifier[append] (
identifier[float] ( identifier[time_step] [ literal[string] ])/ literal[int] )
keyword[try] :
identifier[streamid_index] = identifier[np] . identifier[where] ( identifier[num_gage_id_list] ==
identifier[int] ( identifier[float] ( identifier[usgs_station_id] )))[ literal[int] ][ literal[int] ]+ literal[int]
keyword[except] ( identifier[IndexError] , identifier[ValueError] ):
identifier[log] ( literal[string]
. identifier[format] ( identifier[usgs_station_id] ),
literal[string] )
keyword[raise]
keyword[if] identifier[len] ( identifier[gage_data] )== identifier[num_days_needed] :
identifier[gage_data_matrix] . identifier[append] ( identifier[gage_data] )
identifier[valid_comid_list] . identifier[append] (
identifier[reach_id_gage_id_list] [ identifier[streamid_index] ][ literal[int] ])
keyword[else] :
identifier[log] ( literal[string]
literal[string] . identifier[format] (
identifier[reach_id_gage_id_list] [ identifier[streamid_index] ][ literal[int] ],
identifier[usgs_station_id] ,
identifier[num_days_needed] - identifier[len] ( identifier[gage_data] )),
literal[string] )
keyword[if] identifier[gage_data_matrix] keyword[and] identifier[valid_comid_list] :
identifier[log] ( literal[string] ,
literal[string] )
identifier[np_array] = identifier[np] . identifier[array] ( identifier[gage_data_matrix] ). identifier[transpose] ()
keyword[with] identifier[open_csv] ( identifier[out_streamflow_file] , literal[string] ) keyword[as] identifier[gage_data] :
identifier[wgd] = identifier[csvwriter] ( identifier[gage_data] )
keyword[for] identifier[row] keyword[in] identifier[np_array] :
identifier[wgd] . identifier[writerow] ( identifier[row] )
keyword[with] identifier[open_csv] ( identifier[out_stream_id_file] , literal[string] ) keyword[as] identifier[comid_data] :
identifier[wcd] = identifier[csvwriter] ( identifier[comid_data] )
keyword[for] identifier[row] keyword[in] identifier[valid_comid_list] :
identifier[wcd] . identifier[writerow] ([ identifier[int] ( identifier[float] ( identifier[row] ))])
identifier[self] . identifier[IS_obs_tot] = identifier[len] ( identifier[valid_comid_list] )
identifier[self] . identifier[obs_tot_id_file] = identifier[out_stream_id_file]
identifier[self] . identifier[Qobs_file] = identifier[out_streamflow_file]
identifier[self] . identifier[IS_obs_use] = identifier[len] ( identifier[valid_comid_list] )
identifier[self] . identifier[obs_use_id_file] = identifier[out_stream_id_file]
keyword[else] :
identifier[log] ( literal[string] ,
literal[string] ) | def generate_usgs_avg_daily_flows_opt(self, reach_id_gage_id_file, start_datetime, end_datetime, out_streamflow_file, out_stream_id_file):
"""
Generate daily streamflow file and stream id file required for
calibration or for substituting flows based on USGS gage ids
associated with stream ids.
Parameters
----------
reach_id_gage_id_file: str
Path to reach_id_gage_id file.
start_datetime: datetime
A datetime object with the start date to download data.
end_datetime: datetime
A datetime object with the end date to download data.
out_streamflow_file: str
The path to output the streamflow file for RAPID.
out_stream_id_file: str
The path to output the stream ID file associated with the
streamflow file for RAPID.
Example *reach_id_gage_id_file*::
COMID, USGS_GAGE_ID
2000, 503944
...
.. warning:: Overuse will get you blocked from downloading data from
USGS.
.. warning:: This code does not clean the data in any way. Thus, you
are likely to run into issues if you simply use the raw
data.
.. warning:: The code skips gages that do not have data
for the entire time period.
Simple Example:
.. code:: python
import datetime
from os.path import join
from RAPIDpy import RAPID
main_path = "/home/username/data"
rapid_manager = RAPID()
rapid_manager.generate_usgs_avg_daily_flows_opt(
reach_id_gage_id_file=join(main_path, "usgsgage_id_comid.csv"),
start_datetime=datetime.datetime(2000,1,1),
end_datetime=datetime.datetime(2014,12,31),
out_streamflow_file=join(main_path,"streamflow_2000_2014.csv"),
out_stream_id_file=join(main_path,"streamid_2000_2014.csv")
)
Complex Example:
.. code:: python
import datetime
from os.path import join
from RAPIDpy import RAPID
main_path = "/home/username/data"
rapid_manager = RAPID(
rapid_executable_location='~/work/rapid/run/rapid'
use_all_processors=True,
ZS_TauR=24*3600,
ZS_dtR=15*60,
ZS_TauM=365*24*3600,
ZS_dtM=24*3600
)
rapid_manager.update_parameters(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
Vlat_file='../rapid-io/input/m3_riv.nc',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
k_file='../rapid-io/input/k.csv',
x_file='../rapid-io/input/x.csv',
Qout_file='../rapid-io/output/Qout.nc',
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.generate_usgs_avg_daily_flows_opt(
reach_id_gage_id_file=join(main_path, "usgsgage_id_comid.csv"),
start_datetime=datetime.datetime(2000,1,1),
end_datetime=datetime.datetime(2014,12,31),
out_streamflow_file=join(main_path,"streamflow_2000_2014.csv"),
out_stream_id_file=join(main_path,"streamid_2000_2014.csv")
)
rapid_manager.run()
"""
log('Generating avg streamflow file and stream id file required for calibration ...', 'INFO')
log('Generating avg streamflow file and stream id file required for calibration ...', 'INFO')
reach_id_gage_id_list = csv_to_list(reach_id_gage_id_file)
gage_data_matrix = []
valid_comid_list = []
# add extra day as it includes the start date
# (e.g. 7-5 is 2 days, but have data for 5,6,7, so +1)
num_days_needed = (end_datetime - start_datetime).days + 1
gage_id_list = []
for row in reach_id_gage_id_list[1:]:
station_id = row[1]
if len(row[1]) == 7:
station_id = '0' + row[1] # depends on [control=['if'], data=[]]
gage_id_list.append(station_id) # depends on [control=['for'], data=['row']]
num_gage_id_list = np.array(gage_id_list, dtype=np.int32)
log('Querying Server for Data ...', 'INFO') # streamflow
# average
query_params = {'format': 'json', 'sites': ','.join(gage_id_list), 'startDT': start_datetime.strftime('%Y-%m-%d'), 'endDT': end_datetime.strftime('%Y-%m-%d'), 'parameterCd': '00060', 'statCd': '00003'}
response = get('http://waterservices.usgs.gov/nwis/dv', params=query_params)
if not response.ok:
log('USGS query error ...', 'WARNING')
return # depends on [control=['if'], data=[]]
requested_data = None
try:
requested_data = response.json()['value']['timeSeries'] # depends on [control=['try'], data=[]]
except IndexError:
pass # depends on [control=['except'], data=[]]
if requested_data is not None:
for time_series in enumerate(requested_data):
usgs_station_full_name = time_series[1]['name']
usgs_station_id = usgs_station_full_name.split(':')[1]
gage_data = []
for time_step in time_series[1]['values'][0]['value']:
local_datetime = parse(time_step['dateTime'])
if local_datetime > end_datetime:
break # depends on [control=['if'], data=[]]
if local_datetime >= start_datetime:
if not time_step['value']:
log('MISSING DATA for USGS Station {0} {1} {2}'.format(usgs_station_id, local_datetime, time_step['value']), 'WARNING') # depends on [control=['if'], data=[]]
gage_data.append(float(time_step['value']) / 35.3146667) # depends on [control=['if'], data=['local_datetime']] # depends on [control=['for'], data=['time_step']]
try:
# get where streamids associated with USGS station ID
streamid_index = np.where(num_gage_id_list == int(float(usgs_station_id)))[0][0] + 1 # depends on [control=['try'], data=[]]
except (IndexError, ValueError):
log('USGS Station {0} not found in list ...'.format(usgs_station_id), 'WARNING')
raise # depends on [control=['except'], data=[]]
if len(gage_data) == num_days_needed:
gage_data_matrix.append(gage_data)
valid_comid_list.append(reach_id_gage_id_list[streamid_index][0]) # depends on [control=['if'], data=[]]
else:
log('StreamID {0} USGS Station {1} MISSING {2} DATA VALUES'.format(reach_id_gage_id_list[streamid_index][0], usgs_station_id, num_days_needed - len(gage_data)), 'WARNING') # depends on [control=['for'], data=['time_series']] # depends on [control=['if'], data=['requested_data']]
if gage_data_matrix and valid_comid_list:
log('Writing Output ...', 'INFO')
np_array = np.array(gage_data_matrix).transpose()
with open_csv(out_streamflow_file, 'w') as gage_data:
wgd = csvwriter(gage_data)
for row in np_array:
wgd.writerow(row) # depends on [control=['for'], data=['row']] # depends on [control=['with'], data=['gage_data']]
with open_csv(out_stream_id_file, 'w') as comid_data:
wcd = csvwriter(comid_data)
for row in valid_comid_list:
wcd.writerow([int(float(row))]) # depends on [control=['for'], data=['row']] # depends on [control=['with'], data=['comid_data']]
# set parameters for RAPID run
self.IS_obs_tot = len(valid_comid_list)
self.obs_tot_id_file = out_stream_id_file
self.Qobs_file = out_streamflow_file
self.IS_obs_use = len(valid_comid_list)
self.obs_use_id_file = out_stream_id_file # depends on [control=['if'], data=[]]
else:
log('No valid data returned ...', 'WARNING') |
def object_formatter(v, c, m, p):
    """Format object view link.

    Looks up the endpoint registered for the model's ``object_type`` in
    ``PIDSTORE_OBJECT_ENDPOINTS`` and renders a safe HTML anchor to the
    object, or an empty string when no link can be built.

    NOTE(review): the ``(v, c, m, p)`` signature looks like a Flask-Admin
    column formatter (view, context, model, name) — confirm against the
    registering admin view.
    """
    endpoint = current_app.config['PIDSTORE_OBJECT_ENDPOINTS'].get(
        m.object_type)
    # Bail out early unless both an endpoint mapping and a target UUID exist.
    if not (endpoint and m.object_uuid):
        return ''
    target = url_for(endpoint, id=m.object_uuid)
    return Markup('<a href="{0}">{1}</a>'.format(target, _('View')))
constant[Format object view link.]
variable[endpoint] assign[=] call[call[name[current_app].config][constant[PIDSTORE_OBJECT_ENDPOINTS]].get, parameter[name[m].object_type]]
if <ast.BoolOp object at 0x7da18eb57880> begin[:]
return[call[name[Markup], parameter[call[constant[<a href="{0}">{1}</a>].format, parameter[call[name[url_for], parameter[name[endpoint]]], call[name[_], parameter[constant[View]]]]]]]]
return[constant[]] | keyword[def] identifier[object_formatter] ( identifier[v] , identifier[c] , identifier[m] , identifier[p] ):
literal[string]
identifier[endpoint] = identifier[current_app] . identifier[config] [ literal[string] ]. identifier[get] (
identifier[m] . identifier[object_type] )
keyword[if] identifier[endpoint] keyword[and] identifier[m] . identifier[object_uuid] :
keyword[return] identifier[Markup] ( literal[string] . identifier[format] (
identifier[url_for] ( identifier[endpoint] , identifier[id] = identifier[m] . identifier[object_uuid] ),
identifier[_] ( literal[string] )))
keyword[return] literal[string] | def object_formatter(v, c, m, p):
"""Format object view link."""
endpoint = current_app.config['PIDSTORE_OBJECT_ENDPOINTS'].get(m.object_type)
if endpoint and m.object_uuid:
return Markup('<a href="{0}">{1}</a>'.format(url_for(endpoint, id=m.object_uuid), _('View'))) # depends on [control=['if'], data=[]]
return '' |
def _parse_boxscore(self, game_data):
"""
Parses the boxscore URI for the game.
The boxscore is embedded within the HTML tag and needs a special
parsing scheme in order to be extracted.
Parameters
----------
game_data : PyQuery object
A PyQuery object containing the information specific to a game.
"""
boxscore = game_data('td[data-stat="box_score_text"]:first')
boxscore = re.sub(r'.*/boxscores/', '', str(boxscore))
boxscore = re.sub(r'\.html.*', '', boxscore)
setattr(self, '_boxscore', boxscore) | def function[_parse_boxscore, parameter[self, game_data]]:
constant[
Parses the boxscore URI for the game.
The boxscore is embedded within the HTML tag and needs a special
parsing scheme in order to be extracted.
Parameters
----------
game_data : PyQuery object
A PyQuery object containing the information specific to a game.
]
variable[boxscore] assign[=] call[name[game_data], parameter[constant[td[data-stat="box_score_text"]:first]]]
variable[boxscore] assign[=] call[name[re].sub, parameter[constant[.*/boxscores/], constant[], call[name[str], parameter[name[boxscore]]]]]
variable[boxscore] assign[=] call[name[re].sub, parameter[constant[\.html.*], constant[], name[boxscore]]]
call[name[setattr], parameter[name[self], constant[_boxscore], name[boxscore]]] | keyword[def] identifier[_parse_boxscore] ( identifier[self] , identifier[game_data] ):
literal[string]
identifier[boxscore] = identifier[game_data] ( literal[string] )
identifier[boxscore] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[str] ( identifier[boxscore] ))
identifier[boxscore] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[boxscore] )
identifier[setattr] ( identifier[self] , literal[string] , identifier[boxscore] ) | def _parse_boxscore(self, game_data):
"""
Parses the boxscore URI for the game.
The boxscore is embedded within the HTML tag and needs a special
parsing scheme in order to be extracted.
Parameters
----------
game_data : PyQuery object
A PyQuery object containing the information specific to a game.
"""
boxscore = game_data('td[data-stat="box_score_text"]:first')
boxscore = re.sub('.*/boxscores/', '', str(boxscore))
boxscore = re.sub('\\.html.*', '', boxscore)
setattr(self, '_boxscore', boxscore) |
def begin_run_group(project):
    """
    Begin a run_group in the database.

    A run_group groups a set of runs for a given project. This models a series
    of runs that form a complete binary runtime test.

    Args:
        project: The project we begin a new run_group for.

    Returns:
        ``(group, session)`` where group is the created group in the
        database and session is the database session this group lives in.
    """
    # Imports kept local so the database layer is only pulled in on use.
    from datetime import datetime

    from benchbuild.utils.db import create_run_group

    run_group, db_session = create_run_group(project)
    # Stamp the group as started right now and persist it immediately.
    run_group.begin = datetime.now()
    run_group.status = 'running'
    db_session.commit()
    return run_group, db_session
constant[
Begin a run_group in the database.
A run_group groups a set of runs for a given project. This models a series
of runs that form a complete binary runtime test.
Args:
project: The project we begin a new run_group for.
Returns:
``(group, session)`` where group is the created group in the
database and session is the database session this group lives in.
]
from relative_module[benchbuild.utils.db] import module[create_run_group]
from relative_module[datetime] import module[datetime]
<ast.Tuple object at 0x7da204623760> assign[=] call[name[create_run_group], parameter[name[project]]]
name[group].begin assign[=] call[name[datetime].now, parameter[]]
name[group].status assign[=] constant[running]
call[name[session].commit, parameter[]]
return[tuple[[<ast.Name object at 0x7da20c9937c0>, <ast.Name object at 0x7da20c991a80>]]] | keyword[def] identifier[begin_run_group] ( identifier[project] ):
literal[string]
keyword[from] identifier[benchbuild] . identifier[utils] . identifier[db] keyword[import] identifier[create_run_group]
keyword[from] identifier[datetime] keyword[import] identifier[datetime]
identifier[group] , identifier[session] = identifier[create_run_group] ( identifier[project] )
identifier[group] . identifier[begin] = identifier[datetime] . identifier[now] ()
identifier[group] . identifier[status] = literal[string]
identifier[session] . identifier[commit] ()
keyword[return] identifier[group] , identifier[session] | def begin_run_group(project):
"""
Begin a run_group in the database.
A run_group groups a set of runs for a given project. This models a series
of runs that form a complete binary runtime test.
Args:
project: The project we begin a new run_group for.
Returns:
``(group, session)`` where group is the created group in the
database and session is the database session this group lives in.
"""
from benchbuild.utils.db import create_run_group
from datetime import datetime
(group, session) = create_run_group(project)
group.begin = datetime.now()
group.status = 'running'
session.commit()
return (group, session) |
def make_layout(maxval):
    """Make the physical layout of the MinION flowcell.
    based on https://bioinformatics.stackexchange.com/a/749/681
    returned as a numpy array
    """
    if maxval > 512:
        # Large (3000-channel) flowcell: a 25x10 tile of consecutive
        # channel numbers, repeated with offsets 0, 250, ..., 2750 and
        # stitched side by side into a 25x120 grid.
        tile = np.array([list(range(10 * row + 1, 10 * row + 11))
                         for row in range(25)])
        structure = np.concatenate([tile + shift
                                    for shift in range(0, 3000, 250)],
                                   axis=1)
        return Layout(structure=structure,
                      template=np.zeros((25, 120)),
                      xticks=range(1, 121),
                      yticks=range(1, 26))
    # 512-channel layout: each column is 8 ascending plus 8 descending
    # channel numbers; transposing the 32 columns yields a 16x32 grid.
    columns = []
    for asc_start, desc_start in zip((33, 481, 417, 353, 289, 225, 161, 97),
                                     (8, 456, 392, 328, 264, 200, 136, 72)):
        for block in range(4):
            asc = asc_start + block * 8
            desc = desc_start + block * 8
            columns.append(list(range(asc, asc + 8)) +
                           list(range(desc, desc - 8, -1)))
    return Layout(structure=np.array(columns).transpose(),
                  template=np.zeros((16, 32)),
                  xticks=range(1, 33),
                  yticks=range(1, 17))
constant[Make the physical layout of the MinION flowcell.
based on https://bioinformatics.stackexchange.com/a/749/681
returned as a numpy array
]
if compare[name[maxval] greater[>] constant[512]] begin[:]
return[call[name[Layout], parameter[]]] | keyword[def] identifier[make_layout] ( identifier[maxval] ):
literal[string]
keyword[if] identifier[maxval] > literal[int] :
keyword[return] identifier[Layout] (
identifier[structure] = identifier[np] . identifier[concatenate] ([ identifier[np] . identifier[array] ([ identifier[list] ( identifier[range] ( literal[int] * identifier[i] + literal[int] , identifier[i] * literal[int] + literal[int] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )])+ identifier[j]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , literal[int] , literal[int] )],
identifier[axis] = literal[int] ),
identifier[template] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] )),
identifier[xticks] = identifier[range] ( literal[int] , literal[int] ),
identifier[yticks] = identifier[range] ( literal[int] , literal[int] ))
keyword[else] :
identifier[layoutlist] =[]
keyword[for] identifier[i] , identifier[j] keyword[in] identifier[zip] (
[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]):
keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] ):
identifier[layoutlist] . identifier[append] ( identifier[list] ( identifier[range] ( identifier[i] + identifier[n] * literal[int] ,( identifier[i] + identifier[n] * literal[int] )+ literal[int] , literal[int] ))+
identifier[list] ( identifier[range] ( identifier[j] + identifier[n] * literal[int] ,( identifier[j] + identifier[n] * literal[int] )- literal[int] ,- literal[int] )))
keyword[return] identifier[Layout] (
identifier[structure] = identifier[np] . identifier[array] ( identifier[layoutlist] ). identifier[transpose] (),
identifier[template] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] )),
identifier[xticks] = identifier[range] ( literal[int] , literal[int] ),
identifier[yticks] = identifier[range] ( literal[int] , literal[int] )) | def make_layout(maxval):
"""Make the physical layout of the MinION flowcell.
based on https://bioinformatics.stackexchange.com/a/749/681
returned as a numpy array
"""
if maxval > 512:
return Layout(structure=np.concatenate([np.array([list(range(10 * i + 1, i * 10 + 11)) for i in range(25)]) + j for j in range(0, 3000, 250)], axis=1), template=np.zeros((25, 120)), xticks=range(1, 121), yticks=range(1, 26)) # depends on [control=['if'], data=[]]
else:
layoutlist = []
for (i, j) in zip([33, 481, 417, 353, 289, 225, 161, 97], [8, 456, 392, 328, 264, 200, 136, 72]):
for n in range(4):
layoutlist.append(list(range(i + n * 8, i + n * 8 + 8, 1)) + list(range(j + n * 8, j + n * 8 - 8, -1))) # depends on [control=['for'], data=['n']] # depends on [control=['for'], data=[]]
return Layout(structure=np.array(layoutlist).transpose(), template=np.zeros((16, 32)), xticks=range(1, 33), yticks=range(1, 17)) |
def devserver(port, admin_port, clear):
    # type: (int, int, bool) -> None
    """ Run devserver.

    Args:
        port (int):
            Port on which the app will be served.
        admin_port (int):
            Port on which the admin interface is served. Falls back to
            ``port + 1`` when falsy.
        clear (bool):
            If set to **True**, clear the datastore on startup.
    """
    if not admin_port:
        admin_port = port + 1
    flags = [
        '--port={}'.format(port),
        '--admin_port={}'.format(admin_port),
    ]
    if clear:
        flags.append('--clear_datastore=yes')
    # Run from inside the project directory so dev_appserver picks up the app.
    with conf.within_proj_dir():
        shell.run('dev_appserver.py . {args}'.format(args=' '.join(flags)))
constant[ Run devserver.
Args:
port (int):
Port on which the app will be served.
admin_port (int):
Port on which the admin interface is served.
clear (bool):
If set to **True**, clear the datastore on startup.
]
variable[admin_port] assign[=] <ast.BoolOp object at 0x7da1b10a78b0>
variable[args] assign[=] list[[<ast.Call object at 0x7da1b10a76d0>, <ast.Call object at 0x7da1b10a5ff0>]]
if name[clear] begin[:]
<ast.AugAssign object at 0x7da1b10a75b0>
with call[name[conf].within_proj_dir, parameter[]] begin[:]
call[name[shell].run, parameter[call[constant[dev_appserver.py . {args}].format, parameter[]]]] | keyword[def] identifier[devserver] ( identifier[port] , identifier[admin_port] , identifier[clear] ):
literal[string]
identifier[admin_port] = identifier[admin_port] keyword[or] ( identifier[port] + literal[int] )
identifier[args] =[
literal[string] . identifier[format] ( identifier[port] ),
literal[string] . identifier[format] ( identifier[admin_port] )
]
keyword[if] identifier[clear] :
identifier[args] +=[ literal[string] ]
keyword[with] identifier[conf] . identifier[within_proj_dir] ():
identifier[shell] . identifier[run] ( literal[string] . identifier[format] ( identifier[args] = literal[string] . identifier[join] ( identifier[args] ))) | def devserver(port, admin_port, clear):
# type: (int, int, bool) -> None
' Run devserver.\n\n Args:\n port (int):\n Port on which the app will be served.\n admin_port (int):\n Port on which the admin interface is served.\n clear (bool):\n If set to **True**, clear the datastore on startup.\n '
admin_port = admin_port or port + 1
args = ['--port={}'.format(port), '--admin_port={}'.format(admin_port)]
if clear:
args += ['--clear_datastore=yes'] # depends on [control=['if'], data=[]]
with conf.within_proj_dir():
shell.run('dev_appserver.py . {args}'.format(args=' '.join(args))) # depends on [control=['with'], data=[]] |
def accountreport_get(self, start_created, end_created, session, fields=None):
    """taobao.topats.trade.accountreport.get
    Asynchronously download the Alipay account statement bound to a Taobao seller.

    Submits an asynchronous download task for the seller's Alipay statement
    details.  Per the API contract: at most 3 months of data per call, and
    only records from 2010-06-10 onward are available.  On success the API
    returns a task id and creation time, which are registered on this object
    via ``self.create``; the (large-data) result is later fetched through
    taobao.topats.result.get as a download URL, with rows described by the
    ``TradeAccountDetail`` structure.  The task only executes inside the
    service windows 00:00-09:30, 11:00-14:00, 17:00-20:00 and 22:30-24:00.

    :param start_created: start of the statement period
    :param end_created: end of the statement period
    :param session: authorized TOP session key
    :param fields: statement fields to return; defaults to every field of
        ``TradeAccountDetail`` (``None`` avoids the shared mutable-default
        pitfall of the previous ``fields=[]`` signature)
    :return: ``self``, with the created task attached
    """
    request = TOPRequest('taobao.topats.trade.accountreport.get')
    request['start_created'] = start_created
    request['end_created'] = end_created
    if not fields:
        # Default to the full field list of the statement-detail structure.
        fields = TradeAccountDetail().fields
    request['fields'] = fields
    self.create(self.execute(request, session)['task'])
    return self
constant[taobao.topats.trade.accountreport.get 异步获取淘宝卖家绑定的支付宝账户的财务明细
- 1.提供异步下载用户支付宝对账信息接口
- 2.一次调用最多支持下载3个月的对账信息
- 3.仅能获取2010年6月10日以后的信息
- 4.提交任务会进行初步任务校验,如果成功会返回任务号和创建时间,如果失败就报错
- 5.可以接收淘宝发出的任务完成消息,也可以过一段时间来取结果。获取结果接口为taobao.topats.result.get
- 6.支付宝证书签名方法见文档:“http://open.taobao.com/dev/index.php/如何数字证书签名”
- 7.此api执行完成发送的通知消息格式为{"task":{"task_id":123456,"created":"2010-8-19"}}
- 8.此任务是大数据任务,获取任务结果时只能得到下载url
- 9.子任务结果解析见TradeAccountDetail结构体说明
- 10.此接口执行任务时间段为:00:00:00-09:30:00;11:00:00-14:00:00;17:00:00-20:00:00;22:30:00-23:59:59,只有在这段时间内才能返回查询结果]
variable[request] assign[=] call[name[TOPRequest], parameter[constant[taobao.topats.trade.accountreport.get]]]
call[name[request]][constant[start_created]] assign[=] name[start_created]
call[name[request]][constant[end_created]] assign[=] name[end_created]
if <ast.UnaryOp object at 0x7da1b2615ff0> begin[:]
variable[tradeAccountDetail] assign[=] call[name[TradeAccountDetail], parameter[]]
variable[fields] assign[=] name[tradeAccountDetail].fields
call[name[request]][constant[fields]] assign[=] name[fields]
call[name[self].create, parameter[call[call[name[self].execute, parameter[name[request], name[session]]]][constant[task]]]]
return[name[self]] | keyword[def] identifier[accountreport_get] ( identifier[self] , identifier[start_created] , identifier[end_created] , identifier[session] , identifier[fields] =[]):
literal[string]
identifier[request] = identifier[TOPRequest] ( literal[string] )
identifier[request] [ literal[string] ]= identifier[start_created]
identifier[request] [ literal[string] ]= identifier[end_created]
keyword[if] keyword[not] identifier[fields] :
identifier[tradeAccountDetail] = identifier[TradeAccountDetail] ()
identifier[fields] = identifier[tradeAccountDetail] . identifier[fields]
identifier[request] [ literal[string] ]= identifier[fields]
identifier[self] . identifier[create] ( identifier[self] . identifier[execute] ( identifier[request] , identifier[session] )[ literal[string] ])
keyword[return] identifier[self] | def accountreport_get(self, start_created, end_created, session, fields=[]):
"""taobao.topats.trade.accountreport.get 异步获取淘宝卖家绑定的支付宝账户的财务明细
- 1.提供异步下载用户支付宝对账信息接口
- 2.一次调用最多支持下载3个月的对账信息
- 3.仅能获取2010年6月10日以后的信息
- 4.提交任务会进行初步任务校验,如果成功会返回任务号和创建时间,如果失败就报错
- 5.可以接收淘宝发出的任务完成消息,也可以过一段时间来取结果。获取结果接口为taobao.topats.result.get
- 6.支付宝证书签名方法见文档:“http://open.taobao.com/dev/index.php/如何数字证书签名”
- 7.此api执行完成发送的通知消息格式为{"task":{"task_id":123456,"created":"2010-8-19"}}
- 8.此任务是大数据任务,获取任务结果时只能得到下载url
- 9.子任务结果解析见TradeAccountDetail结构体说明
- 10.此接口执行任务时间段为:00:00:00-09:30:00;11:00:00-14:00:00;17:00:00-20:00:00;22:30:00-23:59:59,只有在这段时间内才能返回查询结果"""
request = TOPRequest('taobao.topats.trade.accountreport.get')
request['start_created'] = start_created
request['end_created'] = end_created
if not fields:
tradeAccountDetail = TradeAccountDetail()
fields = tradeAccountDetail.fields # depends on [control=['if'], data=[]]
request['fields'] = fields
self.create(self.execute(request, session)['task'])
return self |
def _lower_left_xy(self):
"""
Compute lower left `xy` position.
This is used for the conversion to matplotlib in ``as_artist``
Taken from http://photutils.readthedocs.io/en/latest/_modules/photutils/aperture/rectangle.html#RectangularAperture.plot
"""
hw = self.width / 2.
hh = self.height / 2.
sint = np.sin(self.angle)
cost = np.cos(self.angle)
dx = (hh * sint) - (hw * cost)
dy = -(hh * cost) - (hw * sint)
x = self.center.x + dx
y = self.center.y + dy
return x, y | def function[_lower_left_xy, parameter[self]]:
constant[
Compute lower left `xy` position.
This is used for the conversion to matplotlib in ``as_artist``
Taken from http://photutils.readthedocs.io/en/latest/_modules/photutils/aperture/rectangle.html#RectangularAperture.plot
]
variable[hw] assign[=] binary_operation[name[self].width / constant[2.0]]
variable[hh] assign[=] binary_operation[name[self].height / constant[2.0]]
variable[sint] assign[=] call[name[np].sin, parameter[name[self].angle]]
variable[cost] assign[=] call[name[np].cos, parameter[name[self].angle]]
variable[dx] assign[=] binary_operation[binary_operation[name[hh] * name[sint]] - binary_operation[name[hw] * name[cost]]]
variable[dy] assign[=] binary_operation[<ast.UnaryOp object at 0x7da18dc06350> - binary_operation[name[hw] * name[sint]]]
variable[x] assign[=] binary_operation[name[self].center.x + name[dx]]
variable[y] assign[=] binary_operation[name[self].center.y + name[dy]]
return[tuple[[<ast.Name object at 0x7da18dc05240>, <ast.Name object at 0x7da18dc05bd0>]]] | keyword[def] identifier[_lower_left_xy] ( identifier[self] ):
literal[string]
identifier[hw] = identifier[self] . identifier[width] / literal[int]
identifier[hh] = identifier[self] . identifier[height] / literal[int]
identifier[sint] = identifier[np] . identifier[sin] ( identifier[self] . identifier[angle] )
identifier[cost] = identifier[np] . identifier[cos] ( identifier[self] . identifier[angle] )
identifier[dx] =( identifier[hh] * identifier[sint] )-( identifier[hw] * identifier[cost] )
identifier[dy] =-( identifier[hh] * identifier[cost] )-( identifier[hw] * identifier[sint] )
identifier[x] = identifier[self] . identifier[center] . identifier[x] + identifier[dx]
identifier[y] = identifier[self] . identifier[center] . identifier[y] + identifier[dy]
keyword[return] identifier[x] , identifier[y] | def _lower_left_xy(self):
"""
Compute lower left `xy` position.
This is used for the conversion to matplotlib in ``as_artist``
Taken from http://photutils.readthedocs.io/en/latest/_modules/photutils/aperture/rectangle.html#RectangularAperture.plot
"""
hw = self.width / 2.0
hh = self.height / 2.0
sint = np.sin(self.angle)
cost = np.cos(self.angle)
dx = hh * sint - hw * cost
dy = -(hh * cost) - hw * sint
x = self.center.x + dx
y = self.center.y + dy
return (x, y) |
def get_response(self):
    """Build the response block that matches this request.

    Careful: only the fields that can be derived from the request
    (ARType, ARUUID, SessionKey) are copied into the response.
    """
    response = ARBlockRes()
    for field_name in ("ARType", "ARUUID", "SessionKey"):
        response.setfieldval(field_name, self.getfieldval(field_name))
    return response
return res | def function[get_response, parameter[self]]:
constant[Generate the response block of this request.
Careful: it only sets the fields which can be set from the request
]
variable[res] assign[=] call[name[ARBlockRes], parameter[]]
for taget[name[field]] in starred[list[[<ast.Constant object at 0x7da1b21a0790>, <ast.Constant object at 0x7da1b21a1240>, <ast.Constant object at 0x7da1b21a2e90>]]] begin[:]
call[name[res].setfieldval, parameter[name[field], call[name[self].getfieldval, parameter[name[field]]]]]
return[name[res]] | keyword[def] identifier[get_response] ( identifier[self] ):
literal[string]
identifier[res] = identifier[ARBlockRes] ()
keyword[for] identifier[field] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[res] . identifier[setfieldval] ( identifier[field] , identifier[self] . identifier[getfieldval] ( identifier[field] ))
keyword[return] identifier[res] | def get_response(self):
"""Generate the response block of this request.
Careful: it only sets the fields which can be set from the request
"""
res = ARBlockRes()
for field in ['ARType', 'ARUUID', 'SessionKey']:
res.setfieldval(field, self.getfieldval(field)) # depends on [control=['for'], data=['field']]
return res |
def get_last_result(self):
    """Read back the most recent conversion while in continuous mode.

    Returns the conversion register value as a signed integer.
    """
    # The conversion register is read as two bytes; hand them to the
    # converter in the same (swapped) order the original code used.
    first, second = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
    return self._conversion_value(second, first)
constant[Read the last conversion result when in continuous conversion mode.
Will return a signed integer value.
]
variable[result] assign[=] call[name[self]._device.readList, parameter[name[ADS1x15_POINTER_CONVERSION], constant[2]]]
return[call[name[self]._conversion_value, parameter[call[name[result]][constant[1]], call[name[result]][constant[0]]]]] | keyword[def] identifier[get_last_result] ( identifier[self] ):
literal[string]
identifier[result] = identifier[self] . identifier[_device] . identifier[readList] ( identifier[ADS1x15_POINTER_CONVERSION] , literal[int] )
keyword[return] identifier[self] . identifier[_conversion_value] ( identifier[result] [ literal[int] ], identifier[result] [ literal[int] ]) | def get_last_result(self):
"""Read the last conversion result when in continuous conversion mode.
Will return a signed integer value.
"""
# Retrieve the conversion register value, convert to a signed int, and
# return it.
result = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
return self._conversion_value(result[1], result[0]) |
def _get_current_tags(name, runas=None):
    """
    Return the list of tags currently assigned to the given RabbitMQ user.

    Used to decide whether a user's tags need to be changed; falls back to
    an empty list (after logging the error) when the user listing fails.
    """
    try:
        current = __salt__['rabbitmq.list_users'](runas=runas)[name]
    except CommandExecutionError as err:
        log.error('Error: %s', err)
        return []
    return list(current)
return [] | def function[_get_current_tags, parameter[name, runas]]:
constant[
Whether Rabbitmq user's tags need to be changed
]
<ast.Try object at 0x7da20c6a9a80> | keyword[def] identifier[_get_current_tags] ( identifier[name] , identifier[runas] = keyword[None] ):
literal[string]
keyword[try] :
keyword[return] identifier[list] ( identifier[__salt__] [ literal[string] ]( identifier[runas] = identifier[runas] )[ identifier[name] ])
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[err] :
identifier[log] . identifier[error] ( literal[string] , identifier[err] )
keyword[return] [] | def _get_current_tags(name, runas=None):
"""
Whether Rabbitmq user's tags need to be changed
"""
try:
return list(__salt__['rabbitmq.list_users'](runas=runas)[name]) # depends on [control=['try'], data=[]]
except CommandExecutionError as err:
log.error('Error: %s', err)
return [] # depends on [control=['except'], data=['err']] |
def is_response_correct(self, response):
    """Return True when *response* matches any 100%-correct Item Answer."""
    answers = self.my_osid_object.get_answers()
    # any() short-circuits on the first matching answer, exactly like the
    # early-return loop it replaces.
    return any(self._is_match(response, candidate) for candidate in answers)
constant[returns True if response evaluates to an Item Answer that is 100 percent correct]
for taget[name[answer]] in starred[call[name[self].my_osid_object.get_answers, parameter[]]] begin[:]
if call[name[self]._is_match, parameter[name[response], name[answer]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_response_correct] ( identifier[self] , identifier[response] ):
literal[string]
keyword[for] identifier[answer] keyword[in] identifier[self] . identifier[my_osid_object] . identifier[get_answers] ():
keyword[if] identifier[self] . identifier[_is_match] ( identifier[response] , identifier[answer] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_response_correct(self, response):
"""returns True if response evaluates to an Item Answer that is 100 percent correct"""
for answer in self.my_osid_object.get_answers():
if self._is_match(response, answer):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['answer']]
return False |
def on_load(self, event):
    '''called on load button'''
    chooser = wx.FileDialog(None, self.settings.get_title(), '', "", '*.*', wx.FD_OPEN)
    if chooser.ShowModal() == wx.ID_OK:
        self.settings.load(chooser.GetPath())
    # Push the (possibly re-loaded) setting values back into the widgets.
    # NOTE: this refresh runs even when the dialog is cancelled, matching
    # the original control flow.
    for label, setting in self.setting_map.items():
        ctrl = self.controls[label]
        current = ctrl.GetValue()
        if isinstance(current, (str, unicode)):
            ctrl.SetValue(str(setting.value))
        else:
            ctrl.SetValue(setting.value)
constant[called on load button]
variable[dlg] assign[=] call[name[wx].FileDialog, parameter[constant[None], call[name[self].settings.get_title, parameter[]], constant[], constant[], constant[*.*], name[wx].FD_OPEN]]
if compare[call[name[dlg].ShowModal, parameter[]] equal[==] name[wx].ID_OK] begin[:]
call[name[self].settings.load, parameter[call[name[dlg].GetPath, parameter[]]]]
for taget[name[label]] in starred[call[name[self].setting_map.keys, parameter[]]] begin[:]
variable[setting] assign[=] call[name[self].setting_map][name[label]]
variable[ctrl] assign[=] call[name[self].controls][name[label]]
variable[value] assign[=] call[name[ctrl].GetValue, parameter[]]
if <ast.BoolOp object at 0x7da1b162b3a0> begin[:]
call[name[ctrl].SetValue, parameter[call[name[str], parameter[name[setting].value]]]] | keyword[def] identifier[on_load] ( identifier[self] , identifier[event] ):
literal[string]
identifier[dlg] = identifier[wx] . identifier[FileDialog] ( keyword[None] , identifier[self] . identifier[settings] . identifier[get_title] (), literal[string] , literal[string] , literal[string] , identifier[wx] . identifier[FD_OPEN] )
keyword[if] identifier[dlg] . identifier[ShowModal] ()== identifier[wx] . identifier[ID_OK] :
identifier[self] . identifier[settings] . identifier[load] ( identifier[dlg] . identifier[GetPath] ())
keyword[for] identifier[label] keyword[in] identifier[self] . identifier[setting_map] . identifier[keys] ():
identifier[setting] = identifier[self] . identifier[setting_map] [ identifier[label] ]
identifier[ctrl] = identifier[self] . identifier[controls] [ identifier[label] ]
identifier[value] = identifier[ctrl] . identifier[GetValue] ()
keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ) keyword[or] identifier[isinstance] ( identifier[value] , identifier[unicode] ):
identifier[ctrl] . identifier[SetValue] ( identifier[str] ( identifier[setting] . identifier[value] ))
keyword[else] :
identifier[ctrl] . identifier[SetValue] ( identifier[setting] . identifier[value] ) | def on_load(self, event):
"""called on load button"""
dlg = wx.FileDialog(None, self.settings.get_title(), '', '', '*.*', wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.settings.load(dlg.GetPath()) # depends on [control=['if'], data=[]]
# update the controls with new values
for label in self.setting_map.keys():
setting = self.setting_map[label]
ctrl = self.controls[label]
value = ctrl.GetValue()
if isinstance(value, str) or isinstance(value, unicode):
ctrl.SetValue(str(setting.value)) # depends on [control=['if'], data=[]]
else:
ctrl.SetValue(setting.value) # depends on [control=['for'], data=['label']] |
def get_or(self, cache_key, callback, *args, **kwargs):
    """ Get a cached value, or compute and cache it via *callback*.

    :param cache_key: Cache key for the given resource
    :param callback: Callable invoked on a cache miss
    :param args: Positional arguments for the callback
    :param kwargs: Keyword arguments for the callback
    :return: The cached value if present, otherwise the callback's output
    :raises UnknownCollection: when the callback reports an unknown collection
    """
    cached = self.cache.get(cache_key)
    if cached is not None:
        return cached
    try:
        output = callback(*args, **kwargs)
    except MyCapytain.errors.UnknownCollection as E:
        # Re-wrap as the local error type so callers need not know MyCapytain.
        raise UnknownCollection(str(E))
    # The redundant `except Exception as E: raise E` clause was removed:
    # letting other exceptions propagate naturally preserves their tracebacks.
    self.cache.set(cache_key, output, self.TIMEOUT)
    return output
constant[ Get or set the cache using callback and arguments
:param cache_key: Cache key for given resource
:param callback: Callback if object does not exist
:param args: Ordered Argument for the callback
:param kwargs: Keyword argument for the callback
:return: Output of the callback
]
variable[cached] assign[=] call[name[self].cache.get, parameter[name[cache_key]]]
if compare[name[cached] is_not constant[None]] begin[:]
return[name[cached]] | keyword[def] identifier[get_or] ( identifier[self] , identifier[cache_key] , identifier[callback] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[cached] = identifier[self] . identifier[cache] . identifier[get] ( identifier[cache_key] )
keyword[if] identifier[cached] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[cached]
keyword[else] :
keyword[try] :
identifier[output] = identifier[callback] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[MyCapytain] . identifier[errors] . identifier[UnknownCollection] keyword[as] identifier[E] :
keyword[raise] identifier[UnknownCollection] ( identifier[str] ( identifier[E] ))
keyword[except] identifier[Exception] keyword[as] identifier[E] :
keyword[raise] identifier[E]
identifier[self] . identifier[cache] . identifier[set] ( identifier[cache_key] , identifier[output] , identifier[self] . identifier[TIMEOUT] )
keyword[return] identifier[output] | def get_or(self, cache_key, callback, *args, **kwargs):
""" Get or set the cache using callback and arguments
:param cache_key: Cache key for given resource
:param callback: Callback if object does not exist
:param args: Ordered Argument for the callback
:param kwargs: Keyword argument for the callback
:return: Output of the callback
"""
cached = self.cache.get(cache_key)
if cached is not None:
return cached # depends on [control=['if'], data=['cached']]
else:
try:
output = callback(*args, **kwargs) # depends on [control=['try'], data=[]]
except MyCapytain.errors.UnknownCollection as E:
raise UnknownCollection(str(E)) # depends on [control=['except'], data=['E']]
except Exception as E:
raise E # depends on [control=['except'], data=['E']]
self.cache.set(cache_key, output, self.TIMEOUT)
return output |
def pixbuf_to_cairo_slices(pixbuf):
    """Convert from PixBuf to ImageSurface, using slice-based byte swapping.

    This method is 2~5x slower than GDK but does not support an alpha channel.
    (cairo uses pre-multiplied alpha, but not Pixbuf.)

    :param pixbuf: a GdkPixbuf holding 3-channel, 8-bit-per-sample RGB data
    :return: a cairo ``ImageSurface`` in ``FORMAT_RGB24``
    """
    # Only plain 8-bit RGB (no alpha channel) pixbufs are supported here.
    assert pixbuf.get_colorspace() == gdk_pixbuf.GDK_COLORSPACE_RGB
    assert pixbuf.get_n_channels() == 3
    assert pixbuf.get_bits_per_sample() == 8
    width = pixbuf.get_width()
    height = pixbuf.get_height()
    rowstride = pixbuf.get_rowstride()
    pixels = ffi.buffer(pixbuf.get_pixels(), pixbuf.get_byte_length())
    # TODO: remove this when cffi buffers support slicing with a stride.
    pixels = pixels[:]
    # Convert GdkPixbuf’s big-endian RGBA to cairo’s native-endian ARGB
    cairo_stride = ImageSurface.format_stride_for_width(
        constants.FORMAT_RGB24, width)
    data = bytearray(cairo_stride * height)
    big_endian = sys.byteorder == 'big'
    pixbuf_row_length = width * 3 # stride == row_length + padding
    cairo_row_length = width * 4 # stride == row_length + padding
    alpha = b'\xff' * width # opaque
    for y in range(height):
        # Extract each colour plane of the source row with step-3 slices.
        offset = rowstride * y
        end = offset + pixbuf_row_length
        red = pixels[offset:end:3]
        green = pixels[offset + 1:end:3]
        blue = pixels[offset + 2:end:3]
        # Interleave the planes into the destination row with step-4 slices,
        # ordered so a native-endian 32-bit read yields ARGB on either
        # byte order.
        offset = cairo_stride * y
        end = offset + cairo_row_length
        if big_endian:  # pragma: no cover
            data[offset:end:4] = alpha
            data[offset + 1:end:4] = red
            data[offset + 2:end:4] = green
            data[offset + 3:end:4] = blue
        else:
            data[offset + 3:end:4] = alpha
            data[offset + 2:end:4] = red
            data[offset + 1:end:4] = green
            data[offset:end:4] = blue
    # ImageSurface wants a writable buffer; array('B') wraps the bytes.
    data = array('B', data)
    return ImageSurface(constants.FORMAT_RGB24,
                        width, height, data, cairo_stride)
constant[Convert from PixBuf to ImageSurface, using slice-based byte swapping.
This method is 2~5x slower than GDK but does not support an alpha channel.
(cairo uses pre-multiplied alpha, but not Pixbuf.)
]
assert[compare[call[name[pixbuf].get_colorspace, parameter[]] equal[==] name[gdk_pixbuf].GDK_COLORSPACE_RGB]]
assert[compare[call[name[pixbuf].get_n_channels, parameter[]] equal[==] constant[3]]]
assert[compare[call[name[pixbuf].get_bits_per_sample, parameter[]] equal[==] constant[8]]]
variable[width] assign[=] call[name[pixbuf].get_width, parameter[]]
variable[height] assign[=] call[name[pixbuf].get_height, parameter[]]
variable[rowstride] assign[=] call[name[pixbuf].get_rowstride, parameter[]]
variable[pixels] assign[=] call[name[ffi].buffer, parameter[call[name[pixbuf].get_pixels, parameter[]], call[name[pixbuf].get_byte_length, parameter[]]]]
variable[pixels] assign[=] call[name[pixels]][<ast.Slice object at 0x7da1b1117460>]
variable[cairo_stride] assign[=] call[name[ImageSurface].format_stride_for_width, parameter[name[constants].FORMAT_RGB24, name[width]]]
variable[data] assign[=] call[name[bytearray], parameter[binary_operation[name[cairo_stride] * name[height]]]]
variable[big_endian] assign[=] compare[name[sys].byteorder equal[==] constant[big]]
variable[pixbuf_row_length] assign[=] binary_operation[name[width] * constant[3]]
variable[cairo_row_length] assign[=] binary_operation[name[width] * constant[4]]
variable[alpha] assign[=] binary_operation[constant[b'\xff'] * name[width]]
for taget[name[y]] in starred[call[name[range], parameter[name[height]]]] begin[:]
variable[offset] assign[=] binary_operation[name[rowstride] * name[y]]
variable[end] assign[=] binary_operation[name[offset] + name[pixbuf_row_length]]
variable[red] assign[=] call[name[pixels]][<ast.Slice object at 0x7da1b1115c90>]
variable[green] assign[=] call[name[pixels]][<ast.Slice object at 0x7da1b1117fa0>]
variable[blue] assign[=] call[name[pixels]][<ast.Slice object at 0x7da1b1116c80>]
variable[offset] assign[=] binary_operation[name[cairo_stride] * name[y]]
variable[end] assign[=] binary_operation[name[offset] + name[cairo_row_length]]
if name[big_endian] begin[:]
call[name[data]][<ast.Slice object at 0x7da1b104abc0>] assign[=] name[alpha]
call[name[data]][<ast.Slice object at 0x7da1b104b6a0>] assign[=] name[red]
call[name[data]][<ast.Slice object at 0x7da1b1049e70>] assign[=] name[green]
call[name[data]][<ast.Slice object at 0x7da1b1049cc0>] assign[=] name[blue]
variable[data] assign[=] call[name[array], parameter[constant[B], name[data]]]
return[call[name[ImageSurface], parameter[name[constants].FORMAT_RGB24, name[width], name[height], name[data], name[cairo_stride]]]] | keyword[def] identifier[pixbuf_to_cairo_slices] ( identifier[pixbuf] ):
literal[string]
keyword[assert] identifier[pixbuf] . identifier[get_colorspace] ()== identifier[gdk_pixbuf] . identifier[GDK_COLORSPACE_RGB]
keyword[assert] identifier[pixbuf] . identifier[get_n_channels] ()== literal[int]
keyword[assert] identifier[pixbuf] . identifier[get_bits_per_sample] ()== literal[int]
identifier[width] = identifier[pixbuf] . identifier[get_width] ()
identifier[height] = identifier[pixbuf] . identifier[get_height] ()
identifier[rowstride] = identifier[pixbuf] . identifier[get_rowstride] ()
identifier[pixels] = identifier[ffi] . identifier[buffer] ( identifier[pixbuf] . identifier[get_pixels] (), identifier[pixbuf] . identifier[get_byte_length] ())
identifier[pixels] = identifier[pixels] [:]
identifier[cairo_stride] = identifier[ImageSurface] . identifier[format_stride_for_width] (
identifier[constants] . identifier[FORMAT_RGB24] , identifier[width] )
identifier[data] = identifier[bytearray] ( identifier[cairo_stride] * identifier[height] )
identifier[big_endian] = identifier[sys] . identifier[byteorder] == literal[string]
identifier[pixbuf_row_length] = identifier[width] * literal[int]
identifier[cairo_row_length] = identifier[width] * literal[int]
identifier[alpha] = literal[string] * identifier[width]
keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[height] ):
identifier[offset] = identifier[rowstride] * identifier[y]
identifier[end] = identifier[offset] + identifier[pixbuf_row_length]
identifier[red] = identifier[pixels] [ identifier[offset] : identifier[end] : literal[int] ]
identifier[green] = identifier[pixels] [ identifier[offset] + literal[int] : identifier[end] : literal[int] ]
identifier[blue] = identifier[pixels] [ identifier[offset] + literal[int] : identifier[end] : literal[int] ]
identifier[offset] = identifier[cairo_stride] * identifier[y]
identifier[end] = identifier[offset] + identifier[cairo_row_length]
keyword[if] identifier[big_endian] :
identifier[data] [ identifier[offset] : identifier[end] : literal[int] ]= identifier[alpha]
identifier[data] [ identifier[offset] + literal[int] : identifier[end] : literal[int] ]= identifier[red]
identifier[data] [ identifier[offset] + literal[int] : identifier[end] : literal[int] ]= identifier[green]
identifier[data] [ identifier[offset] + literal[int] : identifier[end] : literal[int] ]= identifier[blue]
keyword[else] :
identifier[data] [ identifier[offset] + literal[int] : identifier[end] : literal[int] ]= identifier[alpha]
identifier[data] [ identifier[offset] + literal[int] : identifier[end] : literal[int] ]= identifier[red]
identifier[data] [ identifier[offset] + literal[int] : identifier[end] : literal[int] ]= identifier[green]
identifier[data] [ identifier[offset] : identifier[end] : literal[int] ]= identifier[blue]
identifier[data] = identifier[array] ( literal[string] , identifier[data] )
keyword[return] identifier[ImageSurface] ( identifier[constants] . identifier[FORMAT_RGB24] ,
identifier[width] , identifier[height] , identifier[data] , identifier[cairo_stride] ) | def pixbuf_to_cairo_slices(pixbuf):
"""Convert from PixBuf to ImageSurface, using slice-based byte swapping.
This method is 2~5x slower than GDK but does not support an alpha channel.
(cairo uses pre-multiplied alpha, but not Pixbuf.)
"""
assert pixbuf.get_colorspace() == gdk_pixbuf.GDK_COLORSPACE_RGB
assert pixbuf.get_n_channels() == 3
assert pixbuf.get_bits_per_sample() == 8
width = pixbuf.get_width()
height = pixbuf.get_height()
rowstride = pixbuf.get_rowstride()
pixels = ffi.buffer(pixbuf.get_pixels(), pixbuf.get_byte_length())
# TODO: remove this when cffi buffers support slicing with a stride.
pixels = pixels[:]
# Convert GdkPixbuf’s big-endian RGBA to cairo’s native-endian ARGB
cairo_stride = ImageSurface.format_stride_for_width(constants.FORMAT_RGB24, width)
data = bytearray(cairo_stride * height)
big_endian = sys.byteorder == 'big'
pixbuf_row_length = width * 3 # stride == row_length + padding
cairo_row_length = width * 4 # stride == row_length + padding
alpha = b'\xff' * width # opaque
for y in range(height):
offset = rowstride * y
end = offset + pixbuf_row_length
red = pixels[offset:end:3]
green = pixels[offset + 1:end:3]
blue = pixels[offset + 2:end:3]
offset = cairo_stride * y
end = offset + cairo_row_length
if big_endian: # pragma: no cover
data[offset:end:4] = alpha
data[offset + 1:end:4] = red
data[offset + 2:end:4] = green
data[offset + 3:end:4] = blue # depends on [control=['if'], data=[]]
else:
data[offset + 3:end:4] = alpha
data[offset + 2:end:4] = red
data[offset + 1:end:4] = green
data[offset:end:4] = blue # depends on [control=['for'], data=['y']]
data = array('B', data)
return ImageSurface(constants.FORMAT_RGB24, width, height, data, cairo_stride) |
def _handle_exception():
"""Print exceptions raised by subscribers to stderr."""
# Heavily influenced by logging.Handler.handleError.
# See note here:
# https://docs.python.org/3.4/library/sys.html#sys.__stderr__
if sys.stderr:
einfo = sys.exc_info()
try:
traceback.print_exception(einfo[0], einfo[1], einfo[2],
None, sys.stderr)
except IOError:
pass
finally:
del einfo | def function[_handle_exception, parameter[]]:
constant[Print exceptions raised by subscribers to stderr.]
if name[sys].stderr begin[:]
variable[einfo] assign[=] call[name[sys].exc_info, parameter[]]
<ast.Try object at 0x7da20c7c8970> | keyword[def] identifier[_handle_exception] ():
literal[string]
keyword[if] identifier[sys] . identifier[stderr] :
identifier[einfo] = identifier[sys] . identifier[exc_info] ()
keyword[try] :
identifier[traceback] . identifier[print_exception] ( identifier[einfo] [ literal[int] ], identifier[einfo] [ literal[int] ], identifier[einfo] [ literal[int] ],
keyword[None] , identifier[sys] . identifier[stderr] )
keyword[except] identifier[IOError] :
keyword[pass]
keyword[finally] :
keyword[del] identifier[einfo] | def _handle_exception():
"""Print exceptions raised by subscribers to stderr."""
# Heavily influenced by logging.Handler.handleError.
# See note here:
# https://docs.python.org/3.4/library/sys.html#sys.__stderr__
if sys.stderr:
einfo = sys.exc_info()
try:
traceback.print_exception(einfo[0], einfo[1], einfo[2], None, sys.stderr) # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]]
finally:
del einfo # depends on [control=['if'], data=[]] |
def str(cls, value):
    '''Convert status (id) to its string name.'''
    for attr_name, attr_value in cls.__dict__.items():
        # Public constants are the UPPER_CASE class attributes; their names
        # double as display strings once lower-cased and de-underscored.
        if attr_name[0] in string.ascii_uppercase and attr_value == value:
            return attr_name.lower().replace('_', ' ')
constant[Convert status (id) to its string name.]
for taget[tuple[[<ast.Name object at 0x7da18f722770>, <ast.Name object at 0x7da18f720790>]]] in starred[call[name[cls].__dict__.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18f722d70> begin[:]
return[call[call[name[k].lower, parameter[]].replace, parameter[constant[_], constant[ ]]]] | keyword[def] identifier[str] ( identifier[cls] , identifier[value] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[cls] . identifier[__dict__] . identifier[items] ():
keyword[if] identifier[k] [ literal[int] ] keyword[in] identifier[string] . identifier[ascii_uppercase] keyword[and] identifier[v] == identifier[value] :
keyword[return] identifier[k] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] ) | def str(cls, value):
"""Convert status (id) to its string name."""
for (k, v) in cls.__dict__.items():
if k[0] in string.ascii_uppercase and v == value:
return k.lower().replace('_', ' ') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def _clean_page_unique_slug_required(self, slug):
    """Validate that no other page already uses this slug."""
    if hasattr(self, 'instance') and self.instance.id:
        # Editing an existing page: its own slug content must not count
        # as a clash with itself.
        candidates = Content.objects.exclude(page=self.instance)
    else:
        candidates = Content.objects
    if candidates.filter(body=slug, type="slug").count():
        raise forms.ValidationError(self.err_dict['another_page_error'])
    return slug
constant[See if this slug exists already]
if <ast.BoolOp object at 0x7da18f00ec20> begin[:]
if call[call[call[name[Content].objects.exclude, parameter[]].filter, parameter[]].count, parameter[]] begin[:]
<ast.Raise object at 0x7da18f00d510>
return[name[slug]] | keyword[def] identifier[_clean_page_unique_slug_required] ( identifier[self] , identifier[slug] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[instance] . identifier[id] :
keyword[if] identifier[Content] . identifier[objects] . identifier[exclude] ( identifier[page] = identifier[self] . identifier[instance] ). identifier[filter] (
identifier[body] = identifier[slug] , identifier[type] = literal[string] ). identifier[count] ():
keyword[raise] identifier[forms] . identifier[ValidationError] ( identifier[self] . identifier[err_dict] [ literal[string] ])
keyword[elif] identifier[Content] . identifier[objects] . identifier[filter] ( identifier[body] = identifier[slug] , identifier[type] = literal[string] ). identifier[count] ():
keyword[raise] identifier[forms] . identifier[ValidationError] ( identifier[self] . identifier[err_dict] [ literal[string] ])
keyword[return] identifier[slug] | def _clean_page_unique_slug_required(self, slug):
"""See if this slug exists already"""
if hasattr(self, 'instance') and self.instance.id:
if Content.objects.exclude(page=self.instance).filter(body=slug, type='slug').count():
raise forms.ValidationError(self.err_dict['another_page_error']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif Content.objects.filter(body=slug, type='slug').count():
raise forms.ValidationError(self.err_dict['another_page_error']) # depends on [control=['if'], data=[]]
return slug |
def breeding_change(request, breeding_id):
    """This view is used to generate a form by which to change pups which belong to a particular breeding set.
    This view typically is used to modify existing pups.  This might include marking animals as sacrificed, entering genotype or marking information or entering movement of mice to another cage.  It is used to show and modify several animals at once.
    It takes a request in the form /breeding/(breeding_id)/change/ and returns a form specific to the breeding set defined in breeding_id.  breeding_id is the background identification number of the breeding set and does not refer to the barcode of any breeding cage.
    This view returns a formset in which one row represents one animal.  To add extra animals to a breeding set use /breeding/(breeding_id)/pups/.
    This view is restricted to those with the permission animal.change_animal.
    """
    breeding = Breeding.objects.select_related().get(id=breeding_id)
    # extra=0: show one inline row per existing pup and no blank rows;
    # new pups are added via the separate /pups/ view instead.
    PupsFormSet = inlineformset_factory(
        Breeding, Animal, extra=0,
        exclude=('Alive', 'Father', 'Mother', 'Breeding', 'Notes'))
    if request.method == "POST":
        formset = PupsFormSet(request.POST, instance=breeding)
        if formset.is_valid():
            formset.save()
            return HttpResponseRedirect(breeding.get_absolute_url())
        # Invalid submission: fall through and re-render the bound formset
        # so validation errors are shown to the user.
    else:
        formset = PupsFormSet(instance=breeding)
    return render(request, "breeding_change.html",
                  {"formset": formset, 'breeding': breeding})
constant[This view is used to generate a form by which to change pups which belong to a particular breeding set.
This view typically is used to modify existing pups. This might include marking animals as sacrificed, entering genotype or marking information or entering movement of mice to another cage. It is used to show and modify several animals at once.
It takes a request in the form /breeding/(breeding_id)/change/ and returns a form specific to the breeding set defined in breeding_id. breeding_id is the background identification number of the breeding set and does not refer to the barcode of any breeding cage.
This view returns a formset in which one row represents one animal. To add extra animals to a breeding set use /breeding/(breeding_id)/pups/.
This view is restricted to those with the permission animal.change_animal.
]
variable[breeding] assign[=] call[call[name[Breeding].objects.select_related, parameter[]].get, parameter[]]
variable[strain] assign[=] name[breeding].Strain
variable[PupsFormSet] assign[=] call[name[inlineformset_factory], parameter[name[Breeding], name[Animal]]]
if compare[name[request].method equal[==] constant[POST]] begin[:]
variable[formset] assign[=] call[name[PupsFormSet], parameter[name[request].POST]]
if call[name[formset].is_valid, parameter[]] begin[:]
call[name[formset].save, parameter[]]
return[call[name[HttpResponseRedirect], parameter[call[name[breeding].get_absolute_url, parameter[]]]]]
return[call[name[render], parameter[name[request], constant[breeding_change.html], dictionary[[<ast.Constant object at 0x7da20c6aa2c0>, <ast.Constant object at 0x7da20c6a8af0>], [<ast.Name object at 0x7da20c6a8040>, <ast.Name object at 0x7da20c6ab340>]]]]] | keyword[def] identifier[breeding_change] ( identifier[request] , identifier[breeding_id] ):
literal[string]
identifier[breeding] = identifier[Breeding] . identifier[objects] . identifier[select_related] (). identifier[get] ( identifier[id] = identifier[breeding_id] )
identifier[strain] = identifier[breeding] . identifier[Strain]
identifier[PupsFormSet] = identifier[inlineformset_factory] ( identifier[Breeding] , identifier[Animal] , identifier[extra] = literal[int] , identifier[exclude] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ))
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[formset] = identifier[PupsFormSet] ( identifier[request] . identifier[POST] , identifier[instance] = identifier[breeding] )
keyword[if] identifier[formset] . identifier[is_valid] ():
identifier[formset] . identifier[save] ()
keyword[return] identifier[HttpResponseRedirect] ( identifier[breeding] . identifier[get_absolute_url] ())
keyword[else] :
identifier[formset] = identifier[PupsFormSet] ( identifier[instance] = identifier[breeding] ,)
keyword[return] identifier[render] ( identifier[request] , literal[string] ,{ literal[string] : identifier[formset] , literal[string] : identifier[breeding] }) | def breeding_change(request, breeding_id):
"""This view is used to generate a form by which to change pups which belong to a particular breeding set.
This view typically is used to modify existing pups. This might include marking animals as sacrificed, entering genotype or marking information or entering movement of mice to another cage. It is used to show and modify several animals at once.
It takes a request in the form /breeding/(breeding_id)/change/ and returns a form specific to the breeding set defined in breeding_id. breeding_id is the background identification number of the breeding set and does not refer to the barcode of any breeding cage.
This view returns a formset in which one row represents one animal. To add extra animals to a breeding set use /breeding/(breeding_id)/pups/.
This view is restricted to those with the permission animal.change_animal.
"""
breeding = Breeding.objects.select_related().get(id=breeding_id)
strain = breeding.Strain
PupsFormSet = inlineformset_factory(Breeding, Animal, extra=0, exclude=('Alive', 'Father', 'Mother', 'Breeding', 'Notes'))
if request.method == 'POST':
formset = PupsFormSet(request.POST, instance=breeding)
if formset.is_valid():
formset.save()
return HttpResponseRedirect(breeding.get_absolute_url()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
formset = PupsFormSet(instance=breeding)
return render(request, 'breeding_change.html', {'formset': formset, 'breeding': breeding}) |
def find_synonym(self, word):
    """
    Map *word* to its 'preferred' spelling using ``self.synonyms``,
    a dict of ``preferred -> [aliases]``. Case insensitive.
    Args:
        word (str): A word.
    Returns:
        str: The preferred word, or the input word unchanged when no
        synonym entry matches.
    TODO:
        Make it handle case, returning the same case it received.
    """
    # Nothing to translate for falsy input or an empty synonym table.
    if not (word and self.synonyms):
        return word
    # Invert the preferred->aliases mapping into alias->preferred,
    # lower-casing both sides so the lookup is case-insensitive.
    alias_to_preferred = {
        alias.lower(): preferred.lower()
        for preferred, aliases in self.synonyms.items()
        for alias in aliases
    }
    return alias_to_preferred.get(word.lower(), word)
constant[
Given a string and a dict of synonyms, returns the 'preferred'
word. Case insensitive.
Args:
word (str): A word.
Returns:
str: The preferred word, or the input word if not found.
Example:
>>> syn = {'snake': ['python', 'adder']}
>>> find_synonym('adder', syn)
'snake'
>>> find_synonym('rattler', syn)
'rattler'
TODO:
Make it handle case, returning the same case it received.
]
if <ast.BoolOp object at 0x7da1b26ae5c0> begin[:]
variable[reverse_lookup] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b26aecb0>, <ast.Name object at 0x7da1b26af1c0>]]] in starred[call[name[self].synonyms.items, parameter[]]] begin[:]
for taget[name[i]] in starred[name[v]] begin[:]
call[name[reverse_lookup]][call[name[i].lower, parameter[]]] assign[=] call[name[k].lower, parameter[]]
if compare[call[name[word].lower, parameter[]] in name[reverse_lookup]] begin[:]
return[call[name[reverse_lookup]][call[name[word].lower, parameter[]]]]
return[name[word]] | keyword[def] identifier[find_synonym] ( identifier[self] , identifier[word] ):
literal[string]
keyword[if] identifier[word] keyword[and] identifier[self] . identifier[synonyms] :
identifier[reverse_lookup] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[synonyms] . identifier[items] ():
keyword[for] identifier[i] keyword[in] identifier[v] :
identifier[reverse_lookup] [ identifier[i] . identifier[lower] ()]= identifier[k] . identifier[lower] ()
keyword[if] identifier[word] . identifier[lower] () keyword[in] identifier[reverse_lookup] :
keyword[return] identifier[reverse_lookup] [ identifier[word] . identifier[lower] ()]
keyword[return] identifier[word] | def find_synonym(self, word):
"""
Given a string and a dict of synonyms, returns the 'preferred'
word. Case insensitive.
Args:
word (str): A word.
Returns:
str: The preferred word, or the input word if not found.
Example:
>>> syn = {'snake': ['python', 'adder']}
>>> find_synonym('adder', syn)
'snake'
>>> find_synonym('rattler', syn)
'rattler'
TODO:
Make it handle case, returning the same case it received.
"""
if word and self.synonyms:
# Make the reverse look-up table.
reverse_lookup = {}
for (k, v) in self.synonyms.items():
for i in v:
reverse_lookup[i.lower()] = k.lower() # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]]
# Now check words against this table.
if word.lower() in reverse_lookup:
return reverse_lookup[word.lower()] # depends on [control=['if'], data=['reverse_lookup']] # depends on [control=['if'], data=[]]
return word |
def iteritems(self):
    """Iterate over all header lines, including duplicate ones."""
    # Each stored entry is [canonical_name, value1, value2, ...]; emit one
    # (canonical_name, value) pair per stored value.
    for field_name in self:
        stored = self._container[field_name.lower()]
        canonical = stored[0]
        for value in stored[1:]:
            yield canonical, value
constant[Iterate over all header lines, including duplicate ones.]
for taget[name[key]] in starred[name[self]] begin[:]
variable[vals] assign[=] call[name[self]._container][call[name[key].lower, parameter[]]]
for taget[name[val]] in starred[call[name[vals]][<ast.Slice object at 0x7da1b1ea3e20>]] begin[:]
<ast.Yield object at 0x7da1b1ea3ee0> | keyword[def] identifier[iteritems] ( identifier[self] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[self] :
identifier[vals] = identifier[self] . identifier[_container] [ identifier[key] . identifier[lower] ()]
keyword[for] identifier[val] keyword[in] identifier[vals] [ literal[int] :]:
keyword[yield] identifier[vals] [ literal[int] ], identifier[val] | def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield (vals[0], val) # depends on [control=['for'], data=['val']] # depends on [control=['for'], data=['key']] |
def login_required(func):
    """
    View decorator that ensures the current user is logged in and
    authenticated before the wrapped view runs; otherwise it invokes the
    :attr:`LoginManager.unauthorized` callback::
        @app.route('/post')
        @login_required
        def post():
            pass
    For ad-hoc checks inside a view the equivalent inline form is::
        if not current_user.is_authenticated:
            return current_app.login_manager.unauthorized()
    Setting the application config variable `LOGIN_DISABLED` to `True`
    makes this decorator a no-op (useful for unit testing).
    .. Note ::
        Per `W3 guidelines for CORS preflight requests
        <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0>`_,
        HTTP ``OPTIONS`` requests are exempt from login checks.
    :param func: The view function to decorate.
    :type func: function
    """
    @wraps(func)
    def decorated_view(*args, **kwargs):
        # Exempt HTTP methods (e.g. OPTIONS) and globally-disabled auth
        # bypass the check entirely; method is tested first, matching the
        # original short-circuit order.
        exempt = (request.method in EXEMPT_METHODS
                  or current_app.config.get('LOGIN_DISABLED'))
        if not exempt and not current_user.is_authenticated:
            return current_app.login_manager.unauthorized()
        return func(*args, **kwargs)
    return decorated_view
constant[
If you decorate a view with this, it will ensure that the current user is
logged in and authenticated before calling the actual view. (If they are
not, it calls the :attr:`LoginManager.unauthorized` callback.) For
example::
@app.route('/post')
@login_required
def post():
pass
If there are only certain times you need to require that your user is
logged in, you can do so with::
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
...which is essentially the code that this function adds to your views.
It can be convenient to globally turn off authentication when unit testing.
To enable this, if the application configuration variable `LOGIN_DISABLED`
is set to `True`, this decorator will be ignored.
.. Note ::
Per `W3 guidelines for CORS preflight requests
<http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0>`_,
HTTP ``OPTIONS`` requests are exempt from login checks.
:param func: The view function to decorate.
:type func: function
]
def function[decorated_view, parameter[]]:
if compare[name[request].method in name[EXEMPT_METHODS]] begin[:]
return[call[name[func], parameter[<ast.Starred object at 0x7da1b1d674c0>]]]
return[call[name[func], parameter[<ast.Starred object at 0x7da1b1d64670>]]]
return[name[decorated_view]] | keyword[def] identifier[login_required] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[decorated_view] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[request] . identifier[method] keyword[in] identifier[EXEMPT_METHODS] :
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[current_app] . identifier[config] . identifier[get] ( literal[string] ):
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[elif] keyword[not] identifier[current_user] . identifier[is_authenticated] :
keyword[return] identifier[current_app] . identifier[login_manager] . identifier[unauthorized] ()
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[decorated_view] | def login_required(func):
"""
If you decorate a view with this, it will ensure that the current user is
logged in and authenticated before calling the actual view. (If they are
not, it calls the :attr:`LoginManager.unauthorized` callback.) For
example::
@app.route('/post')
@login_required
def post():
pass
If there are only certain times you need to require that your user is
logged in, you can do so with::
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
...which is essentially the code that this function adds to your views.
It can be convenient to globally turn off authentication when unit testing.
To enable this, if the application configuration variable `LOGIN_DISABLED`
is set to `True`, this decorator will be ignored.
.. Note ::
Per `W3 guidelines for CORS preflight requests
<http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0>`_,
HTTP ``OPTIONS`` requests are exempt from login checks.
:param func: The view function to decorate.
:type func: function
"""
@wraps(func)
def decorated_view(*args, **kwargs):
if request.method in EXEMPT_METHODS:
return func(*args, **kwargs) # depends on [control=['if'], data=[]]
elif current_app.config.get('LOGIN_DISABLED'):
return func(*args, **kwargs) # depends on [control=['if'], data=[]]
elif not current_user.is_authenticated:
return current_app.login_manager.unauthorized() # depends on [control=['if'], data=[]]
return func(*args, **kwargs)
return decorated_view |
def getOrigin(self, cartesian=False):
    """Return the ra/decs of the channel corners if the S/C
    is pointed at the origin (ra,dec = 0,0)
    Inputs:
        cartesian (bool) If True, return each channel corner
        as a unit vector
    Returns:
        A 2d numpy array. Each row represents a channel corner
        The columns are module, output, channel, ra, dec
        If cartesian is True, ra and dec are replaced by the
        coordinates of a 3 vector
    """
    # Work on a copy so callers cannot mutate the cached corner table.
    corners = self.origin.copy()
    # Note: only an exact `False` triggers conversion, mirroring the
    # original identity test.
    return self.getRaDecs(corners) if cartesian is False else corners
constant[Return the ra/decs of the channel corners if the S/C
is pointed at the origin (ra,dec = 0,0)
Inputs:
cartesian (bool) If True, return each channel corner
as a unit vector
Returns:
A 2d numpy array. Each row represents a channel corner
The columns are module, output, channel, ra, dec
If cartestian is True, ra, and dec are replaced by the
coordinates of a 3 vector
]
variable[out] assign[=] call[name[self].origin.copy, parameter[]]
if compare[name[cartesian] is constant[False]] begin[:]
variable[out] assign[=] call[name[self].getRaDecs, parameter[name[out]]]
return[name[out]] | keyword[def] identifier[getOrigin] ( identifier[self] , identifier[cartesian] = keyword[False] ):
literal[string]
identifier[out] = identifier[self] . identifier[origin] . identifier[copy] ()
keyword[if] identifier[cartesian] keyword[is] keyword[False] :
identifier[out] = identifier[self] . identifier[getRaDecs] ( identifier[out] )
keyword[return] identifier[out] | def getOrigin(self, cartesian=False):
"""Return the ra/decs of the channel corners if the S/C
is pointed at the origin (ra,dec = 0,0)
Inputs:
cartesian (bool) If True, return each channel corner
as a unit vector
Returns:
A 2d numpy array. Each row represents a channel corner
The columns are module, output, channel, ra, dec
If cartestian is True, ra, and dec are replaced by the
coordinates of a 3 vector
"""
out = self.origin.copy()
if cartesian is False:
out = self.getRaDecs(out) # depends on [control=['if'], data=[]]
return out |
def plot(feature, mp=None, style_function=None, **map_kwargs):
    """Plots a GeoVector in an ipyleaflet map.
    Parameters
    ----------
    feature : telluric.vectors.GeoVector, telluric.features.GeoFeature, telluric.collections.BaseCollection
        Data to plot.
    mp : ipyleaflet.Map, optional
        Map in which to plot, default to None (creates a new one).
    style_function : func
        Function that returns an style dictionary for
    map_kwargs : kwargs, optional
        Extra parameters to send to ipyleaflet.Map.
    """
    map_kwargs.setdefault('basemap', basemaps.Stamen.Terrain)
    # Empty geometry: warn and return a (possibly freshly created) map
    # without adding any layer.
    if feature.is_empty:
        warnings.warn("The geometry is empty.")
        if mp is None:
            mp = Map(**map_kwargs)
        return mp
    if mp is None:
        # Center the new map on the feature's envelope and pick a zoom
        # level that fits it.
        centroid = feature.envelope.centroid.reproject(WGS84_CRS)
        mp = Map(center=(centroid.y, centroid.x),
                 zoom=zoom_level_from_geometry(feature.envelope),
                 **map_kwargs)
    mp.add_layer(layer_from_element(feature, style_function))
    return mp
constant[Plots a GeoVector in an ipyleaflet map.
Parameters
----------
feature : telluric.vectors.GeoVector, telluric.features.GeoFeature, telluric.collections.BaseCollection
Data to plot.
mp : ipyleaflet.Map, optional
Map in which to plot, default to None (creates a new one).
style_function : func
Function that returns an style dictionary for
map_kwargs : kwargs, optional
Extra parameters to send to ipyleaflet.Map.
]
call[name[map_kwargs].setdefault, parameter[constant[basemap], name[basemaps].Stamen.Terrain]]
if name[feature].is_empty begin[:]
call[name[warnings].warn, parameter[constant[The geometry is empty.]]]
variable[mp] assign[=] <ast.IfExp object at 0x7da2041dbd00>
return[name[mp]] | keyword[def] identifier[plot] ( identifier[feature] , identifier[mp] = keyword[None] , identifier[style_function] = keyword[None] ,** identifier[map_kwargs] ):
literal[string]
identifier[map_kwargs] . identifier[setdefault] ( literal[string] , identifier[basemaps] . identifier[Stamen] . identifier[Terrain] )
keyword[if] identifier[feature] . identifier[is_empty] :
identifier[warnings] . identifier[warn] ( literal[string] )
identifier[mp] = identifier[Map] (** identifier[map_kwargs] ) keyword[if] identifier[mp] keyword[is] keyword[None] keyword[else] identifier[mp]
keyword[else] :
keyword[if] identifier[mp] keyword[is] keyword[None] :
identifier[center] = identifier[feature] . identifier[envelope] . identifier[centroid] . identifier[reproject] ( identifier[WGS84_CRS] )
identifier[zoom] = identifier[zoom_level_from_geometry] ( identifier[feature] . identifier[envelope] )
identifier[mp] = identifier[Map] ( identifier[center] =( identifier[center] . identifier[y] , identifier[center] . identifier[x] ), identifier[zoom] = identifier[zoom] ,** identifier[map_kwargs] )
identifier[mp] . identifier[add_layer] ( identifier[layer_from_element] ( identifier[feature] , identifier[style_function] ))
keyword[return] identifier[mp] | def plot(feature, mp=None, style_function=None, **map_kwargs):
"""Plots a GeoVector in an ipyleaflet map.
Parameters
----------
feature : telluric.vectors.GeoVector, telluric.features.GeoFeature, telluric.collections.BaseCollection
Data to plot.
mp : ipyleaflet.Map, optional
Map in which to plot, default to None (creates a new one).
style_function : func
Function that returns an style dictionary for
map_kwargs : kwargs, optional
Extra parameters to send to ipyleaflet.Map.
"""
map_kwargs.setdefault('basemap', basemaps.Stamen.Terrain)
if feature.is_empty:
warnings.warn('The geometry is empty.')
mp = Map(**map_kwargs) if mp is None else mp # depends on [control=['if'], data=[]]
else:
if mp is None:
center = feature.envelope.centroid.reproject(WGS84_CRS)
zoom = zoom_level_from_geometry(feature.envelope)
mp = Map(center=(center.y, center.x), zoom=zoom, **map_kwargs) # depends on [control=['if'], data=['mp']]
mp.add_layer(layer_from_element(feature, style_function))
return mp |
def linkcode_resolve(domain, info):
    """
    Determine the URL corresponding to Python object
    """
    if domain != 'py':
        return None
    obj = sys.modules.get(info['module'])
    if obj is None:
        return None
    # Walk the dotted attribute path down from the module object.
    for attr in info['fullname'].split('.'):
        try:
            obj = getattr(obj, attr)
        except AttributeError:
            return None
    try:
        # inspect.unwrap() was added in Python version 3.4
        target = inspect.unwrap(obj) if sys.version_info >= (3, 5) else obj
        fn = inspect.getsourcefile(target)
    except TypeError:
        fn = None
    if not fn:
        return None
    try:
        source, lineno = inspect.getsourcelines(obj)
    except OSError:
        lineno = None
    # Build the GitHub line-anchor fragment when the source lines are known.
    linespec = ("#L{:d}-L{:d}".format(lineno, lineno + len(source) - 1)
                if lineno else "")
    fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
    # Dev builds (version contains '+') link to master; releases link to
    # the matching version tag.
    if '+' in pandas.__version__:
        return ("http://github.com/pandas-dev/pandas/blob/master/pandas/"
                "{}{}".format(fn, linespec))
    return ("http://github.com/pandas-dev/pandas/blob/"
            "v{}/pandas/{}{}".format(pandas.__version__, fn, linespec))
constant[
Determine the URL corresponding to Python object
]
if compare[name[domain] not_equal[!=] constant[py]] begin[:]
return[constant[None]]
variable[modname] assign[=] call[name[info]][constant[module]]
variable[fullname] assign[=] call[name[info]][constant[fullname]]
variable[submod] assign[=] call[name[sys].modules.get, parameter[name[modname]]]
if compare[name[submod] is constant[None]] begin[:]
return[constant[None]]
variable[obj] assign[=] name[submod]
for taget[name[part]] in starred[call[name[fullname].split, parameter[constant[.]]]] begin[:]
<ast.Try object at 0x7da18f00e6e0>
<ast.Try object at 0x7da18f00fd60>
if <ast.UnaryOp object at 0x7da18f00cd60> begin[:]
return[constant[None]]
<ast.Try object at 0x7da18f00d5a0>
if name[lineno] begin[:]
variable[linespec] assign[=] call[constant[#L{:d}-L{:d}].format, parameter[name[lineno], binary_operation[binary_operation[name[lineno] + call[name[len], parameter[name[source]]]] - constant[1]]]]
variable[fn] assign[=] call[name[os].path.relpath, parameter[name[fn]]]
if compare[constant[+] in name[pandas].__version__] begin[:]
return[call[constant[http://github.com/pandas-dev/pandas/blob/master/pandas/{}{}].format, parameter[name[fn], name[linespec]]]] | keyword[def] identifier[linkcode_resolve] ( identifier[domain] , identifier[info] ):
literal[string]
keyword[if] identifier[domain] != literal[string] :
keyword[return] keyword[None]
identifier[modname] = identifier[info] [ literal[string] ]
identifier[fullname] = identifier[info] [ literal[string] ]
identifier[submod] = identifier[sys] . identifier[modules] . identifier[get] ( identifier[modname] )
keyword[if] identifier[submod] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[obj] = identifier[submod]
keyword[for] identifier[part] keyword[in] identifier[fullname] . identifier[split] ( literal[string] ):
keyword[try] :
identifier[obj] = identifier[getattr] ( identifier[obj] , identifier[part] )
keyword[except] identifier[AttributeError] :
keyword[return] keyword[None]
keyword[try] :
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] , literal[int] ):
identifier[fn] = identifier[inspect] . identifier[getsourcefile] ( identifier[inspect] . identifier[unwrap] ( identifier[obj] ))
keyword[else] :
identifier[fn] = identifier[inspect] . identifier[getsourcefile] ( identifier[obj] )
keyword[except] identifier[TypeError] :
identifier[fn] = keyword[None]
keyword[if] keyword[not] identifier[fn] :
keyword[return] keyword[None]
keyword[try] :
identifier[source] , identifier[lineno] = identifier[inspect] . identifier[getsourcelines] ( identifier[obj] )
keyword[except] identifier[OSError] :
identifier[lineno] = keyword[None]
keyword[if] identifier[lineno] :
identifier[linespec] = literal[string] . identifier[format] ( identifier[lineno] , identifier[lineno] + identifier[len] ( identifier[source] )- literal[int] )
keyword[else] :
identifier[linespec] = literal[string]
identifier[fn] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[fn] , identifier[start] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[pandas] . identifier[__file__] ))
keyword[if] literal[string] keyword[in] identifier[pandas] . identifier[__version__] :
keyword[return] ( literal[string]
literal[string] . identifier[format] ( identifier[fn] , identifier[linespec] ))
keyword[else] :
keyword[return] ( literal[string]
literal[string] . identifier[format] ( identifier[pandas] . identifier[__version__] , identifier[fn] , identifier[linespec] )) | def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None # depends on [control=['if'], data=[]]
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None # depends on [control=['if'], data=[]]
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part) # depends on [control=['try'], data=[]]
except AttributeError:
return None # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['part']]
try:
# inspect.unwrap() was added in Python version 3.4
if sys.version_info >= (3, 5):
fn = inspect.getsourcefile(inspect.unwrap(obj)) # depends on [control=['if'], data=[]]
else:
fn = inspect.getsourcefile(obj) # depends on [control=['try'], data=[]]
except TypeError:
fn = None # depends on [control=['except'], data=[]]
if not fn:
return None # depends on [control=['if'], data=[]]
try:
(source, lineno) = inspect.getsourcelines(obj) # depends on [control=['try'], data=[]]
except OSError:
lineno = None # depends on [control=['except'], data=[]]
if lineno:
linespec = '#L{:d}-L{:d}'.format(lineno, lineno + len(source) - 1) # depends on [control=['if'], data=[]]
else:
linespec = ''
fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
if '+' in pandas.__version__:
return 'http://github.com/pandas-dev/pandas/blob/master/pandas/{}{}'.format(fn, linespec) # depends on [control=['if'], data=[]]
else:
return 'http://github.com/pandas-dev/pandas/blob/v{}/pandas/{}{}'.format(pandas.__version__, fn, linespec) |
def include_feature(self, name):
    """Request inclusion of feature named 'name'"""
    # A feature that was explicitly excluded (flag == 0) cannot be
    # re-included; report it with its human-readable description.
    if self.feature_is_included(name) == 0:
        raise DistutilsOptionError(
            self.features[name].description
            + " is required, but was excluded or is not available"
        )
    self.features[name].include_in(self)
    self._set_feature(name, 1)
constant[Request inclusion of feature named 'name']
if compare[call[name[self].feature_is_included, parameter[name[name]]] equal[==] constant[0]] begin[:]
variable[descr] assign[=] call[name[self].features][name[name]].description
<ast.Raise object at 0x7da1b1b17df0>
call[call[name[self].features][name[name]].include_in, parameter[name[self]]]
call[name[self]._set_feature, parameter[name[name], constant[1]]] | keyword[def] identifier[include_feature] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[self] . identifier[feature_is_included] ( identifier[name] )== literal[int] :
identifier[descr] = identifier[self] . identifier[features] [ identifier[name] ]. identifier[description]
keyword[raise] identifier[DistutilsOptionError] (
identifier[descr] + literal[string]
)
identifier[self] . identifier[features] [ identifier[name] ]. identifier[include_in] ( identifier[self] )
identifier[self] . identifier[_set_feature] ( identifier[name] , literal[int] ) | def include_feature(self, name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name) == 0:
descr = self.features[name].description
raise DistutilsOptionError(descr + ' is required, but was excluded or is not available') # depends on [control=['if'], data=[]]
self.features[name].include_in(self)
self._set_feature(name, 1) |
def remove_autosave_file(self, fileinfo):
    """
    Remove autosave file for specified file.
    This function also updates `self.autosave_mapping` and clears the
    `changed_since_autosave` flag.
    """
    fname = fileinfo.filename
    # No autosave on record for this file: nothing to do.
    if fname not in self.name_mapping:
        return
    autosave_fname = self.name_mapping[fname]
    try:
        os.remove(autosave_fname)
    except EnvironmentError as exc:
        # Deletion failed (permissions, file vanished, ...): tell the
        # user but still drop the stale mapping entry below.
        message = (_('Error while removing autosave file {}')
                   .format(autosave_fname))
        AutosaveErrorDialog(message, exc).exec_if_enabled()
    del self.name_mapping[fname]
    self.stack.sig_option_changed.emit(
        'autosave_mapping', self.name_mapping)
    logger.debug('Removing autosave file %s', autosave_fname)
logger.debug('Removing autosave file %s', autosave_filename) | def function[remove_autosave_file, parameter[self, fileinfo]]:
constant[
Remove autosave file for specified file.
This function also updates `self.autosave_mapping` and clears the
`changed_since_autosave` flag.
]
variable[filename] assign[=] name[fileinfo].filename
if compare[name[filename] <ast.NotIn object at 0x7da2590d7190> name[self].name_mapping] begin[:]
return[None]
variable[autosave_filename] assign[=] call[name[self].name_mapping][name[filename]]
<ast.Try object at 0x7da18bcc9f30>
<ast.Delete object at 0x7da2041db4c0>
call[name[self].stack.sig_option_changed.emit, parameter[constant[autosave_mapping], name[self].name_mapping]]
call[name[logger].debug, parameter[constant[Removing autosave file %s], name[autosave_filename]]] | keyword[def] identifier[remove_autosave_file] ( identifier[self] , identifier[fileinfo] ):
literal[string]
identifier[filename] = identifier[fileinfo] . identifier[filename]
keyword[if] identifier[filename] keyword[not] keyword[in] identifier[self] . identifier[name_mapping] :
keyword[return]
identifier[autosave_filename] = identifier[self] . identifier[name_mapping] [ identifier[filename] ]
keyword[try] :
identifier[os] . identifier[remove] ( identifier[autosave_filename] )
keyword[except] identifier[EnvironmentError] keyword[as] identifier[error] :
identifier[action] =( identifier[_] ( literal[string] )
. identifier[format] ( identifier[autosave_filename] ))
identifier[msgbox] = identifier[AutosaveErrorDialog] ( identifier[action] , identifier[error] )
identifier[msgbox] . identifier[exec_if_enabled] ()
keyword[del] identifier[self] . identifier[name_mapping] [ identifier[filename] ]
identifier[self] . identifier[stack] . identifier[sig_option_changed] . identifier[emit] (
literal[string] , identifier[self] . identifier[name_mapping] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[autosave_filename] ) | def remove_autosave_file(self, fileinfo):
"""
Remove autosave file for specified file.
This function also updates `self.autosave_mapping` and clears the
`changed_since_autosave` flag.
"""
filename = fileinfo.filename
if filename not in self.name_mapping:
return # depends on [control=['if'], data=[]]
autosave_filename = self.name_mapping[filename]
try:
os.remove(autosave_filename) # depends on [control=['try'], data=[]]
except EnvironmentError as error:
action = _('Error while removing autosave file {}').format(autosave_filename)
msgbox = AutosaveErrorDialog(action, error)
msgbox.exec_if_enabled() # depends on [control=['except'], data=['error']]
del self.name_mapping[filename]
self.stack.sig_option_changed.emit('autosave_mapping', self.name_mapping)
logger.debug('Removing autosave file %s', autosave_filename) |
def getAvailableInstruments(self):
    """Return the instruments available for this service.

    Instruments are only exposed when the service allows instrument
    entry of results (``getInstrumentEntryOfResults()`` is ``True``);
    in every other case an empty list is returned.
    """
    if self.getInstrumentEntryOfResults() is not True:
        return []
    available = self.getInstruments()
    return available if available else []
constant[ Returns the instruments available for this service.
If the service has the getInstrumentEntryOfResults(), returns
the instruments capable to perform this service. Otherwhise,
returns an empty list.
]
variable[instruments] assign[=] <ast.IfExp object at 0x7da1b231dd20>
return[<ast.IfExp object at 0x7da1b1d483a0>] | keyword[def] identifier[getAvailableInstruments] ( identifier[self] ):
literal[string]
identifier[instruments] = identifier[self] . identifier[getInstruments] () keyword[if] identifier[self] . identifier[getInstrumentEntryOfResults] () keyword[is] keyword[True] keyword[else] keyword[None]
keyword[return] identifier[instruments] keyword[if] identifier[instruments] keyword[else] [] | def getAvailableInstruments(self):
""" Returns the instruments available for this service.
If the service has the getInstrumentEntryOfResults(), returns
the instruments capable to perform this service. Otherwhise,
returns an empty list.
"""
instruments = self.getInstruments() if self.getInstrumentEntryOfResults() is True else None
return instruments if instruments else [] |
def get_attribute(self, node, column):
"""
Returns the given Node attribute associated to the given column.
:param node: Node.
:type node: AbstractCompositeNode or GraphModelNode
:param column: Column.
:type column: int
:return: Attribute.
:rtype: Attribute
"""
if column > 0 and column < len(self.__horizontal_headers):
return node.get(self.__horizontal_headers[self.__horizontal_headers.keys()[column]], None) | def function[get_attribute, parameter[self, node, column]]:
constant[
Returns the given Node attribute associated to the given column.
:param node: Node.
:type node: AbstractCompositeNode or GraphModelNode
:param column: Column.
:type column: int
:return: Attribute.
:rtype: Attribute
]
if <ast.BoolOp object at 0x7da1b0912c80> begin[:]
return[call[name[node].get, parameter[call[name[self].__horizontal_headers][call[call[name[self].__horizontal_headers.keys, parameter[]]][name[column]]], constant[None]]]] | keyword[def] identifier[get_attribute] ( identifier[self] , identifier[node] , identifier[column] ):
literal[string]
keyword[if] identifier[column] > literal[int] keyword[and] identifier[column] < identifier[len] ( identifier[self] . identifier[__horizontal_headers] ):
keyword[return] identifier[node] . identifier[get] ( identifier[self] . identifier[__horizontal_headers] [ identifier[self] . identifier[__horizontal_headers] . identifier[keys] ()[ identifier[column] ]], keyword[None] ) | def get_attribute(self, node, column):
"""
Returns the given Node attribute associated to the given column.
:param node: Node.
:type node: AbstractCompositeNode or GraphModelNode
:param column: Column.
:type column: int
:return: Attribute.
:rtype: Attribute
"""
if column > 0 and column < len(self.__horizontal_headers):
return node.get(self.__horizontal_headers[self.__horizontal_headers.keys()[column]], None) # depends on [control=['if'], data=[]] |
def extract_statements(self):
    """Process the table to extract Statements."""
    # Map the table's effect labels onto Statement classes; any other
    # effect value is skipped.
    effect_to_class = {'Activation': IncreaseAmount,
                       'Repression': DecreaseAmount}
    for _, (tf, target, effect, refs) in self.df.iterrows():
        tf_agent = get_grounded_agent(tf)
        target_agent = get_grounded_agent(target)
        stmt_cls = effect_to_class.get(effect)
        if stmt_cls is None:
            continue
        # One Statement per supporting publication.
        for pmid in refs.split(';'):
            self.statements.append(
                make_stmt(stmt_cls, tf_agent, target_agent, pmid))
constant[Process the table to extract Statements.]
for taget[tuple[[<ast.Name object at 0x7da18bccabf0>, <ast.Tuple object at 0x7da18bccaf50>]]] in starred[call[name[self].df.iterrows, parameter[]]] begin[:]
variable[tf_agent] assign[=] call[name[get_grounded_agent], parameter[name[tf]]]
variable[target_agent] assign[=] call[name[get_grounded_agent], parameter[name[target]]]
if compare[name[effect] equal[==] constant[Activation]] begin[:]
variable[stmt_cls] assign[=] name[IncreaseAmount]
variable[pmids] assign[=] call[name[refs].split, parameter[constant[;]]]
for taget[name[pmid]] in starred[name[pmids]] begin[:]
variable[stmt] assign[=] call[name[make_stmt], parameter[name[stmt_cls], name[tf_agent], name[target_agent], name[pmid]]]
call[name[self].statements.append, parameter[name[stmt]]] | keyword[def] identifier[extract_statements] ( identifier[self] ):
literal[string]
keyword[for] identifier[_] ,( identifier[tf] , identifier[target] , identifier[effect] , identifier[refs] ) keyword[in] identifier[self] . identifier[df] . identifier[iterrows] ():
identifier[tf_agent] = identifier[get_grounded_agent] ( identifier[tf] )
identifier[target_agent] = identifier[get_grounded_agent] ( identifier[target] )
keyword[if] identifier[effect] == literal[string] :
identifier[stmt_cls] = identifier[IncreaseAmount]
keyword[elif] identifier[effect] == literal[string] :
identifier[stmt_cls] = identifier[DecreaseAmount]
keyword[else] :
keyword[continue]
identifier[pmids] = identifier[refs] . identifier[split] ( literal[string] )
keyword[for] identifier[pmid] keyword[in] identifier[pmids] :
identifier[stmt] = identifier[make_stmt] ( identifier[stmt_cls] , identifier[tf_agent] , identifier[target_agent] , identifier[pmid] )
identifier[self] . identifier[statements] . identifier[append] ( identifier[stmt] ) | def extract_statements(self):
"""Process the table to extract Statements."""
for (_, (tf, target, effect, refs)) in self.df.iterrows():
tf_agent = get_grounded_agent(tf)
target_agent = get_grounded_agent(target)
if effect == 'Activation':
stmt_cls = IncreaseAmount # depends on [control=['if'], data=[]]
elif effect == 'Repression':
stmt_cls = DecreaseAmount # depends on [control=['if'], data=[]]
else:
continue
pmids = refs.split(';')
for pmid in pmids:
stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid)
self.statements.append(stmt) # depends on [control=['for'], data=['pmid']] # depends on [control=['for'], data=[]] |
def Sin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the sine of a vertex. Sin(vertex).

    :param input_vertex: the vertex
    """
    double_vertex = cast_to_double_vertex(input_vertex)
    return Double(context.jvm_view().SinVertex, label, double_vertex)
constant[
Takes the sine of a vertex. Sin(vertex).
:param input_vertex: the vertex
]
return[call[name[Double], parameter[call[name[context].jvm_view, parameter[]].SinVertex, name[label], call[name[cast_to_double_vertex], parameter[name[input_vertex]]]]]] | keyword[def] identifier[Sin] ( identifier[input_vertex] : identifier[vertex_constructor_param_types] , identifier[label] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> identifier[Vertex] :
literal[string]
keyword[return] identifier[Double] ( identifier[context] . identifier[jvm_view] (). identifier[SinVertex] , identifier[label] , identifier[cast_to_double_vertex] ( identifier[input_vertex] )) | def Sin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Takes the sine of a vertex. Sin(vertex).
:param input_vertex: the vertex
"""
return Double(context.jvm_view().SinVertex, label, cast_to_double_vertex(input_vertex)) |
def rebase(config):
    """Rebase the current branch against origin/master.

    Refuses to run from the ``master`` branch or with a dirty working tree.
    Pulls the upstream ``master`` into the local one, then rebases the
    previously active branch on top of it.

    :param config: configuration object exposing ``repo`` (a GitPython
        ``Repo``) and ``configfile``.
    """
    repo = config.repo
    active_branch = repo.active_branch
    if active_branch.name == "master":
        error_out("You're already on the master branch.")
    active_branch_name = active_branch.name
    if repo.is_dirty():
        error_out(
            'Repo is "dirty". ({})'.format(
                ", ".join(repr(x.b_path) for x in repo.index.diff(None))
            )
        )
    state = read(config.configfile)
    origin_name = state.get("ORIGIN_NAME", "origin")
    # Verify the upstream remote actually exists before touching branches.
    if not any(remote.name == origin_name for remote in repo.remotes):
        error_out("No remote called {!r} found".format(origin_name))
    repo.heads.master.checkout()
    repo.remotes[origin_name].pull("master")
    repo.heads[active_branch_name].checkout()
    print(repo.git.rebase("master"))
    success_out("Rebased against {}/master".format(origin_name))
    # Typo fix: "wanto" -> "want to" in the user-facing hint.
    info_out("If you want to start interactive rebase run:\n\n\tgit rebase -i master\n")
constant[Rebase the current branch against origin/master]
variable[repo] assign[=] name[config].repo
variable[active_branch] assign[=] name[repo].active_branch
if compare[name[active_branch].name equal[==] constant[master]] begin[:]
call[name[error_out], parameter[constant[You're already on the master branch.]]]
variable[active_branch_name] assign[=] name[active_branch].name
if call[name[repo].is_dirty, parameter[]] begin[:]
call[name[error_out], parameter[call[constant[Repo is "dirty". ({})].format, parameter[call[constant[, ].join, parameter[<ast.ListComp object at 0x7da2044c2890>]]]]]]
variable[state] assign[=] call[name[read], parameter[name[config].configfile]]
variable[origin_name] assign[=] call[name[state].get, parameter[constant[ORIGIN_NAME], constant[origin]]]
variable[upstream_remote] assign[=] constant[None]
for taget[name[remote]] in starred[name[repo].remotes] begin[:]
if compare[name[remote].name equal[==] name[origin_name]] begin[:]
variable[upstream_remote] assign[=] name[remote]
break
if <ast.UnaryOp object at 0x7da2044c3dc0> begin[:]
call[name[error_out], parameter[call[constant[No remote called {!r} found].format, parameter[name[origin_name]]]]]
call[name[repo].heads.master.checkout, parameter[]]
call[call[name[repo].remotes][name[origin_name]].pull, parameter[constant[master]]]
call[call[name[repo].heads][name[active_branch_name]].checkout, parameter[]]
call[name[print], parameter[call[name[repo].git.rebase, parameter[constant[master]]]]]
call[name[success_out], parameter[call[constant[Rebased against {}/master].format, parameter[name[origin_name]]]]]
call[name[info_out], parameter[constant[If you wanto start interactive rebase run:
git rebase -i master
]]] | keyword[def] identifier[rebase] ( identifier[config] ):
literal[string]
identifier[repo] = identifier[config] . identifier[repo]
identifier[active_branch] = identifier[repo] . identifier[active_branch]
keyword[if] identifier[active_branch] . identifier[name] == literal[string] :
identifier[error_out] ( literal[string] )
identifier[active_branch_name] = identifier[active_branch] . identifier[name]
keyword[if] identifier[repo] . identifier[is_dirty] ():
identifier[error_out] (
literal[string] . identifier[format] (
literal[string] . identifier[join] ([ identifier[repr] ( identifier[x] . identifier[b_path] ) keyword[for] identifier[x] keyword[in] identifier[repo] . identifier[index] . identifier[diff] ( keyword[None] )])
)
)
identifier[state] = identifier[read] ( identifier[config] . identifier[configfile] )
identifier[origin_name] = identifier[state] . identifier[get] ( literal[string] , literal[string] )
identifier[upstream_remote] = keyword[None]
keyword[for] identifier[remote] keyword[in] identifier[repo] . identifier[remotes] :
keyword[if] identifier[remote] . identifier[name] == identifier[origin_name] :
identifier[upstream_remote] = identifier[remote]
keyword[break]
keyword[if] keyword[not] identifier[upstream_remote] :
identifier[error_out] ( literal[string] . identifier[format] ( identifier[origin_name] ))
identifier[repo] . identifier[heads] . identifier[master] . identifier[checkout] ()
identifier[repo] . identifier[remotes] [ identifier[origin_name] ]. identifier[pull] ( literal[string] )
identifier[repo] . identifier[heads] [ identifier[active_branch_name] ]. identifier[checkout] ()
identifier[print] ( identifier[repo] . identifier[git] . identifier[rebase] ( literal[string] ))
identifier[success_out] ( literal[string] . identifier[format] ( identifier[origin_name] ))
identifier[info_out] ( literal[string] ) | def rebase(config):
"""Rebase the current branch against origin/master"""
repo = config.repo
active_branch = repo.active_branch
if active_branch.name == 'master':
error_out("You're already on the master branch.") # depends on [control=['if'], data=[]]
active_branch_name = active_branch.name
if repo.is_dirty():
error_out('Repo is "dirty". ({})'.format(', '.join([repr(x.b_path) for x in repo.index.diff(None)]))) # depends on [control=['if'], data=[]]
state = read(config.configfile)
origin_name = state.get('ORIGIN_NAME', 'origin')
upstream_remote = None
for remote in repo.remotes:
if remote.name == origin_name:
upstream_remote = remote
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['remote']]
if not upstream_remote:
error_out('No remote called {!r} found'.format(origin_name)) # depends on [control=['if'], data=[]]
repo.heads.master.checkout()
repo.remotes[origin_name].pull('master')
repo.heads[active_branch_name].checkout()
print(repo.git.rebase('master'))
success_out('Rebased against {}/master'.format(origin_name))
info_out('If you wanto start interactive rebase run:\n\n\tgit rebase -i master\n') |
def _fetch_cached_output(self, items, result):
    """
    First try to fetch all items from the cache.
    The items are 'non-polymorphic', so only point to their base class.
    If these are found, there is no need to query the derived data from the database.
    """
    # Caching can be disabled globally (setting) or per renderer instance;
    # in that case every item is queued for a full render pass.
    if not appsettings.FLUENT_CONTENTS_CACHE_OUTPUT or not self.use_cached_output:
        result.add_remaining_list(items)
        return
    for contentitem in items:
        # Record the original item ordering so cached and freshly rendered
        # outputs can be merged back in the right sequence.
        result.add_ordering(contentitem)
        output = None
        try:
            plugin = contentitem.plugin
        except PluginNotFound as ex:
            result.store_exception(contentitem, ex)  # Will deal with that later.
            logger.debug("- item #%s has no matching plugin: %s", contentitem.pk, str(ex))
            continue
        # Respect the cache output setting of the plugin
        if self.can_use_cached_output(contentitem):
            result.add_plugin_timeout(plugin)
            output = plugin.get_cached_output(result.placeholder_name, contentitem)
            # Support transition to new output format.
            # Cached entries written in an older format are discarded here so
            # the item gets re-rendered and stored as a ContentItemOutput.
            if output is not None and not isinstance(output, ContentItemOutput):
                output = None
                logger.debug("Flushed cached output of {0}#{1} to store new ContentItemOutput format (key: {2})".format(
                    plugin.type_name,
                    contentitem.pk,
                    get_placeholder_name(contentitem.placeholder)
                ))
        # For debugging, ignore cached values when the template is updated.
        if output and settings.DEBUG:
            cachekey = get_rendering_cache_key(result.placeholder_name, contentitem)
            if is_template_updated(self.request, contentitem, cachekey):
                output = None
        # Items without a usable cached output are queued for rendering.
        if output:
            result.store_output(contentitem, output)
        else:
            result.add_remaining(contentitem)
constant[
First try to fetch all items from the cache.
The items are 'non-polymorphic', so only point to their base class.
If these are found, there is no need to query the derived data from the database.
]
if <ast.BoolOp object at 0x7da1b1079300> begin[:]
call[name[result].add_remaining_list, parameter[name[items]]]
return[None]
for taget[name[contentitem]] in starred[name[items]] begin[:]
call[name[result].add_ordering, parameter[name[contentitem]]]
variable[output] assign[=] constant[None]
<ast.Try object at 0x7da1b1078cd0>
if call[name[self].can_use_cached_output, parameter[name[contentitem]]] begin[:]
call[name[result].add_plugin_timeout, parameter[name[plugin]]]
variable[output] assign[=] call[name[plugin].get_cached_output, parameter[name[result].placeholder_name, name[contentitem]]]
if <ast.BoolOp object at 0x7da1b1078b20> begin[:]
variable[output] assign[=] constant[None]
call[name[logger].debug, parameter[call[constant[Flushed cached output of {0}#{1} to store new ContentItemOutput format (key: {2})].format, parameter[name[plugin].type_name, name[contentitem].pk, call[name[get_placeholder_name], parameter[name[contentitem].placeholder]]]]]]
if <ast.BoolOp object at 0x7da1b107a5c0> begin[:]
variable[cachekey] assign[=] call[name[get_rendering_cache_key], parameter[name[result].placeholder_name, name[contentitem]]]
if call[name[is_template_updated], parameter[name[self].request, name[contentitem], name[cachekey]]] begin[:]
variable[output] assign[=] constant[None]
if name[output] begin[:]
call[name[result].store_output, parameter[name[contentitem], name[output]]] | keyword[def] identifier[_fetch_cached_output] ( identifier[self] , identifier[items] , identifier[result] ):
literal[string]
keyword[if] keyword[not] identifier[appsettings] . identifier[FLUENT_CONTENTS_CACHE_OUTPUT] keyword[or] keyword[not] identifier[self] . identifier[use_cached_output] :
identifier[result] . identifier[add_remaining_list] ( identifier[items] )
keyword[return]
keyword[for] identifier[contentitem] keyword[in] identifier[items] :
identifier[result] . identifier[add_ordering] ( identifier[contentitem] )
identifier[output] = keyword[None]
keyword[try] :
identifier[plugin] = identifier[contentitem] . identifier[plugin]
keyword[except] identifier[PluginNotFound] keyword[as] identifier[ex] :
identifier[result] . identifier[store_exception] ( identifier[contentitem] , identifier[ex] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[contentitem] . identifier[pk] , identifier[str] ( identifier[ex] ))
keyword[continue]
keyword[if] identifier[self] . identifier[can_use_cached_output] ( identifier[contentitem] ):
identifier[result] . identifier[add_plugin_timeout] ( identifier[plugin] )
identifier[output] = identifier[plugin] . identifier[get_cached_output] ( identifier[result] . identifier[placeholder_name] , identifier[contentitem] )
keyword[if] identifier[output] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[output] , identifier[ContentItemOutput] ):
identifier[output] = keyword[None]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[plugin] . identifier[type_name] ,
identifier[contentitem] . identifier[pk] ,
identifier[get_placeholder_name] ( identifier[contentitem] . identifier[placeholder] )
))
keyword[if] identifier[output] keyword[and] identifier[settings] . identifier[DEBUG] :
identifier[cachekey] = identifier[get_rendering_cache_key] ( identifier[result] . identifier[placeholder_name] , identifier[contentitem] )
keyword[if] identifier[is_template_updated] ( identifier[self] . identifier[request] , identifier[contentitem] , identifier[cachekey] ):
identifier[output] = keyword[None]
keyword[if] identifier[output] :
identifier[result] . identifier[store_output] ( identifier[contentitem] , identifier[output] )
keyword[else] :
identifier[result] . identifier[add_remaining] ( identifier[contentitem] ) | def _fetch_cached_output(self, items, result):
"""
First try to fetch all items from the cache.
The items are 'non-polymorphic', so only point to their base class.
If these are found, there is no need to query the derived data from the database.
"""
if not appsettings.FLUENT_CONTENTS_CACHE_OUTPUT or not self.use_cached_output:
result.add_remaining_list(items)
return # depends on [control=['if'], data=[]]
for contentitem in items:
result.add_ordering(contentitem)
output = None
try:
plugin = contentitem.plugin # depends on [control=['try'], data=[]]
except PluginNotFound as ex:
result.store_exception(contentitem, ex) # Will deal with that later.
logger.debug('- item #%s has no matching plugin: %s', contentitem.pk, str(ex))
continue # depends on [control=['except'], data=['ex']]
# Respect the cache output setting of the plugin
if self.can_use_cached_output(contentitem):
result.add_plugin_timeout(plugin)
output = plugin.get_cached_output(result.placeholder_name, contentitem)
# Support transition to new output format.
if output is not None and (not isinstance(output, ContentItemOutput)):
output = None
logger.debug('Flushed cached output of {0}#{1} to store new ContentItemOutput format (key: {2})'.format(plugin.type_name, contentitem.pk, get_placeholder_name(contentitem.placeholder))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# For debugging, ignore cached values when the template is updated.
if output and settings.DEBUG:
cachekey = get_rendering_cache_key(result.placeholder_name, contentitem)
if is_template_updated(self.request, contentitem, cachekey):
output = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if output:
result.store_output(contentitem, output) # depends on [control=['if'], data=[]]
else:
result.add_remaining(contentitem) # depends on [control=['for'], data=['contentitem']] |
def _set_new_connection(self, conn):
"""
Replace existing connection (if there is one) and close it.
"""
with self._lock:
old = self._connection
self._connection = conn
if old:
log.debug("[control connection] Closing old connection %r, replacing with %r", old, conn)
old.close() | def function[_set_new_connection, parameter[self, conn]]:
constant[
Replace existing connection (if there is one) and close it.
]
with name[self]._lock begin[:]
variable[old] assign[=] name[self]._connection
name[self]._connection assign[=] name[conn]
if name[old] begin[:]
call[name[log].debug, parameter[constant[[control connection] Closing old connection %r, replacing with %r], name[old], name[conn]]]
call[name[old].close, parameter[]] | keyword[def] identifier[_set_new_connection] ( identifier[self] , identifier[conn] ):
literal[string]
keyword[with] identifier[self] . identifier[_lock] :
identifier[old] = identifier[self] . identifier[_connection]
identifier[self] . identifier[_connection] = identifier[conn]
keyword[if] identifier[old] :
identifier[log] . identifier[debug] ( literal[string] , identifier[old] , identifier[conn] )
identifier[old] . identifier[close] () | def _set_new_connection(self, conn):
"""
Replace existing connection (if there is one) and close it.
"""
with self._lock:
old = self._connection
self._connection = conn # depends on [control=['with'], data=[]]
if old:
log.debug('[control connection] Closing old connection %r, replacing with %r', old, conn)
old.close() # depends on [control=['if'], data=[]] |
def find_duplicate_schedule_items(all_items):
    """Find talks assigned to multiple schedule items.

    Returns the offending schedule items; each later duplicate is followed
    by the first occurrence of the same talk (added once).
    """
    flagged = []
    first_seen = {}
    for schedule_item in all_items:
        talk = schedule_item.talk
        if talk and talk in first_seen:
            flagged.append(schedule_item)
            original = first_seen[talk]
            if original not in flagged:
                flagged.append(original)
        else:
            first_seen[talk] = schedule_item
    # We currently allow duplicate pages for cases were we need disjoint
    # schedule items, like multiple open space sessions on different
    # days and similar cases. This may be revisited later
    return flagged
constant[Find talks / pages assigned to mulitple schedule items]
variable[duplicates] assign[=] list[[]]
variable[seen_talks] assign[=] dictionary[[], []]
for taget[name[item]] in starred[name[all_items]] begin[:]
if <ast.BoolOp object at 0x7da18f00d810> begin[:]
call[name[duplicates].append, parameter[name[item]]]
if compare[call[name[seen_talks]][name[item].talk] <ast.NotIn object at 0x7da2590d7190> name[duplicates]] begin[:]
call[name[duplicates].append, parameter[call[name[seen_talks]][name[item].talk]]]
return[name[duplicates]] | keyword[def] identifier[find_duplicate_schedule_items] ( identifier[all_items] ):
literal[string]
identifier[duplicates] =[]
identifier[seen_talks] ={}
keyword[for] identifier[item] keyword[in] identifier[all_items] :
keyword[if] identifier[item] . identifier[talk] keyword[and] identifier[item] . identifier[talk] keyword[in] identifier[seen_talks] :
identifier[duplicates] . identifier[append] ( identifier[item] )
keyword[if] identifier[seen_talks] [ identifier[item] . identifier[talk] ] keyword[not] keyword[in] identifier[duplicates] :
identifier[duplicates] . identifier[append] ( identifier[seen_talks] [ identifier[item] . identifier[talk] ])
keyword[else] :
identifier[seen_talks] [ identifier[item] . identifier[talk] ]= identifier[item]
keyword[return] identifier[duplicates] | def find_duplicate_schedule_items(all_items):
"""Find talks / pages assigned to mulitple schedule items"""
duplicates = []
seen_talks = {}
for item in all_items:
if item.talk and item.talk in seen_talks:
duplicates.append(item)
if seen_talks[item.talk] not in duplicates:
duplicates.append(seen_talks[item.talk]) # depends on [control=['if'], data=['duplicates']] # depends on [control=['if'], data=[]]
else:
seen_talks[item.talk] = item # depends on [control=['for'], data=['item']]
# We currently allow duplicate pages for cases were we need disjoint
# schedule items, like multiple open space sessions on different
# days and similar cases. This may be revisited later
return duplicates |
def update(self, data):
    """Updates the object information based on live data, if there were any changes made. Any changes will be
    automatically applied to the object, but will not be automatically persisted. You must manually call
    `db.session.add(ami)` on the object.

    Args:
        data (bunch): Data fetched from AWS API

    Returns:
        True if there were any changes to the object, else false
    """
    updated = self.set_property('description', data.description)
    updated |= self.set_property('state', data.state)

    incoming_tags = {tag['Key']: tag['Value'] for tag in data.tags or {}}
    current_tags = {tag.key: tag for tag in self.tags}

    # Add new tags and refresh values of existing ones
    for tag_key, tag_value in list(incoming_tags.items()):
        updated |= self.set_tag(tag_key, tag_value)

    # Remove tags that no longer exist in the live data
    for tag_key in list(current_tags.keys()):
        if tag_key not in incoming_tags:
            updated |= self.delete_tag(tag_key)

    return updated
return updated | def function[update, parameter[self, data]]:
constant[Updates the object information based on live data, if there were any changes made. Any changes will be
automatically applied to the object, but will not be automatically persisted. You must manually call
`db.session.add(ami)` on the object.
Args:
data (bunch): Data fetched from AWS API
Returns:
True if there were any changes to the object, else false
]
variable[updated] assign[=] call[name[self].set_property, parameter[constant[description], name[data].description]]
<ast.AugAssign object at 0x7da1b2049810>
variable[tags] assign[=] <ast.DictComp object at 0x7da1b204b490>
variable[existing_tags] assign[=] <ast.DictComp object at 0x7da1b20494b0>
for taget[tuple[[<ast.Name object at 0x7da1b204b0d0>, <ast.Name object at 0x7da1b2049a20>]]] in starred[call[name[list], parameter[call[name[tags].items, parameter[]]]]] begin[:]
<ast.AugAssign object at 0x7da1b2049fc0>
for taget[name[key]] in starred[call[name[list], parameter[call[name[existing_tags].keys, parameter[]]]]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[tags]] begin[:]
<ast.AugAssign object at 0x7da1b204b5b0>
return[name[updated]] | keyword[def] identifier[update] ( identifier[self] , identifier[data] ):
literal[string]
identifier[updated] = identifier[self] . identifier[set_property] ( literal[string] , identifier[data] . identifier[description] )
identifier[updated] |= identifier[self] . identifier[set_property] ( literal[string] , identifier[data] . identifier[state] )
identifier[tags] ={ identifier[x] [ literal[string] ]: identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[data] . identifier[tags] keyword[or] {}}
identifier[existing_tags] ={ identifier[x] . identifier[key] : identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[tags] }
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[list] ( identifier[tags] . identifier[items] ()):
identifier[updated] |= identifier[self] . identifier[set_tag] ( identifier[key] , identifier[value] )
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[existing_tags] . identifier[keys] ()):
keyword[if] identifier[key] keyword[not] keyword[in] identifier[tags] :
identifier[updated] |= identifier[self] . identifier[delete_tag] ( identifier[key] )
keyword[return] identifier[updated] | def update(self, data):
"""Updates the object information based on live data, if there were any changes made. Any changes will be
automatically applied to the object, but will not be automatically persisted. You must manually call
`db.session.add(ami)` on the object.
Args:
data (bunch): Data fetched from AWS API
Returns:
True if there were any changes to the object, else false
"""
updated = self.set_property('description', data.description)
updated |= self.set_property('state', data.state)
tags = {x['Key']: x['Value'] for x in data.tags or {}}
existing_tags = {x.key: x for x in self.tags}
# Check for new tags
for (key, value) in list(tags.items()):
updated |= self.set_tag(key, value) # depends on [control=['for'], data=[]]
# Check for updated or removed tags
for key in list(existing_tags.keys()):
if key not in tags:
updated |= self.delete_tag(key) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
return updated |
def Write(self, schedule, output_file):
    """Writes out a feed as KML.

    Args:
      schedule: A transitfeed.Schedule object containing the feed to write.
      output_file: The name of the output KML file, or file object to use.
    """
    # Generate the DOM to write
    root = ET.Element('kml')
    root.attrib['xmlns'] = 'http://earth.google.com/kml/2.1'
    doc = ET.SubElement(root, 'Document')
    open_tag = ET.SubElement(doc, 'open')
    open_tag.text = '1'
    self._CreateStopsFolder(schedule, doc)
    if self.split_routes:
      # One routes folder per distinct route type, in sorted order.
      route_types = sorted(set(route.route_type
                               for route in schedule.GetRouteList()))
      for route_type in route_types:
        self._CreateRoutesFolder(schedule, doc, route_type)
    else:
      self._CreateRoutesFolder(schedule, doc)
    self._CreateShapesFolder(schedule, doc)
    # Make sure we pretty-print
    self._SetIndentation(root)
    # Now write the output
    if isinstance(output_file, file):
      output = output_file
      we_opened_output = False
    else:
      output = open(output_file, 'w')
      we_opened_output = True
    try:
      output.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
      ET.ElementTree(root).write(output, 'utf-8')
    finally:
      # Bug fix: a handle opened from a filename was previously leaked.
      # Only close handles this method opened; caller-provided file
      # objects remain open for the caller to manage.
      if we_opened_output:
        output.close()
constant[Writes out a feed as KML.
Args:
schedule: A transitfeed.Schedule object containing the feed to write.
output_file: The name of the output KML file, or file object to use.
]
variable[root] assign[=] call[name[ET].Element, parameter[constant[kml]]]
call[name[root].attrib][constant[xmlns]] assign[=] constant[http://earth.google.com/kml/2.1]
variable[doc] assign[=] call[name[ET].SubElement, parameter[name[root], constant[Document]]]
variable[open_tag] assign[=] call[name[ET].SubElement, parameter[name[doc], constant[open]]]
name[open_tag].text assign[=] constant[1]
call[name[self]._CreateStopsFolder, parameter[name[schedule], name[doc]]]
if name[self].split_routes begin[:]
variable[route_types] assign[=] call[name[set], parameter[]]
for taget[name[route]] in starred[call[name[schedule].GetRouteList, parameter[]]] begin[:]
call[name[route_types].add, parameter[name[route].route_type]]
variable[route_types] assign[=] call[name[list], parameter[name[route_types]]]
call[name[route_types].sort, parameter[]]
for taget[name[route_type]] in starred[name[route_types]] begin[:]
call[name[self]._CreateRoutesFolder, parameter[name[schedule], name[doc], name[route_type]]]
call[name[self]._CreateShapesFolder, parameter[name[schedule], name[doc]]]
call[name[self]._SetIndentation, parameter[name[root]]]
if call[name[isinstance], parameter[name[output_file], name[file]]] begin[:]
variable[output] assign[=] name[output_file]
call[name[output].write, parameter[constant[<?xml version="1.0" encoding="UTF-8"?>
]]]
call[call[name[ET].ElementTree, parameter[name[root]]].write, parameter[name[output], constant[utf-8]]] | keyword[def] identifier[Write] ( identifier[self] , identifier[schedule] , identifier[output_file] ):
literal[string]
identifier[root] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[root] . identifier[attrib] [ literal[string] ]= literal[string]
identifier[doc] = identifier[ET] . identifier[SubElement] ( identifier[root] , literal[string] )
identifier[open_tag] = identifier[ET] . identifier[SubElement] ( identifier[doc] , literal[string] )
identifier[open_tag] . identifier[text] = literal[string]
identifier[self] . identifier[_CreateStopsFolder] ( identifier[schedule] , identifier[doc] )
keyword[if] identifier[self] . identifier[split_routes] :
identifier[route_types] = identifier[set] ()
keyword[for] identifier[route] keyword[in] identifier[schedule] . identifier[GetRouteList] ():
identifier[route_types] . identifier[add] ( identifier[route] . identifier[route_type] )
identifier[route_types] = identifier[list] ( identifier[route_types] )
identifier[route_types] . identifier[sort] ()
keyword[for] identifier[route_type] keyword[in] identifier[route_types] :
identifier[self] . identifier[_CreateRoutesFolder] ( identifier[schedule] , identifier[doc] , identifier[route_type] )
keyword[else] :
identifier[self] . identifier[_CreateRoutesFolder] ( identifier[schedule] , identifier[doc] )
identifier[self] . identifier[_CreateShapesFolder] ( identifier[schedule] , identifier[doc] )
identifier[self] . identifier[_SetIndentation] ( identifier[root] )
keyword[if] identifier[isinstance] ( identifier[output_file] , identifier[file] ):
identifier[output] = identifier[output_file]
keyword[else] :
identifier[output] = identifier[open] ( identifier[output_file] , literal[string] )
identifier[output] . identifier[write] ( literal[string] )
identifier[ET] . identifier[ElementTree] ( identifier[root] ). identifier[write] ( identifier[output] , literal[string] ) | def Write(self, schedule, output_file):
"""Writes out a feed as KML.
Args:
schedule: A transitfeed.Schedule object containing the feed to write.
output_file: The name of the output KML file, or file object to use.
"""
# Generate the DOM to write
root = ET.Element('kml')
root.attrib['xmlns'] = 'http://earth.google.com/kml/2.1'
doc = ET.SubElement(root, 'Document')
open_tag = ET.SubElement(doc, 'open')
open_tag.text = '1'
self._CreateStopsFolder(schedule, doc)
if self.split_routes:
route_types = set()
for route in schedule.GetRouteList():
route_types.add(route.route_type) # depends on [control=['for'], data=['route']]
route_types = list(route_types)
route_types.sort()
for route_type in route_types:
self._CreateRoutesFolder(schedule, doc, route_type) # depends on [control=['for'], data=['route_type']] # depends on [control=['if'], data=[]]
else:
self._CreateRoutesFolder(schedule, doc)
self._CreateShapesFolder(schedule, doc)
# Make sure we pretty-print
self._SetIndentation(root)
# Now write the output
if isinstance(output_file, file):
output = output_file # depends on [control=['if'], data=[]]
else:
output = open(output_file, 'w')
output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
ET.ElementTree(root).write(output, 'utf-8') |
def is_callable(instance, attribute, value):
    """Attrs-style validator: raise :exc:`TypeError` unless *value* is callable.

    The *instance* argument is unused; *attribute* must expose a ``name``
    attribute used in the error message.
    """
    if callable(value):
        return
    raise TypeError("'{}' must be callable".format(attribute.name))
constant[Raises a :exc:`TypeError` if the value is not a callable.]
if <ast.UnaryOp object at 0x7da20e962590> begin[:]
<ast.Raise object at 0x7da20e960ac0> | keyword[def] identifier[is_callable] ( identifier[instance] , identifier[attribute] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[callable] ( identifier[value] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[attribute] . identifier[name] )) | def is_callable(instance, attribute, value):
"""Raises a :exc:`TypeError` if the value is not a callable."""
if not callable(value):
raise TypeError("'{}' must be callable".format(attribute.name)) # depends on [control=['if'], data=[]] |
def get_auth_settings(self, account_id):
    """
    Return the authentication settings for the passed account_id.

    https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.show_sso_settings
    """
    endpoint = ACCOUNTS_API.format(account_id) + "/sso_settings"
    response = self._get_resource(endpoint)
    return CanvasSSOSettings(data=response)
constant[
Return the authentication settings for the passed account_id.
https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.show_sso_settings
]
variable[url] assign[=] binary_operation[call[name[ACCOUNTS_API].format, parameter[name[account_id]]] + constant[/sso_settings]]
return[call[name[CanvasSSOSettings], parameter[]]] | keyword[def] identifier[get_auth_settings] ( identifier[self] , identifier[account_id] ):
literal[string]
identifier[url] = identifier[ACCOUNTS_API] . identifier[format] ( identifier[account_id] )+ literal[string]
keyword[return] identifier[CanvasSSOSettings] ( identifier[data] = identifier[self] . identifier[_get_resource] ( identifier[url] )) | def get_auth_settings(self, account_id):
"""
Return the authentication settings for the passed account_id.
https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.show_sso_settings
"""
url = ACCOUNTS_API.format(account_id) + '/sso_settings'
return CanvasSSOSettings(data=self._get_resource(url)) |
def _get_schema(self, tag):
"""Return the child schema for the given C{tag}.
@raises L{WSDLParseError}: If the tag doesn't belong to the schema.
"""
schema = self._schema.children.get(tag)
if not schema:
raise WSDLParseError("Unknown tag '%s'" % tag)
return schema | def function[_get_schema, parameter[self, tag]]:
constant[Return the child schema for the given C{tag}.
@raises L{WSDLParseError}: If the tag doesn't belong to the schema.
]
variable[schema] assign[=] call[name[self]._schema.children.get, parameter[name[tag]]]
if <ast.UnaryOp object at 0x7da18dc9ae30> begin[:]
<ast.Raise object at 0x7da18dc9ad70>
return[name[schema]] | keyword[def] identifier[_get_schema] ( identifier[self] , identifier[tag] ):
literal[string]
identifier[schema] = identifier[self] . identifier[_schema] . identifier[children] . identifier[get] ( identifier[tag] )
keyword[if] keyword[not] identifier[schema] :
keyword[raise] identifier[WSDLParseError] ( literal[string] % identifier[tag] )
keyword[return] identifier[schema] | def _get_schema(self, tag):
"""Return the child schema for the given C{tag}.
@raises L{WSDLParseError}: If the tag doesn't belong to the schema.
"""
schema = self._schema.children.get(tag)
if not schema:
raise WSDLParseError("Unknown tag '%s'" % tag) # depends on [control=['if'], data=[]]
return schema |
def validate_config(key: str, config: dict) -> None:
    """
    Call jsonschema validation to raise JSONValidation on non-compliance or silently pass.

    :param key: validation schema key of interest
    :param config: configuration dict to validate
    """
    # A missing key raises KeyError here, exactly as it would inside the
    # try block below (KeyError is not caught either way).
    schema = CONFIG_JSON_SCHEMA[key]
    try:
        jsonschema.validate(config, schema)
    except jsonschema.ValidationError as err:
        raise JSONValidation('JSON validation error on {} configuration: {}'.format(key, err.message))
    except jsonschema.SchemaError as err:
        raise JSONValidation('JSON schema error on {} specification: {}'.format(key, err.message))
constant[
Call jsonschema validation to raise JSONValidation on non-compliance or silently pass.
:param key: validation schema key of interest
:param config: configuration dict to validate
]
<ast.Try object at 0x7da20cabf730> | keyword[def] identifier[validate_config] ( identifier[key] : identifier[str] , identifier[config] : identifier[dict] )-> keyword[None] :
literal[string]
keyword[try] :
identifier[jsonschema] . identifier[validate] ( identifier[config] , identifier[CONFIG_JSON_SCHEMA] [ identifier[key] ])
keyword[except] identifier[jsonschema] . identifier[ValidationError] keyword[as] identifier[x_validation] :
keyword[raise] identifier[JSONValidation] ( literal[string] . identifier[format] ( identifier[key] , identifier[x_validation] . identifier[message] ))
keyword[except] identifier[jsonschema] . identifier[SchemaError] keyword[as] identifier[x_schema] :
keyword[raise] identifier[JSONValidation] ( literal[string] . identifier[format] ( identifier[key] , identifier[x_schema] . identifier[message] )) | def validate_config(key: str, config: dict) -> None:
"""
Call jsonschema validation to raise JSONValidation on non-compliance or silently pass.
:param key: validation schema key of interest
:param config: configuration dict to validate
"""
try:
jsonschema.validate(config, CONFIG_JSON_SCHEMA[key]) # depends on [control=['try'], data=[]]
except jsonschema.ValidationError as x_validation:
raise JSONValidation('JSON validation error on {} configuration: {}'.format(key, x_validation.message)) # depends on [control=['except'], data=['x_validation']]
except jsonschema.SchemaError as x_schema:
raise JSONValidation('JSON schema error on {} specification: {}'.format(key, x_schema.message)) # depends on [control=['except'], data=['x_schema']] |
def GOE(N):
    """Create an NxN element of the Gaussian Orthogonal Ensemble.

    The sample is produced by symmetrizing a matrix of i.i.d. standard
    normal entries: ``(A + A.T) / 2``.

    Parameters
    ----------
    N : int
        Size of the (square) matrix.

    Returns
    -------
    numpy.ndarray
        A symmetric N x N float matrix.
    """
    a = ra.standard_normal((N, N))
    # Symmetrize out-of-place. The previous ``a += a.T`` added a matrix to
    # an overlapping transpose *view* of itself, which is only correct
    # because modern NumPy (>= 1.13) detects the overlap and buffers; the
    # explicit form below is always well-defined and numerically identical.
    return (a + a.T) / 2
constant[Creates an NxN element of the Gaussian Orthogonal Ensemble]
variable[m] assign[=] call[name[ra].standard_normal, parameter[tuple[[<ast.Name object at 0x7da2041d96f0>, <ast.Name object at 0x7da2041d99c0>]]]]
<ast.AugAssign object at 0x7da2041dad40>
return[binary_operation[name[m] / constant[2]]] | keyword[def] identifier[GOE] ( identifier[N] ):
literal[string]
identifier[m] = identifier[ra] . identifier[standard_normal] (( identifier[N] , identifier[N] ))
identifier[m] += identifier[m] . identifier[T]
keyword[return] identifier[m] / literal[int] | def GOE(N):
"""Creates an NxN element of the Gaussian Orthogonal Ensemble"""
m = ra.standard_normal((N, N))
m += m.T
return m / 2 |
def Brokaw(T, ys, mus, MWs, molecular_diameters, Stockmayers):
    r'''Calculates viscosity of a gas mixture according to
    mixing rules in [1]_.

    .. math::
        \eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}

        \phi_{ij} = \left( \frac{\eta_i}{\eta_j} \right)^{0.5} S_{ij} A_{ij}

        A_{ij} = m_{ij} M_{ij}^{-0.5} \left[1 +
        \frac{M_{ij} - M_{ij}^{0.45}}
        {2(1+M_{ij}) + \frac{(1 + M_{ij}^{0.45}) m_{ij}^{-0.5}}{1 + m_{ij}}} \right]

        m_{ij} = \left[ \frac{4}{(1+M_{ij}^{-1})(1+M_{ij})}\right]^{0.25}

        M_{ij} = \frac{M_i}{M_j}

        S_{ij} = \frac{1 + (T_i^* T_j^*)^{0.5} + (\delta_i \delta_j/4)}
        {[1+T_i^* + (\delta_i^2/4)]^{0.5}[1+T_j^*+(\delta_j^2/4)]^{0.5}}

        T^* = kT/\epsilon

    Parameters
    ----------
    T : float
        Temperature of fluid, [K]
    ys : float
        Mole fractions of gas components
    mus : float
        Gas viscosities of all components, [Pa*S]
    MWs : float
        Molecular weights of all components, [g/mol]
    molecular_diameters : float
        L-J molecular diameter of all components, [angstroms]
    Stockmayers : float
        L-J Stockmayer energy parameters of all components, []

    Returns
    -------
    mug : float
        Viscosity of gas mixture, [Pa*S]

    Notes
    -----
    This equation is entirely dimensionless; all dimensions cancel.
    The original source has not been reviewed.

    This is DIPPR Procedure 8D: Method for the Viscosity of Nonhydrocarbon
    Vapor Mixtures at Low Pressure (Polar and Nonpolar)

    Examples
    --------
    >>> Brokaw(308.2, [0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07], [0.42, 0.19], [347, 432])
    9.699085099801568e-06

    References
    ----------
    .. [1] Brokaw, R. S. "Predicting Transport Properties of Dilute Gases."
       Industrial & Engineering Chemistry Process Design and Development
       8, no. 2 (April 1, 1969): 240-53. doi:10.1021/i260030a015.
    .. [2] Brokaw, R. S. Viscosity of Gas Mixtures, NASA-TN-D-4496, 1968.
    .. [3] Danner, Ronald P, and Design Institute for Physical Property Data.
       Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
    '''
    cmps = range(len(ys))  # component indices; iterated repeatedly below
    MDs = molecular_diameters  # shorthand for the L-J polarity parameters delta_i
    if not none_and_length_check([ys, mus, MWs, molecular_diameters, Stockmayers]):    # check same-length inputs
        raise Exception('Function inputs are incorrect format')
    # Reduced temperatures T* = kT/epsilon for each component
    Tsts = [T/Stockmayer_i for Stockmayer_i in Stockmayers]
    # Pairwise interaction matrices S, M, m, A and phi; filled in the loop below
    Sij = [[0 for i in cmps] for j in cmps]
    Mij = [[0 for i in cmps] for j in cmps]
    mij = [[0 for i in cmps] for j in cmps]
    Aij = [[0 for i in cmps] for j in cmps]
    phiij =[[0 for i in cmps] for j in cmps]
    for i in cmps:
        for j in cmps:
            # S_ij from the reduced temperatures and polarity parameters
            Sij[i][j] = (1+(Tsts[i]*Tsts[j])**0.5 + (MDs[i]*MDs[j])/4.)/(1 + Tsts[i] + (MDs[i]**2/4.))**0.5/(1 + Tsts[j] + (MDs[j]**2/4.))**0.5
            # Both polarity parameters negligible: S_ij is taken as 1
            if MDs[i] <= 0.1 and MDs[j] <= 0.1:
                Sij[i][j] = 1
            # Molecular-weight ratio M_ij and the derived m_ij, A_ij terms
            Mij[i][j] = MWs[i]/MWs[j]
            mij[i][j] = (4./(1+Mij[i][j]**-1)/(1+Mij[i][j]))**0.25
            Aij[i][j] = mij[i][j]*Mij[i][j]**-0.5*(1 + (Mij[i][j]-Mij[i][j]**0.45)/(2*(1+Mij[i][j]) + (1+Mij[i][j]**0.45)*mij[i][j]**-0.5/(1+mij[i][j])))
            # Interaction parameter phi_ij = (eta_i/eta_j)^0.5 * S_ij * A_ij
            phiij[i][j] = (mus[i]/mus[j])**0.5*Sij[i][j]*Aij[i][j]
    # Final mixing-rule summation (first equation of the docstring)
    return sum([ys[i]*mus[i]/sum([ys[j]*phiij[i][j] for j in cmps]) for i in cmps])
constant[Calculates viscosity of a gas mixture according to
mixing rules in [1]_.
.. math::
\eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}
\phi_{ij} = \left( \frac{\eta_i}{\eta_j} \right)^{0.5} S_{ij} A_{ij}
A_{ij} = m_{ij} M_{ij}^{-0.5} \left[1 +
\frac{M_{ij} - M_{ij}^{0.45}}
{2(1+M_{ij}) + \frac{(1 + M_{ij}^{0.45}) m_{ij}^{-0.5}}{1 + m_{ij}}} \right]
m_{ij} = \left[ \frac{4}{(1+M_{ij}^{-1})(1+M_{ij})}\right]^{0.25}
M_{ij} = \frac{M_i}{M_j}
S_{ij} = \frac{1 + (T_i^* T_j^*)^{0.5} + (\delta_i \delta_j/4)}
{[1+T_i^* + (\delta_i^2/4)]^{0.5}[1+T_j^*+(\delta_j^2/4)]^{0.5}}
T^* = kT/\epsilon
Parameters
----------
T : float
Temperature of fluid, [K]
ys : float
Mole fractions of gas components
mus : float
Gas viscosities of all components, [Pa*S]
MWs : float
Molecular weights of all components, [g/mol]
molecular_diameters : float
L-J molecular diameter of all components, [angstroms]
Stockmayers : float
L-J Stockmayer energy parameters of all components, []
Returns
-------
mug : float
Viscosity of gas mixture, [Pa*S]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The original source has not been reviewed.
This is DIPPR Procedure 8D: Method for the Viscosity of Nonhydrocarbon
Vapor Mixtures at Low Pressure (Polar and Nonpolar)
Examples
--------
>>> Brokaw(308.2, [0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07], [0.42, 0.19], [347, 432])
9.699085099801568e-06
References
----------
.. [1] Brokaw, R. S. "Predicting Transport Properties of Dilute Gases."
Industrial & Engineering Chemistry Process Design and Development
8, no. 2 (April 1, 1969): 240-53. doi:10.1021/i260030a015.
.. [2] Brokaw, R. S. Viscosity of Gas Mixtures, NASA-TN-D-4496, 1968.
.. [3] Danner, Ronald P, and Design Institute for Physical Property Data.
Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
]
variable[cmps] assign[=] call[name[range], parameter[call[name[len], parameter[name[ys]]]]]
variable[MDs] assign[=] name[molecular_diameters]
if <ast.UnaryOp object at 0x7da18f00e1d0> begin[:]
<ast.Raise object at 0x7da18f00d720>
variable[Tsts] assign[=] <ast.ListComp object at 0x7da18f00e2c0>
variable[Sij] assign[=] <ast.ListComp object at 0x7da18f00d6f0>
variable[Mij] assign[=] <ast.ListComp object at 0x7da20e963cd0>
variable[mij] assign[=] <ast.ListComp object at 0x7da20e963d30>
variable[Aij] assign[=] <ast.ListComp object at 0x7da20e9603a0>
variable[phiij] assign[=] <ast.ListComp object at 0x7da20e963a30>
for taget[name[i]] in starred[name[cmps]] begin[:]
for taget[name[j]] in starred[name[cmps]] begin[:]
call[call[name[Sij]][name[i]]][name[j]] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[1] + binary_operation[binary_operation[call[name[Tsts]][name[i]] * call[name[Tsts]][name[j]]] ** constant[0.5]]] + binary_operation[binary_operation[call[name[MDs]][name[i]] * call[name[MDs]][name[j]]] / constant[4.0]]] / binary_operation[binary_operation[binary_operation[constant[1] + call[name[Tsts]][name[i]]] + binary_operation[binary_operation[call[name[MDs]][name[i]] ** constant[2]] / constant[4.0]]] ** constant[0.5]]] / binary_operation[binary_operation[binary_operation[constant[1] + call[name[Tsts]][name[j]]] + binary_operation[binary_operation[call[name[MDs]][name[j]] ** constant[2]] / constant[4.0]]] ** constant[0.5]]]
if <ast.BoolOp object at 0x7da20e960be0> begin[:]
call[call[name[Sij]][name[i]]][name[j]] assign[=] constant[1]
call[call[name[Mij]][name[i]]][name[j]] assign[=] binary_operation[call[name[MWs]][name[i]] / call[name[MWs]][name[j]]]
call[call[name[mij]][name[i]]][name[j]] assign[=] binary_operation[binary_operation[binary_operation[constant[4.0] / binary_operation[constant[1] + binary_operation[call[call[name[Mij]][name[i]]][name[j]] ** <ast.UnaryOp object at 0x7da18c4ceb00>]]] / binary_operation[constant[1] + call[call[name[Mij]][name[i]]][name[j]]]] ** constant[0.25]]
call[call[name[Aij]][name[i]]][name[j]] assign[=] binary_operation[binary_operation[call[call[name[mij]][name[i]]][name[j]] * binary_operation[call[call[name[Mij]][name[i]]][name[j]] ** <ast.UnaryOp object at 0x7da18c4ccdc0>]] * binary_operation[constant[1] + binary_operation[binary_operation[call[call[name[Mij]][name[i]]][name[j]] - binary_operation[call[call[name[Mij]][name[i]]][name[j]] ** constant[0.45]]] / binary_operation[binary_operation[constant[2] * binary_operation[constant[1] + call[call[name[Mij]][name[i]]][name[j]]]] + binary_operation[binary_operation[binary_operation[constant[1] + binary_operation[call[call[name[Mij]][name[i]]][name[j]] ** constant[0.45]]] * binary_operation[call[call[name[mij]][name[i]]][name[j]] ** <ast.UnaryOp object at 0x7da18c4ce770>]] / binary_operation[constant[1] + call[call[name[mij]][name[i]]][name[j]]]]]]]]
call[call[name[phiij]][name[i]]][name[j]] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[mus]][name[i]] / call[name[mus]][name[j]]] ** constant[0.5]] * call[call[name[Sij]][name[i]]][name[j]]] * call[call[name[Aij]][name[i]]][name[j]]]
return[call[name[sum], parameter[<ast.ListComp object at 0x7da18c4ce3e0>]]] | keyword[def] identifier[Brokaw] ( identifier[T] , identifier[ys] , identifier[mus] , identifier[MWs] , identifier[molecular_diameters] , identifier[Stockmayers] ):
literal[string]
identifier[cmps] = identifier[range] ( identifier[len] ( identifier[ys] ))
identifier[MDs] = identifier[molecular_diameters]
keyword[if] keyword[not] identifier[none_and_length_check] ([ identifier[ys] , identifier[mus] , identifier[MWs] , identifier[molecular_diameters] , identifier[Stockmayers] ]):
keyword[raise] identifier[Exception] ( literal[string] )
identifier[Tsts] =[ identifier[T] / identifier[Stockmayer_i] keyword[for] identifier[Stockmayer_i] keyword[in] identifier[Stockmayers] ]
identifier[Sij] =[[ literal[int] keyword[for] identifier[i] keyword[in] identifier[cmps] ] keyword[for] identifier[j] keyword[in] identifier[cmps] ]
identifier[Mij] =[[ literal[int] keyword[for] identifier[i] keyword[in] identifier[cmps] ] keyword[for] identifier[j] keyword[in] identifier[cmps] ]
identifier[mij] =[[ literal[int] keyword[for] identifier[i] keyword[in] identifier[cmps] ] keyword[for] identifier[j] keyword[in] identifier[cmps] ]
identifier[Aij] =[[ literal[int] keyword[for] identifier[i] keyword[in] identifier[cmps] ] keyword[for] identifier[j] keyword[in] identifier[cmps] ]
identifier[phiij] =[[ literal[int] keyword[for] identifier[i] keyword[in] identifier[cmps] ] keyword[for] identifier[j] keyword[in] identifier[cmps] ]
keyword[for] identifier[i] keyword[in] identifier[cmps] :
keyword[for] identifier[j] keyword[in] identifier[cmps] :
identifier[Sij] [ identifier[i] ][ identifier[j] ]=( literal[int] +( identifier[Tsts] [ identifier[i] ]* identifier[Tsts] [ identifier[j] ])** literal[int] +( identifier[MDs] [ identifier[i] ]* identifier[MDs] [ identifier[j] ])/ literal[int] )/( literal[int] + identifier[Tsts] [ identifier[i] ]+( identifier[MDs] [ identifier[i] ]** literal[int] / literal[int] ))** literal[int] /( literal[int] + identifier[Tsts] [ identifier[j] ]+( identifier[MDs] [ identifier[j] ]** literal[int] / literal[int] ))** literal[int]
keyword[if] identifier[MDs] [ identifier[i] ]<= literal[int] keyword[and] identifier[MDs] [ identifier[j] ]<= literal[int] :
identifier[Sij] [ identifier[i] ][ identifier[j] ]= literal[int]
identifier[Mij] [ identifier[i] ][ identifier[j] ]= identifier[MWs] [ identifier[i] ]/ identifier[MWs] [ identifier[j] ]
identifier[mij] [ identifier[i] ][ identifier[j] ]=( literal[int] /( literal[int] + identifier[Mij] [ identifier[i] ][ identifier[j] ]**- literal[int] )/( literal[int] + identifier[Mij] [ identifier[i] ][ identifier[j] ]))** literal[int]
identifier[Aij] [ identifier[i] ][ identifier[j] ]= identifier[mij] [ identifier[i] ][ identifier[j] ]* identifier[Mij] [ identifier[i] ][ identifier[j] ]**- literal[int] *( literal[int] +( identifier[Mij] [ identifier[i] ][ identifier[j] ]- identifier[Mij] [ identifier[i] ][ identifier[j] ]** literal[int] )/( literal[int] *( literal[int] + identifier[Mij] [ identifier[i] ][ identifier[j] ])+( literal[int] + identifier[Mij] [ identifier[i] ][ identifier[j] ]** literal[int] )* identifier[mij] [ identifier[i] ][ identifier[j] ]**- literal[int] /( literal[int] + identifier[mij] [ identifier[i] ][ identifier[j] ])))
identifier[phiij] [ identifier[i] ][ identifier[j] ]=( identifier[mus] [ identifier[i] ]/ identifier[mus] [ identifier[j] ])** literal[int] * identifier[Sij] [ identifier[i] ][ identifier[j] ]* identifier[Aij] [ identifier[i] ][ identifier[j] ]
keyword[return] identifier[sum] ([ identifier[ys] [ identifier[i] ]* identifier[mus] [ identifier[i] ]/ identifier[sum] ([ identifier[ys] [ identifier[j] ]* identifier[phiij] [ identifier[i] ][ identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[cmps] ]) keyword[for] identifier[i] keyword[in] identifier[cmps] ]) | def Brokaw(T, ys, mus, MWs, molecular_diameters, Stockmayers):
"""Calculates viscosity of a gas mixture according to
mixing rules in [1]_.
.. math::
\\eta_{mix} = \\sum_{i=1}^n \\frac{y_i \\eta_i}{\\sum_{j=1}^n y_j \\phi_{ij}}
\\phi_{ij} = \\left( \\frac{\\eta_i}{\\eta_j} \\right)^{0.5} S_{ij} A_{ij}
A_{ij} = m_{ij} M_{ij}^{-0.5} \\left[1 +
\\frac{M_{ij} - M_{ij}^{0.45}}
{2(1+M_{ij}) + \\frac{(1 + M_{ij}^{0.45}) m_{ij}^{-0.5}}{1 + m_{ij}}} \\right]
m_{ij} = \\left[ \\frac{4}{(1+M_{ij}^{-1})(1+M_{ij})}\\right]^{0.25}
M_{ij} = \\frac{M_i}{M_j}
S_{ij} = \\frac{1 + (T_i^* T_j^*)^{0.5} + (\\delta_i \\delta_j/4)}
{[1+T_i^* + (\\delta_i^2/4)]^{0.5}[1+T_j^*+(\\delta_j^2/4)]^{0.5}}
T^* = kT/\\epsilon
Parameters
----------
T : float
Temperature of fluid, [K]
ys : float
Mole fractions of gas components
mus : float
Gas viscosities of all components, [Pa*S]
MWs : float
Molecular weights of all components, [g/mol]
molecular_diameters : float
L-J molecular diameter of all components, [angstroms]
Stockmayers : float
L-J Stockmayer energy parameters of all components, []
Returns
-------
mug : float
Viscosity of gas mixture, [Pa*S]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The original source has not been reviewed.
This is DIPPR Procedure 8D: Method for the Viscosity of Nonhydrocarbon
Vapor Mixtures at Low Pressure (Polar and Nonpolar)
Examples
--------
>>> Brokaw(308.2, [0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07], [0.42, 0.19], [347, 432])
9.699085099801568e-06
References
----------
.. [1] Brokaw, R. S. "Predicting Transport Properties of Dilute Gases."
Industrial & Engineering Chemistry Process Design and Development
8, no. 2 (April 1, 1969): 240-53. doi:10.1021/i260030a015.
.. [2] Brokaw, R. S. Viscosity of Gas Mixtures, NASA-TN-D-4496, 1968.
.. [3] Danner, Ronald P, and Design Institute for Physical Property Data.
Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
"""
cmps = range(len(ys))
MDs = molecular_diameters
if not none_and_length_check([ys, mus, MWs, molecular_diameters, Stockmayers]): # check same-length inputs
raise Exception('Function inputs are incorrect format') # depends on [control=['if'], data=[]]
Tsts = [T / Stockmayer_i for Stockmayer_i in Stockmayers]
Sij = [[0 for i in cmps] for j in cmps]
Mij = [[0 for i in cmps] for j in cmps]
mij = [[0 for i in cmps] for j in cmps]
Aij = [[0 for i in cmps] for j in cmps]
phiij = [[0 for i in cmps] for j in cmps]
for i in cmps:
for j in cmps:
Sij[i][j] = (1 + (Tsts[i] * Tsts[j]) ** 0.5 + MDs[i] * MDs[j] / 4.0) / (1 + Tsts[i] + MDs[i] ** 2 / 4.0) ** 0.5 / (1 + Tsts[j] + MDs[j] ** 2 / 4.0) ** 0.5
if MDs[i] <= 0.1 and MDs[j] <= 0.1:
Sij[i][j] = 1 # depends on [control=['if'], data=[]]
Mij[i][j] = MWs[i] / MWs[j]
mij[i][j] = (4.0 / (1 + Mij[i][j] ** (-1)) / (1 + Mij[i][j])) ** 0.25
Aij[i][j] = mij[i][j] * Mij[i][j] ** (-0.5) * (1 + (Mij[i][j] - Mij[i][j] ** 0.45) / (2 * (1 + Mij[i][j]) + (1 + Mij[i][j] ** 0.45) * mij[i][j] ** (-0.5) / (1 + mij[i][j])))
phiij[i][j] = (mus[i] / mus[j]) ** 0.5 * Sij[i][j] * Aij[i][j] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
return sum([ys[i] * mus[i] / sum([ys[j] * phiij[i][j] for j in cmps]) for i in cmps]) |
def all(self):
    r"""Yield every element of this expression, whitespace included.

    The iteration covers all LaTeX needed to reconstruct the original
    source: first each expression inside every argument, then the raw
    contents, regardless of the whitespace-preservation setting.
    """
    for argument in self.args:
        for expression in argument:
            yield expression
    for piece in self._contents:
        yield piece
constant[Returns all content in this expression, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> expr1 = TexExpr('textbf', ('\n', 'hi'))
>>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
>>> list(expr1.all) == list(expr2.all)
True
]
for taget[name[arg]] in starred[name[self].args] begin[:]
for taget[name[expr]] in starred[name[arg]] begin[:]
<ast.Yield object at 0x7da1b0604100>
for taget[name[content]] in starred[name[self]._contents] begin[:]
<ast.Yield object at 0x7da1b06055d0> | keyword[def] identifier[all] ( identifier[self] ):
literal[string]
keyword[for] identifier[arg] keyword[in] identifier[self] . identifier[args] :
keyword[for] identifier[expr] keyword[in] identifier[arg] :
keyword[yield] identifier[expr]
keyword[for] identifier[content] keyword[in] identifier[self] . identifier[_contents] :
keyword[yield] identifier[content] | def all(self):
"""Returns all content in this expression, regardless of whitespace or
not. This includes all LaTeX needed to reconstruct the original source.
>>> expr1 = TexExpr('textbf', ('\\n', 'hi'))
>>> expr2 = TexExpr('textbf', ('\\n', 'hi'), preserve_whitespace=True)
>>> list(expr1.all) == list(expr2.all)
True
"""
for arg in self.args:
for expr in arg:
yield expr # depends on [control=['for'], data=['expr']] # depends on [control=['for'], data=['arg']]
for content in self._contents:
yield content # depends on [control=['for'], data=['content']] |
def render_json(self):
    """Return a JSON summary of the validation operation."""
    type_names = {0: 'unknown',
                  1: 'extension',
                  2: 'theme',
                  3: 'dictionary',
                  4: 'langpack',
                  5: 'search',
                  8: 'webapp'}
    # Tag each message dict with its severity and flatten errors, then
    # warnings, then notices into one list (the dicts are mutated in place,
    # so the per-category lists on self see the added 'type' key too).
    messages = []
    for bucket, label in ((self.errors, 'error'),
                          (self.warnings, 'warning'),
                          (self.notices, 'notice')):
        for message in bucket:
            message['type'] = label
            messages.append(message)
    # Keys listed in this exact order so the serialized JSON is unchanged.
    summary = {'detected_type': type_names[self.detected_type],
               'ending_tier': self.ending_tier,
               'success': not self.failed(),
               'messages': messages,
               'errors': len(self.errors),
               'warnings': len(self.warnings),
               'notices': len(self.notices),
               'message_tree': self.message_tree,
               'compatibility_summary': self.compat_summary,
               'signing_summary': self.signing_summary,
               'metadata': self.metadata}
    return json.dumps(summary)
constant[Returns a JSON summary of the validation operation.]
variable[types] assign[=] dictionary[[<ast.Constant object at 0x7da20e954fa0>, <ast.Constant object at 0x7da20e954610>, <ast.Constant object at 0x7da20e955390>, <ast.Constant object at 0x7da20e9578b0>, <ast.Constant object at 0x7da20e956a70>, <ast.Constant object at 0x7da20e955360>, <ast.Constant object at 0x7da20e9557e0>], [<ast.Constant object at 0x7da20e9559c0>, <ast.Constant object at 0x7da20e954e80>, <ast.Constant object at 0x7da20e956650>, <ast.Constant object at 0x7da20e9557b0>, <ast.Constant object at 0x7da20e954040>, <ast.Constant object at 0x7da20e954400>, <ast.Constant object at 0x7da20e956620>]]
variable[output] assign[=] dictionary[[<ast.Constant object at 0x7da20e956f80>, <ast.Constant object at 0x7da20e956140>, <ast.Constant object at 0x7da20e957d90>, <ast.Constant object at 0x7da20e956890>, <ast.Constant object at 0x7da20e9543a0>, <ast.Constant object at 0x7da20e957d30>, <ast.Constant object at 0x7da20e957100>, <ast.Constant object at 0x7da20e954490>, <ast.Constant object at 0x7da20e9547f0>, <ast.Constant object at 0x7da20e954100>, <ast.Constant object at 0x7da20e9572e0>], [<ast.Subscript object at 0x7da20e954190>, <ast.Attribute object at 0x7da20e957670>, <ast.UnaryOp object at 0x7da20e9554b0>, <ast.List object at 0x7da20e9562f0>, <ast.Call object at 0x7da20e956cb0>, <ast.Call object at 0x7da20e9552a0>, <ast.Call object at 0x7da20e954b20>, <ast.Attribute object at 0x7da20e956260>, <ast.Attribute object at 0x7da20e957a00>, <ast.Attribute object at 0x7da20e9541c0>, <ast.Attribute object at 0x7da20e956590>]]
variable[messages] assign[=] call[name[output]][constant[messages]]
for taget[name[error]] in starred[name[self].errors] begin[:]
call[name[error]][constant[type]] assign[=] constant[error]
call[name[messages].append, parameter[name[error]]]
for taget[name[warning]] in starred[name[self].warnings] begin[:]
call[name[warning]][constant[type]] assign[=] constant[warning]
call[name[messages].append, parameter[name[warning]]]
for taget[name[notice]] in starred[name[self].notices] begin[:]
call[name[notice]][constant[type]] assign[=] constant[notice]
call[name[messages].append, parameter[name[notice]]]
return[call[name[json].dumps, parameter[name[output]]]] | keyword[def] identifier[render_json] ( identifier[self] ):
literal[string]
identifier[types] ={ literal[int] : literal[string] ,
literal[int] : literal[string] ,
literal[int] : literal[string] ,
literal[int] : literal[string] ,
literal[int] : literal[string] ,
literal[int] : literal[string] ,
literal[int] : literal[string] }
identifier[output] ={ literal[string] : identifier[types] [ identifier[self] . identifier[detected_type] ],
literal[string] : identifier[self] . identifier[ending_tier] ,
literal[string] : keyword[not] identifier[self] . identifier[failed] (),
literal[string] :[],
literal[string] : identifier[len] ( identifier[self] . identifier[errors] ),
literal[string] : identifier[len] ( identifier[self] . identifier[warnings] ),
literal[string] : identifier[len] ( identifier[self] . identifier[notices] ),
literal[string] : identifier[self] . identifier[message_tree] ,
literal[string] : identifier[self] . identifier[compat_summary] ,
literal[string] : identifier[self] . identifier[signing_summary] ,
literal[string] : identifier[self] . identifier[metadata] }
identifier[messages] = identifier[output] [ literal[string] ]
keyword[for] identifier[error] keyword[in] identifier[self] . identifier[errors] :
identifier[error] [ literal[string] ]= literal[string]
identifier[messages] . identifier[append] ( identifier[error] )
keyword[for] identifier[warning] keyword[in] identifier[self] . identifier[warnings] :
identifier[warning] [ literal[string] ]= literal[string]
identifier[messages] . identifier[append] ( identifier[warning] )
keyword[for] identifier[notice] keyword[in] identifier[self] . identifier[notices] :
identifier[notice] [ literal[string] ]= literal[string]
identifier[messages] . identifier[append] ( identifier[notice] )
keyword[return] identifier[json] . identifier[dumps] ( identifier[output] ) | def render_json(self):
"""Returns a JSON summary of the validation operation."""
types = {0: 'unknown', 1: 'extension', 2: 'theme', 3: 'dictionary', 4: 'langpack', 5: 'search', 8: 'webapp'}
output = {'detected_type': types[self.detected_type], 'ending_tier': self.ending_tier, 'success': not self.failed(), 'messages': [], 'errors': len(self.errors), 'warnings': len(self.warnings), 'notices': len(self.notices), 'message_tree': self.message_tree, 'compatibility_summary': self.compat_summary, 'signing_summary': self.signing_summary, 'metadata': self.metadata}
messages = output['messages']
# Copy messages to the JSON output
for error in self.errors:
error['type'] = 'error'
messages.append(error) # depends on [control=['for'], data=['error']]
for warning in self.warnings:
warning['type'] = 'warning'
messages.append(warning) # depends on [control=['for'], data=['warning']]
for notice in self.notices:
notice['type'] = 'notice'
messages.append(notice) # depends on [control=['for'], data=['notice']]
# Output the JSON.
return json.dumps(output) |
def _perform_file_action(self, filename, action):
"""Perform action on specific file types.
Dynamic dispatch function for performing actions on
specific file types.
"""
ext = os.path.splitext(filename)[1]
try:
func = getattr(self, '_{}_{}'.format(action, ext[1:]))
except AttributeError:
raise Exception('Unsupported file type {}'.format(ext[1:]))
func(filename) | def function[_perform_file_action, parameter[self, filename, action]]:
constant[Perform action on specific file types.
Dynamic dispatch function for performing actions on
specific file types.
]
variable[ext] assign[=] call[call[name[os].path.splitext, parameter[name[filename]]]][constant[1]]
<ast.Try object at 0x7da20cabc400>
call[name[func], parameter[name[filename]]] | keyword[def] identifier[_perform_file_action] ( identifier[self] , identifier[filename] , identifier[action] ):
literal[string]
identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[ literal[int] ]
keyword[try] :
identifier[func] = identifier[getattr] ( identifier[self] , literal[string] . identifier[format] ( identifier[action] , identifier[ext] [ literal[int] :]))
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[ext] [ literal[int] :]))
identifier[func] ( identifier[filename] ) | def _perform_file_action(self, filename, action):
"""Perform action on specific file types.
Dynamic dispatch function for performing actions on
specific file types.
"""
ext = os.path.splitext(filename)[1]
try:
func = getattr(self, '_{}_{}'.format(action, ext[1:])) # depends on [control=['try'], data=[]]
except AttributeError:
raise Exception('Unsupported file type {}'.format(ext[1:])) # depends on [control=['except'], data=[]]
func(filename) |
def _construct_arrow_tip(self, pos):
"""returns arrow tip as (width, widget)"""
arrow_tip = None
width = 0
if self._arrow_tip_char:
txt = urwid.Text(self._arrow_tip_char)
arrow_tip = urwid.AttrMap(
txt, self._arrow_tip_att or self._arrow_att)
width = len(self._arrow_tip_char)
return width, arrow_tip | def function[_construct_arrow_tip, parameter[self, pos]]:
constant[returns arrow tip as (width, widget)]
variable[arrow_tip] assign[=] constant[None]
variable[width] assign[=] constant[0]
if name[self]._arrow_tip_char begin[:]
variable[txt] assign[=] call[name[urwid].Text, parameter[name[self]._arrow_tip_char]]
variable[arrow_tip] assign[=] call[name[urwid].AttrMap, parameter[name[txt], <ast.BoolOp object at 0x7da20e960700>]]
variable[width] assign[=] call[name[len], parameter[name[self]._arrow_tip_char]]
return[tuple[[<ast.Name object at 0x7da2041da380>, <ast.Name object at 0x7da2041d8640>]]] | keyword[def] identifier[_construct_arrow_tip] ( identifier[self] , identifier[pos] ):
literal[string]
identifier[arrow_tip] = keyword[None]
identifier[width] = literal[int]
keyword[if] identifier[self] . identifier[_arrow_tip_char] :
identifier[txt] = identifier[urwid] . identifier[Text] ( identifier[self] . identifier[_arrow_tip_char] )
identifier[arrow_tip] = identifier[urwid] . identifier[AttrMap] (
identifier[txt] , identifier[self] . identifier[_arrow_tip_att] keyword[or] identifier[self] . identifier[_arrow_att] )
identifier[width] = identifier[len] ( identifier[self] . identifier[_arrow_tip_char] )
keyword[return] identifier[width] , identifier[arrow_tip] | def _construct_arrow_tip(self, pos):
"""returns arrow tip as (width, widget)"""
arrow_tip = None
width = 0
if self._arrow_tip_char:
txt = urwid.Text(self._arrow_tip_char)
arrow_tip = urwid.AttrMap(txt, self._arrow_tip_att or self._arrow_att)
width = len(self._arrow_tip_char) # depends on [control=['if'], data=[]]
return (width, arrow_tip) |
def to_html(self, **kwargs):
"""Render as html
Args:
None
Returns:
Str the html representation
Raises:
Errors are propagated
We pass the kwargs on to the base class so an exception is raised
if invalid keywords were passed. See:
http://stackoverflow.com/questions/13124961/
how-to-pass-arguments-efficiently-kwargs-in-python
"""
super(LineBreak, self).__init__(**kwargs)
return '<br%s/>\n' % self.html_attributes() | def function[to_html, parameter[self]]:
constant[Render as html
Args:
None
Returns:
Str the html representation
Raises:
Errors are propagated
We pass the kwargs on to the base class so an exception is raised
if invalid keywords were passed. See:
http://stackoverflow.com/questions/13124961/
how-to-pass-arguments-efficiently-kwargs-in-python
]
call[call[name[super], parameter[name[LineBreak], name[self]]].__init__, parameter[]]
return[binary_operation[constant[<br%s/>
] <ast.Mod object at 0x7da2590d6920> call[name[self].html_attributes, parameter[]]]] | keyword[def] identifier[to_html] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[super] ( identifier[LineBreak] , identifier[self] ). identifier[__init__] (** identifier[kwargs] )
keyword[return] literal[string] % identifier[self] . identifier[html_attributes] () | def to_html(self, **kwargs):
"""Render as html
Args:
None
Returns:
Str the html representation
Raises:
Errors are propagated
We pass the kwargs on to the base class so an exception is raised
if invalid keywords were passed. See:
http://stackoverflow.com/questions/13124961/
how-to-pass-arguments-efficiently-kwargs-in-python
"""
super(LineBreak, self).__init__(**kwargs)
return '<br%s/>\n' % self.html_attributes() |
def load_grid_data(file_list, data_type="binary", sort=True, delim=" "):
"""
Loads data from one or multiple grid_task files.
Arguments:
file_list - either a string or a list of strings indicating files to
load data from. Files are assumed to be in grid_task.dat
format (space delimited values, one per cell).
data_type - a string representing what type of data is in the file.
Either "binary", "int", "float", or "string".
sort - If you're making a movie, you want the files to be in
chronological order. By default, they will be sorted.
If for some reason you don't want them in chronological
order, set sort to False.
Returns: A three-dimensional array. The first dimension is columns, the
second is rows. At each row,column index in the array is another list
which holds the values that each of the requested files has at that
location in the grid. If you want this list collapsed to a single
representative number, you should use agg_niche_grid.
"""
# If there's only one file, we pretend it's a list
if not type(file_list) is list:
file_list = [file_list]
elif sort:
# put file_list in chronological order
file_list.sort(key=lambda f: int(re.sub("[^0-9]", "", f)))
world_size = get_world_dimensions(file_list[0], delim)
# Initialize empty data array
data = initialize_grid(world_size, [])
# Loop through file list, reading in data
for f in file_list:
infile = open(f)
lines = infile.readlines()
for i in range(world_size[1]):
lines[i] = lines[i].strip().split(delim)
for j in range(world_size[0]):
if data_type == "binary":
val = bin(int(lines[i][j]))
elif data_type == "float":
val = float(lines[i][j])
elif data_type == "int":
val = int(lines[i][j])
elif data_type == "string":
val = str(lines[i][j])
else:
print("Unsupported data_type passed to load_grid")
return
data[i][j].append(val)
infile.close()
return data | def function[load_grid_data, parameter[file_list, data_type, sort, delim]]:
constant[
Loads data from one or multiple grid_task files.
Arguments:
file_list - either a string or a list of strings indicating files to
load data from. Files are assumed to be in grid_task.dat
format (space delimited values, one per cell).
data_type - a string representing what type of data is in the file.
Either "binary", "int", "float", or "string".
sort - If you're making a movie, you want the files to be in
chronological order. By default, they will be sorted.
If for some reason you don't want them in chronological
order, set sort to False.
Returns: A three-dimensional array. The first dimension is columns, the
second is rows. At each row,column index in the array is another list
which holds the values that each of the requested files has at that
location in the grid. If you want this list collapsed to a single
representative number, you should use agg_niche_grid.
]
if <ast.UnaryOp object at 0x7da1b16ab2e0> begin[:]
variable[file_list] assign[=] list[[<ast.Name object at 0x7da1b16a82e0>]]
variable[world_size] assign[=] call[name[get_world_dimensions], parameter[call[name[file_list]][constant[0]], name[delim]]]
variable[data] assign[=] call[name[initialize_grid], parameter[name[world_size], list[[]]]]
for taget[name[f]] in starred[name[file_list]] begin[:]
variable[infile] assign[=] call[name[open], parameter[name[f]]]
variable[lines] assign[=] call[name[infile].readlines, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[world_size]][constant[1]]]]] begin[:]
call[name[lines]][name[i]] assign[=] call[call[call[name[lines]][name[i]].strip, parameter[]].split, parameter[name[delim]]]
for taget[name[j]] in starred[call[name[range], parameter[call[name[world_size]][constant[0]]]]] begin[:]
if compare[name[data_type] equal[==] constant[binary]] begin[:]
variable[val] assign[=] call[name[bin], parameter[call[name[int], parameter[call[call[name[lines]][name[i]]][name[j]]]]]]
call[call[call[name[data]][name[i]]][name[j]].append, parameter[name[val]]]
call[name[infile].close, parameter[]]
return[name[data]] | keyword[def] identifier[load_grid_data] ( identifier[file_list] , identifier[data_type] = literal[string] , identifier[sort] = keyword[True] , identifier[delim] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[type] ( identifier[file_list] ) keyword[is] identifier[list] :
identifier[file_list] =[ identifier[file_list] ]
keyword[elif] identifier[sort] :
identifier[file_list] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[f] : identifier[int] ( identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[f] )))
identifier[world_size] = identifier[get_world_dimensions] ( identifier[file_list] [ literal[int] ], identifier[delim] )
identifier[data] = identifier[initialize_grid] ( identifier[world_size] ,[])
keyword[for] identifier[f] keyword[in] identifier[file_list] :
identifier[infile] = identifier[open] ( identifier[f] )
identifier[lines] = identifier[infile] . identifier[readlines] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[world_size] [ literal[int] ]):
identifier[lines] [ identifier[i] ]= identifier[lines] [ identifier[i] ]. identifier[strip] (). identifier[split] ( identifier[delim] )
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[world_size] [ literal[int] ]):
keyword[if] identifier[data_type] == literal[string] :
identifier[val] = identifier[bin] ( identifier[int] ( identifier[lines] [ identifier[i] ][ identifier[j] ]))
keyword[elif] identifier[data_type] == literal[string] :
identifier[val] = identifier[float] ( identifier[lines] [ identifier[i] ][ identifier[j] ])
keyword[elif] identifier[data_type] == literal[string] :
identifier[val] = identifier[int] ( identifier[lines] [ identifier[i] ][ identifier[j] ])
keyword[elif] identifier[data_type] == literal[string] :
identifier[val] = identifier[str] ( identifier[lines] [ identifier[i] ][ identifier[j] ])
keyword[else] :
identifier[print] ( literal[string] )
keyword[return]
identifier[data] [ identifier[i] ][ identifier[j] ]. identifier[append] ( identifier[val] )
identifier[infile] . identifier[close] ()
keyword[return] identifier[data] | def load_grid_data(file_list, data_type='binary', sort=True, delim=' '):
"""
Loads data from one or multiple grid_task files.
Arguments:
file_list - either a string or a list of strings indicating files to
load data from. Files are assumed to be in grid_task.dat
format (space delimited values, one per cell).
data_type - a string representing what type of data is in the file.
Either "binary", "int", "float", or "string".
sort - If you're making a movie, you want the files to be in
chronological order. By default, they will be sorted.
If for some reason you don't want them in chronological
order, set sort to False.
Returns: A three-dimensional array. The first dimension is columns, the
second is rows. At each row,column index in the array is another list
which holds the values that each of the requested files has at that
location in the grid. If you want this list collapsed to a single
representative number, you should use agg_niche_grid.
"""
# If there's only one file, we pretend it's a list
if not type(file_list) is list:
file_list = [file_list] # depends on [control=['if'], data=[]]
elif sort:
# put file_list in chronological order
file_list.sort(key=lambda f: int(re.sub('[^0-9]', '', f))) # depends on [control=['if'], data=[]]
world_size = get_world_dimensions(file_list[0], delim)
# Initialize empty data array
data = initialize_grid(world_size, [])
# Loop through file list, reading in data
for f in file_list:
infile = open(f)
lines = infile.readlines()
for i in range(world_size[1]):
lines[i] = lines[i].strip().split(delim)
for j in range(world_size[0]):
if data_type == 'binary':
val = bin(int(lines[i][j])) # depends on [control=['if'], data=[]]
elif data_type == 'float':
val = float(lines[i][j]) # depends on [control=['if'], data=[]]
elif data_type == 'int':
val = int(lines[i][j]) # depends on [control=['if'], data=[]]
elif data_type == 'string':
val = str(lines[i][j]) # depends on [control=['if'], data=[]]
else:
print('Unsupported data_type passed to load_grid')
return
data[i][j].append(val) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
infile.close() # depends on [control=['for'], data=['f']]
return data |
def _func(self, volume, params):
"""
From Intermetallic compounds: Principles and Practice, Vol. I:
Principles Chapter 9 pages 195-210 by M. Mehl. B. Klein,
D. Papaconstantopoulos.
case where n=0
"""
e0, b0, b1, v0 = tuple(params)
return (e0
+ 9.0 / 8.0 * b0 * v0 * ((v0 / volume)**(2.0/3.0) - 1.0) ** 2
+ 9.0 / 16.0 * b0 * v0 * (b1 - 4.) *
((v0 / volume)**(2.0/3.0) - 1.0) ** 3) | def function[_func, parameter[self, volume, params]]:
constant[
From Intermetallic compounds: Principles and Practice, Vol. I:
Principles Chapter 9 pages 195-210 by M. Mehl. B. Klein,
D. Papaconstantopoulos.
case where n=0
]
<ast.Tuple object at 0x7da18dc9bd60> assign[=] call[name[tuple], parameter[name[params]]]
return[binary_operation[binary_operation[name[e0] + binary_operation[binary_operation[binary_operation[binary_operation[constant[9.0] / constant[8.0]] * name[b0]] * name[v0]] * binary_operation[binary_operation[binary_operation[binary_operation[name[v0] / name[volume]] ** binary_operation[constant[2.0] / constant[3.0]]] - constant[1.0]] ** constant[2]]]] + binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[9.0] / constant[16.0]] * name[b0]] * name[v0]] * binary_operation[name[b1] - constant[4.0]]] * binary_operation[binary_operation[binary_operation[binary_operation[name[v0] / name[volume]] ** binary_operation[constant[2.0] / constant[3.0]]] - constant[1.0]] ** constant[3]]]]] | keyword[def] identifier[_func] ( identifier[self] , identifier[volume] , identifier[params] ):
literal[string]
identifier[e0] , identifier[b0] , identifier[b1] , identifier[v0] = identifier[tuple] ( identifier[params] )
keyword[return] ( identifier[e0]
+ literal[int] / literal[int] * identifier[b0] * identifier[v0] *(( identifier[v0] / identifier[volume] )**( literal[int] / literal[int] )- literal[int] )** literal[int]
+ literal[int] / literal[int] * identifier[b0] * identifier[v0] *( identifier[b1] - literal[int] )*
(( identifier[v0] / identifier[volume] )**( literal[int] / literal[int] )- literal[int] )** literal[int] ) | def _func(self, volume, params):
"""
From Intermetallic compounds: Principles and Practice, Vol. I:
Principles Chapter 9 pages 195-210 by M. Mehl. B. Klein,
D. Papaconstantopoulos.
case where n=0
"""
(e0, b0, b1, v0) = tuple(params)
return e0 + 9.0 / 8.0 * b0 * v0 * ((v0 / volume) ** (2.0 / 3.0) - 1.0) ** 2 + 9.0 / 16.0 * b0 * v0 * (b1 - 4.0) * ((v0 / volume) ** (2.0 / 3.0) - 1.0) ** 3 |
def ktotal(kind):
"""
Return the current number of kernels that have been loaded
via the KEEPER interface that are of a specified type.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ktotal_c.html
:param kind: A list of kinds of kernels to count.
:type kind: str
:return: The number of kernels of type kind.
:rtype: int
"""
kind = stypes.stringToCharP(kind)
count = ctypes.c_int()
libspice.ktotal_c(kind, ctypes.byref(count))
return count.value | def function[ktotal, parameter[kind]]:
constant[
Return the current number of kernels that have been loaded
via the KEEPER interface that are of a specified type.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ktotal_c.html
:param kind: A list of kinds of kernels to count.
:type kind: str
:return: The number of kernels of type kind.
:rtype: int
]
variable[kind] assign[=] call[name[stypes].stringToCharP, parameter[name[kind]]]
variable[count] assign[=] call[name[ctypes].c_int, parameter[]]
call[name[libspice].ktotal_c, parameter[name[kind], call[name[ctypes].byref, parameter[name[count]]]]]
return[name[count].value] | keyword[def] identifier[ktotal] ( identifier[kind] ):
literal[string]
identifier[kind] = identifier[stypes] . identifier[stringToCharP] ( identifier[kind] )
identifier[count] = identifier[ctypes] . identifier[c_int] ()
identifier[libspice] . identifier[ktotal_c] ( identifier[kind] , identifier[ctypes] . identifier[byref] ( identifier[count] ))
keyword[return] identifier[count] . identifier[value] | def ktotal(kind):
"""
Return the current number of kernels that have been loaded
via the KEEPER interface that are of a specified type.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ktotal_c.html
:param kind: A list of kinds of kernels to count.
:type kind: str
:return: The number of kernels of type kind.
:rtype: int
"""
kind = stypes.stringToCharP(kind)
count = ctypes.c_int()
libspice.ktotal_c(kind, ctypes.byref(count))
return count.value |
def _new_sensor_reading(self, sensor_value):
"""
Call this method to signal a new sensor reading.
This method handles DB storage and triggers different events.
:param value:
New value to be stored in the system.
"""
if not self._active and not self._enabled:
return
if self._dimensions > 1:
for dimension in range(0, self._dimensions):
value = sensor_value[dimension]
self._sub_sensors[dimension]._new_sensor_reading(value)
else:
self._sensor_value.value = sensor_value | def function[_new_sensor_reading, parameter[self, sensor_value]]:
constant[
Call this method to signal a new sensor reading.
This method handles DB storage and triggers different events.
:param value:
New value to be stored in the system.
]
if <ast.BoolOp object at 0x7da18f09cfa0> begin[:]
return[None]
if compare[name[self]._dimensions greater[>] constant[1]] begin[:]
for taget[name[dimension]] in starred[call[name[range], parameter[constant[0], name[self]._dimensions]]] begin[:]
variable[value] assign[=] call[name[sensor_value]][name[dimension]]
call[call[name[self]._sub_sensors][name[dimension]]._new_sensor_reading, parameter[name[value]]] | keyword[def] identifier[_new_sensor_reading] ( identifier[self] , identifier[sensor_value] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_active] keyword[and] keyword[not] identifier[self] . identifier[_enabled] :
keyword[return]
keyword[if] identifier[self] . identifier[_dimensions] > literal[int] :
keyword[for] identifier[dimension] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[_dimensions] ):
identifier[value] = identifier[sensor_value] [ identifier[dimension] ]
identifier[self] . identifier[_sub_sensors] [ identifier[dimension] ]. identifier[_new_sensor_reading] ( identifier[value] )
keyword[else] :
identifier[self] . identifier[_sensor_value] . identifier[value] = identifier[sensor_value] | def _new_sensor_reading(self, sensor_value):
"""
Call this method to signal a new sensor reading.
This method handles DB storage and triggers different events.
:param value:
New value to be stored in the system.
"""
if not self._active and (not self._enabled):
return # depends on [control=['if'], data=[]]
if self._dimensions > 1:
for dimension in range(0, self._dimensions):
value = sensor_value[dimension]
self._sub_sensors[dimension]._new_sensor_reading(value) # depends on [control=['for'], data=['dimension']] # depends on [control=['if'], data=[]]
else:
self._sensor_value.value = sensor_value |
def get_dns_dir(self, config_file):
""" grab directory path from named{conf,boot}
"""
directory_list = self.do_regex_find_all(r"directory\s+\"(.*)\"",
config_file)
if directory_list:
return normpath(directory_list[0])
else:
return "" | def function[get_dns_dir, parameter[self, config_file]]:
constant[ grab directory path from named{conf,boot}
]
variable[directory_list] assign[=] call[name[self].do_regex_find_all, parameter[constant[directory\s+\"(.*)\"], name[config_file]]]
if name[directory_list] begin[:]
return[call[name[normpath], parameter[call[name[directory_list]][constant[0]]]]] | keyword[def] identifier[get_dns_dir] ( identifier[self] , identifier[config_file] ):
literal[string]
identifier[directory_list] = identifier[self] . identifier[do_regex_find_all] ( literal[string] ,
identifier[config_file] )
keyword[if] identifier[directory_list] :
keyword[return] identifier[normpath] ( identifier[directory_list] [ literal[int] ])
keyword[else] :
keyword[return] literal[string] | def get_dns_dir(self, config_file):
""" grab directory path from named{conf,boot}
"""
directory_list = self.do_regex_find_all('directory\\s+\\"(.*)\\"', config_file)
if directory_list:
return normpath(directory_list[0]) # depends on [control=['if'], data=[]]
else:
return '' |
def get_triangles(self, var, coords=None, convert_radian=True,
copy=False, src_crs=None, target_crs=None,
nans=None, stacklevel=1):
"""
Get the triangles for the variable
Parameters
----------
var: xarray.Variable or xarray.DataArray
The variable to use
coords: dict
Alternative coordinates to use. If None, the coordinates of the
:attr:`ds` dataset are used
convert_radian: bool
If True and the coordinate has units in 'radian', those are
converted to degrees
copy: bool
If True, vertice arrays are copied
src_crs: cartopy.crs.Crs
The source projection of the data. If not None, a transformation
to the given `target_crs` will be done
target_crs: cartopy.crs.Crs
The target projection for which the triangles shall be transformed.
Must only be provided if the `src_crs` is not None.
%(CFDecoder._check_triangular_bounds.parameters.nans)s
Returns
-------
matplotlib.tri.Triangulation
The spatial triangles of the variable
Raises
------
ValueError
If `src_crs` is not None and `target_crs` is None"""
warn("The 'get_triangles' method is depreceated and will be removed "
"soon! Use the 'get_cell_node_coord' method!",
DeprecationWarning, stacklevel=stacklevel)
from matplotlib.tri import Triangulation
def get_vertices(axis):
bounds = self._check_triangular_bounds(var, coords=coords,
axis=axis, nans=nans)[1]
if coords is not None:
bounds = coords.get(bounds.name, bounds)
vertices = bounds.values.ravel()
if convert_radian:
coord = getattr(self, 'get_' + axis)(var)
if coord.attrs.get('units') == 'radian':
vertices = vertices * 180. / np.pi
return vertices if not copy else vertices.copy()
if coords is None:
coords = self.ds.coords
xvert = get_vertices('x')
yvert = get_vertices('y')
if src_crs is not None and src_crs != target_crs:
if target_crs is None:
raise ValueError(
"Found %s for the source crs but got None for the "
"target_crs!" % (src_crs, ))
arr = target_crs.transform_points(src_crs, xvert, yvert)
xvert = arr[:, 0]
yvert = arr[:, 1]
triangles = np.reshape(range(len(xvert)), (len(xvert) // 3, 3))
return Triangulation(xvert, yvert, triangles) | def function[get_triangles, parameter[self, var, coords, convert_radian, copy, src_crs, target_crs, nans, stacklevel]]:
constant[
Get the triangles for the variable
Parameters
----------
var: xarray.Variable or xarray.DataArray
The variable to use
coords: dict
Alternative coordinates to use. If None, the coordinates of the
:attr:`ds` dataset are used
convert_radian: bool
If True and the coordinate has units in 'radian', those are
converted to degrees
copy: bool
If True, vertice arrays are copied
src_crs: cartopy.crs.Crs
The source projection of the data. If not None, a transformation
to the given `target_crs` will be done
target_crs: cartopy.crs.Crs
The target projection for which the triangles shall be transformed.
Must only be provided if the `src_crs` is not None.
%(CFDecoder._check_triangular_bounds.parameters.nans)s
Returns
-------
matplotlib.tri.Triangulation
The spatial triangles of the variable
Raises
------
ValueError
If `src_crs` is not None and `target_crs` is None]
call[name[warn], parameter[constant[The 'get_triangles' method is depreceated and will be removed soon! Use the 'get_cell_node_coord' method!], name[DeprecationWarning]]]
from relative_module[matplotlib.tri] import module[Triangulation]
def function[get_vertices, parameter[axis]]:
variable[bounds] assign[=] call[call[name[self]._check_triangular_bounds, parameter[name[var]]]][constant[1]]
if compare[name[coords] is_not constant[None]] begin[:]
variable[bounds] assign[=] call[name[coords].get, parameter[name[bounds].name, name[bounds]]]
variable[vertices] assign[=] call[name[bounds].values.ravel, parameter[]]
if name[convert_radian] begin[:]
variable[coord] assign[=] call[call[name[getattr], parameter[name[self], binary_operation[constant[get_] + name[axis]]]], parameter[name[var]]]
if compare[call[name[coord].attrs.get, parameter[constant[units]]] equal[==] constant[radian]] begin[:]
variable[vertices] assign[=] binary_operation[binary_operation[name[vertices] * constant[180.0]] / name[np].pi]
return[<ast.IfExp object at 0x7da18dc07bb0>]
if compare[name[coords] is constant[None]] begin[:]
variable[coords] assign[=] name[self].ds.coords
variable[xvert] assign[=] call[name[get_vertices], parameter[constant[x]]]
variable[yvert] assign[=] call[name[get_vertices], parameter[constant[y]]]
if <ast.BoolOp object at 0x7da18dc064d0> begin[:]
if compare[name[target_crs] is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc04580>
variable[arr] assign[=] call[name[target_crs].transform_points, parameter[name[src_crs], name[xvert], name[yvert]]]
variable[xvert] assign[=] call[name[arr]][tuple[[<ast.Slice object at 0x7da1b1913550>, <ast.Constant object at 0x7da1b1912e30>]]]
variable[yvert] assign[=] call[name[arr]][tuple[[<ast.Slice object at 0x7da1b1910250>, <ast.Constant object at 0x7da1b1910bb0>]]]
variable[triangles] assign[=] call[name[np].reshape, parameter[call[name[range], parameter[call[name[len], parameter[name[xvert]]]]], tuple[[<ast.BinOp object at 0x7da1b19107c0>, <ast.Constant object at 0x7da1b1911ea0>]]]]
return[call[name[Triangulation], parameter[name[xvert], name[yvert], name[triangles]]]] | keyword[def] identifier[get_triangles] ( identifier[self] , identifier[var] , identifier[coords] = keyword[None] , identifier[convert_radian] = keyword[True] ,
identifier[copy] = keyword[False] , identifier[src_crs] = keyword[None] , identifier[target_crs] = keyword[None] ,
identifier[nans] = keyword[None] , identifier[stacklevel] = literal[int] ):
literal[string]
identifier[warn] ( literal[string]
literal[string] ,
identifier[DeprecationWarning] , identifier[stacklevel] = identifier[stacklevel] )
keyword[from] identifier[matplotlib] . identifier[tri] keyword[import] identifier[Triangulation]
keyword[def] identifier[get_vertices] ( identifier[axis] ):
identifier[bounds] = identifier[self] . identifier[_check_triangular_bounds] ( identifier[var] , identifier[coords] = identifier[coords] ,
identifier[axis] = identifier[axis] , identifier[nans] = identifier[nans] )[ literal[int] ]
keyword[if] identifier[coords] keyword[is] keyword[not] keyword[None] :
identifier[bounds] = identifier[coords] . identifier[get] ( identifier[bounds] . identifier[name] , identifier[bounds] )
identifier[vertices] = identifier[bounds] . identifier[values] . identifier[ravel] ()
keyword[if] identifier[convert_radian] :
identifier[coord] = identifier[getattr] ( identifier[self] , literal[string] + identifier[axis] )( identifier[var] )
keyword[if] identifier[coord] . identifier[attrs] . identifier[get] ( literal[string] )== literal[string] :
identifier[vertices] = identifier[vertices] * literal[int] / identifier[np] . identifier[pi]
keyword[return] identifier[vertices] keyword[if] keyword[not] identifier[copy] keyword[else] identifier[vertices] . identifier[copy] ()
keyword[if] identifier[coords] keyword[is] keyword[None] :
identifier[coords] = identifier[self] . identifier[ds] . identifier[coords]
identifier[xvert] = identifier[get_vertices] ( literal[string] )
identifier[yvert] = identifier[get_vertices] ( literal[string] )
keyword[if] identifier[src_crs] keyword[is] keyword[not] keyword[None] keyword[and] identifier[src_crs] != identifier[target_crs] :
keyword[if] identifier[target_crs] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] %( identifier[src_crs] ,))
identifier[arr] = identifier[target_crs] . identifier[transform_points] ( identifier[src_crs] , identifier[xvert] , identifier[yvert] )
identifier[xvert] = identifier[arr] [:, literal[int] ]
identifier[yvert] = identifier[arr] [:, literal[int] ]
identifier[triangles] = identifier[np] . identifier[reshape] ( identifier[range] ( identifier[len] ( identifier[xvert] )),( identifier[len] ( identifier[xvert] )// literal[int] , literal[int] ))
keyword[return] identifier[Triangulation] ( identifier[xvert] , identifier[yvert] , identifier[triangles] ) | def get_triangles(self, var, coords=None, convert_radian=True, copy=False, src_crs=None, target_crs=None, nans=None, stacklevel=1):
"""
Get the triangles for the variable
Parameters
----------
var: xarray.Variable or xarray.DataArray
The variable to use
coords: dict
Alternative coordinates to use. If None, the coordinates of the
:attr:`ds` dataset are used
convert_radian: bool
If True and the coordinate has units in 'radian', those are
converted to degrees
copy: bool
If True, vertice arrays are copied
src_crs: cartopy.crs.Crs
The source projection of the data. If not None, a transformation
to the given `target_crs` will be done
target_crs: cartopy.crs.Crs
The target projection for which the triangles shall be transformed.
Must only be provided if the `src_crs` is not None.
%(CFDecoder._check_triangular_bounds.parameters.nans)s
Returns
-------
matplotlib.tri.Triangulation
The spatial triangles of the variable
Raises
------
ValueError
If `src_crs` is not None and `target_crs` is None"""
warn("The 'get_triangles' method is depreceated and will be removed soon! Use the 'get_cell_node_coord' method!", DeprecationWarning, stacklevel=stacklevel)
from matplotlib.tri import Triangulation
def get_vertices(axis):
bounds = self._check_triangular_bounds(var, coords=coords, axis=axis, nans=nans)[1]
if coords is not None:
bounds = coords.get(bounds.name, bounds) # depends on [control=['if'], data=['coords']]
vertices = bounds.values.ravel()
if convert_radian:
coord = getattr(self, 'get_' + axis)(var)
if coord.attrs.get('units') == 'radian':
vertices = vertices * 180.0 / np.pi # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return vertices if not copy else vertices.copy()
if coords is None:
coords = self.ds.coords # depends on [control=['if'], data=['coords']]
xvert = get_vertices('x')
yvert = get_vertices('y')
if src_crs is not None and src_crs != target_crs:
if target_crs is None:
raise ValueError('Found %s for the source crs but got None for the target_crs!' % (src_crs,)) # depends on [control=['if'], data=[]]
arr = target_crs.transform_points(src_crs, xvert, yvert)
xvert = arr[:, 0]
yvert = arr[:, 1] # depends on [control=['if'], data=[]]
triangles = np.reshape(range(len(xvert)), (len(xvert) // 3, 3))
return Triangulation(xvert, yvert, triangles) |
def lograptor(files, patterns=None, matcher='ruled', cfgfiles=None, apps=None, hosts=None,
filters=None, time_period=None, time_range=None, case=False, invert=False,
word=False, files_with_match=None, count=False, quiet=False, max_count=0,
only_matching=False, line_number=False, with_filename=None,
ip_lookup=False, uid_lookup=False, anonymize=False, thread=False,
before_context=0, after_context=0, context=0):
"""
Run lograptor with arguments. Experimental feature to use the log processor into
generic Python scripts. This part is still under development, do not use.
:param files: Input files. Each argument can be a file path or a glob pathname.
:param patterns: Regex patterns, select the log line if at least one pattern matches.
:param matcher: Matcher engine, can be 'ruled' (default), 'unruled' or 'unparsed'.
:param cfgfiles: use a specific configuration file.
:param apps: process the log lines related to a list of applications.
:param hosts: process the log lines related to a list of hosts.
:param filters: process the log lines that match all the conditions for rule's field values.
:param time_range: process the log lines related to a time range.
:param time_period: restrict the search scope to a date or a date interval.
:param case: ignore case distinctions, defaults to `False`.
:param invert: invert the sense of patterns regexp matching.
:param word: force PATTERN to match only whole words.
:param files_with_match: get only names of FILEs containing matches, defaults is `False`.
:param count: get only a count of matching lines per FILE.
:param quiet: suppress all normal output.
:param max_count: stop after NUM matches.
:param only_matching: get only the part of a line matching PATTERN.
:param line_number: get line number with output lines.
:param with_filename: get or suppress the file name for each match.
:param ip_lookup: translate IP addresses to DNS names.
:param uid_lookup: translate numeric UIDs to usernames.
:param anonymize: anonymize defined rule's fields value.
:param thread: get the lines of logs related to each log line selected.
:param before_context: get NUM lines of leading context for each log line selected.
:param after_context: get NUM lines of trailing context for each log line selected.
:param context: get NUM lines of output context for each log line selected.
:return:
"""
cli_parser = create_argument_parser()
args = cli_parser.parse_args()
args.files = files
args.matcher = matcher
args.cfgfiles = cfgfiles
args.time_period = time_period
args.time_range = time_range
args.case = case
args.invert = invert
args.word = word
args.files_with_match = files_with_match
args.count = count
args.quiet = quiet
args.max_count = max_count
args.only_matching = only_matching
args.line_number = line_number
args.with_filename = with_filename
args.anonymize = anonymize
args.ip_lookup = ip_lookup
args.uid_lookup = uid_lookup
args.thread = thread
args.context = context
args.after_context = after_context
args.before_context = before_context
args.patterns = [''] if patterns is None else patterns
if apps is not None:
args.apps = apps
if hosts is not None:
args.hosts = hosts
if filters is not None:
args.filters = filters
_lograptor = LogRaptor(args)
return _lograptor() | def function[lograptor, parameter[files, patterns, matcher, cfgfiles, apps, hosts, filters, time_period, time_range, case, invert, word, files_with_match, count, quiet, max_count, only_matching, line_number, with_filename, ip_lookup, uid_lookup, anonymize, thread, before_context, after_context, context]]:
constant[
Run lograptor with arguments. Experimental feature to use the log processor into
generic Python scripts. This part is still under development, do not use.
:param files: Input files. Each argument can be a file path or a glob pathname.
:param patterns: Regex patterns, select the log line if at least one pattern matches.
:param matcher: Matcher engine, can be 'ruled' (default), 'unruled' or 'unparsed'.
:param cfgfiles: use a specific configuration file.
:param apps: process the log lines related to a list of applications.
:param hosts: process the log lines related to a list of hosts.
:param filters: process the log lines that match all the conditions for rule's field values.
:param time_range: process the log lines related to a time range.
:param time_period: restrict the search scope to a date or a date interval.
:param case: ignore case distinctions, defaults to `False`.
:param invert: invert the sense of patterns regexp matching.
:param word: force PATTERN to match only whole words.
:param files_with_match: get only names of FILEs containing matches, defaults is `False`.
:param count: get only a count of matching lines per FILE.
:param quiet: suppress all normal output.
:param max_count: stop after NUM matches.
:param only_matching: get only the part of a line matching PATTERN.
:param line_number: get line number with output lines.
:param with_filename: get or suppress the file name for each match.
:param ip_lookup: translate IP addresses to DNS names.
:param uid_lookup: translate numeric UIDs to usernames.
:param anonymize: anonymize defined rule's fields value.
:param thread: get the lines of logs related to each log line selected.
:param before_context: get NUM lines of leading context for each log line selected.
:param after_context: get NUM lines of trailing context for each log line selected.
:param context: get NUM lines of output context for each log line selected.
:return:
]
variable[cli_parser] assign[=] call[name[create_argument_parser], parameter[]]
variable[args] assign[=] call[name[cli_parser].parse_args, parameter[]]
name[args].files assign[=] name[files]
name[args].matcher assign[=] name[matcher]
name[args].cfgfiles assign[=] name[cfgfiles]
name[args].time_period assign[=] name[time_period]
name[args].time_range assign[=] name[time_range]
name[args].case assign[=] name[case]
name[args].invert assign[=] name[invert]
name[args].word assign[=] name[word]
name[args].files_with_match assign[=] name[files_with_match]
name[args].count assign[=] name[count]
name[args].quiet assign[=] name[quiet]
name[args].max_count assign[=] name[max_count]
name[args].only_matching assign[=] name[only_matching]
name[args].line_number assign[=] name[line_number]
name[args].with_filename assign[=] name[with_filename]
name[args].anonymize assign[=] name[anonymize]
name[args].ip_lookup assign[=] name[ip_lookup]
name[args].uid_lookup assign[=] name[uid_lookup]
name[args].thread assign[=] name[thread]
name[args].context assign[=] name[context]
name[args].after_context assign[=] name[after_context]
name[args].before_context assign[=] name[before_context]
name[args].patterns assign[=] <ast.IfExp object at 0x7da204623760>
if compare[name[apps] is_not constant[None]] begin[:]
name[args].apps assign[=] name[apps]
if compare[name[hosts] is_not constant[None]] begin[:]
name[args].hosts assign[=] name[hosts]
if compare[name[filters] is_not constant[None]] begin[:]
name[args].filters assign[=] name[filters]
variable[_lograptor] assign[=] call[name[LogRaptor], parameter[name[args]]]
return[call[name[_lograptor], parameter[]]] | keyword[def] identifier[lograptor] ( identifier[files] , identifier[patterns] = keyword[None] , identifier[matcher] = literal[string] , identifier[cfgfiles] = keyword[None] , identifier[apps] = keyword[None] , identifier[hosts] = keyword[None] ,
identifier[filters] = keyword[None] , identifier[time_period] = keyword[None] , identifier[time_range] = keyword[None] , identifier[case] = keyword[False] , identifier[invert] = keyword[False] ,
identifier[word] = keyword[False] , identifier[files_with_match] = keyword[None] , identifier[count] = keyword[False] , identifier[quiet] = keyword[False] , identifier[max_count] = literal[int] ,
identifier[only_matching] = keyword[False] , identifier[line_number] = keyword[False] , identifier[with_filename] = keyword[None] ,
identifier[ip_lookup] = keyword[False] , identifier[uid_lookup] = keyword[False] , identifier[anonymize] = keyword[False] , identifier[thread] = keyword[False] ,
identifier[before_context] = literal[int] , identifier[after_context] = literal[int] , identifier[context] = literal[int] ):
literal[string]
identifier[cli_parser] = identifier[create_argument_parser] ()
identifier[args] = identifier[cli_parser] . identifier[parse_args] ()
identifier[args] . identifier[files] = identifier[files]
identifier[args] . identifier[matcher] = identifier[matcher]
identifier[args] . identifier[cfgfiles] = identifier[cfgfiles]
identifier[args] . identifier[time_period] = identifier[time_period]
identifier[args] . identifier[time_range] = identifier[time_range]
identifier[args] . identifier[case] = identifier[case]
identifier[args] . identifier[invert] = identifier[invert]
identifier[args] . identifier[word] = identifier[word]
identifier[args] . identifier[files_with_match] = identifier[files_with_match]
identifier[args] . identifier[count] = identifier[count]
identifier[args] . identifier[quiet] = identifier[quiet]
identifier[args] . identifier[max_count] = identifier[max_count]
identifier[args] . identifier[only_matching] = identifier[only_matching]
identifier[args] . identifier[line_number] = identifier[line_number]
identifier[args] . identifier[with_filename] = identifier[with_filename]
identifier[args] . identifier[anonymize] = identifier[anonymize]
identifier[args] . identifier[ip_lookup] = identifier[ip_lookup]
identifier[args] . identifier[uid_lookup] = identifier[uid_lookup]
identifier[args] . identifier[thread] = identifier[thread]
identifier[args] . identifier[context] = identifier[context]
identifier[args] . identifier[after_context] = identifier[after_context]
identifier[args] . identifier[before_context] = identifier[before_context]
identifier[args] . identifier[patterns] =[ literal[string] ] keyword[if] identifier[patterns] keyword[is] keyword[None] keyword[else] identifier[patterns]
keyword[if] identifier[apps] keyword[is] keyword[not] keyword[None] :
identifier[args] . identifier[apps] = identifier[apps]
keyword[if] identifier[hosts] keyword[is] keyword[not] keyword[None] :
identifier[args] . identifier[hosts] = identifier[hosts]
keyword[if] identifier[filters] keyword[is] keyword[not] keyword[None] :
identifier[args] . identifier[filters] = identifier[filters]
identifier[_lograptor] = identifier[LogRaptor] ( identifier[args] )
keyword[return] identifier[_lograptor] () | def lograptor(files, patterns=None, matcher='ruled', cfgfiles=None, apps=None, hosts=None, filters=None, time_period=None, time_range=None, case=False, invert=False, word=False, files_with_match=None, count=False, quiet=False, max_count=0, only_matching=False, line_number=False, with_filename=None, ip_lookup=False, uid_lookup=False, anonymize=False, thread=False, before_context=0, after_context=0, context=0):
"""
Run lograptor with arguments. Experimental feature to use the log processor into
generic Python scripts. This part is still under development, do not use.
:param files: Input files. Each argument can be a file path or a glob pathname.
:param patterns: Regex patterns, select the log line if at least one pattern matches.
:param matcher: Matcher engine, can be 'ruled' (default), 'unruled' or 'unparsed'.
:param cfgfiles: use a specific configuration file.
:param apps: process the log lines related to a list of applications.
:param hosts: process the log lines related to a list of hosts.
:param filters: process the log lines that match all the conditions for rule's field values.
:param time_range: process the log lines related to a time range.
:param time_period: restrict the search scope to a date or a date interval.
:param case: ignore case distinctions, defaults to `False`.
:param invert: invert the sense of patterns regexp matching.
:param word: force PATTERN to match only whole words.
:param files_with_match: get only names of FILEs containing matches, defaults is `False`.
:param count: get only a count of matching lines per FILE.
:param quiet: suppress all normal output.
:param max_count: stop after NUM matches.
:param only_matching: get only the part of a line matching PATTERN.
:param line_number: get line number with output lines.
:param with_filename: get or suppress the file name for each match.
:param ip_lookup: translate IP addresses to DNS names.
:param uid_lookup: translate numeric UIDs to usernames.
:param anonymize: anonymize defined rule's fields value.
:param thread: get the lines of logs related to each log line selected.
:param before_context: get NUM lines of leading context for each log line selected.
:param after_context: get NUM lines of trailing context for each log line selected.
:param context: get NUM lines of output context for each log line selected.
:return:
"""
cli_parser = create_argument_parser()
args = cli_parser.parse_args()
args.files = files
args.matcher = matcher
args.cfgfiles = cfgfiles
args.time_period = time_period
args.time_range = time_range
args.case = case
args.invert = invert
args.word = word
args.files_with_match = files_with_match
args.count = count
args.quiet = quiet
args.max_count = max_count
args.only_matching = only_matching
args.line_number = line_number
args.with_filename = with_filename
args.anonymize = anonymize
args.ip_lookup = ip_lookup
args.uid_lookup = uid_lookup
args.thread = thread
args.context = context
args.after_context = after_context
args.before_context = before_context
args.patterns = [''] if patterns is None else patterns
if apps is not None:
args.apps = apps # depends on [control=['if'], data=['apps']]
if hosts is not None:
args.hosts = hosts # depends on [control=['if'], data=['hosts']]
if filters is not None:
args.filters = filters # depends on [control=['if'], data=['filters']]
_lograptor = LogRaptor(args)
return _lograptor() |
def save(self, *args, **kwargs):
"""
Validate that the rating falls between the min and max values.
"""
valid = map(str, settings.RATINGS_RANGE)
if str(self.value) not in valid:
raise ValueError("Invalid rating. %s is not in %s" % (self.value,
", ".join(valid)))
super(Rating, self).save(*args, **kwargs) | def function[save, parameter[self]]:
constant[
Validate that the rating falls between the min and max values.
]
variable[valid] assign[=] call[name[map], parameter[name[str], name[settings].RATINGS_RANGE]]
if compare[call[name[str], parameter[name[self].value]] <ast.NotIn object at 0x7da2590d7190> name[valid]] begin[:]
<ast.Raise object at 0x7da18f00dc60>
call[call[name[super], parameter[name[Rating], name[self]]].save, parameter[<ast.Starred object at 0x7da18f00c8b0>]] | keyword[def] identifier[save] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[valid] = identifier[map] ( identifier[str] , identifier[settings] . identifier[RATINGS_RANGE] )
keyword[if] identifier[str] ( identifier[self] . identifier[value] ) keyword[not] keyword[in] identifier[valid] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[self] . identifier[value] ,
literal[string] . identifier[join] ( identifier[valid] )))
identifier[super] ( identifier[Rating] , identifier[self] ). identifier[save] (* identifier[args] ,** identifier[kwargs] ) | def save(self, *args, **kwargs):
"""
Validate that the rating falls between the min and max values.
"""
valid = map(str, settings.RATINGS_RANGE)
if str(self.value) not in valid:
raise ValueError('Invalid rating. %s is not in %s' % (self.value, ', '.join(valid))) # depends on [control=['if'], data=['valid']]
super(Rating, self).save(*args, **kwargs) |
def _validate_label(self, label):
"""Validates label, raising error if invalid."""
letter_pattern = compile("^[a-z]{1}$")
number_pattern = compile("^[0]{1}$|^[1-9]{1,2}$")
icon_pattern = compile("^[a-zA-Z ]{1,}$")
if not match(letter_pattern, label)\
and not match(number_pattern, label)\
and not match(icon_pattern, label):
raise InvalidLabelError(
"{} is not a valid label".format(label)
)
return label | def function[_validate_label, parameter[self, label]]:
constant[Validates label, raising error if invalid.]
variable[letter_pattern] assign[=] call[name[compile], parameter[constant[^[a-z]{1}$]]]
variable[number_pattern] assign[=] call[name[compile], parameter[constant[^[0]{1}$|^[1-9]{1,2}$]]]
variable[icon_pattern] assign[=] call[name[compile], parameter[constant[^[a-zA-Z ]{1,}$]]]
if <ast.BoolOp object at 0x7da1b18fc940> begin[:]
<ast.Raise object at 0x7da1b1844760>
return[name[label]] | keyword[def] identifier[_validate_label] ( identifier[self] , identifier[label] ):
literal[string]
identifier[letter_pattern] = identifier[compile] ( literal[string] )
identifier[number_pattern] = identifier[compile] ( literal[string] )
identifier[icon_pattern] = identifier[compile] ( literal[string] )
keyword[if] keyword[not] identifier[match] ( identifier[letter_pattern] , identifier[label] ) keyword[and] keyword[not] identifier[match] ( identifier[number_pattern] , identifier[label] ) keyword[and] keyword[not] identifier[match] ( identifier[icon_pattern] , identifier[label] ):
keyword[raise] identifier[InvalidLabelError] (
literal[string] . identifier[format] ( identifier[label] )
)
keyword[return] identifier[label] | def _validate_label(self, label):
"""Validates label, raising error if invalid."""
letter_pattern = compile('^[a-z]{1}$')
number_pattern = compile('^[0]{1}$|^[1-9]{1,2}$')
icon_pattern = compile('^[a-zA-Z ]{1,}$')
if not match(letter_pattern, label) and (not match(number_pattern, label)) and (not match(icon_pattern, label)):
raise InvalidLabelError('{} is not a valid label'.format(label)) # depends on [control=['if'], data=[]]
return label |
def get_or_create_candidate_election(
self, row, election, candidate, party
):
"""
For a given election, this function updates or creates the
CandidateElection object using the model method on the election.
"""
return election.update_or_create_candidate(
candidate, party.aggregate_candidates, row["uncontested"]
) | def function[get_or_create_candidate_election, parameter[self, row, election, candidate, party]]:
constant[
For a given election, this function updates or creates the
CandidateElection object using the model method on the election.
]
return[call[name[election].update_or_create_candidate, parameter[name[candidate], name[party].aggregate_candidates, call[name[row]][constant[uncontested]]]]] | keyword[def] identifier[get_or_create_candidate_election] (
identifier[self] , identifier[row] , identifier[election] , identifier[candidate] , identifier[party]
):
literal[string]
keyword[return] identifier[election] . identifier[update_or_create_candidate] (
identifier[candidate] , identifier[party] . identifier[aggregate_candidates] , identifier[row] [ literal[string] ]
) | def get_or_create_candidate_election(self, row, election, candidate, party):
"""
For a given election, this function updates or creates the
CandidateElection object using the model method on the election.
"""
return election.update_or_create_candidate(candidate, party.aggregate_candidates, row['uncontested']) |
def toDict(self):
"""To Dict
Returns the Hashed Node as a dictionary in the same format as is used in
constructing it
Returns:
dict
"""
# Init the dictionary we will return
dRet = {}
# Add the hash key
dRet['__hash__'] = self._key.toDict()
# Get the parents dict and add it to the return
dRet.update(super(HashNode,self).toDict())
# Get the nodes dict and also add it to the return
dRet.update(self._node.toDict())
# Return
return dRet | def function[toDict, parameter[self]]:
constant[To Dict
Returns the Hashed Node as a dictionary in the same format as is used in
constructing it
Returns:
dict
]
variable[dRet] assign[=] dictionary[[], []]
call[name[dRet]][constant[__hash__]] assign[=] call[name[self]._key.toDict, parameter[]]
call[name[dRet].update, parameter[call[call[name[super], parameter[name[HashNode], name[self]]].toDict, parameter[]]]]
call[name[dRet].update, parameter[call[name[self]._node.toDict, parameter[]]]]
return[name[dRet]] | keyword[def] identifier[toDict] ( identifier[self] ):
literal[string]
identifier[dRet] ={}
identifier[dRet] [ literal[string] ]= identifier[self] . identifier[_key] . identifier[toDict] ()
identifier[dRet] . identifier[update] ( identifier[super] ( identifier[HashNode] , identifier[self] ). identifier[toDict] ())
identifier[dRet] . identifier[update] ( identifier[self] . identifier[_node] . identifier[toDict] ())
keyword[return] identifier[dRet] | def toDict(self):
"""To Dict
Returns the Hashed Node as a dictionary in the same format as is used in
constructing it
Returns:
dict
""" # Init the dictionary we will return
dRet = {} # Add the hash key
dRet['__hash__'] = self._key.toDict() # Get the parents dict and add it to the return
dRet.update(super(HashNode, self).toDict()) # Get the nodes dict and also add it to the return
dRet.update(self._node.toDict()) # Return
return dRet |
def _project_on_ellipsoid(c, r, locations):
"""displace locations to the nearest point on ellipsoid surface"""
p0 = locations - c # original locations
l2 = 1 / np.sum(p0**2 / r**2, axis=1, keepdims=True)
p = p0 * np.sqrt(l2) # initial approximation (projection of points towards center of ellipsoid)
fun = lambda x: np.sum((x.reshape(p0.shape) - p0)**2) # minimize distance between new and old points
con = lambda x: np.sum(x.reshape(p0.shape)**2 / r**2, axis=1) - 1 # new points constrained to surface of ellipsoid
res = sp.optimize.minimize(fun, p, constraints={'type': 'eq', 'fun': con}, method='SLSQP')
return res['x'].reshape(p0.shape) + c | def function[_project_on_ellipsoid, parameter[c, r, locations]]:
constant[displace locations to the nearest point on ellipsoid surface]
variable[p0] assign[=] binary_operation[name[locations] - name[c]]
variable[l2] assign[=] binary_operation[constant[1] / call[name[np].sum, parameter[binary_operation[binary_operation[name[p0] ** constant[2]] / binary_operation[name[r] ** constant[2]]]]]]
variable[p] assign[=] binary_operation[name[p0] * call[name[np].sqrt, parameter[name[l2]]]]
variable[fun] assign[=] <ast.Lambda object at 0x7da1b26a2ef0>
variable[con] assign[=] <ast.Lambda object at 0x7da1b26a2230>
variable[res] assign[=] call[name[sp].optimize.minimize, parameter[name[fun], name[p]]]
return[binary_operation[call[call[name[res]][constant[x]].reshape, parameter[name[p0].shape]] + name[c]]] | keyword[def] identifier[_project_on_ellipsoid] ( identifier[c] , identifier[r] , identifier[locations] ):
literal[string]
identifier[p0] = identifier[locations] - identifier[c]
identifier[l2] = literal[int] / identifier[np] . identifier[sum] ( identifier[p0] ** literal[int] / identifier[r] ** literal[int] , identifier[axis] = literal[int] , identifier[keepdims] = keyword[True] )
identifier[p] = identifier[p0] * identifier[np] . identifier[sqrt] ( identifier[l2] )
identifier[fun] = keyword[lambda] identifier[x] : identifier[np] . identifier[sum] (( identifier[x] . identifier[reshape] ( identifier[p0] . identifier[shape] )- identifier[p0] )** literal[int] )
identifier[con] = keyword[lambda] identifier[x] : identifier[np] . identifier[sum] ( identifier[x] . identifier[reshape] ( identifier[p0] . identifier[shape] )** literal[int] / identifier[r] ** literal[int] , identifier[axis] = literal[int] )- literal[int]
identifier[res] = identifier[sp] . identifier[optimize] . identifier[minimize] ( identifier[fun] , identifier[p] , identifier[constraints] ={ literal[string] : literal[string] , literal[string] : identifier[con] }, identifier[method] = literal[string] )
keyword[return] identifier[res] [ literal[string] ]. identifier[reshape] ( identifier[p0] . identifier[shape] )+ identifier[c] | def _project_on_ellipsoid(c, r, locations):
"""displace locations to the nearest point on ellipsoid surface"""
p0 = locations - c # original locations
l2 = 1 / np.sum(p0 ** 2 / r ** 2, axis=1, keepdims=True)
p = p0 * np.sqrt(l2) # initial approximation (projection of points towards center of ellipsoid)
fun = lambda x: np.sum((x.reshape(p0.shape) - p0) ** 2) # minimize distance between new and old points
con = lambda x: np.sum(x.reshape(p0.shape) ** 2 / r ** 2, axis=1) - 1 # new points constrained to surface of ellipsoid
res = sp.optimize.minimize(fun, p, constraints={'type': 'eq', 'fun': con}, method='SLSQP')
return res['x'].reshape(p0.shape) + c |
def all_points_core_distance(distance_matrix, d=2.0):
"""
Compute the all-points-core-distance for all the points of a cluster.
Parameters
----------
distance_matrix : array (cluster_size, cluster_size)
The pairwise distance matrix between points in the cluster.
d : integer
The dimension of the data set, which is used in the computation
of the all-point-core-distance as per the paper.
Returns
-------
core_distances : array (cluster_size,)
The all-points-core-distance of each point in the cluster
References
----------
Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
"""
distance_matrix[distance_matrix != 0] = (1.0 / distance_matrix[
distance_matrix != 0]) ** d
result = distance_matrix.sum(axis=1)
result /= distance_matrix.shape[0] - 1
result **= (-1.0 / d)
return result | def function[all_points_core_distance, parameter[distance_matrix, d]]:
constant[
Compute the all-points-core-distance for all the points of a cluster.
Parameters
----------
distance_matrix : array (cluster_size, cluster_size)
The pairwise distance matrix between points in the cluster.
d : integer
The dimension of the data set, which is used in the computation
of the all-point-core-distance as per the paper.
Returns
-------
core_distances : array (cluster_size,)
The all-points-core-distance of each point in the cluster
References
----------
Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
]
call[name[distance_matrix]][compare[name[distance_matrix] not_equal[!=] constant[0]]] assign[=] binary_operation[binary_operation[constant[1.0] / call[name[distance_matrix]][compare[name[distance_matrix] not_equal[!=] constant[0]]]] ** name[d]]
variable[result] assign[=] call[name[distance_matrix].sum, parameter[]]
<ast.AugAssign object at 0x7da18fe91150>
<ast.AugAssign object at 0x7da18fe93f40>
return[name[result]] | keyword[def] identifier[all_points_core_distance] ( identifier[distance_matrix] , identifier[d] = literal[int] ):
literal[string]
identifier[distance_matrix] [ identifier[distance_matrix] != literal[int] ]=( literal[int] / identifier[distance_matrix] [
identifier[distance_matrix] != literal[int] ])** identifier[d]
identifier[result] = identifier[distance_matrix] . identifier[sum] ( identifier[axis] = literal[int] )
identifier[result] /= identifier[distance_matrix] . identifier[shape] [ literal[int] ]- literal[int]
identifier[result] **=(- literal[int] / identifier[d] )
keyword[return] identifier[result] | def all_points_core_distance(distance_matrix, d=2.0):
"""
Compute the all-points-core-distance for all the points of a cluster.
Parameters
----------
distance_matrix : array (cluster_size, cluster_size)
The pairwise distance matrix between points in the cluster.
d : integer
The dimension of the data set, which is used in the computation
of the all-point-core-distance as per the paper.
Returns
-------
core_distances : array (cluster_size,)
The all-points-core-distance of each point in the cluster
References
----------
Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
"""
distance_matrix[distance_matrix != 0] = (1.0 / distance_matrix[distance_matrix != 0]) ** d
result = distance_matrix.sum(axis=1)
result /= distance_matrix.shape[0] - 1
result **= -1.0 / d
return result |
def hstack(tup):
'''
hstack(x) is equivalent to numpy.hstack(x) or scipy.sparse.hstack(x) except that it works
correctly with both sparse and dense arrays (if any inputs are dense, it converts all inputs
to dense arrays).
'''
if all([sps.issparse(u) for u in tup]): return sps.hstack(tup, format=tup[0].format)
else: return np.hstack([u.toarray() if sps.issparse(u) else u for u in tup]) | def function[hstack, parameter[tup]]:
constant[
hstack(x) is equivalent to numpy.hstack(x) or scipy.sparse.hstack(x) except that it works
correctly with both sparse and dense arrays (if any inputs are dense, it converts all inputs
to dense arrays).
]
if call[name[all], parameter[<ast.ListComp object at 0x7da1b26af0d0>]] begin[:]
return[call[name[sps].hstack, parameter[name[tup]]]] | keyword[def] identifier[hstack] ( identifier[tup] ):
literal[string]
keyword[if] identifier[all] ([ identifier[sps] . identifier[issparse] ( identifier[u] ) keyword[for] identifier[u] keyword[in] identifier[tup] ]): keyword[return] identifier[sps] . identifier[hstack] ( identifier[tup] , identifier[format] = identifier[tup] [ literal[int] ]. identifier[format] )
keyword[else] : keyword[return] identifier[np] . identifier[hstack] ([ identifier[u] . identifier[toarray] () keyword[if] identifier[sps] . identifier[issparse] ( identifier[u] ) keyword[else] identifier[u] keyword[for] identifier[u] keyword[in] identifier[tup] ]) | def hstack(tup):
"""
hstack(x) is equivalent to numpy.hstack(x) or scipy.sparse.hstack(x) except that it works
correctly with both sparse and dense arrays (if any inputs are dense, it converts all inputs
to dense arrays).
"""
if all([sps.issparse(u) for u in tup]):
return sps.hstack(tup, format=tup[0].format) # depends on [control=['if'], data=[]]
else:
return np.hstack([u.toarray() if sps.issparse(u) else u for u in tup]) |
def background_thread():
"""Example of how to send server generated events to clients."""
count = 0
while True:
socketio.sleep(10)
count += 1
socketio.emit('my_response',
{'data': 'Server generated event', 'count': count},
namespace='/test') | def function[background_thread, parameter[]]:
constant[Example of how to send server generated events to clients.]
variable[count] assign[=] constant[0]
while constant[True] begin[:]
call[name[socketio].sleep, parameter[constant[10]]]
<ast.AugAssign object at 0x7da20c993490>
call[name[socketio].emit, parameter[constant[my_response], dictionary[[<ast.Constant object at 0x7da20c990c70>, <ast.Constant object at 0x7da20c9909d0>], [<ast.Constant object at 0x7da20c993b80>, <ast.Name object at 0x7da20c990100>]]]] | keyword[def] identifier[background_thread] ():
literal[string]
identifier[count] = literal[int]
keyword[while] keyword[True] :
identifier[socketio] . identifier[sleep] ( literal[int] )
identifier[count] += literal[int]
identifier[socketio] . identifier[emit] ( literal[string] ,
{ literal[string] : literal[string] , literal[string] : identifier[count] },
identifier[namespace] = literal[string] ) | def background_thread():
"""Example of how to send server generated events to clients."""
count = 0
while True:
socketio.sleep(10)
count += 1
socketio.emit('my_response', {'data': 'Server generated event', 'count': count}, namespace='/test') # depends on [control=['while'], data=[]] |
def _load_data(self, reset=False):
''' loads the RDF/turtle application data to the triplestore
args:
reset(bool): True will delete the definition dataset and reload
all of the datafiles.
'''
log = logging.getLogger("%s.%s" % (self.log_name,
inspect.stack()[0][3]))
log.setLevel(self.log_level)
for attr, obj in self.datafile_obj.items():
if reset or obj['latest_mod'] > obj['last_json_mod']:
conn = obj['conn']
sparql = "DROP ALL;"
if os.path.isdir(obj['cache_path']):
shutil.rmtree(obj['cache_path'], ignore_errors=True)
os.makedirs(obj['cache_path'])
drop_extensions = conn.update_query(sparql)
rdf_resource_templates = []
rdf_data = []
for path, files in obj['files'].items():
for file in files:
file_path = os.path.join(path, file)
# data = open(file_path).read()
# log.info(" uploading file: %s | namespace: %s",
# file,
# conn.namespace)
# data_type = file.split('.')[-1]
result = conn.load_data(file_path,
#datatype=data_type,
graph=str(getattr(NSM.kdr,
file)),
is_file=True)
if result.status_code > 399:
raise ValueError("Cannot load '{}' into {}".format(
file_name, conn)) | def function[_load_data, parameter[self, reset]]:
constant[ loads the RDF/turtle application data to the triplestore
args:
reset(bool): True will delete the definition dataset and reload
all of the datafiles.
]
variable[log] assign[=] call[name[logging].getLogger, parameter[binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6a97e0>, <ast.Subscript object at 0x7da20c6a9ab0>]]]]]
call[name[log].setLevel, parameter[name[self].log_level]]
for taget[tuple[[<ast.Name object at 0x7da20c6a8c70>, <ast.Name object at 0x7da20c6ab040>]]] in starred[call[name[self].datafile_obj.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da20c6aa740> begin[:]
variable[conn] assign[=] call[name[obj]][constant[conn]]
variable[sparql] assign[=] constant[DROP ALL;]
if call[name[os].path.isdir, parameter[call[name[obj]][constant[cache_path]]]] begin[:]
call[name[shutil].rmtree, parameter[call[name[obj]][constant[cache_path]]]]
call[name[os].makedirs, parameter[call[name[obj]][constant[cache_path]]]]
variable[drop_extensions] assign[=] call[name[conn].update_query, parameter[name[sparql]]]
variable[rdf_resource_templates] assign[=] list[[]]
variable[rdf_data] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da2043440d0>, <ast.Name object at 0x7da204344d90>]]] in starred[call[call[name[obj]][constant[files]].items, parameter[]]] begin[:]
for taget[name[file]] in starred[name[files]] begin[:]
variable[file_path] assign[=] call[name[os].path.join, parameter[name[path], name[file]]]
variable[result] assign[=] call[name[conn].load_data, parameter[name[file_path]]]
if compare[name[result].status_code greater[>] constant[399]] begin[:]
<ast.Raise object at 0x7da1b15e5f00> | keyword[def] identifier[_load_data] ( identifier[self] , identifier[reset] = keyword[False] ):
literal[string]
identifier[log] = identifier[logging] . identifier[getLogger] ( literal[string] %( identifier[self] . identifier[log_name] ,
identifier[inspect] . identifier[stack] ()[ literal[int] ][ literal[int] ]))
identifier[log] . identifier[setLevel] ( identifier[self] . identifier[log_level] )
keyword[for] identifier[attr] , identifier[obj] keyword[in] identifier[self] . identifier[datafile_obj] . identifier[items] ():
keyword[if] identifier[reset] keyword[or] identifier[obj] [ literal[string] ]> identifier[obj] [ literal[string] ]:
identifier[conn] = identifier[obj] [ literal[string] ]
identifier[sparql] = literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[obj] [ literal[string] ]):
identifier[shutil] . identifier[rmtree] ( identifier[obj] [ literal[string] ], identifier[ignore_errors] = keyword[True] )
identifier[os] . identifier[makedirs] ( identifier[obj] [ literal[string] ])
identifier[drop_extensions] = identifier[conn] . identifier[update_query] ( identifier[sparql] )
identifier[rdf_resource_templates] =[]
identifier[rdf_data] =[]
keyword[for] identifier[path] , identifier[files] keyword[in] identifier[obj] [ literal[string] ]. identifier[items] ():
keyword[for] identifier[file] keyword[in] identifier[files] :
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[file] )
identifier[result] = identifier[conn] . identifier[load_data] ( identifier[file_path] ,
identifier[graph] = identifier[str] ( identifier[getattr] ( identifier[NSM] . identifier[kdr] ,
identifier[file] )),
identifier[is_file] = keyword[True] )
keyword[if] identifier[result] . identifier[status_code] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[file_name] , identifier[conn] )) | def _load_data(self, reset=False):
""" loads the RDF/turtle application data to the triplestore
args:
reset(bool): True will delete the definition dataset and reload
all of the datafiles.
"""
log = logging.getLogger('%s.%s' % (self.log_name, inspect.stack()[0][3]))
log.setLevel(self.log_level)
for (attr, obj) in self.datafile_obj.items():
if reset or obj['latest_mod'] > obj['last_json_mod']:
conn = obj['conn']
sparql = 'DROP ALL;'
if os.path.isdir(obj['cache_path']):
shutil.rmtree(obj['cache_path'], ignore_errors=True) # depends on [control=['if'], data=[]]
os.makedirs(obj['cache_path'])
drop_extensions = conn.update_query(sparql)
rdf_resource_templates = []
rdf_data = []
for (path, files) in obj['files'].items():
for file in files:
file_path = os.path.join(path, file)
# data = open(file_path).read()
# log.info(" uploading file: %s | namespace: %s",
# file,
# conn.namespace)
# data_type = file.split('.')[-1]
#datatype=data_type,
result = conn.load_data(file_path, graph=str(getattr(NSM.kdr, file)), is_file=True)
if result.status_code > 399:
raise ValueError("Cannot load '{}' into {}".format(file_name, conn)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def pckcov(pck, idcode, cover):
"""
Find the coverage window for a specified reference frame in a
specified binary PCK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pckcov_c.html
:param pck: Name of PCK file.
:type pck: str
:param idcode: Class ID code of PCK reference frame.
:type idcode: int
:param cover: Window giving coverage in pck for idcode.
:type cover: SpiceCell
"""
pck = stypes.stringToCharP(pck)
idcode = ctypes.c_int(idcode)
assert isinstance(cover, stypes.SpiceCell)
assert cover.dtype == 1
libspice.pckcov_c(pck, idcode, ctypes.byref(cover)) | def function[pckcov, parameter[pck, idcode, cover]]:
constant[
Find the coverage window for a specified reference frame in a
specified binary PCK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pckcov_c.html
:param pck: Name of PCK file.
:type pck: str
:param idcode: Class ID code of PCK reference frame.
:type idcode: int
:param cover: Window giving coverage in pck for idcode.
:type cover: SpiceCell
]
variable[pck] assign[=] call[name[stypes].stringToCharP, parameter[name[pck]]]
variable[idcode] assign[=] call[name[ctypes].c_int, parameter[name[idcode]]]
assert[call[name[isinstance], parameter[name[cover], name[stypes].SpiceCell]]]
assert[compare[name[cover].dtype equal[==] constant[1]]]
call[name[libspice].pckcov_c, parameter[name[pck], name[idcode], call[name[ctypes].byref, parameter[name[cover]]]]] | keyword[def] identifier[pckcov] ( identifier[pck] , identifier[idcode] , identifier[cover] ):
literal[string]
identifier[pck] = identifier[stypes] . identifier[stringToCharP] ( identifier[pck] )
identifier[idcode] = identifier[ctypes] . identifier[c_int] ( identifier[idcode] )
keyword[assert] identifier[isinstance] ( identifier[cover] , identifier[stypes] . identifier[SpiceCell] )
keyword[assert] identifier[cover] . identifier[dtype] == literal[int]
identifier[libspice] . identifier[pckcov_c] ( identifier[pck] , identifier[idcode] , identifier[ctypes] . identifier[byref] ( identifier[cover] )) | def pckcov(pck, idcode, cover):
"""
Find the coverage window for a specified reference frame in a
specified binary PCK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pckcov_c.html
:param pck: Name of PCK file.
:type pck: str
:param idcode: Class ID code of PCK reference frame.
:type idcode: int
:param cover: Window giving coverage in pck for idcode.
:type cover: SpiceCell
"""
pck = stypes.stringToCharP(pck)
idcode = ctypes.c_int(idcode)
assert isinstance(cover, stypes.SpiceCell)
assert cover.dtype == 1
libspice.pckcov_c(pck, idcode, ctypes.byref(cover)) |
def template_slave_hcl(cl_args, masters):
'''
Template slave config file
'''
slave_config_template = "%s/standalone/templates/slave.template.hcl" % cl_args["config_path"]
slave_config_actual = "%s/standalone/resources/slave.hcl" % cl_args["config_path"]
masters_in_quotes = ['"%s"' % master for master in masters]
template_file(slave_config_template, slave_config_actual,
{"<nomad_masters:master_port>": ", ".join(masters_in_quotes)}) | def function[template_slave_hcl, parameter[cl_args, masters]]:
constant[
Template slave config file
]
variable[slave_config_template] assign[=] binary_operation[constant[%s/standalone/templates/slave.template.hcl] <ast.Mod object at 0x7da2590d6920> call[name[cl_args]][constant[config_path]]]
variable[slave_config_actual] assign[=] binary_operation[constant[%s/standalone/resources/slave.hcl] <ast.Mod object at 0x7da2590d6920> call[name[cl_args]][constant[config_path]]]
variable[masters_in_quotes] assign[=] <ast.ListComp object at 0x7da18f00c5b0>
call[name[template_file], parameter[name[slave_config_template], name[slave_config_actual], dictionary[[<ast.Constant object at 0x7da18f00cc70>], [<ast.Call object at 0x7da18f00c100>]]]] | keyword[def] identifier[template_slave_hcl] ( identifier[cl_args] , identifier[masters] ):
literal[string]
identifier[slave_config_template] = literal[string] % identifier[cl_args] [ literal[string] ]
identifier[slave_config_actual] = literal[string] % identifier[cl_args] [ literal[string] ]
identifier[masters_in_quotes] =[ literal[string] % identifier[master] keyword[for] identifier[master] keyword[in] identifier[masters] ]
identifier[template_file] ( identifier[slave_config_template] , identifier[slave_config_actual] ,
{ literal[string] : literal[string] . identifier[join] ( identifier[masters_in_quotes] )}) | def template_slave_hcl(cl_args, masters):
"""
Template slave config file
"""
slave_config_template = '%s/standalone/templates/slave.template.hcl' % cl_args['config_path']
slave_config_actual = '%s/standalone/resources/slave.hcl' % cl_args['config_path']
masters_in_quotes = ['"%s"' % master for master in masters]
template_file(slave_config_template, slave_config_actual, {'<nomad_masters:master_port>': ', '.join(masters_in_quotes)}) |
def to_df(
self,
rank="auto",
top_n=None,
threshold=None,
remove_zeros=True,
normalize="auto",
table_format="wide",
):
"""Takes the ClassificationsDataFrame associated with these samples, or SampleCollection,
does some filtering, and returns a ClassificationsDataFrame copy.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
top_n : `integer`, optional
Return only the top N most abundant taxa.
threshold : `float`, optional
Return only taxa more abundant than this threshold in one or more samples.
remove_zeros : `bool`, optional
Do not return taxa that have zero abundance in every sample.
normalize : {'auto', True, False}
Convert read counts to relative abundances (each sample sums to 1.0).
table_format : {'long', 'wide'}
If wide, rows are classifications, cols are taxa, elements are counts. If long, rows are
observations with three cols each: classification_id, tax_id, and count.
Returns
-------
`ClassificationsDataFrame`
"""
from onecodex.dataframes import ClassificationsDataFrame
rank = self._get_auto_rank(rank)
df = self._results.copy()
# subset by taxa
if rank:
if rank == "kingdom":
warnings.warn(
"Did you mean to specify rank=kingdom? Use rank=superkingdom to see Bacteria, "
"Archaea and Eukaryota."
)
tax_ids_to_keep = []
for tax_id in df.keys():
if self.taxonomy["rank"][tax_id] == rank:
tax_ids_to_keep.append(tax_id)
if len(tax_ids_to_keep) == 0:
raise OneCodexException("No taxa kept--is rank ({}) correct?".format(rank))
df = df.loc[:, tax_ids_to_keep]
# normalize
if normalize is False and self._guess_normalized():
raise OneCodexException("Data has already been normalized and this can not be undone.")
if normalize is True or (
normalize == "auto" and rank is not None and self._field != "abundance"
):
df = df.div(df.sum(axis=1), axis=0)
# remove columns (tax_ids) with no values that are > 0
if remove_zeros:
df = df.loc[:, (df != 0).any(axis=0)]
# restrict to taxa appearing in one or more samples at the given threshold
if threshold:
df = df.loc[:, df.max() >= threshold]
# restrict to N most abundant taxa
if top_n:
idx = df.sum(axis=0).sort_values(ascending=False).head(top_n).index
df = df.loc[:, idx]
# additional data to copy into the ClassificationsDataFrame
ocx_data = {
"ocx_metadata": self.metadata.copy(),
"ocx_rank": rank,
"ocx_field": self._field,
"ocx_taxonomy": self.taxonomy.copy(),
"ocx_normalized": normalize,
}
# generate long-format table
if table_format == "long":
long_df = {"classification_id": [], "tax_id": [], self._field: []}
for t_id in df:
for c_id, count in df[t_id].iteritems():
long_df["classification_id"].append(c_id)
long_df["tax_id"].append(t_id)
long_df[self._field].append(count)
results_df = ClassificationsDataFrame(long_df, **ocx_data)
elif table_format == "wide":
results_df = ClassificationsDataFrame(df, **ocx_data)
else:
raise OneCodexException("table_format must be one of: long, wide")
return results_df | def function[to_df, parameter[self, rank, top_n, threshold, remove_zeros, normalize, table_format]]:
constant[Takes the ClassificationsDataFrame associated with these samples, or SampleCollection,
does some filtering, and returns a ClassificationsDataFrame copy.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
top_n : `integer`, optional
Return only the top N most abundant taxa.
threshold : `float`, optional
Return only taxa more abundant than this threshold in one or more samples.
remove_zeros : `bool`, optional
Do not return taxa that have zero abundance in every sample.
normalize : {'auto', True, False}
Convert read counts to relative abundances (each sample sums to 1.0).
table_format : {'long', 'wide'}
If wide, rows are classifications, cols are taxa, elements are counts. If long, rows are
observations with three cols each: classification_id, tax_id, and count.
Returns
-------
`ClassificationsDataFrame`
]
from relative_module[onecodex.dataframes] import module[ClassificationsDataFrame]
variable[rank] assign[=] call[name[self]._get_auto_rank, parameter[name[rank]]]
variable[df] assign[=] call[name[self]._results.copy, parameter[]]
if name[rank] begin[:]
if compare[name[rank] equal[==] constant[kingdom]] begin[:]
call[name[warnings].warn, parameter[constant[Did you mean to specify rank=kingdom? Use rank=superkingdom to see Bacteria, Archaea and Eukaryota.]]]
variable[tax_ids_to_keep] assign[=] list[[]]
for taget[name[tax_id]] in starred[call[name[df].keys, parameter[]]] begin[:]
if compare[call[call[name[self].taxonomy][constant[rank]]][name[tax_id]] equal[==] name[rank]] begin[:]
call[name[tax_ids_to_keep].append, parameter[name[tax_id]]]
if compare[call[name[len], parameter[name[tax_ids_to_keep]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18c4cc550>
variable[df] assign[=] call[name[df].loc][tuple[[<ast.Slice object at 0x7da18c4cfeb0>, <ast.Name object at 0x7da18c4cdbd0>]]]
if <ast.BoolOp object at 0x7da18c4ccca0> begin[:]
<ast.Raise object at 0x7da18c4cdcc0>
if <ast.BoolOp object at 0x7da18c4cc5b0> begin[:]
variable[df] assign[=] call[name[df].div, parameter[call[name[df].sum, parameter[]]]]
if name[remove_zeros] begin[:]
variable[df] assign[=] call[name[df].loc][tuple[[<ast.Slice object at 0x7da1b0590a00>, <ast.Call object at 0x7da1b0592b00>]]]
if name[threshold] begin[:]
variable[df] assign[=] call[name[df].loc][tuple[[<ast.Slice object at 0x7da1b05927d0>, <ast.Compare object at 0x7da1b05908b0>]]]
if name[top_n] begin[:]
variable[idx] assign[=] call[call[call[name[df].sum, parameter[]].sort_values, parameter[]].head, parameter[name[top_n]]].index
variable[df] assign[=] call[name[df].loc][tuple[[<ast.Slice object at 0x7da1b05910c0>, <ast.Name object at 0x7da1b0591570>]]]
variable[ocx_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0592050>, <ast.Constant object at 0x7da1b05932e0>, <ast.Constant object at 0x7da1b0590f40>, <ast.Constant object at 0x7da1b0590910>, <ast.Constant object at 0x7da1b0591240>], [<ast.Call object at 0x7da1b05907f0>, <ast.Name object at 0x7da1b0592890>, <ast.Attribute object at 0x7da1b0592110>, <ast.Call object at 0x7da1b05913f0>, <ast.Name object at 0x7da1b0592d70>]]
if compare[name[table_format] equal[==] constant[long]] begin[:]
variable[long_df] assign[=] dictionary[[<ast.Constant object at 0x7da1b0590670>, <ast.Constant object at 0x7da1b0591090>, <ast.Attribute object at 0x7da1b0591b10>], [<ast.List object at 0x7da1b05915a0>, <ast.List object at 0x7da1b0592c50>, <ast.List object at 0x7da1b0593e20>]]
for taget[name[t_id]] in starred[name[df]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0593cd0>, <ast.Name object at 0x7da1b0590d60>]]] in starred[call[call[name[df]][name[t_id]].iteritems, parameter[]]] begin[:]
call[call[name[long_df]][constant[classification_id]].append, parameter[name[c_id]]]
call[call[name[long_df]][constant[tax_id]].append, parameter[name[t_id]]]
call[call[name[long_df]][name[self]._field].append, parameter[name[count]]]
variable[results_df] assign[=] call[name[ClassificationsDataFrame], parameter[name[long_df]]]
return[name[results_df]] | keyword[def] identifier[to_df] (
identifier[self] ,
identifier[rank] = literal[string] ,
identifier[top_n] = keyword[None] ,
identifier[threshold] = keyword[None] ,
identifier[remove_zeros] = keyword[True] ,
identifier[normalize] = literal[string] ,
identifier[table_format] = literal[string] ,
):
literal[string]
keyword[from] identifier[onecodex] . identifier[dataframes] keyword[import] identifier[ClassificationsDataFrame]
identifier[rank] = identifier[self] . identifier[_get_auto_rank] ( identifier[rank] )
identifier[df] = identifier[self] . identifier[_results] . identifier[copy] ()
keyword[if] identifier[rank] :
keyword[if] identifier[rank] == literal[string] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
)
identifier[tax_ids_to_keep] =[]
keyword[for] identifier[tax_id] keyword[in] identifier[df] . identifier[keys] ():
keyword[if] identifier[self] . identifier[taxonomy] [ literal[string] ][ identifier[tax_id] ]== identifier[rank] :
identifier[tax_ids_to_keep] . identifier[append] ( identifier[tax_id] )
keyword[if] identifier[len] ( identifier[tax_ids_to_keep] )== literal[int] :
keyword[raise] identifier[OneCodexException] ( literal[string] . identifier[format] ( identifier[rank] ))
identifier[df] = identifier[df] . identifier[loc] [:, identifier[tax_ids_to_keep] ]
keyword[if] identifier[normalize] keyword[is] keyword[False] keyword[and] identifier[self] . identifier[_guess_normalized] ():
keyword[raise] identifier[OneCodexException] ( literal[string] )
keyword[if] identifier[normalize] keyword[is] keyword[True] keyword[or] (
identifier[normalize] == literal[string] keyword[and] identifier[rank] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[_field] != literal[string]
):
identifier[df] = identifier[df] . identifier[div] ( identifier[df] . identifier[sum] ( identifier[axis] = literal[int] ), identifier[axis] = literal[int] )
keyword[if] identifier[remove_zeros] :
identifier[df] = identifier[df] . identifier[loc] [:,( identifier[df] != literal[int] ). identifier[any] ( identifier[axis] = literal[int] )]
keyword[if] identifier[threshold] :
identifier[df] = identifier[df] . identifier[loc] [:, identifier[df] . identifier[max] ()>= identifier[threshold] ]
keyword[if] identifier[top_n] :
identifier[idx] = identifier[df] . identifier[sum] ( identifier[axis] = literal[int] ). identifier[sort_values] ( identifier[ascending] = keyword[False] ). identifier[head] ( identifier[top_n] ). identifier[index]
identifier[df] = identifier[df] . identifier[loc] [:, identifier[idx] ]
identifier[ocx_data] ={
literal[string] : identifier[self] . identifier[metadata] . identifier[copy] (),
literal[string] : identifier[rank] ,
literal[string] : identifier[self] . identifier[_field] ,
literal[string] : identifier[self] . identifier[taxonomy] . identifier[copy] (),
literal[string] : identifier[normalize] ,
}
keyword[if] identifier[table_format] == literal[string] :
identifier[long_df] ={ literal[string] :[], literal[string] :[], identifier[self] . identifier[_field] :[]}
keyword[for] identifier[t_id] keyword[in] identifier[df] :
keyword[for] identifier[c_id] , identifier[count] keyword[in] identifier[df] [ identifier[t_id] ]. identifier[iteritems] ():
identifier[long_df] [ literal[string] ]. identifier[append] ( identifier[c_id] )
identifier[long_df] [ literal[string] ]. identifier[append] ( identifier[t_id] )
identifier[long_df] [ identifier[self] . identifier[_field] ]. identifier[append] ( identifier[count] )
identifier[results_df] = identifier[ClassificationsDataFrame] ( identifier[long_df] ,** identifier[ocx_data] )
keyword[elif] identifier[table_format] == literal[string] :
identifier[results_df] = identifier[ClassificationsDataFrame] ( identifier[df] ,** identifier[ocx_data] )
keyword[else] :
keyword[raise] identifier[OneCodexException] ( literal[string] )
keyword[return] identifier[results_df] | def to_df(self, rank='auto', top_n=None, threshold=None, remove_zeros=True, normalize='auto', table_format='wide'):
"""Takes the ClassificationsDataFrame associated with these samples, or SampleCollection,
does some filtering, and returns a ClassificationsDataFrame copy.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
top_n : `integer`, optional
Return only the top N most abundant taxa.
threshold : `float`, optional
Return only taxa more abundant than this threshold in one or more samples.
remove_zeros : `bool`, optional
Do not return taxa that have zero abundance in every sample.
normalize : {'auto', True, False}
Convert read counts to relative abundances (each sample sums to 1.0).
table_format : {'long', 'wide'}
If wide, rows are classifications, cols are taxa, elements are counts. If long, rows are
observations with three cols each: classification_id, tax_id, and count.
Returns
-------
`ClassificationsDataFrame`
"""
from onecodex.dataframes import ClassificationsDataFrame
rank = self._get_auto_rank(rank)
df = self._results.copy()
# subset by taxa
if rank:
if rank == 'kingdom':
warnings.warn('Did you mean to specify rank=kingdom? Use rank=superkingdom to see Bacteria, Archaea and Eukaryota.') # depends on [control=['if'], data=[]]
tax_ids_to_keep = []
for tax_id in df.keys():
if self.taxonomy['rank'][tax_id] == rank:
tax_ids_to_keep.append(tax_id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tax_id']]
if len(tax_ids_to_keep) == 0:
raise OneCodexException('No taxa kept--is rank ({}) correct?'.format(rank)) # depends on [control=['if'], data=[]]
df = df.loc[:, tax_ids_to_keep] # depends on [control=['if'], data=[]]
# normalize
if normalize is False and self._guess_normalized():
raise OneCodexException('Data has already been normalized and this can not be undone.') # depends on [control=['if'], data=[]]
if normalize is True or (normalize == 'auto' and rank is not None and (self._field != 'abundance')):
df = df.div(df.sum(axis=1), axis=0) # depends on [control=['if'], data=[]]
# remove columns (tax_ids) with no values that are > 0
if remove_zeros:
df = df.loc[:, (df != 0).any(axis=0)] # depends on [control=['if'], data=[]]
# restrict to taxa appearing in one or more samples at the given threshold
if threshold:
df = df.loc[:, df.max() >= threshold] # depends on [control=['if'], data=[]]
# restrict to N most abundant taxa
if top_n:
idx = df.sum(axis=0).sort_values(ascending=False).head(top_n).index
df = df.loc[:, idx] # depends on [control=['if'], data=[]]
# additional data to copy into the ClassificationsDataFrame
ocx_data = {'ocx_metadata': self.metadata.copy(), 'ocx_rank': rank, 'ocx_field': self._field, 'ocx_taxonomy': self.taxonomy.copy(), 'ocx_normalized': normalize}
# generate long-format table
if table_format == 'long':
long_df = {'classification_id': [], 'tax_id': [], self._field: []}
for t_id in df:
for (c_id, count) in df[t_id].iteritems():
long_df['classification_id'].append(c_id)
long_df['tax_id'].append(t_id)
long_df[self._field].append(count) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['t_id']]
results_df = ClassificationsDataFrame(long_df, **ocx_data) # depends on [control=['if'], data=[]]
elif table_format == 'wide':
results_df = ClassificationsDataFrame(df, **ocx_data) # depends on [control=['if'], data=[]]
else:
raise OneCodexException('table_format must be one of: long, wide')
return results_df |
def create_frame(command):
"""Create and return empty Frame from Command."""
# pylint: disable=too-many-branches,too-many-return-statements
if command == Command.GW_ERROR_NTF:
return FrameErrorNotification()
if command == Command.GW_COMMAND_SEND_REQ:
return FrameCommandSendRequest()
if command == Command.GW_COMMAND_SEND_CFM:
return FrameCommandSendConfirmation()
if command == Command.GW_COMMAND_RUN_STATUS_NTF:
return FrameCommandRunStatusNotification()
if command == Command.GW_COMMAND_REMAINING_TIME_NTF:
return FrameCommandRemainingTimeNotification()
if command == Command.GW_SESSION_FINISHED_NTF:
return FrameSessionFinishedNotification()
if command == Command.GW_PASSWORD_ENTER_REQ:
return FramePasswordEnterRequest()
if command == Command.GW_PASSWORD_ENTER_CFM:
return FramePasswordEnterConfirmation()
if command == Command.GW_CS_DISCOVER_NODES_REQ:
return FrameDiscoverNodesRequest()
if command == Command.GW_CS_DISCOVER_NODES_CFM:
return FrameDiscoverNodesConfirmation()
if command == Command.GW_CS_DISCOVER_NODES_NTF:
return FrameDiscoverNodesNotification()
if command == Command.GW_GET_SCENE_LIST_REQ:
return FrameGetSceneListRequest()
if command == Command.GW_GET_SCENE_LIST_CFM:
return FrameGetSceneListConfirmation()
if command == Command.GW_GET_SCENE_LIST_NTF:
return FrameGetSceneListNotification()
if command == Command.GW_GET_NODE_INFORMATION_REQ:
return FrameGetNodeInformationRequest()
if command == Command.GW_GET_NODE_INFORMATION_CFM:
return FrameGetNodeInformationConfirmation()
if command == Command.GW_GET_NODE_INFORMATION_NTF:
return FrameGetNodeInformationNotification()
if command == Command.GW_GET_ALL_NODES_INFORMATION_REQ:
return FrameGetAllNodesInformationRequest()
if command == Command.GW_GET_ALL_NODES_INFORMATION_CFM:
return FrameGetAllNodesInformationConfirmation()
if command == Command.GW_GET_ALL_NODES_INFORMATION_NTF:
return FrameGetAllNodesInformationNotification()
if command == Command.GW_GET_ALL_NODES_INFORMATION_FINISHED_NTF:
return FrameGetAllNodesInformationFinishedNotification()
if command == Command.GW_ACTIVATE_SCENE_REQ:
return FrameActivateSceneRequest()
if command == Command.GW_ACTIVATE_SCENE_CFM:
return FrameActivateSceneConfirmation()
if command == Command.GW_GET_VERSION_REQ:
return FrameGetVersionRequest()
if command == Command.GW_GET_VERSION_CFM:
return FrameGetVersionConfirmation()
if command == Command.GW_GET_PROTOCOL_VERSION_REQ:
return FrameGetProtocolVersionRequest()
if command == Command.GW_GET_PROTOCOL_VERSION_CFM:
return FrameGetProtocolVersionConfirmation()
if command == Command.GW_SET_NODE_NAME_REQ:
return FrameSetNodeNameRequest()
if command == Command.GW_SET_NODE_NAME_CFM:
return FrameSetNodeNameConfirmation()
if command == Command.GW_NODE_INFORMATION_CHANGED_NTF:
return FrameNodeInformationChangedNotification()
if command == Command.GW_GET_STATE_REQ:
return FrameGetStateRequest()
if command == Command.GW_GET_STATE_CFM:
return FrameGetStateConfirmation()
if command == Command.GW_SET_UTC_REQ:
return FrameSetUTCRequest()
if command == Command.GW_SET_UTC_CFM:
return FrameSetUTCConfirmation()
if command == Command.GW_ACTIVATION_LOG_UPDATED_NTF:
return FrameActivationLogUpdatedNotification()
if command == Command.GW_HOUSE_STATUS_MONITOR_ENABLE_REQ:
return FrameHouseStatusMonitorEnableRequest()
if command == Command.GW_HOUSE_STATUS_MONITOR_ENABLE_CFM:
return FrameHouseStatusMonitorEnableConfirmation()
if command == Command.GW_HOUSE_STATUS_MONITOR_DISABLE_REQ:
return FrameHouseStatusMonitorDisableRequest()
if command == Command.GW_HOUSE_STATUS_MONITOR_DISABLE_CFM:
return FrameHouseStatusMonitorDisableConfirmation()
if command == Command.GW_NODE_STATE_POSITION_CHANGED_NTF:
return FrameNodeStatePositionChangedNotification()
return None | def function[create_frame, parameter[command]]:
constant[Create and return empty Frame from Command.]
if compare[name[command] equal[==] name[Command].GW_ERROR_NTF] begin[:]
return[call[name[FrameErrorNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_COMMAND_SEND_REQ] begin[:]
return[call[name[FrameCommandSendRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_COMMAND_SEND_CFM] begin[:]
return[call[name[FrameCommandSendConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_COMMAND_RUN_STATUS_NTF] begin[:]
return[call[name[FrameCommandRunStatusNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_COMMAND_REMAINING_TIME_NTF] begin[:]
return[call[name[FrameCommandRemainingTimeNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_SESSION_FINISHED_NTF] begin[:]
return[call[name[FrameSessionFinishedNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_PASSWORD_ENTER_REQ] begin[:]
return[call[name[FramePasswordEnterRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_PASSWORD_ENTER_CFM] begin[:]
return[call[name[FramePasswordEnterConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_CS_DISCOVER_NODES_REQ] begin[:]
return[call[name[FrameDiscoverNodesRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_CS_DISCOVER_NODES_CFM] begin[:]
return[call[name[FrameDiscoverNodesConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_CS_DISCOVER_NODES_NTF] begin[:]
return[call[name[FrameDiscoverNodesNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_SCENE_LIST_REQ] begin[:]
return[call[name[FrameGetSceneListRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_SCENE_LIST_CFM] begin[:]
return[call[name[FrameGetSceneListConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_SCENE_LIST_NTF] begin[:]
return[call[name[FrameGetSceneListNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_NODE_INFORMATION_REQ] begin[:]
return[call[name[FrameGetNodeInformationRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_NODE_INFORMATION_CFM] begin[:]
return[call[name[FrameGetNodeInformationConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_NODE_INFORMATION_NTF] begin[:]
return[call[name[FrameGetNodeInformationNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_ALL_NODES_INFORMATION_REQ] begin[:]
return[call[name[FrameGetAllNodesInformationRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_ALL_NODES_INFORMATION_CFM] begin[:]
return[call[name[FrameGetAllNodesInformationConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_ALL_NODES_INFORMATION_NTF] begin[:]
return[call[name[FrameGetAllNodesInformationNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_ALL_NODES_INFORMATION_FINISHED_NTF] begin[:]
return[call[name[FrameGetAllNodesInformationFinishedNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_ACTIVATE_SCENE_REQ] begin[:]
return[call[name[FrameActivateSceneRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_ACTIVATE_SCENE_CFM] begin[:]
return[call[name[FrameActivateSceneConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_VERSION_REQ] begin[:]
return[call[name[FrameGetVersionRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_VERSION_CFM] begin[:]
return[call[name[FrameGetVersionConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_PROTOCOL_VERSION_REQ] begin[:]
return[call[name[FrameGetProtocolVersionRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_PROTOCOL_VERSION_CFM] begin[:]
return[call[name[FrameGetProtocolVersionConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_SET_NODE_NAME_REQ] begin[:]
return[call[name[FrameSetNodeNameRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_SET_NODE_NAME_CFM] begin[:]
return[call[name[FrameSetNodeNameConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_NODE_INFORMATION_CHANGED_NTF] begin[:]
return[call[name[FrameNodeInformationChangedNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_STATE_REQ] begin[:]
return[call[name[FrameGetStateRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_GET_STATE_CFM] begin[:]
return[call[name[FrameGetStateConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_SET_UTC_REQ] begin[:]
return[call[name[FrameSetUTCRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_SET_UTC_CFM] begin[:]
return[call[name[FrameSetUTCConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_ACTIVATION_LOG_UPDATED_NTF] begin[:]
return[call[name[FrameActivationLogUpdatedNotification], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_HOUSE_STATUS_MONITOR_ENABLE_REQ] begin[:]
return[call[name[FrameHouseStatusMonitorEnableRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_HOUSE_STATUS_MONITOR_ENABLE_CFM] begin[:]
return[call[name[FrameHouseStatusMonitorEnableConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_HOUSE_STATUS_MONITOR_DISABLE_REQ] begin[:]
return[call[name[FrameHouseStatusMonitorDisableRequest], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_HOUSE_STATUS_MONITOR_DISABLE_CFM] begin[:]
return[call[name[FrameHouseStatusMonitorDisableConfirmation], parameter[]]]
if compare[name[command] equal[==] name[Command].GW_NODE_STATE_POSITION_CHANGED_NTF] begin[:]
return[call[name[FrameNodeStatePositionChangedNotification], parameter[]]]
return[constant[None]] | keyword[def] identifier[create_frame] ( identifier[command] ):
literal[string]
keyword[if] identifier[command] == identifier[Command] . identifier[GW_ERROR_NTF] :
keyword[return] identifier[FrameErrorNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_COMMAND_SEND_REQ] :
keyword[return] identifier[FrameCommandSendRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_COMMAND_SEND_CFM] :
keyword[return] identifier[FrameCommandSendConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_COMMAND_RUN_STATUS_NTF] :
keyword[return] identifier[FrameCommandRunStatusNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_COMMAND_REMAINING_TIME_NTF] :
keyword[return] identifier[FrameCommandRemainingTimeNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_SESSION_FINISHED_NTF] :
keyword[return] identifier[FrameSessionFinishedNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_PASSWORD_ENTER_REQ] :
keyword[return] identifier[FramePasswordEnterRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_PASSWORD_ENTER_CFM] :
keyword[return] identifier[FramePasswordEnterConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_CS_DISCOVER_NODES_REQ] :
keyword[return] identifier[FrameDiscoverNodesRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_CS_DISCOVER_NODES_CFM] :
keyword[return] identifier[FrameDiscoverNodesConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_CS_DISCOVER_NODES_NTF] :
keyword[return] identifier[FrameDiscoverNodesNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_SCENE_LIST_REQ] :
keyword[return] identifier[FrameGetSceneListRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_SCENE_LIST_CFM] :
keyword[return] identifier[FrameGetSceneListConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_SCENE_LIST_NTF] :
keyword[return] identifier[FrameGetSceneListNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_NODE_INFORMATION_REQ] :
keyword[return] identifier[FrameGetNodeInformationRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_NODE_INFORMATION_CFM] :
keyword[return] identifier[FrameGetNodeInformationConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_NODE_INFORMATION_NTF] :
keyword[return] identifier[FrameGetNodeInformationNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_ALL_NODES_INFORMATION_REQ] :
keyword[return] identifier[FrameGetAllNodesInformationRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_ALL_NODES_INFORMATION_CFM] :
keyword[return] identifier[FrameGetAllNodesInformationConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_ALL_NODES_INFORMATION_NTF] :
keyword[return] identifier[FrameGetAllNodesInformationNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_ALL_NODES_INFORMATION_FINISHED_NTF] :
keyword[return] identifier[FrameGetAllNodesInformationFinishedNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_ACTIVATE_SCENE_REQ] :
keyword[return] identifier[FrameActivateSceneRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_ACTIVATE_SCENE_CFM] :
keyword[return] identifier[FrameActivateSceneConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_VERSION_REQ] :
keyword[return] identifier[FrameGetVersionRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_VERSION_CFM] :
keyword[return] identifier[FrameGetVersionConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_PROTOCOL_VERSION_REQ] :
keyword[return] identifier[FrameGetProtocolVersionRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_PROTOCOL_VERSION_CFM] :
keyword[return] identifier[FrameGetProtocolVersionConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_SET_NODE_NAME_REQ] :
keyword[return] identifier[FrameSetNodeNameRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_SET_NODE_NAME_CFM] :
keyword[return] identifier[FrameSetNodeNameConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_NODE_INFORMATION_CHANGED_NTF] :
keyword[return] identifier[FrameNodeInformationChangedNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_STATE_REQ] :
keyword[return] identifier[FrameGetStateRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_GET_STATE_CFM] :
keyword[return] identifier[FrameGetStateConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_SET_UTC_REQ] :
keyword[return] identifier[FrameSetUTCRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_SET_UTC_CFM] :
keyword[return] identifier[FrameSetUTCConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_ACTIVATION_LOG_UPDATED_NTF] :
keyword[return] identifier[FrameActivationLogUpdatedNotification] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_HOUSE_STATUS_MONITOR_ENABLE_REQ] :
keyword[return] identifier[FrameHouseStatusMonitorEnableRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_HOUSE_STATUS_MONITOR_ENABLE_CFM] :
keyword[return] identifier[FrameHouseStatusMonitorEnableConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_HOUSE_STATUS_MONITOR_DISABLE_REQ] :
keyword[return] identifier[FrameHouseStatusMonitorDisableRequest] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_HOUSE_STATUS_MONITOR_DISABLE_CFM] :
keyword[return] identifier[FrameHouseStatusMonitorDisableConfirmation] ()
keyword[if] identifier[command] == identifier[Command] . identifier[GW_NODE_STATE_POSITION_CHANGED_NTF] :
keyword[return] identifier[FrameNodeStatePositionChangedNotification] ()
keyword[return] keyword[None] | def create_frame(command):
"""Create and return empty Frame from Command."""
# pylint: disable=too-many-branches,too-many-return-statements
if command == Command.GW_ERROR_NTF:
return FrameErrorNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_COMMAND_SEND_REQ:
return FrameCommandSendRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_COMMAND_SEND_CFM:
return FrameCommandSendConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_COMMAND_RUN_STATUS_NTF:
return FrameCommandRunStatusNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_COMMAND_REMAINING_TIME_NTF:
return FrameCommandRemainingTimeNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_SESSION_FINISHED_NTF:
return FrameSessionFinishedNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_PASSWORD_ENTER_REQ:
return FramePasswordEnterRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_PASSWORD_ENTER_CFM:
return FramePasswordEnterConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_CS_DISCOVER_NODES_REQ:
return FrameDiscoverNodesRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_CS_DISCOVER_NODES_CFM:
return FrameDiscoverNodesConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_CS_DISCOVER_NODES_NTF:
return FrameDiscoverNodesNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_SCENE_LIST_REQ:
return FrameGetSceneListRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_SCENE_LIST_CFM:
return FrameGetSceneListConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_SCENE_LIST_NTF:
return FrameGetSceneListNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_NODE_INFORMATION_REQ:
return FrameGetNodeInformationRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_NODE_INFORMATION_CFM:
return FrameGetNodeInformationConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_NODE_INFORMATION_NTF:
return FrameGetNodeInformationNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_ALL_NODES_INFORMATION_REQ:
return FrameGetAllNodesInformationRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_ALL_NODES_INFORMATION_CFM:
return FrameGetAllNodesInformationConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_ALL_NODES_INFORMATION_NTF:
return FrameGetAllNodesInformationNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_ALL_NODES_INFORMATION_FINISHED_NTF:
return FrameGetAllNodesInformationFinishedNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_ACTIVATE_SCENE_REQ:
return FrameActivateSceneRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_ACTIVATE_SCENE_CFM:
return FrameActivateSceneConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_VERSION_REQ:
return FrameGetVersionRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_VERSION_CFM:
return FrameGetVersionConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_PROTOCOL_VERSION_REQ:
return FrameGetProtocolVersionRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_PROTOCOL_VERSION_CFM:
return FrameGetProtocolVersionConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_SET_NODE_NAME_REQ:
return FrameSetNodeNameRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_SET_NODE_NAME_CFM:
return FrameSetNodeNameConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_NODE_INFORMATION_CHANGED_NTF:
return FrameNodeInformationChangedNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_STATE_REQ:
return FrameGetStateRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_GET_STATE_CFM:
return FrameGetStateConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_SET_UTC_REQ:
return FrameSetUTCRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_SET_UTC_CFM:
return FrameSetUTCConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_ACTIVATION_LOG_UPDATED_NTF:
return FrameActivationLogUpdatedNotification() # depends on [control=['if'], data=[]]
if command == Command.GW_HOUSE_STATUS_MONITOR_ENABLE_REQ:
return FrameHouseStatusMonitorEnableRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_HOUSE_STATUS_MONITOR_ENABLE_CFM:
return FrameHouseStatusMonitorEnableConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_HOUSE_STATUS_MONITOR_DISABLE_REQ:
return FrameHouseStatusMonitorDisableRequest() # depends on [control=['if'], data=[]]
if command == Command.GW_HOUSE_STATUS_MONITOR_DISABLE_CFM:
return FrameHouseStatusMonitorDisableConfirmation() # depends on [control=['if'], data=[]]
if command == Command.GW_NODE_STATE_POSITION_CHANGED_NTF:
return FrameNodeStatePositionChangedNotification() # depends on [control=['if'], data=[]]
return None |
def add_securitygroup_rule(self, group_id, remote_ip=None,
                           remote_group=None, direction=None,
                           ethertype=None, port_max=None,
                           port_min=None, protocol=None):
    """Add a single rule to a security group.

    Builds one rule dictionary from the supplied keyword arguments and
    delegates to ``add_securitygroup_rules`` for the actual API call.

    :param int group_id: The ID of the security group to add this rule to
    :param str remote_ip: The remote IP or CIDR to enforce the rule on
    :param int remote_group: The remote security group ID to enforce
        the rule on
    :param str direction: The direction to enforce (egress or ingress)
    :param str ethertype: The ethertype to enforce (IPv4 or IPv6)
    :param int port_max: The upper port bound to enforce
        (icmp code if the protocol is icmp)
    :param int port_min: The lower port bound to enforce
        (icmp type if the protocol is icmp)
    :param str protocol: The protocol to enforce (icmp, udp, tcp)
    """
    # Map API field names to the optional argument values; only the
    # arguments the caller actually supplied end up in the rule.
    optional_fields = (
        ('ethertype', ethertype),
        ('portRangeMax', port_max),
        ('portRangeMin', port_min),
        ('protocol', protocol),
        ('remoteIp', remote_ip),
        ('remoteGroupId', remote_group),
    )
    rule = {'direction': direction}
    rule.update((field, value)
                for field, value in optional_fields
                if value is not None)
    return self.add_securitygroup_rules(group_id, [rule])
constant[Add a rule to a security group
:param int group_id: The ID of the security group to add this rule to
:param str remote_ip: The remote IP or CIDR to enforce the rule on
:param int remote_group: The remote security group ID to enforce
the rule on
:param str direction: The direction to enforce (egress or ingress)
:param str ethertype: The ethertype to enforce (IPv4 or IPv6)
:param int port_max: The upper port bound to enforce
(icmp code if the protocol is icmp)
:param int port_min: The lower port bound to enforce
(icmp type if the protocol is icmp)
:param str protocol: The protocol to enforce (icmp, udp, tcp)
]
variable[rule] assign[=] dictionary[[<ast.Constant object at 0x7da207f99690>], [<ast.Name object at 0x7da207f9ac80>]]
if compare[name[ethertype] is_not constant[None]] begin[:]
call[name[rule]][constant[ethertype]] assign[=] name[ethertype]
if compare[name[port_max] is_not constant[None]] begin[:]
call[name[rule]][constant[portRangeMax]] assign[=] name[port_max]
if compare[name[port_min] is_not constant[None]] begin[:]
call[name[rule]][constant[portRangeMin]] assign[=] name[port_min]
if compare[name[protocol] is_not constant[None]] begin[:]
call[name[rule]][constant[protocol]] assign[=] name[protocol]
if compare[name[remote_ip] is_not constant[None]] begin[:]
call[name[rule]][constant[remoteIp]] assign[=] name[remote_ip]
if compare[name[remote_group] is_not constant[None]] begin[:]
call[name[rule]][constant[remoteGroupId]] assign[=] name[remote_group]
return[call[name[self].add_securitygroup_rules, parameter[name[group_id], list[[<ast.Name object at 0x7da207f98790>]]]]] | keyword[def] identifier[add_securitygroup_rule] ( identifier[self] , identifier[group_id] , identifier[remote_ip] = keyword[None] ,
identifier[remote_group] = keyword[None] , identifier[direction] = keyword[None] ,
identifier[ethertype] = keyword[None] , identifier[port_max] = keyword[None] ,
identifier[port_min] = keyword[None] , identifier[protocol] = keyword[None] ):
literal[string]
identifier[rule] ={ literal[string] : identifier[direction] }
keyword[if] identifier[ethertype] keyword[is] keyword[not] keyword[None] :
identifier[rule] [ literal[string] ]= identifier[ethertype]
keyword[if] identifier[port_max] keyword[is] keyword[not] keyword[None] :
identifier[rule] [ literal[string] ]= identifier[port_max]
keyword[if] identifier[port_min] keyword[is] keyword[not] keyword[None] :
identifier[rule] [ literal[string] ]= identifier[port_min]
keyword[if] identifier[protocol] keyword[is] keyword[not] keyword[None] :
identifier[rule] [ literal[string] ]= identifier[protocol]
keyword[if] identifier[remote_ip] keyword[is] keyword[not] keyword[None] :
identifier[rule] [ literal[string] ]= identifier[remote_ip]
keyword[if] identifier[remote_group] keyword[is] keyword[not] keyword[None] :
identifier[rule] [ literal[string] ]= identifier[remote_group]
keyword[return] identifier[self] . identifier[add_securitygroup_rules] ( identifier[group_id] ,[ identifier[rule] ]) | def add_securitygroup_rule(self, group_id, remote_ip=None, remote_group=None, direction=None, ethertype=None, port_max=None, port_min=None, protocol=None):
"""Add a rule to a security group
:param int group_id: The ID of the security group to add this rule to
:param str remote_ip: The remote IP or CIDR to enforce the rule on
:param int remote_group: The remote security group ID to enforce
the rule on
:param str direction: The direction to enforce (egress or ingress)
:param str ethertype: The ethertype to enforce (IPv4 or IPv6)
:param int port_max: The upper port bound to enforce
(icmp code if the protocol is icmp)
:param int port_min: The lower port bound to enforce
(icmp type if the protocol is icmp)
:param str protocol: The protocol to enforce (icmp, udp, tcp)
"""
rule = {'direction': direction}
if ethertype is not None:
rule['ethertype'] = ethertype # depends on [control=['if'], data=['ethertype']]
if port_max is not None:
rule['portRangeMax'] = port_max # depends on [control=['if'], data=['port_max']]
if port_min is not None:
rule['portRangeMin'] = port_min # depends on [control=['if'], data=['port_min']]
if protocol is not None:
rule['protocol'] = protocol # depends on [control=['if'], data=['protocol']]
if remote_ip is not None:
rule['remoteIp'] = remote_ip # depends on [control=['if'], data=['remote_ip']]
if remote_group is not None:
rule['remoteGroupId'] = remote_group # depends on [control=['if'], data=['remote_group']]
return self.add_securitygroup_rules(group_id, [rule]) |
def get_interface_detail_output_interface_interface_name(self, **kwargs):
    """Auto Generated Code.

    Builds the ``get_interface_detail/output/interface`` XML request
    tree, fills the ``interface-type`` and ``interface-name`` leaves
    from ``kwargs``, and hands the tree to the callback (``callback``
    from ``kwargs`` if given, otherwise ``self._callback``).
    """
    request = ET.Element("get_interface_detail")
    # ``config`` is simply the request root (kept for parity with the
    # generated template this function originates from).
    config = request
    interface = ET.SubElement(ET.SubElement(request, "output"), "interface")
    ET.SubElement(interface, "interface-type").text = kwargs.pop('interface_type')
    ET.SubElement(interface, "interface-name").text = kwargs.pop('interface_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_interface_detail] assign[=] call[name[ET].Element, parameter[constant[get_interface_detail]]]
variable[config] assign[=] name[get_interface_detail]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_interface_detail], constant[output]]]
variable[interface] assign[=] call[name[ET].SubElement, parameter[name[output], constant[interface]]]
variable[interface_type_key] assign[=] call[name[ET].SubElement, parameter[name[interface], constant[interface-type]]]
name[interface_type_key].text assign[=] call[name[kwargs].pop, parameter[constant[interface_type]]]
variable[interface_name] assign[=] call[name[ET].SubElement, parameter[name[interface], constant[interface-name]]]
name[interface_name].text assign[=] call[name[kwargs].pop, parameter[constant[interface_name]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_interface_detail_output_interface_interface_name] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_interface_detail] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_interface_detail]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_interface_detail] , literal[string] )
identifier[interface] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[interface_type_key] = identifier[ET] . identifier[SubElement] ( identifier[interface] , literal[string] )
identifier[interface_type_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[interface_name] = identifier[ET] . identifier[SubElement] ( identifier[interface] , literal[string] )
identifier[interface_name] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_interface_detail_output_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_interface_detail = ET.Element('get_interface_detail')
config = get_interface_detail
output = ET.SubElement(get_interface_detail, 'output')
interface = ET.SubElement(output, 'interface')
interface_type_key = ET.SubElement(interface, 'interface-type')
interface_type_key.text = kwargs.pop('interface_type')
interface_name = ET.SubElement(interface, 'interface-name')
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def jsmin_for_posers(script, keep_bang_comments=False):
    r"""
    Minify javascript based on `jsmin.c by Douglas Crockford`_\.
    Instead of parsing the stream char by char, it uses a regular
    expression approach which minifies the whole script with one big
    substitution regex.
    .. _jsmin.c by Douglas Crockford:
    http://www.crockford.com/javascript/jsmin.c
    :Warning: This function is the digest of a _make_jsmin() call. It just
    utilizes the resulting regexes. It's here for fun and may
    vanish any time. Use the `jsmin` function instead.
    :Parameters:
      `script` : ``str``
        Script to minify
      `keep_bang_comments` : ``bool``
        Keep comments starting with an exclamation mark? (``/*!...*/``)
    :Return: Minified script
    :Rtype: ``str``
    """
    if not keep_bang_comments:
        # Pre-expanded regex (output of _make_jsmin with bang comments
        # dropped).  One big alternation whose capture groups the subber
        # below maps to their minified replacements: runs of ordinary
        # code and string literals are kept verbatim, regex literals are
        # recognized after punctuation or ``return``, and whitespace /
        # comment runs collapse to a newline, a single space, or nothing.
        # NOTE(review): do not edit these strings by hand — regenerate
        # via _make_jsmin() instead.
        rex = (
            r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]'
            r'|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]'
            r'|\r?\n|\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?<=[(,=:\[!&|?'
            r'{};\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*'
            r'][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\0'
            r'14\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/(?![\r'
            r'\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r'
            r'\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*)|(?<'
            r'=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\016-\04'
            r'0]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?['
            r'\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^'
            r'*]*\*+)*/)))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:'
            r'\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)['
            r'^\047"/\000-\040]*)|(?<=[^\000-!#%&(*,./:-@\[\\^`{|~])(?:[\000'
            r'-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?'
            r':((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040]|(?'
            r':/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#%-\047)*,.'
            r'/:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-\011\0'
            r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\00'
            r'0-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]'
            r'|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-'
            r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?'
            r'=-)|(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]'
            r'*\*+)*/))+|(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\0'
            r'16-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+'
        )
        def subber(match):
            """ Substitution callback """
            groups = match.groups()
            # First non-None group wins: verbatim text/strings/regexes
            # (groups 1-4), a required newline (group 5), a required
            # single space (groups 6-8); anything unmatched — i.e. pure
            # removable whitespace/comments — becomes the empty string.
            return (
                groups[0] or
                groups[1] or
                groups[2] or
                groups[3] or
                (groups[4] and '\n') or
                (groups[5] and ' ') or
                (groups[6] and ' ') or
                (groups[7] and ' ') or
                ''
            )
    else:
        # Same machinery, but ``/*!...*/`` comments are captured (extra
        # group 3) and preserved, and the comment-skipping parts use
        # ``/\*(?!!)`` so bang comments are never swallowed.
        # NOTE(review): do not edit these strings by hand — regenerate
        # via _make_jsmin() instead.
        rex = (
            r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]'
            r'|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]'
            r'|\r?\n|\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|((?:/\*![^*]*\*'
            r'+(?:[^/*][^*]*\*+)*/)[^\047"/\000-\040]*)|(?<=[(,=:\[!&|?{};\r'
            r'\n])(?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*'
            r'][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\0'
            r'14\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/('
            r'?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:'
            r'\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]'
            r'*)|(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\0'
            r'16-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://['
            r'^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*'
            r']*\*+(?:[^/*][^*]*\*+)*/)))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:('
            r'?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/'
            r'\\\[\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[^\000-!#%&(*,./:-@\[\\'
            r'^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:['
            r'^/*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011'
            r'\013\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+'
            r'(?=[^\000-\040"#%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@'
            r'\[-^`{-~-])((?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*]*\*'
            r'+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-#%-,./:-@\[-^`{-~-])|(?<=\+)'
            r'((?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^'
            r'*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\013\014\016-\040]|(?:'
            r'/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\000-\011\013'
            r'\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?'
            r':(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*('
            r'?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+'
        )
        def subber(match):
            """ Substitution callback """
            groups = match.groups()
            # As above, but with one extra verbatim group (group 5) for
            # the preserved ``/*!...*/`` bang comments.
            return (
                groups[0] or
                groups[1] or
                groups[2] or
                groups[3] or
                groups[4] or
                (groups[5] and '\n') or
                (groups[6] and ' ') or
                (groups[7] and ' ') or
                (groups[8] and ' ') or
                ''
            )
    # Pad the script with newlines so the line-boundary look-behind /
    # look-ahead assertions in the regex can match at the very start and
    # end of the input, then strip the padding from the result.
    return _re.sub(rex, subber, '\n%s\n' % script).strip()
constant[
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Warning: This function is the digest of a _make_jsmin() call. It just
utilizes the resulting regexes. It's here for fun and may
vanish any time. Use the `jsmin` function instead.
:Parameters:
`script` : ``str``
Script to minify
`keep_bang_comments` : ``bool``
Keep comments starting with an exclamation mark? (``/*!...*/``)
:Return: Minified script
:Rtype: ``str``
]
if <ast.UnaryOp object at 0x7da1b0bf37f0> begin[:]
variable[rex] assign[=] constant[([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?<=[(,=:\[!&|?{};\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[^\000-!#%&(*,./:-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+]
def function[subber, parameter[match]]:
constant[ Substitution callback ]
variable[groups] assign[=] call[name[match].groups, parameter[]]
return[<ast.BoolOp object at 0x7da1b0bf1b40>]
return[call[call[name[_re].sub, parameter[name[rex], name[subber], binary_operation[constant[
%s
] <ast.Mod object at 0x7da2590d6920> name[script]]]].strip, parameter[]]] | keyword[def] identifier[jsmin_for_posers] ( identifier[script] , identifier[keep_bang_comments] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[keep_bang_comments] :
identifier[rex] =(
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
)
keyword[def] identifier[subber] ( identifier[match] ):
literal[string]
identifier[groups] = identifier[match] . identifier[groups] ()
keyword[return] (
identifier[groups] [ literal[int] ] keyword[or]
identifier[groups] [ literal[int] ] keyword[or]
identifier[groups] [ literal[int] ] keyword[or]
identifier[groups] [ literal[int] ] keyword[or]
( identifier[groups] [ literal[int] ] keyword[and] literal[string] ) keyword[or]
( identifier[groups] [ literal[int] ] keyword[and] literal[string] ) keyword[or]
( identifier[groups] [ literal[int] ] keyword[and] literal[string] ) keyword[or]
( identifier[groups] [ literal[int] ] keyword[and] literal[string] ) keyword[or]
literal[string]
)
keyword[else] :
identifier[rex] =(
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
)
keyword[def] identifier[subber] ( identifier[match] ):
literal[string]
identifier[groups] = identifier[match] . identifier[groups] ()
keyword[return] (
identifier[groups] [ literal[int] ] keyword[or]
identifier[groups] [ literal[int] ] keyword[or]
identifier[groups] [ literal[int] ] keyword[or]
identifier[groups] [ literal[int] ] keyword[or]
identifier[groups] [ literal[int] ] keyword[or]
( identifier[groups] [ literal[int] ] keyword[and] literal[string] ) keyword[or]
( identifier[groups] [ literal[int] ] keyword[and] literal[string] ) keyword[or]
( identifier[groups] [ literal[int] ] keyword[and] literal[string] ) keyword[or]
( identifier[groups] [ literal[int] ] keyword[and] literal[string] ) keyword[or]
literal[string]
)
keyword[return] identifier[_re] . identifier[sub] ( identifier[rex] , identifier[subber] , literal[string] % identifier[script] ). identifier[strip] () | def jsmin_for_posers(script, keep_bang_comments=False):
"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Warning: This function is the digest of a _make_jsmin() call. It just
utilizes the resulting regexes. It's here for fun and may
vanish any time. Use the `jsmin` function instead.
:Parameters:
`script` : ``str``
Script to minify
`keep_bang_comments` : ``bool``
Keep comments starting with an exclamation mark? (``/*!...*/``)
:Return: Minified script
:Rtype: ``str``
"""
if not keep_bang_comments:
rex = '([^\\047"/\\000-\\040]+)|((?:(?:\\047[^\\047\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]|\\r?\\n|\\r)[^\\047\\\\\\r\\n]*)*\\047)|(?:"[^"\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]|\\r?\\n|\\r)[^"\\\\\\r\\n]*)*"))[^\\047"/\\000-\\040]*)|(?<=[(,=:\\[!&|?{};\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)*((?:/(?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)[^\\047"/\\000-\\040]*)|(?<=[\\000-#%-,./:-@\\[-^`{-~-]return)(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))*((?:/(?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)[^\\047"/\\000-\\040]*)|(?<=[^\\000-!#%&(*,./:-@\\[\\\\^`{|~])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:((?:(?://[^\\r\\n]*)?[\\r\\n]))(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+(?=[^\\000-\\040"#%-\\047)*,./:-@\\\\-^`|-~])|(?<=[^\\000-#%-,./:-@\\[-^`{-~-])((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=[^\\000-#%-,./:-@\\[-^`{-~-])|(?<=\\+)((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=\\+)|(?<=-)((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=-)|(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))+|(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+'
def subber(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2] or groups[3] or (groups[4] and '\n') or (groups[5] and ' ') or (groups[6] and ' ') or (groups[7] and ' ') or '' # depends on [control=['if'], data=[]]
else:
rex = '([^\\047"/\\000-\\040]+)|((?:(?:\\047[^\\047\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]|\\r?\\n|\\r)[^\\047\\\\\\r\\n]*)*\\047)|(?:"[^"\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]|\\r?\\n|\\r)[^"\\\\\\r\\n]*)*"))[^\\047"/\\000-\\040]*)|((?:/\\*![^*]*\\*+(?:[^/*][^*]*\\*+)*/)[^\\047"/\\000-\\040]*)|(?<=[(,=:\\[!&|?{};\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)*((?:/(?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)[^\\047"/\\000-\\040]*)|(?<=[\\000-#%-,./:-@\\[-^`{-~-]return)(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))*((?:/(?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)[^\\047"/\\000-\\040]*)|(?<=[^\\000-!#%&(*,./:-@\\[\\\\^`{|~])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:((?:(?://[^\\r\\n]*)?[\\r\\n]))(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+(?=[^\\000-\\040"#%-\\047)*,./:-@\\\\-^`|-~])|(?<=[^\\000-#%-,./:-@\\[-^`{-~-])((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=[^\\000-#%-,./:-@\\[-^`{-~-])|(?<=\\+)((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=\\+)|(?<=-)((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=-)|(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))+|(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+'
def subber(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2] or groups[3] or groups[4] or (groups[5] and '\n') or (groups[6] and ' ') or (groups[7] and ' ') or (groups[8] and ' ') or ''
return _re.sub(rex, subber, '\n%s\n' % script).strip() |
def getChangeSets(self):
"""Get all the ChangeSets of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.ChangeSet` objects
:rtype: list
"""
changeset_tag = ("rtc_cm:com.ibm.team.filesystem.workitems."
"change_set.com.ibm.team.scm.ChangeSet")
return (self.rtc_obj
._get_paged_resources("ChangeSet",
workitem_id=self.identifier,
customized_attr=changeset_tag,
page_size="10")) | def function[getChangeSets, parameter[self]]:
constant[Get all the ChangeSets of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.ChangeSet` objects
:rtype: list
]
variable[changeset_tag] assign[=] constant[rtc_cm:com.ibm.team.filesystem.workitems.change_set.com.ibm.team.scm.ChangeSet]
return[call[name[self].rtc_obj._get_paged_resources, parameter[constant[ChangeSet]]]] | keyword[def] identifier[getChangeSets] ( identifier[self] ):
literal[string]
identifier[changeset_tag] =( literal[string]
literal[string] )
keyword[return] ( identifier[self] . identifier[rtc_obj]
. identifier[_get_paged_resources] ( literal[string] ,
identifier[workitem_id] = identifier[self] . identifier[identifier] ,
identifier[customized_attr] = identifier[changeset_tag] ,
identifier[page_size] = literal[string] )) | def getChangeSets(self):
"""Get all the ChangeSets of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.ChangeSet` objects
:rtype: list
"""
changeset_tag = 'rtc_cm:com.ibm.team.filesystem.workitems.change_set.com.ibm.team.scm.ChangeSet'
return self.rtc_obj._get_paged_resources('ChangeSet', workitem_id=self.identifier, customized_attr=changeset_tag, page_size='10') |
def py_list_to_tcl_list(py_list):
""" Convert Python list to Tcl list using Tcl interpreter.
:param py_list: Python list.
:type py_list: list
:return: string representing the Tcl string equivalent to the Python list.
"""
py_list_str = [str(s) for s in py_list]
return tcl_str(tcl_interp_g.eval('split' + tcl_str('\t'.join(py_list_str)) + '\\t')) | def function[py_list_to_tcl_list, parameter[py_list]]:
constant[ Convert Python list to Tcl list using Tcl interpreter.
:param py_list: Python list.
:type py_list: list
:return: string representing the Tcl string equivalent to the Python list.
]
variable[py_list_str] assign[=] <ast.ListComp object at 0x7da18f09e830>
return[call[name[tcl_str], parameter[call[name[tcl_interp_g].eval, parameter[binary_operation[binary_operation[constant[split] + call[name[tcl_str], parameter[call[constant[ ].join, parameter[name[py_list_str]]]]]] + constant[\t]]]]]]] | keyword[def] identifier[py_list_to_tcl_list] ( identifier[py_list] ):
literal[string]
identifier[py_list_str] =[ identifier[str] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[py_list] ]
keyword[return] identifier[tcl_str] ( identifier[tcl_interp_g] . identifier[eval] ( literal[string] + identifier[tcl_str] ( literal[string] . identifier[join] ( identifier[py_list_str] ))+ literal[string] )) | def py_list_to_tcl_list(py_list):
""" Convert Python list to Tcl list using Tcl interpreter.
:param py_list: Python list.
:type py_list: list
:return: string representing the Tcl string equivalent to the Python list.
"""
py_list_str = [str(s) for s in py_list]
return tcl_str(tcl_interp_g.eval('split' + tcl_str('\t'.join(py_list_str)) + '\\t')) |
def get_input_with_inactive(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get = ET.Element("get")
config = get
input = ET.SubElement(get, "input")
with_inactive = ET.SubElement(input, "with-inactive", xmlns="http://tail-f.com/ns/netconf/inactive/1.0")
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_input_with_inactive, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get] assign[=] call[name[ET].Element, parameter[constant[get]]]
variable[config] assign[=] name[get]
variable[input] assign[=] call[name[ET].SubElement, parameter[name[get], constant[input]]]
variable[with_inactive] assign[=] call[name[ET].SubElement, parameter[name[input], constant[with-inactive]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_input_with_inactive] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get]
identifier[input] = identifier[ET] . identifier[SubElement] ( identifier[get] , literal[string] )
identifier[with_inactive] = identifier[ET] . identifier[SubElement] ( identifier[input] , literal[string] , identifier[xmlns] = literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_input_with_inactive(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get = ET.Element('get')
config = get
input = ET.SubElement(get, 'input')
with_inactive = ET.SubElement(input, 'with-inactive', xmlns='http://tail-f.com/ns/netconf/inactive/1.0')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def remove_out_of_bounds(self, data, low_bound, high_bound):
""" Remove out of bound datapoints from dataframe.
This function removes all points < low_bound and > high_bound.
To Do,
1. Add a different boundary for each column.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove bounds from.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
Returns
-------
pd.DataFrame()
Dataframe with out of bounds removed.
"""
data = data.dropna()
data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)]
return data | def function[remove_out_of_bounds, parameter[self, data, low_bound, high_bound]]:
constant[ Remove out of bound datapoints from dataframe.
This function removes all points < low_bound and > high_bound.
To Do,
1. Add a different boundary for each column.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove bounds from.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
Returns
-------
pd.DataFrame()
Dataframe with out of bounds removed.
]
variable[data] assign[=] call[name[data].dropna, parameter[]]
variable[data] assign[=] call[name[data]][binary_operation[call[compare[name[data] greater[>] name[low_bound]].all, parameter[]] <ast.BitAnd object at 0x7da2590d6b60> call[compare[name[data] less[<] name[high_bound]].all, parameter[]]]]
return[name[data]] | keyword[def] identifier[remove_out_of_bounds] ( identifier[self] , identifier[data] , identifier[low_bound] , identifier[high_bound] ):
literal[string]
identifier[data] = identifier[data] . identifier[dropna] ()
identifier[data] = identifier[data] [( identifier[data] > identifier[low_bound] ). identifier[all] ( identifier[axis] = literal[int] )&( identifier[data] < identifier[high_bound] ). identifier[all] ( identifier[axis] = literal[int] )]
keyword[return] identifier[data] | def remove_out_of_bounds(self, data, low_bound, high_bound):
""" Remove out of bound datapoints from dataframe.
This function removes all points < low_bound and > high_bound.
To Do,
1. Add a different boundary for each column.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove bounds from.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
Returns
-------
pd.DataFrame()
Dataframe with out of bounds removed.
"""
data = data.dropna()
data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)]
return data |
def WriteSerializableArray(self, array):
"""
Write an array of serializable objects to the stream.
Args:
array(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin
"""
if array is None:
self.WriteByte(0)
else:
self.WriteVarInt(len(array))
for item in array:
item.Serialize(self) | def function[WriteSerializableArray, parameter[self, array]]:
constant[
Write an array of serializable objects to the stream.
Args:
array(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin
]
if compare[name[array] is constant[None]] begin[:]
call[name[self].WriteByte, parameter[constant[0]]] | keyword[def] identifier[WriteSerializableArray] ( identifier[self] , identifier[array] ):
literal[string]
keyword[if] identifier[array] keyword[is] keyword[None] :
identifier[self] . identifier[WriteByte] ( literal[int] )
keyword[else] :
identifier[self] . identifier[WriteVarInt] ( identifier[len] ( identifier[array] ))
keyword[for] identifier[item] keyword[in] identifier[array] :
identifier[item] . identifier[Serialize] ( identifier[self] ) | def WriteSerializableArray(self, array):
"""
Write an array of serializable objects to the stream.
Args:
array(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin
"""
if array is None:
self.WriteByte(0) # depends on [control=['if'], data=[]]
else:
self.WriteVarInt(len(array))
for item in array:
item.Serialize(self) # depends on [control=['for'], data=['item']] |
def get_file(self, repository_fname):
"""
Retrieve an existing :class:`meteorpi_model.FileRecord` by its ID
:param string repository_fname:
The file ID
:return:
A :class:`meteorpi_model.FileRecord` instance, or None if not found
"""
search = mp.FileRecordSearch(repository_fname=repository_fname)
b = search_files_sql_builder(search)
sql = b.get_select_sql(columns='f.uid, o.publicId AS observationId, f.mimeType, '
'f.fileName, s2.name AS semanticType, f.fileTime, '
'f.fileSize, f.fileMD5, l.publicId AS obstory_id, l.name AS obstory_name, '
'f.repositoryFname',
skip=0, limit=1, order='f.fileTime DESC')
files = list(self.generators.file_generator(sql=sql, sql_args=b.sql_args))
if not files:
return None
return files[0] | def function[get_file, parameter[self, repository_fname]]:
constant[
Retrieve an existing :class:`meteorpi_model.FileRecord` by its ID
:param string repository_fname:
The file ID
:return:
A :class:`meteorpi_model.FileRecord` instance, or None if not found
]
variable[search] assign[=] call[name[mp].FileRecordSearch, parameter[]]
variable[b] assign[=] call[name[search_files_sql_builder], parameter[name[search]]]
variable[sql] assign[=] call[name[b].get_select_sql, parameter[]]
variable[files] assign[=] call[name[list], parameter[call[name[self].generators.file_generator, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b0ab5300> begin[:]
return[constant[None]]
return[call[name[files]][constant[0]]] | keyword[def] identifier[get_file] ( identifier[self] , identifier[repository_fname] ):
literal[string]
identifier[search] = identifier[mp] . identifier[FileRecordSearch] ( identifier[repository_fname] = identifier[repository_fname] )
identifier[b] = identifier[search_files_sql_builder] ( identifier[search] )
identifier[sql] = identifier[b] . identifier[get_select_sql] ( identifier[columns] = literal[string]
literal[string]
literal[string]
literal[string] ,
identifier[skip] = literal[int] , identifier[limit] = literal[int] , identifier[order] = literal[string] )
identifier[files] = identifier[list] ( identifier[self] . identifier[generators] . identifier[file_generator] ( identifier[sql] = identifier[sql] , identifier[sql_args] = identifier[b] . identifier[sql_args] ))
keyword[if] keyword[not] identifier[files] :
keyword[return] keyword[None]
keyword[return] identifier[files] [ literal[int] ] | def get_file(self, repository_fname):
"""
Retrieve an existing :class:`meteorpi_model.FileRecord` by its ID
:param string repository_fname:
The file ID
:return:
A :class:`meteorpi_model.FileRecord` instance, or None if not found
"""
search = mp.FileRecordSearch(repository_fname=repository_fname)
b = search_files_sql_builder(search)
sql = b.get_select_sql(columns='f.uid, o.publicId AS observationId, f.mimeType, f.fileName, s2.name AS semanticType, f.fileTime, f.fileSize, f.fileMD5, l.publicId AS obstory_id, l.name AS obstory_name, f.repositoryFname', skip=0, limit=1, order='f.fileTime DESC')
files = list(self.generators.file_generator(sql=sql, sql_args=b.sql_args))
if not files:
return None # depends on [control=['if'], data=[]]
return files[0] |
def validate_gps(value):
"""Validate GPS value."""
try:
latitude, longitude, altitude = value.split(',')
vol.Coerce(float)(latitude)
vol.Coerce(float)(longitude)
vol.Coerce(float)(altitude)
except (TypeError, ValueError, vol.Invalid):
raise vol.Invalid(
'GPS value should be of format "latitude,longitude,altitude"')
return value | def function[validate_gps, parameter[value]]:
constant[Validate GPS value.]
<ast.Try object at 0x7da20cabc7c0>
return[name[value]] | keyword[def] identifier[validate_gps] ( identifier[value] ):
literal[string]
keyword[try] :
identifier[latitude] , identifier[longitude] , identifier[altitude] = identifier[value] . identifier[split] ( literal[string] )
identifier[vol] . identifier[Coerce] ( identifier[float] )( identifier[latitude] )
identifier[vol] . identifier[Coerce] ( identifier[float] )( identifier[longitude] )
identifier[vol] . identifier[Coerce] ( identifier[float] )( identifier[altitude] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] , identifier[vol] . identifier[Invalid] ):
keyword[raise] identifier[vol] . identifier[Invalid] (
literal[string] )
keyword[return] identifier[value] | def validate_gps(value):
"""Validate GPS value."""
try:
(latitude, longitude, altitude) = value.split(',')
vol.Coerce(float)(latitude)
vol.Coerce(float)(longitude)
vol.Coerce(float)(altitude) # depends on [control=['try'], data=[]]
except (TypeError, ValueError, vol.Invalid):
raise vol.Invalid('GPS value should be of format "latitude,longitude,altitude"') # depends on [control=['except'], data=[]]
return value |
def _update(self, data: TransDict, *args, **kwargs):
"""
Propagate updates to listeners
:param data: Data to propagate
"""
for l in self.listeners:
l(data, *args, **kwargs) | def function[_update, parameter[self, data]]:
constant[
Propagate updates to listeners
:param data: Data to propagate
]
for taget[name[l]] in starred[name[self].listeners] begin[:]
call[name[l], parameter[name[data], <ast.Starred object at 0x7da18dc04f10>]] | keyword[def] identifier[_update] ( identifier[self] , identifier[data] : identifier[TransDict] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[l] keyword[in] identifier[self] . identifier[listeners] :
identifier[l] ( identifier[data] ,* identifier[args] ,** identifier[kwargs] ) | def _update(self, data: TransDict, *args, **kwargs):
"""
Propagate updates to listeners
:param data: Data to propagate
"""
for l in self.listeners:
l(data, *args, **kwargs) # depends on [control=['for'], data=['l']] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.