code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def corrections(self, word, prefix=1, distance=2):
    """Check *word* and, when it is unknown, propose corrections.

    :prefix: number of leading characters a suggestion must have in
        common with the input word.
    :distance: maximum character distance allowed between the input
        word and a suggestion; smaller values limit the number of
        corrections but shrink the search space.
    Returns a Dictionary.Result tuple whose :valid: member says whether
    the input word is a known word and whose :suggestions: member is a
    list of candidate corrections (empty for valid words).
    """
    # Known word: nothing to correct.
    if word in self._words:
        return Dictionary.Result(True, [])
    suggestions = self._corrector.suggest(word, prefix=prefix, maxdist=distance)
    return Dictionary.Result(False, suggestions)
constant[Get corrections for word, if word is an invalid word.
:prefix: is the number of characters the prefix of the word must
have in common with the suggested corrections.
:distance: is the character distance the corrections may have between
the input word. This limits the number of available corrections
but decreases the correction search space.
The return value of this function is a Result tuple, with the
:valid: member indicating whether the input word is a valid one and
:suggestions: member containing a list of suggestions.
]
if compare[name[word] <ast.NotIn object at 0x7da2590d7190> name[self]._words] begin[:]
return[call[name[Dictionary].Result, parameter[constant[False], call[name[self]._corrector.suggest, parameter[name[word]]]]]] | keyword[def] identifier[corrections] ( identifier[self] , identifier[word] , identifier[prefix] = literal[int] , identifier[distance] = literal[int] ):
literal[string]
keyword[if] identifier[word] keyword[not] keyword[in] identifier[self] . identifier[_words] :
keyword[return] identifier[Dictionary] . identifier[Result] ( keyword[False] ,
identifier[self] . identifier[_corrector] . identifier[suggest] ( identifier[word] ,
identifier[prefix] = identifier[prefix] ,
identifier[maxdist] = identifier[distance] ))
keyword[else] :
keyword[return] identifier[Dictionary] . identifier[Result] ( keyword[True] , identifier[list] ()) | def corrections(self, word, prefix=1, distance=2):
"""Get corrections for word, if word is an invalid word.
:prefix: is the number of characters the prefix of the word must
have in common with the suggested corrections.
:distance: is the character distance the corrections may have between
the input word. This limits the number of available corrections
but decreases the correction search space.
The return value of this function is a Result tuple, with the
:valid: member indicating whether the input word is a valid one and
:suggestions: member containing a list of suggestions.
"""
if word not in self._words:
return Dictionary.Result(False, self._corrector.suggest(word, prefix=prefix, maxdist=distance)) # depends on [control=['if'], data=['word']]
else:
return Dictionary.Result(True, list()) |
def request_and_check(self, url, method='get',
                      expected_content_type=None, **kwargs):
    """Issue an HTTP request and validate the response.

    Args:
        url: URL to request.
        method: either 'get' or 'post'.
        expected_content_type: regex prefix the response content-type
            must match, or None to skip the content-type check.
        **kwargs: forwarded verbatim to the driver's request method.
    Returns:
        The response object.
    Raises:
        RuntimeError: if the status is not OK, or the content type
            does not match expectations.
    """
    assert method in ('get', 'post')
    response = self.driver.request(method, url, **kwargs)
    if response.status_code != requests.codes.ok:
        raise RuntimeError('Error requesting %r, status = %d' %
                           (url, response.status_code))
    if expected_content_type is not None:
        actual_type = response.headers.get('content-type', '')
        if not re.match(expected_content_type, actual_type):
            raise RuntimeError(
                'Error requesting %r, content type %r does not match %r' %
                (url, actual_type, expected_content_type))
    return response
constant[Performs a request, and checks that the status is OK, and that the
content-type matches expectations.
Args:
url: URL to request
method: either 'get' or 'post'
expected_content_type: prefix to match response content-type against
**kwargs: passed to the request method directly.
Raises:
RuntimeError if status_code does not match.
]
assert[compare[name[method] in list[[<ast.Constant object at 0x7da1b16b2f80>, <ast.Constant object at 0x7da1b16b0910>]]]]
variable[result] assign[=] call[name[self].driver.request, parameter[name[method], name[url]]]
if compare[name[result].status_code not_equal[!=] name[requests].codes.ok] begin[:]
<ast.Raise object at 0x7da1b16b02b0>
if compare[name[expected_content_type] is_not constant[None]] begin[:]
variable[content_type] assign[=] call[name[result].headers.get, parameter[constant[content-type], constant[]]]
if <ast.UnaryOp object at 0x7da1b16b04c0> begin[:]
<ast.Raise object at 0x7da1b16b0a00>
return[name[result]] | keyword[def] identifier[request_and_check] ( identifier[self] , identifier[url] , identifier[method] = literal[string] ,
identifier[expected_content_type] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[method] keyword[in] [ literal[string] , literal[string] ]
identifier[result] = identifier[self] . identifier[driver] . identifier[request] ( identifier[method] , identifier[url] ,** identifier[kwargs] )
keyword[if] identifier[result] . identifier[status_code] != identifier[requests] . identifier[codes] . identifier[ok] :
keyword[raise] identifier[RuntimeError] ( literal[string] %
( identifier[url] , identifier[result] . identifier[status_code] ))
keyword[if] identifier[expected_content_type] keyword[is] keyword[not] keyword[None] :
identifier[content_type] = identifier[result] . identifier[headers] . identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[re] . identifier[match] ( identifier[expected_content_type] , identifier[content_type] ):
keyword[raise] identifier[RuntimeError] (
literal[string] %
( identifier[url] , identifier[content_type] , identifier[expected_content_type] ))
keyword[return] identifier[result] | def request_and_check(self, url, method='get', expected_content_type=None, **kwargs):
"""Performs a request, and checks that the status is OK, and that the
content-type matches expectations.
Args:
url: URL to request
method: either 'get' or 'post'
expected_content_type: prefix to match response content-type against
**kwargs: passed to the request method directly.
Raises:
RuntimeError if status_code does not match.
"""
assert method in ['get', 'post']
result = self.driver.request(method, url, **kwargs)
if result.status_code != requests.codes.ok:
raise RuntimeError('Error requesting %r, status = %d' % (url, result.status_code)) # depends on [control=['if'], data=[]]
if expected_content_type is not None:
content_type = result.headers.get('content-type', '')
if not re.match(expected_content_type, content_type):
raise RuntimeError('Error requesting %r, content type %r does not match %r' % (url, content_type, expected_content_type)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['expected_content_type']]
return result |
def to_device(self, device, non_blocking=True):
    """Return a copy of this rollout with all tensors moved to *device*."""
    # Move every transition tensor; metadata is shared, not copied.
    moved_tensors = {
        name: tensor.to(device, non_blocking=non_blocking)
        for name, tensor in self.transition_tensors.items()
    }
    return Transitions(
        size=self.size,
        environment_information=self.environment_information,
        transition_tensors=moved_tensors,
        extra_data=self.extra_data
    )
constant[ Move a rollout to a selected device ]
return[call[name[Transitions], parameter[]]] | keyword[def] identifier[to_device] ( identifier[self] , identifier[device] , identifier[non_blocking] = keyword[True] ):
literal[string]
keyword[return] identifier[Transitions] (
identifier[size] = identifier[self] . identifier[size] ,
identifier[environment_information] = identifier[self] . identifier[environment_information] ,
identifier[transition_tensors] ={ identifier[k] : identifier[v] . identifier[to] ( identifier[device] , identifier[non_blocking] = identifier[non_blocking] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[transition_tensors] . identifier[items] ()},
identifier[extra_data] = identifier[self] . identifier[extra_data]
) | def to_device(self, device, non_blocking=True):
""" Move a rollout to a selected device """
return Transitions(size=self.size, environment_information=self.environment_information, transition_tensors={k: v.to(device, non_blocking=non_blocking) for (k, v) in self.transition_tensors.items()}, extra_data=self.extra_data) |
def _(obj):
    """ISO 8601 format. Interprets naive datetime as UTC with zulu suffix."""
    offset = obj.utcoffset()
    # Naive datetimes (offset None) and UTC datetimes get the 'Z' suffix;
    # everything else keeps its explicit offset via isoformat().
    if offset and offset != UTC_ZERO:
        return obj.isoformat()
    return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
constant[ISO 8601 format. Interprets naive datetime as UTC with zulu suffix.]
variable[tz_offset] assign[=] call[name[obj].utcoffset, parameter[]]
if <ast.BoolOp object at 0x7da18c4cc040> begin[:]
variable[iso_datetime] assign[=] call[name[obj].strftime, parameter[constant[%Y-%m-%dT%H:%M:%S.%fZ]]]
return[name[iso_datetime]] | keyword[def] identifier[_] ( identifier[obj] ):
literal[string]
identifier[tz_offset] = identifier[obj] . identifier[utcoffset] ()
keyword[if] keyword[not] identifier[tz_offset] keyword[or] identifier[tz_offset] == identifier[UTC_ZERO] :
identifier[iso_datetime] = identifier[obj] . identifier[strftime] ( literal[string] )
keyword[else] :
identifier[iso_datetime] = identifier[obj] . identifier[isoformat] ()
keyword[return] identifier[iso_datetime] | def _(obj):
"""ISO 8601 format. Interprets naive datetime as UTC with zulu suffix."""
tz_offset = obj.utcoffset()
if not tz_offset or tz_offset == UTC_ZERO:
iso_datetime = obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ') # depends on [control=['if'], data=[]]
else:
iso_datetime = obj.isoformat()
return iso_datetime |
def metric(self):
    """
    Compute a matrix of Hilbert-Schmidt inner products for the basis
    operators, caching the result in ``self._metric``.

    :return: The matrix of inner products.
    :rtype: numpy.matrix
    """
    # Fast path: reuse the previously computed metric.
    if self._metric is not None:
        return self._metric
    _log.debug("Computing and caching operator basis metric")
    rows = []
    for j in self.ops:
        # Entry (j, k) is tr(j^dagger * k).
        rows.append([(j.dag() * k).tr() for k in self.ops])
    self._metric = np.matrix(rows)
    return self._metric
constant[
Compute a matrix of Hilbert-Schmidt inner products for the basis operators, update
self._metric, and return the value.
:return: The matrix of inner products.
:rtype: numpy.matrix
]
if compare[name[self]._metric is constant[None]] begin[:]
call[name[_log].debug, parameter[constant[Computing and caching operator basis metric]]]
name[self]._metric assign[=] call[name[np].matrix, parameter[<ast.ListComp object at 0x7da2041db280>]]
return[name[self]._metric] | keyword[def] identifier[metric] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_metric] keyword[is] keyword[None] :
identifier[_log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_metric] = identifier[np] . identifier[matrix] ([[( identifier[j] . identifier[dag] ()* identifier[k] ). identifier[tr] () keyword[for] identifier[k] keyword[in] identifier[self] . identifier[ops] ] keyword[for] identifier[j] keyword[in] identifier[self] . identifier[ops] ])
keyword[return] identifier[self] . identifier[_metric] | def metric(self):
"""
Compute a matrix of Hilbert-Schmidt inner products for the basis operators, update
self._metric, and return the value.
:return: The matrix of inner products.
:rtype: numpy.matrix
"""
if self._metric is None:
_log.debug('Computing and caching operator basis metric')
self._metric = np.matrix([[(j.dag() * k).tr() for k in self.ops] for j in self.ops]) # depends on [control=['if'], data=[]]
return self._metric |
def run_example(example_name, environ):
    """
    Run an example module from zipline.examples.
    """
    example = EXAMPLE_MODULES[example_name]
    register_calendar("YAHOO", get_calendar("NYSE"), force=True)
    # A default capital base is supplied, but the example's own test
    # args take precedence when they define one.
    algo_kwargs = merge({'capital_base': 1e7}, example._test_args())
    return run_algorithm(
        initialize=getattr(example, 'initialize', None),
        handle_data=getattr(example, 'handle_data', None),
        before_trading_start=getattr(example, 'before_trading_start', None),
        analyze=getattr(example, 'analyze', None),
        bundle='test',
        environ=environ,
        **algo_kwargs
    )
constant[
Run an example module from zipline.examples.
]
variable[mod] assign[=] call[name[EXAMPLE_MODULES]][name[example_name]]
call[name[register_calendar], parameter[constant[YAHOO], call[name[get_calendar], parameter[constant[NYSE]]]]]
return[call[name[run_algorithm], parameter[]]] | keyword[def] identifier[run_example] ( identifier[example_name] , identifier[environ] ):
literal[string]
identifier[mod] = identifier[EXAMPLE_MODULES] [ identifier[example_name] ]
identifier[register_calendar] ( literal[string] , identifier[get_calendar] ( literal[string] ), identifier[force] = keyword[True] )
keyword[return] identifier[run_algorithm] (
identifier[initialize] = identifier[getattr] ( identifier[mod] , literal[string] , keyword[None] ),
identifier[handle_data] = identifier[getattr] ( identifier[mod] , literal[string] , keyword[None] ),
identifier[before_trading_start] = identifier[getattr] ( identifier[mod] , literal[string] , keyword[None] ),
identifier[analyze] = identifier[getattr] ( identifier[mod] , literal[string] , keyword[None] ),
identifier[bundle] = literal[string] ,
identifier[environ] = identifier[environ] ,
** identifier[merge] ({ literal[string] : literal[int] }, identifier[mod] . identifier[_test_args] ())
) | def run_example(example_name, environ):
"""
Run an example module from zipline.examples.
"""
mod = EXAMPLE_MODULES[example_name]
register_calendar('YAHOO', get_calendar('NYSE'), force=True)
# Provide a default capital base, but allow the test to override.
return run_algorithm(initialize=getattr(mod, 'initialize', None), handle_data=getattr(mod, 'handle_data', None), before_trading_start=getattr(mod, 'before_trading_start', None), analyze=getattr(mod, 'analyze', None), bundle='test', environ=environ, **merge({'capital_base': 10000000.0}, mod._test_args())) |
def i2c_read(self, address, register, number_of_bytes, read_type, cb=None):
    """
    Request a read from an i2c device.  Results are retrieved later via
    i2c_get_read_data(), or delivered to ``cb`` when one is supplied.

    :param address: i2c device address
    :param register: register number (can be set to zero)
    :param number_of_bytes: number of bytes expected to be returned
    :param read_type: I2C_READ or I2C_READ_CONTINUOUSLY
    :param cb: optional callback invoked with the i2c reply data
    """
    # Sysex payloads carry 14-bit values as low/high 7-bit byte pairs.
    payload = [address, read_type,
               register & 0x7f, (register >> 7) & 0x7f,
               number_of_bytes & 0x7f, (number_of_bytes >> 7) & 0x7f]
    # Register (or refresh) the reply slot for this device address.
    self._command_handler.i2c_map[address] = [cb, None]
    self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, payload)
constant[
This method requests the read of an i2c device. Results are retrieved by a call to
i2c_get_read_data().
If a callback method is provided, when data is received from the device it will be sent to the callback method
:param address: i2c device address
:param register: register number (can be set to zero)
:param number_of_bytes: number of bytes expected to be returned
:param read_type: I2C_READ or I2C_READ_CONTINUOUSLY
:param cb: Optional callback function to report i2c data as result of read command
]
variable[data] assign[=] list[[<ast.Name object at 0x7da20e956bc0>, <ast.Name object at 0x7da20e957550>, <ast.BinOp object at 0x7da20e955150>, <ast.BinOp object at 0x7da20e956c80>, <ast.BinOp object at 0x7da20e956b00>, <ast.BinOp object at 0x7da20e9558a0>]]
call[name[self]._command_handler.i2c_map][name[address]] assign[=] list[[<ast.Name object at 0x7da20e955c00>, <ast.Constant object at 0x7da20e9565f0>]]
call[name[self]._command_handler.send_sysex, parameter[name[self]._command_handler.I2C_REQUEST, name[data]]] | keyword[def] identifier[i2c_read] ( identifier[self] , identifier[address] , identifier[register] , identifier[number_of_bytes] , identifier[read_type] , identifier[cb] = keyword[None] ):
literal[string]
identifier[data] =[ identifier[address] , identifier[read_type] , identifier[register] & literal[int] ,( identifier[register] >> literal[int] )& literal[int] ,
identifier[number_of_bytes] & literal[int] ,( identifier[number_of_bytes] >> literal[int] )& literal[int] ]
identifier[self] . identifier[_command_handler] . identifier[i2c_map] [ identifier[address] ]=[ identifier[cb] , keyword[None] ]
identifier[self] . identifier[_command_handler] . identifier[send_sysex] ( identifier[self] . identifier[_command_handler] . identifier[I2C_REQUEST] , identifier[data] ) | def i2c_read(self, address, register, number_of_bytes, read_type, cb=None):
"""
This method requests the read of an i2c device. Results are retrieved by a call to
i2c_get_read_data().
If a callback method is provided, when data is received from the device it will be sent to the callback method
:param address: i2c device address
:param register: register number (can be set to zero)
:param number_of_bytes: number of bytes expected to be returned
:param read_type: I2C_READ or I2C_READ_CONTINUOUSLY
:param cb: Optional callback function to report i2c data as result of read command
"""
data = [address, read_type, register & 127, register >> 7 & 127, number_of_bytes & 127, number_of_bytes >> 7 & 127]
# add or update entry in i2c_map for reply
self._command_handler.i2c_map[address] = [cb, None]
self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data) |
def save(self, *args, **kwargs):
    """
    Create or update the current resource and return the new resource.

    Runs the pre-save hook, performs the save itself, then lets the
    post-save hook transform the response before it is returned.
    """
    self._pre_save(*args, **kwargs)
    raw_response = self._save(*args, **kwargs)
    return self._post_save(raw_response, *args, **kwargs)
constant[
saves creates or updates current resource
returns new resource
]
call[name[self]._pre_save, parameter[<ast.Starred object at 0x7da18f09cfd0>]]
variable[response] assign[=] call[name[self]._save, parameter[<ast.Starred object at 0x7da18f09f520>]]
variable[response] assign[=] call[name[self]._post_save, parameter[name[response], <ast.Starred object at 0x7da18f09d8d0>]]
return[name[response]] | keyword[def] identifier[save] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_pre_save] (* identifier[args] ,** identifier[kwargs] )
identifier[response] = identifier[self] . identifier[_save] (* identifier[args] ,** identifier[kwargs] )
identifier[response] = identifier[self] . identifier[_post_save] ( identifier[response] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[response] | def save(self, *args, **kwargs):
"""
saves creates or updates current resource
returns new resource
"""
self._pre_save(*args, **kwargs)
response = self._save(*args, **kwargs)
response = self._post_save(response, *args, **kwargs)
return response |
def _indentUpTo(self, text, width):
    """Return the padding that must be appended to *text* so that its
    visible width is at least *width* (empty string if already wide
    enough).
    """
    shortfall = width - self._realToVisibleColumn(text, len(text))
    if shortfall <= 0:
        return ''
    # Tab-indented text (when tabs are configured) is padded with whole
    # tabs first, then spaces for the remainder.
    if self._qpart.indentUseTabs and all(char == '\t' for char in text):
        indent_width = self._qpart.indentWidth
        return '\t' * (shortfall // indent_width) + ' ' * (shortfall % indent_width)
    return ' ' * int(shortfall)
constant[Add space to text, so text width will be at least width.
Return text, which must be added
]
variable[visibleTextWidth] assign[=] call[name[self]._realToVisibleColumn, parameter[name[text], call[name[len], parameter[name[text]]]]]
variable[diff] assign[=] binary_operation[name[width] - name[visibleTextWidth]]
if compare[name[diff] less_or_equal[<=] constant[0]] begin[:]
return[constant[]] | keyword[def] identifier[_indentUpTo] ( identifier[self] , identifier[text] , identifier[width] ):
literal[string]
identifier[visibleTextWidth] = identifier[self] . identifier[_realToVisibleColumn] ( identifier[text] , identifier[len] ( identifier[text] ))
identifier[diff] = identifier[width] - identifier[visibleTextWidth]
keyword[if] identifier[diff] <= literal[int] :
keyword[return] literal[string]
keyword[elif] identifier[self] . identifier[_qpart] . identifier[indentUseTabs] keyword[and] identifier[all] ([ identifier[char] == literal[string] keyword[for] identifier[char] keyword[in] identifier[text] ]):
keyword[return] literal[string] *( identifier[diff] // identifier[self] . identifier[_qpart] . identifier[indentWidth] )+ literal[string] *( identifier[diff] % identifier[self] . identifier[_qpart] . identifier[indentWidth] )
keyword[else] :
keyword[return] literal[string] * identifier[int] ( identifier[diff] ) | def _indentUpTo(self, text, width):
"""Add space to text, so text width will be at least width.
Return text, which must be added
"""
visibleTextWidth = self._realToVisibleColumn(text, len(text))
diff = width - visibleTextWidth
if diff <= 0:
return '' # depends on [control=['if'], data=[]]
elif self._qpart.indentUseTabs and all([char == '\t' for char in text]): # if using tabs and only tabs in text
return '\t' * (diff // self._qpart.indentWidth) + ' ' * (diff % self._qpart.indentWidth) # depends on [control=['if'], data=[]]
else:
return ' ' * int(diff) |
def finalize(self, result=None):
    """
    Clean up any created database and schema.
    """
    if not self.settings_path:
        # No settings file was found, so nothing was ever set up.
        return
    from django.test.utils import teardown_test_environment
    from django.db import connection
    from django.conf import settings

    self.call_plugins_method('beforeDestroyTestDb', settings, connection)
    try:
        connection.creation.destroy_test_db(self.old_db,
                                            verbosity=self.verbosity)
    except Exception:
        # Best effort: if we can't tear down the test DB, carry on.
        pass
    self.call_plugins_method('afterDestroyTestDb', settings, connection)

    self.call_plugins_method(
        'beforeTeardownTestEnv', settings, teardown_test_environment)
    teardown_test_environment()
    self.call_plugins_method('afterTeardownTestEnv', settings)
constant[
Clean up any created database and schema.
]
if <ast.UnaryOp object at 0x7da20c6e5030> begin[:]
return[None]
from relative_module[django.test.utils] import module[teardown_test_environment]
from relative_module[django.db] import module[connection]
from relative_module[django.conf] import module[settings]
call[name[self].call_plugins_method, parameter[constant[beforeDestroyTestDb], name[settings], name[connection]]]
<ast.Try object at 0x7da20c6e5c00>
call[name[self].call_plugins_method, parameter[constant[afterDestroyTestDb], name[settings], name[connection]]]
call[name[self].call_plugins_method, parameter[constant[beforeTeardownTestEnv], name[settings], name[teardown_test_environment]]]
call[name[teardown_test_environment], parameter[]]
call[name[self].call_plugins_method, parameter[constant[afterTeardownTestEnv], name[settings]]] | keyword[def] identifier[finalize] ( identifier[self] , identifier[result] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[settings_path] :
keyword[return]
keyword[from] identifier[django] . identifier[test] . identifier[utils] keyword[import] identifier[teardown_test_environment]
keyword[from] identifier[django] . identifier[db] keyword[import] identifier[connection]
keyword[from] identifier[django] . identifier[conf] keyword[import] identifier[settings]
identifier[self] . identifier[call_plugins_method] ( literal[string] , identifier[settings] , identifier[connection] )
keyword[try] :
identifier[connection] . identifier[creation] . identifier[destroy_test_db] (
identifier[self] . identifier[old_db] ,
identifier[verbosity] = identifier[self] . identifier[verbosity] ,
)
keyword[except] identifier[Exception] :
keyword[pass]
identifier[self] . identifier[call_plugins_method] ( literal[string] , identifier[settings] , identifier[connection] )
identifier[self] . identifier[call_plugins_method] (
literal[string] , identifier[settings] , identifier[teardown_test_environment] )
identifier[teardown_test_environment] ()
identifier[self] . identifier[call_plugins_method] ( literal[string] , identifier[settings] ) | def finalize(self, result=None):
"""
Clean up any created database and schema.
"""
if not self.settings_path:
# short circuit if no settings file can be found
return # depends on [control=['if'], data=[]]
from django.test.utils import teardown_test_environment
from django.db import connection
from django.conf import settings
self.call_plugins_method('beforeDestroyTestDb', settings, connection)
try:
connection.creation.destroy_test_db(self.old_db, verbosity=self.verbosity) # depends on [control=['try'], data=[]]
except Exception:
# If we can't tear down the test DB, don't worry about it.
pass # depends on [control=['except'], data=[]]
self.call_plugins_method('afterDestroyTestDb', settings, connection)
self.call_plugins_method('beforeTeardownTestEnv', settings, teardown_test_environment)
teardown_test_environment()
self.call_plugins_method('afterTeardownTestEnv', settings) |
def _connect(self):
    """Connect to socket. This should be run in a new thread.

    Retries until a connection succeeds or ``self.protocol`` is
    cleared, backing off ``self.reconnect_timeout`` seconds between
    attempts.
    """

    def _wait_for_retry():
        # Log the back-off interval, then sleep before the next attempt.
        _LOGGER.info(
            'Waiting %s secs before trying to connect again',
            self.reconnect_timeout)
        time.sleep(self.reconnect_timeout)

    while self.protocol:
        _LOGGER.info('Trying to connect to %s', self.server_address)
        try:
            sock = socket.create_connection(
                self.server_address, self.reconnect_timeout)
        except socket.timeout:
            _LOGGER.error(
                'Connecting to socket timed out for %s',
                self.server_address)
            _wait_for_retry()
        except OSError:
            _LOGGER.error(
                'Failed to connect to socket at %s', self.server_address)
            _wait_for_retry()
        else:
            # Connected: reset the keep-alive timers, start the queue
            # polling thread, then hand the socket to the transport.
            self.tcp_check_timer = time.time()
            self.tcp_disconnect_timer = time.time()
            transport = TCPTransport(
                sock, lambda: self.protocol, self._check_connection)
            poll_thread = threading.Thread(target=self._poll_queue)
            self._stop_event.clear()
            poll_thread.start()
            transport.start()
            transport.connect()
            return
constant[Connect to socket. This should be run in a new thread.]
while name[self].protocol begin[:]
call[name[_LOGGER].info, parameter[constant[Trying to connect to %s], name[self].server_address]]
<ast.Try object at 0x7da2041da140> | keyword[def] identifier[_connect] ( identifier[self] ):
literal[string]
keyword[while] identifier[self] . identifier[protocol] :
identifier[_LOGGER] . identifier[info] ( literal[string] , identifier[self] . identifier[server_address] )
keyword[try] :
identifier[sock] = identifier[socket] . identifier[create_connection] (
identifier[self] . identifier[server_address] , identifier[self] . identifier[reconnect_timeout] )
keyword[except] identifier[socket] . identifier[timeout] :
identifier[_LOGGER] . identifier[error] (
literal[string] ,
identifier[self] . identifier[server_address] )
identifier[_LOGGER] . identifier[info] (
literal[string] ,
identifier[self] . identifier[reconnect_timeout] )
identifier[time] . identifier[sleep] ( identifier[self] . identifier[reconnect_timeout] )
keyword[except] identifier[OSError] :
identifier[_LOGGER] . identifier[error] (
literal[string] , identifier[self] . identifier[server_address] )
identifier[_LOGGER] . identifier[info] (
literal[string] ,
identifier[self] . identifier[reconnect_timeout] )
identifier[time] . identifier[sleep] ( identifier[self] . identifier[reconnect_timeout] )
keyword[else] :
identifier[self] . identifier[tcp_check_timer] = identifier[time] . identifier[time] ()
identifier[self] . identifier[tcp_disconnect_timer] = identifier[time] . identifier[time] ()
identifier[transport] = identifier[TCPTransport] (
identifier[sock] , keyword[lambda] : identifier[self] . identifier[protocol] , identifier[self] . identifier[_check_connection] )
identifier[poll_thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[_poll_queue] )
identifier[self] . identifier[_stop_event] . identifier[clear] ()
identifier[poll_thread] . identifier[start] ()
identifier[transport] . identifier[start] ()
identifier[transport] . identifier[connect] ()
keyword[return] | def _connect(self):
"""Connect to socket. This should be run in a new thread."""
while self.protocol:
_LOGGER.info('Trying to connect to %s', self.server_address)
try:
sock = socket.create_connection(self.server_address, self.reconnect_timeout) # depends on [control=['try'], data=[]]
except socket.timeout:
_LOGGER.error('Connecting to socket timed out for %s', self.server_address)
_LOGGER.info('Waiting %s secs before trying to connect again', self.reconnect_timeout)
time.sleep(self.reconnect_timeout) # depends on [control=['except'], data=[]]
except OSError:
_LOGGER.error('Failed to connect to socket at %s', self.server_address)
_LOGGER.info('Waiting %s secs before trying to connect again', self.reconnect_timeout)
time.sleep(self.reconnect_timeout) # depends on [control=['except'], data=[]]
else:
self.tcp_check_timer = time.time()
self.tcp_disconnect_timer = time.time()
transport = TCPTransport(sock, lambda : self.protocol, self._check_connection)
poll_thread = threading.Thread(target=self._poll_queue)
self._stop_event.clear()
poll_thread.start()
transport.start()
transport.connect()
return # depends on [control=['while'], data=[]] |
def _calc_overlap_coef(
    markers1: dict,
    markers2: dict,
):
    """Calculate overlap coefficient between the values of two dictionaries.

    Note: dict values must be sets.

    :param markers1: mapping of group name -> set of markers (one row
        per group, in iteration order).
    :param markers2: mapping of group name -> set of markers (one
        column per group, in iteration order).
    :return: numpy array of shape (len(markers1), len(markers2)) where
        entry (i, j) is |A_i & B_j| / min(|A_i|, |B_j|); the denominator
        is clamped to at least 1 so empty sets do not divide by zero.
    """
    overlap_coef = np.zeros((len(markers1), len(markers2)))
    # enumerate() replaces the original manual row-counter bookkeeping.
    for row, group1 in enumerate(markers1):
        set1 = markers1[group1]
        overlap_coef[row, :] = [
            len(set1 & markers2[group2]) /
            max(min(len(set1), len(markers2[group2])), 1)
            for group2 in markers2
        ]
    return overlap_coef
constant[Calculate overlap coefficient between the values of two dictionaries
Note: dict values must be sets
]
variable[overlap_coef] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Call object at 0x7da1b2344b20>, <ast.Call object at 0x7da1b23474f0>]]]]
variable[j] assign[=] constant[0]
for taget[name[marker_group]] in starred[name[markers1]] begin[:]
variable[tmp] assign[=] <ast.ListComp object at 0x7da1b2347d60>
call[name[overlap_coef]][tuple[[<ast.Name object at 0x7da20e9b1d20>, <ast.Slice object at 0x7da20e9b1ea0>]]] assign[=] name[tmp]
<ast.AugAssign object at 0x7da20e9b2b00>
return[name[overlap_coef]] | keyword[def] identifier[_calc_overlap_coef] (
identifier[markers1] : identifier[dict] ,
identifier[markers2] : identifier[dict] ,
):
literal[string]
identifier[overlap_coef] = identifier[np] . identifier[zeros] (( identifier[len] ( identifier[markers1] ), identifier[len] ( identifier[markers2] )))
identifier[j] = literal[int]
keyword[for] identifier[marker_group] keyword[in] identifier[markers1] :
identifier[tmp] =[ identifier[len] ( identifier[markers2] [ identifier[i] ]. identifier[intersection] ( identifier[markers1] [ identifier[marker_group] ]))/
identifier[max] ( identifier[min] ( identifier[len] ( identifier[markers2] [ identifier[i] ]), identifier[len] ( identifier[markers1] [ identifier[marker_group] ])), literal[int] ) keyword[for] identifier[i] keyword[in] identifier[markers2] . identifier[keys] ()]
identifier[overlap_coef] [ identifier[j] ,:]= identifier[tmp]
identifier[j] += literal[int]
keyword[return] identifier[overlap_coef] | def _calc_overlap_coef(markers1: dict, markers2: dict):
"""Calculate overlap coefficient between the values of two dictionaries
Note: dict values must be sets
"""
overlap_coef = np.zeros((len(markers1), len(markers2)))
j = 0
for marker_group in markers1:
tmp = [len(markers2[i].intersection(markers1[marker_group])) / max(min(len(markers2[i]), len(markers1[marker_group])), 1) for i in markers2.keys()]
overlap_coef[j, :] = tmp
j += 1 # depends on [control=['for'], data=['marker_group']]
return overlap_coef |
def clean_df(df, header=None, **read_csv_kwargs):
    """Convert UTF8 characters in a CSV file or dataframe into ASCII.

    Args:
      df (DataFrame or str): DataFrame or path or url to CSV
    """
    frame = read_csv(df, header=header, **read_csv_kwargs).fillna(' ')
    # Transliterate every column cell-by-cell; fillna above guarantees
    # there are no NaNs left for unicode2ascii to choke on.
    for column_name in frame.columns:
        frame[column_name] = frame[column_name].apply(unicode2ascii)
    return frame
constant[ Convert UTF8 characters in a CSV file or dataframe into ASCII
Args:
df (DataFrame or str): DataFrame or path or url to CSV
]
variable[df] assign[=] call[name[read_csv], parameter[name[df]]]
variable[df] assign[=] call[name[df].fillna, parameter[constant[ ]]]
for taget[name[col]] in starred[name[df].columns] begin[:]
call[name[df]][name[col]] assign[=] call[call[name[df]][name[col]].apply, parameter[name[unicode2ascii]]]
return[name[df]] | keyword[def] identifier[clean_df] ( identifier[df] , identifier[header] = keyword[None] ,** identifier[read_csv_kwargs] ):
literal[string]
identifier[df] = identifier[read_csv] ( identifier[df] , identifier[header] = identifier[header] ,** identifier[read_csv_kwargs] )
identifier[df] = identifier[df] . identifier[fillna] ( literal[string] )
keyword[for] identifier[col] keyword[in] identifier[df] . identifier[columns] :
identifier[df] [ identifier[col] ]= identifier[df] [ identifier[col] ]. identifier[apply] ( identifier[unicode2ascii] )
keyword[return] identifier[df] | def clean_df(df, header=None, **read_csv_kwargs):
""" Convert UTF8 characters in a CSV file or dataframe into ASCII
Args:
df (DataFrame or str): DataFrame or path or url to CSV
"""
df = read_csv(df, header=header, **read_csv_kwargs)
df = df.fillna(' ')
for col in df.columns:
df[col] = df[col].apply(unicode2ascii) # depends on [control=['for'], data=['col']]
return df |
def _calculate_num_queries(self):
"""
Calculate the total number of request and response queries.
Used for count header and count table.
"""
request_totals = self._totals("request")
response_totals = self._totals("response")
return request_totals[2] + response_totals[2] | def function[_calculate_num_queries, parameter[self]]:
constant[
Calculate the total number of request and response queries.
Used for count header and count table.
]
variable[request_totals] assign[=] call[name[self]._totals, parameter[constant[request]]]
variable[response_totals] assign[=] call[name[self]._totals, parameter[constant[response]]]
return[binary_operation[call[name[request_totals]][constant[2]] + call[name[response_totals]][constant[2]]]] | keyword[def] identifier[_calculate_num_queries] ( identifier[self] ):
literal[string]
identifier[request_totals] = identifier[self] . identifier[_totals] ( literal[string] )
identifier[response_totals] = identifier[self] . identifier[_totals] ( literal[string] )
keyword[return] identifier[request_totals] [ literal[int] ]+ identifier[response_totals] [ literal[int] ] | def _calculate_num_queries(self):
"""
Calculate the total number of request and response queries.
Used for count header and count table.
"""
request_totals = self._totals('request')
response_totals = self._totals('response')
return request_totals[2] + response_totals[2] |
def ExpandRelativePath(method_config, params, relative_path=None):
    """Determine the relative path for request."""
    path = relative_path or method_config.relative_path or ''
    for param in method_config.path_params:
        param_template = '{%s}' % param
        # For more details about "reserved word expansion", see:
        # http://tools.ietf.org/html/rfc6570#section-3.2.2
        reserved_template = '{+%s}' % param
        if reserved_template in path:
            # Reserved expansion: leave reserved URI characters unescaped.
            path = path.replace(reserved_template, param_template)
            reserved_chars = _RESERVED_URI_CHARS
        else:
            reserved_chars = ''
        if param_template not in path:
            raise exceptions.InvalidUserInputError(
                'Missing path parameter %s' % param)
        try:
            value = params[param]
        except KeyError:
            raise exceptions.InvalidUserInputError(
                'Request missing required parameter %s' % param)
        if value is None:
            # An explicit None is treated the same as a missing key.
            raise exceptions.InvalidUserInputError(
                'Request missing required parameter %s' % param)
        try:
            text = value if isinstance(value, six.string_types) else str(value)
            quoted = urllib_parse.quote(text.encode('utf_8'), reserved_chars)
            path = path.replace(param_template, quoted)
        except TypeError as e:
            raise exceptions.InvalidUserInputError(
                'Error setting required parameter %s to value %s: %s' % (
                    param, value, e))
    return path
constant[Determine the relative path for request.]
variable[path] assign[=] <ast.BoolOp object at 0x7da1b07b9b10>
for taget[name[param]] in starred[name[method_config].path_params] begin[:]
variable[param_template] assign[=] binary_operation[constant[{%s}] <ast.Mod object at 0x7da2590d6920> name[param]]
variable[reserved_chars] assign[=] constant[]
variable[reserved_template] assign[=] binary_operation[constant[{+%s}] <ast.Mod object at 0x7da2590d6920> name[param]]
if compare[name[reserved_template] in name[path]] begin[:]
variable[reserved_chars] assign[=] name[_RESERVED_URI_CHARS]
variable[path] assign[=] call[name[path].replace, parameter[name[reserved_template], name[param_template]]]
if compare[name[param_template] <ast.NotIn object at 0x7da2590d7190> name[path]] begin[:]
<ast.Raise object at 0x7da1b07bb400>
<ast.Try object at 0x7da1b07bb6d0>
if compare[name[value] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b07bbee0>
<ast.Try object at 0x7da1b07ba1a0>
return[name[path]] | keyword[def] identifier[ExpandRelativePath] ( identifier[method_config] , identifier[params] , identifier[relative_path] = keyword[None] ):
literal[string]
identifier[path] = identifier[relative_path] keyword[or] identifier[method_config] . identifier[relative_path] keyword[or] literal[string]
keyword[for] identifier[param] keyword[in] identifier[method_config] . identifier[path_params] :
identifier[param_template] = literal[string] % identifier[param]
identifier[reserved_chars] = literal[string]
identifier[reserved_template] = literal[string] % identifier[param]
keyword[if] identifier[reserved_template] keyword[in] identifier[path] :
identifier[reserved_chars] = identifier[_RESERVED_URI_CHARS]
identifier[path] = identifier[path] . identifier[replace] ( identifier[reserved_template] , identifier[param_template] )
keyword[if] identifier[param_template] keyword[not] keyword[in] identifier[path] :
keyword[raise] identifier[exceptions] . identifier[InvalidUserInputError] (
literal[string] % identifier[param] )
keyword[try] :
identifier[value] = identifier[params] [ identifier[param] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[exceptions] . identifier[InvalidUserInputError] (
literal[string] % identifier[param] )
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[raise] identifier[exceptions] . identifier[InvalidUserInputError] (
literal[string] % identifier[param] )
keyword[try] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ):
identifier[value] = identifier[str] ( identifier[value] )
identifier[path] = identifier[path] . identifier[replace] ( identifier[param_template] ,
identifier[urllib_parse] . identifier[quote] ( identifier[value] . identifier[encode] ( literal[string] ),
identifier[reserved_chars] ))
keyword[except] identifier[TypeError] keyword[as] identifier[e] :
keyword[raise] identifier[exceptions] . identifier[InvalidUserInputError] (
literal[string] %(
identifier[param] , identifier[value] , identifier[e] ))
keyword[return] identifier[path] | def ExpandRelativePath(method_config, params, relative_path=None):
"""Determine the relative path for request."""
path = relative_path or method_config.relative_path or ''
for param in method_config.path_params:
param_template = '{%s}' % param
# For more details about "reserved word expansion", see:
# http://tools.ietf.org/html/rfc6570#section-3.2.2
reserved_chars = ''
reserved_template = '{+%s}' % param
if reserved_template in path:
reserved_chars = _RESERVED_URI_CHARS
path = path.replace(reserved_template, param_template) # depends on [control=['if'], data=['reserved_template', 'path']]
if param_template not in path:
raise exceptions.InvalidUserInputError('Missing path parameter %s' % param) # depends on [control=['if'], data=[]]
try:
# TODO(craigcitro): Do we want to support some sophisticated
# mapping here?
value = params[param] # depends on [control=['try'], data=[]]
except KeyError:
raise exceptions.InvalidUserInputError('Request missing required parameter %s' % param) # depends on [control=['except'], data=[]]
if value is None:
raise exceptions.InvalidUserInputError('Request missing required parameter %s' % param) # depends on [control=['if'], data=[]]
try:
if not isinstance(value, six.string_types):
value = str(value) # depends on [control=['if'], data=[]]
path = path.replace(param_template, urllib_parse.quote(value.encode('utf_8'), reserved_chars)) # depends on [control=['try'], data=[]]
except TypeError as e:
raise exceptions.InvalidUserInputError('Error setting required parameter %s to value %s: %s' % (param, value, e)) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['param']]
return path |
def _metric_names_for_training_job(self):
"""Helper method to discover the metrics defined for a training job.
"""
training_description = self._sage_client.describe_training_job(TrainingJobName=self._training_job_name)
metric_definitions = training_description['AlgorithmSpecification']['MetricDefinitions']
metric_names = [md['Name'] for md in metric_definitions]
return metric_names | def function[_metric_names_for_training_job, parameter[self]]:
constant[Helper method to discover the metrics defined for a training job.
]
variable[training_description] assign[=] call[name[self]._sage_client.describe_training_job, parameter[]]
variable[metric_definitions] assign[=] call[call[name[training_description]][constant[AlgorithmSpecification]]][constant[MetricDefinitions]]
variable[metric_names] assign[=] <ast.ListComp object at 0x7da1b1c12e30>
return[name[metric_names]] | keyword[def] identifier[_metric_names_for_training_job] ( identifier[self] ):
literal[string]
identifier[training_description] = identifier[self] . identifier[_sage_client] . identifier[describe_training_job] ( identifier[TrainingJobName] = identifier[self] . identifier[_training_job_name] )
identifier[metric_definitions] = identifier[training_description] [ literal[string] ][ literal[string] ]
identifier[metric_names] =[ identifier[md] [ literal[string] ] keyword[for] identifier[md] keyword[in] identifier[metric_definitions] ]
keyword[return] identifier[metric_names] | def _metric_names_for_training_job(self):
"""Helper method to discover the metrics defined for a training job.
"""
training_description = self._sage_client.describe_training_job(TrainingJobName=self._training_job_name)
metric_definitions = training_description['AlgorithmSpecification']['MetricDefinitions']
metric_names = [md['Name'] for md in metric_definitions]
return metric_names |
def fix_reserved_word(word, is_module=False):
    """Replace words that may be problematic.

    In particular the term 'type' is used in the osid spec, primarily as an
    argument parameter where a type is provided to a method. 'type' is a
    reserved word in python, so we give ours a trailing underscore. If we
    come across any other osid things that are reserved words they can be
    dealt with here. Copied from the builder binder_helpers.py file.
    """
    if is_module:
        # Still deciding this
        return 'logging_' if word == 'logging' else word
    shadowed_builtins = {'id', 'type', 'str', 'max', 'input', 'license',
                         'copyright', 'credits', 'help'}
    if keyword.iskeyword(word) or word in shadowed_builtins:
        return word + '_'
    return word
constant[
Replaces words that may be problematic
In particular the term 'type' is used in the osid spec, primarily as an argument
parameter where a type is provided to a method. 'type' is a reserved word
in python, so we give ours a trailing underscore. If we come across any other
osid things that are reserved word they can be dealt with here.
Copied from the builder binder_helpers.py file
]
if name[is_module] begin[:]
if compare[name[word] equal[==] constant[logging]] begin[:]
variable[word] assign[=] constant[logging_]
return[name[word]] | keyword[def] identifier[fix_reserved_word] ( identifier[word] , identifier[is_module] = keyword[False] ):
literal[string]
keyword[if] identifier[is_module] :
keyword[if] identifier[word] == literal[string] :
identifier[word] = literal[string]
keyword[else] :
keyword[if] identifier[keyword] . identifier[iskeyword] ( identifier[word] ):
identifier[word] += literal[string]
keyword[elif] identifier[word] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[word] += literal[string]
keyword[return] identifier[word] | def fix_reserved_word(word, is_module=False):
"""
Replaces words that may be problematic
In particular the term 'type' is used in the osid spec, primarily as an argument
parameter where a type is provided to a method. 'type' is a reserved word
in python, so we give ours a trailing underscore. If we come across any other
osid things that are reserved word they can be dealt with here.
Copied from the builder binder_helpers.py file
"""
if is_module:
if word == 'logging':
word = 'logging_' # Still deciding this # depends on [control=['if'], data=['word']] # depends on [control=['if'], data=[]]
elif keyword.iskeyword(word):
word += '_' # depends on [control=['if'], data=[]]
elif word in ['id', 'type', 'str', 'max', 'input', 'license', 'copyright', 'credits', 'help']:
word += '_' # depends on [control=['if'], data=['word']]
return word |
def asdict(self):
    """Encode the data in this event into a dictionary.

    The dictionary returned from this method is a reference to the data
    stored in the IOTileEvent, not a copy. It should be treated as read
    only.

    Returns:
        dict: A dictionary containing the information from this event.
    """
    return dict(
        stream=self.stream,
        device_timestamp=self.raw_time,
        streamer_local_id=self.reading_id,
        timestamp=self.reading_time,
        extra_data=self.summary_data,
        data=self.raw_data,
    )
constant[Encode the data in this event into a dictionary.
The dictionary returned from this method is a reference to the data
stored in the IOTileEvent, not a copy. It should be treated as read
only.
Returns:
dict: A dictionary containing the information from this event.
]
return[dictionary[[<ast.Constant object at 0x7da18f00e560>, <ast.Constant object at 0x7da18f00d420>, <ast.Constant object at 0x7da18f00f280>, <ast.Constant object at 0x7da18f00c4c0>, <ast.Constant object at 0x7da18f00c8e0>, <ast.Constant object at 0x7da18f00c7c0>], [<ast.Attribute object at 0x7da18f00e0e0>, <ast.Attribute object at 0x7da18f00cf10>, <ast.Attribute object at 0x7da18f00c700>, <ast.Attribute object at 0x7da18f00d3c0>, <ast.Attribute object at 0x7da18f00c9a0>, <ast.Attribute object at 0x7da18f00f700>]]] | keyword[def] identifier[asdict] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[stream] ,
literal[string] : identifier[self] . identifier[raw_time] ,
literal[string] : identifier[self] . identifier[reading_id] ,
literal[string] : identifier[self] . identifier[reading_time] ,
literal[string] : identifier[self] . identifier[summary_data] ,
literal[string] : identifier[self] . identifier[raw_data]
} | def asdict(self):
"""Encode the data in this event into a dictionary.
The dictionary returned from this method is a reference to the data
stored in the IOTileEvent, not a copy. It should be treated as read
only.
Returns:
dict: A dictionary containing the information from this event.
"""
return {'stream': self.stream, 'device_timestamp': self.raw_time, 'streamer_local_id': self.reading_id, 'timestamp': self.reading_time, 'extra_data': self.summary_data, 'data': self.raw_data} |
def top_comments(self):
    """Return a markdown representation of the top comments."""
    limit = min(10, len(self.comments))
    if limit <= 0:
        return ''
    # Highest score first; ties broken by author name for a stable order.
    ranked = sorted(self.comments,
                    key=lambda item: (-item.score, str(item.author)))
    pieces = [self.post_header.format('Top Comments')]
    for comment in ranked[:limit]:
        pieces.append(tt('1. {}: {}\'s [comment]({}) in {}\n').format(
            self._points(comment.score),
            self._user(comment.author),
            self._permalink(comment),
            self._safe_title(comment.submission)))
    return tt('{}\n').format(''.join(pieces))
constant[Return a markdown representation of the top comments.]
variable[num] assign[=] call[name[min], parameter[constant[10], call[name[len], parameter[name[self].comments]]]]
if compare[name[num] less_or_equal[<=] constant[0]] begin[:]
return[constant[]]
variable[top_comments] assign[=] call[call[name[sorted], parameter[name[self].comments]]][<ast.Slice object at 0x7da204962110>]
variable[retval] assign[=] call[name[self].post_header.format, parameter[constant[Top Comments]]]
for taget[name[comment]] in starred[name[top_comments]] begin[:]
variable[title] assign[=] call[name[self]._safe_title, parameter[name[comment].submission]]
<ast.AugAssign object at 0x7da20c992aa0>
return[call[call[name[tt], parameter[constant[{}
]]].format, parameter[name[retval]]]] | keyword[def] identifier[top_comments] ( identifier[self] ):
literal[string]
identifier[num] = identifier[min] ( literal[int] , identifier[len] ( identifier[self] . identifier[comments] ))
keyword[if] identifier[num] <= literal[int] :
keyword[return] literal[string]
identifier[top_comments] = identifier[sorted] (
identifier[self] . identifier[comments] , identifier[key] = keyword[lambda] identifier[x] :(- identifier[x] . identifier[score] , identifier[str] ( identifier[x] . identifier[author] )))[: identifier[num] ]
identifier[retval] = identifier[self] . identifier[post_header] . identifier[format] ( literal[string] )
keyword[for] identifier[comment] keyword[in] identifier[top_comments] :
identifier[title] = identifier[self] . identifier[_safe_title] ( identifier[comment] . identifier[submission] )
identifier[retval] += identifier[tt] ( literal[string] ). identifier[format] (
identifier[self] . identifier[_points] ( identifier[comment] . identifier[score] ), identifier[self] . identifier[_user] ( identifier[comment] . identifier[author] ),
identifier[self] . identifier[_permalink] ( identifier[comment] ), identifier[title] )
keyword[return] identifier[tt] ( literal[string] ). identifier[format] ( identifier[retval] ) | def top_comments(self):
"""Return a markdown representation of the top comments."""
num = min(10, len(self.comments))
if num <= 0:
return '' # depends on [control=['if'], data=[]]
top_comments = sorted(self.comments, key=lambda x: (-x.score, str(x.author)))[:num]
retval = self.post_header.format('Top Comments')
for comment in top_comments:
title = self._safe_title(comment.submission)
retval += tt("1. {}: {}'s [comment]({}) in {}\n").format(self._points(comment.score), self._user(comment.author), self._permalink(comment), title) # depends on [control=['for'], data=['comment']]
return tt('{}\n').format(retval) |
def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf,
               confs, ivy_resolution_cache_dir, ivy_cache_classpath_filename, resolve_hash_name,
               workunit_factory, workunit_name):
    """Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.

    This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending
    on whether there is an existing frozen resolution.

    After it is run, the Ivy reports are copied into the workdir at the paths specified by
    workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts
    and their transitive dependencies.

    :param executor: A JVM executor to use to invoke ivy.
    :param extra_args: Extra arguments to pass to ivy.
    :param ivyxml: The input ivy.xml containing the dependencies to resolve.
    :param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
    :param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
    :param confs: The confs used in the resolve.
    :param ivy_resolution_cache_dir: Path to Ivy's resolution cache, where its reports are read
        from.
    :param ivy_cache_classpath_filename: Destination path for the classpath file Ivy produces.
    :param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
    :param workunit_factory: A workunit factory for the ivy invoke, or None.
    :param workunit_name: A workunit name for the ivy invoke, or None.
    """
    ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory)
    # safe_concurrent_creation hands us a temporary path and publishes it to the
    # final ivy_cache_classpath_filename only if this block completes
    # (presumably via an atomic rename — confirm in its implementation).
    with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp:
      extra_args = extra_args or []
      # '-cachepath' tells Ivy where to write the resolved classpath listing.
      args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args
      # Serialize Ivy invocations across threads via the class-level lock.
      with cls._ivy_lock:
        cls._exec_ivy(ivy, confs, ivyxml, args,
                      jvm_options=jvm_options,
                      executor=executor,
                      workunit_name=workunit_name,
                      workunit_factory=workunit_factory)
      # Defensive: verify Ivy actually produced the classpath file before the
      # context manager publishes the temp path as the real artifact.
      if not os.path.exists(raw_target_classpath_file_tmp):
        raise cls.IvyError('Ivy failed to create classpath file at {}'
                           .format(raw_target_classpath_file_tmp))
      # Copy the per-conf Ivy resolution reports into the workdir while still
      # inside the creation context, so a failure here aborts publication too.
      cls._copy_ivy_reports(workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name)
    logger.debug('Moved ivy classfile file to {dest}'
                 .format(dest=ivy_cache_classpath_filename))
constant[Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.
This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending
on whether there is an existing frozen resolution.
After it is run, the Ivy reports are copied into the workdir at the paths specified by
workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts
and their transitive dependencies.
:param executor: A JVM executor to use to invoke ivy.
:param extra_args: Extra arguments to pass to ivy.
:param ivyxml: The input ivy.xml containing the dependencies to resolve.
:param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
:param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
:param confs: The confs used in the resolve.
:param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
:param workunit_factory: A workunit factory for the ivy invoke, or None.
:param workunit_name: A workunit name for the ivy invoke, or None.
]
variable[ivy] assign[=] call[name[Bootstrapper].default_ivy, parameter[]]
with call[name[safe_concurrent_creation], parameter[name[ivy_cache_classpath_filename]]] begin[:]
variable[extra_args] assign[=] <ast.BoolOp object at 0x7da1b1ddca90>
variable[args] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1ddca60>, <ast.Name object at 0x7da1b1dddb40>]] + name[extra_args]]
with name[cls]._ivy_lock begin[:]
call[name[cls]._exec_ivy, parameter[name[ivy], name[confs], name[ivyxml], name[args]]]
if <ast.UnaryOp object at 0x7da1b1ddd180> begin[:]
<ast.Raise object at 0x7da1b1ddfb20>
call[name[cls]._copy_ivy_reports, parameter[name[workdir_report_paths_by_conf], name[confs], name[ivy_resolution_cache_dir], name[resolve_hash_name]]]
call[name[logger].debug, parameter[call[constant[Moved ivy classfile file to {dest}].format, parameter[]]]] | keyword[def] identifier[do_resolve] ( identifier[cls] , identifier[executor] , identifier[extra_args] , identifier[ivyxml] , identifier[jvm_options] , identifier[workdir_report_paths_by_conf] ,
identifier[confs] , identifier[ivy_resolution_cache_dir] , identifier[ivy_cache_classpath_filename] , identifier[resolve_hash_name] ,
identifier[workunit_factory] , identifier[workunit_name] ):
literal[string]
identifier[ivy] = identifier[Bootstrapper] . identifier[default_ivy] ( identifier[bootstrap_workunit_factory] = identifier[workunit_factory] )
keyword[with] identifier[safe_concurrent_creation] ( identifier[ivy_cache_classpath_filename] ) keyword[as] identifier[raw_target_classpath_file_tmp] :
identifier[extra_args] = identifier[extra_args] keyword[or] []
identifier[args] =[ literal[string] , identifier[raw_target_classpath_file_tmp] ]+ identifier[extra_args]
keyword[with] identifier[cls] . identifier[_ivy_lock] :
identifier[cls] . identifier[_exec_ivy] ( identifier[ivy] , identifier[confs] , identifier[ivyxml] , identifier[args] ,
identifier[jvm_options] = identifier[jvm_options] ,
identifier[executor] = identifier[executor] ,
identifier[workunit_name] = identifier[workunit_name] ,
identifier[workunit_factory] = identifier[workunit_factory] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[raw_target_classpath_file_tmp] ):
keyword[raise] identifier[cls] . identifier[IvyError] ( literal[string]
. identifier[format] ( identifier[raw_target_classpath_file_tmp] ))
identifier[cls] . identifier[_copy_ivy_reports] ( identifier[workdir_report_paths_by_conf] , identifier[confs] , identifier[ivy_resolution_cache_dir] , identifier[resolve_hash_name] )
identifier[logger] . identifier[debug] ( literal[string]
. identifier[format] ( identifier[dest] = identifier[ivy_cache_classpath_filename] )) | def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, ivy_cache_classpath_filename, resolve_hash_name, workunit_factory, workunit_name):
"""Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.
This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending
on whether there is an existing frozen resolution.
After it is run, the Ivy reports are copied into the workdir at the paths specified by
workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts
and their transitive dependencies.
:param executor: A JVM executor to use to invoke ivy.
:param extra_args: Extra arguments to pass to ivy.
:param ivyxml: The input ivy.xml containing the dependencies to resolve.
:param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
:param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
:param confs: The confs used in the resolve.
:param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
:param workunit_factory: A workunit factory for the ivy invoke, or None.
:param workunit_name: A workunit name for the ivy invoke, or None.
"""
ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory)
with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp:
extra_args = extra_args or []
args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args
with cls._ivy_lock:
cls._exec_ivy(ivy, confs, ivyxml, args, jvm_options=jvm_options, executor=executor, workunit_name=workunit_name, workunit_factory=workunit_factory) # depends on [control=['with'], data=[]]
if not os.path.exists(raw_target_classpath_file_tmp):
raise cls.IvyError('Ivy failed to create classpath file at {}'.format(raw_target_classpath_file_tmp)) # depends on [control=['if'], data=[]]
cls._copy_ivy_reports(workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name) # depends on [control=['with'], data=['raw_target_classpath_file_tmp']]
logger.debug('Moved ivy classfile file to {dest}'.format(dest=ivy_cache_classpath_filename)) |
def load_motif_db(filename, skipn_matrix=0):
    """Read the motif file in the following format
    ```
    >motif_name
    <skip n>0.1<delim>0.2<delim>0.5<delim>0.6
    ...
    >motif_name2
    ....
    ```
    Delim can be anything supported by np.loadtxt

    # Arguments
        filename: str, file path
        skipn_matrix: integer, number of characters to skip when reading
            the numeric matrix (for Encode = 2)

    # Returns
        Dictionary of numpy arrays
    """
    # Use a context manager so the handle is closed even if reading fails;
    # the previous open/readlines/close sequence leaked the handle on error.
    if filename.endswith(".gz"):
        fh = gzip.open(filename, 'rt', encoding='utf-8')
    else:
        fh = open(filename, 'r')
    with fh:
        lines = fh.readlines()

    def lines2matrix(text):
        # np.loadtxt parses the accumulated numeric lines; a single data row
        # yields a 1-D array, multiple rows a 2-D array.
        return np.loadtxt(StringIO(text))

    motifs_dict = {}
    motif_name = None
    motif_lines = ""
    for line in lines:
        if line.startswith(">"):
            # Flush the previous motif before starting a new one.
            if motif_lines:
                motifs_dict[motif_name] = lines2matrix(motif_lines)
            motif_name = line[1:].strip()
            motif_lines = ""
        else:
            # Optionally drop a fixed-width prefix (e.g. Encode's 2-char tag).
            motif_lines += line[skipn_matrix:]
    # Flush the final motif: no trailing ">" triggers it inside the loop.
    if motif_lines and motif_name is not None:
        motifs_dict[motif_name] = lines2matrix(motif_lines)
    return motifs_dict
constant[Read the motif file in the following format
```
>motif_name
<skip n>0.1<delim>0.2<delim>0.5<delim>0.6
...
>motif_name2
....
```
Delim can be anything supported by np.loadtxt
# Arguments
filename: str, file path
skipn_matrix: integer, number of characters to skip when reading
the numeric matrix (for Encode = 2)
# Returns
Dictionary of numpy arrays
]
if call[name[filename].endswith, parameter[constant[.gz]]] begin[:]
variable[f] assign[=] call[name[gzip].open, parameter[name[filename], constant[rt]]]
variable[lines] assign[=] call[name[f].readlines, parameter[]]
call[name[f].close, parameter[]]
variable[motifs_dict] assign[=] dictionary[[], []]
variable[motif_lines] assign[=] constant[]
variable[motif_name] assign[=] constant[None]
def function[lines2matrix, parameter[lines]]:
return[call[name[np].loadtxt, parameter[call[name[StringIO], parameter[name[lines]]]]]]
for taget[name[line]] in starred[name[lines]] begin[:]
if call[name[line].startswith, parameter[constant[>]]] begin[:]
if name[motif_lines] begin[:]
call[name[motifs_dict]][name[motif_name]] assign[=] call[name[lines2matrix], parameter[name[motif_lines]]]
variable[motif_name] assign[=] call[call[name[line]][<ast.Slice object at 0x7da2046210f0>].strip, parameter[]]
variable[motif_lines] assign[=] constant[]
if <ast.BoolOp object at 0x7da204620c40> begin[:]
call[name[motifs_dict]][name[motif_name]] assign[=] call[name[lines2matrix], parameter[name[motif_lines]]]
return[name[motifs_dict]] | keyword[def] identifier[load_motif_db] ( identifier[filename] , identifier[skipn_matrix] = literal[int] ):
literal[string]
keyword[if] identifier[filename] . identifier[endswith] ( literal[string] ):
identifier[f] = identifier[gzip] . identifier[open] ( identifier[filename] , literal[string] , identifier[encoding] = literal[string] )
keyword[else] :
identifier[f] = identifier[open] ( identifier[filename] , literal[string] )
identifier[lines] = identifier[f] . identifier[readlines] ()
identifier[f] . identifier[close] ()
identifier[motifs_dict] ={}
identifier[motif_lines] = literal[string]
identifier[motif_name] = keyword[None]
keyword[def] identifier[lines2matrix] ( identifier[lines] ):
keyword[return] identifier[np] . identifier[loadtxt] ( identifier[StringIO] ( identifier[lines] ))
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[motif_lines] :
identifier[motifs_dict] [ identifier[motif_name] ]= identifier[lines2matrix] ( identifier[motif_lines] )
identifier[motif_name] = identifier[line] [ literal[int] :]. identifier[strip] ()
identifier[motif_lines] = literal[string]
keyword[else] :
identifier[motif_lines] += identifier[line] [ identifier[skipn_matrix] :]
keyword[if] identifier[motif_lines] keyword[and] identifier[motif_name] keyword[is] keyword[not] keyword[None] :
identifier[motifs_dict] [ identifier[motif_name] ]= identifier[lines2matrix] ( identifier[motif_lines] )
keyword[return] identifier[motifs_dict] | def load_motif_db(filename, skipn_matrix=0):
"""Read the motif file in the following format
```
>motif_name
<skip n>0.1<delim>0.2<delim>0.5<delim>0.6
...
>motif_name2
....
```
Delim can be anything supported by np.loadtxt
# Arguments
filename: str, file path
skipn_matrix: integer, number of characters to skip when reading
the numeric matrix (for Encode = 2)
# Returns
Dictionary of numpy arrays
"""
# read-lines
if filename.endswith('.gz'):
f = gzip.open(filename, 'rt', encoding='utf-8') # depends on [control=['if'], data=[]]
else:
f = open(filename, 'r')
lines = f.readlines()
f.close()
motifs_dict = {}
motif_lines = ''
motif_name = None
def lines2matrix(lines):
return np.loadtxt(StringIO(lines))
for line in lines:
if line.startswith('>'):
if motif_lines:
# lines -> matrix
motifs_dict[motif_name] = lines2matrix(motif_lines) # depends on [control=['if'], data=[]]
motif_name = line[1:].strip()
motif_lines = '' # depends on [control=['if'], data=[]]
else:
motif_lines += line[skipn_matrix:] # depends on [control=['for'], data=['line']]
if motif_lines and motif_name is not None:
motifs_dict[motif_name] = lines2matrix(motif_lines) # depends on [control=['if'], data=[]]
return motifs_dict |
def millipede(size, comment=None, reverse=False, template='default', position=0, opposite=False):
    """
    Render a millipede as multi-line ASCII/Unicode art and return it as a string.

    :param int size: number of body segments to draw (0 gives just the head)
    :param comment: optional text printed alongside the millipede (after it when
        ``reverse`` is true, before it otherwise); falsy for no comment
    :param bool reverse: draw the millipede upside down (head at the bottom)
    :param str template: name of the glyph set to use; unknown names silently
        fall back to ``'default'``
    :param int position: starting index into the zig-zag padding cycle, which
        shifts the horizontal wave of the body
    :param bool opposite: mirror the direction of the zig-zag wave
    :return: the rendered millipede, one row per line
    :rtype: str
    """
    # Cycle of left-padding widths that produces the zig-zag body wave.
    padding_offsets = [2, 1, 0, 1, 2, 3, 4, 4, 3]
    padding_suite_length = len(padding_offsets)
    head_padding_extra_offset = 2
    if opposite:
        padding_offsets.reverse()
    position = position or 0  # tolerate position=None from callers
    templates = {
        'frozen': {'bodyr': '╔═(❄❄❄)═╗', 'body': '╚═(❄❄❄)═╝',
                   'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'},
        'love': {'bodyr': '╔═(♥♥♥)═╗', 'body': '╚═(♥♥♥)═╝',
                 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'},
        'corporate': {'bodyr': '╔═(©©©)═╗', 'body': '╚═(©©©)═╝',
                      'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'},
        'musician': {'bodyr': '╔═(♫♩♬)═╗', 'body': '╚═(♫♩♬)═╝',
                     'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'},
        'bocal': {'bodyr': '╔═(🐟🐟🐟)═╗', 'body': '╚═(🐟🐟🐟)═╝',
                  'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'},
        'ascii': {'bodyr': '|=(###)=|', 'body': '|=(###)=|',
                  'headr': '/⊙ ⊙\\', 'head': '\\⊙ ⊙/'},
        'default': {'bodyr': '╔═(███)═╗', 'body': '╚═(███)═╝',
                    'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'},
        'inception': {'bodyr': '╔═(🐛🐛🐛)═╗', 'body': '╚═(🐛🐛🐛)═╝',
                      'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'},
        'humancentipede': {'bodyr': '╔═(😷😷😷)═╗', 'body': '╚═(😷😷😷)═╝',
                           'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'},
        'heart': {'bodyr': '╔═(❤️❤️❤️)═╗', 'body': '╚═(❤️❤️❤️)═╝',
                  'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'},
    }
    template = templates.get(template, templates['default'])
    # Hoist the reverse-dependent glyph choices out of the per-segment loop:
    # they are loop invariants and were previously re-evaluated for every line.
    head_glyph = template['headr'] if reverse else template['head']
    body_glyph = template['bodyr'] if reverse else template['body']
    head = "{}{}\n".format(
        " " * (padding_offsets[position % padding_suite_length] + head_padding_extra_offset),
        head_glyph
    )
    body_lines = [
        "{}{}\n".format(" " * padding_offsets[(x + position) % padding_suite_length], body_glyph)
        for x in range(size)
    ]
    if reverse:
        body_lines.reverse()
    body = "".join(body_lines)
    output = ""
    if reverse:
        output += body + head
        if comment:
            output += "\n" + comment + "\n"
    else:
        if comment:
            output += comment + "\n\n"
        output += head + body
    return output
constant[
Output the millipede
]
variable[padding_offsets] assign[=] list[[<ast.Constant object at 0x7da1b11c0070>, <ast.Constant object at 0x7da1b11c04f0>, <ast.Constant object at 0x7da1b11c1210>, <ast.Constant object at 0x7da1b11c0790>, <ast.Constant object at 0x7da1b11c2b30>, <ast.Constant object at 0x7da1b11c0ac0>, <ast.Constant object at 0x7da1b11c23e0>, <ast.Constant object at 0x7da1b11c1870>, <ast.Constant object at 0x7da1b11c3160>]]
variable[padding_suite_length] assign[=] call[name[len], parameter[name[padding_offsets]]]
variable[head_padding_extra_offset] assign[=] constant[2]
if name[opposite] begin[:]
call[name[padding_offsets].reverse, parameter[]]
variable[position] assign[=] <ast.BoolOp object at 0x7da1b11c0f10>
variable[templates] assign[=] dictionary[[<ast.Constant object at 0x7da1b11c0310>, <ast.Constant object at 0x7da1b11c0760>, <ast.Constant object at 0x7da1b11c36a0>, <ast.Constant object at 0x7da1b11c0850>, <ast.Constant object at 0x7da1b11c2920>, <ast.Constant object at 0x7da1b11c2b00>, <ast.Constant object at 0x7da1b11c1990>, <ast.Constant object at 0x7da1b11c1360>, <ast.Constant object at 0x7da1b11c1840>, <ast.Constant object at 0x7da1b11c1d50>], [<ast.Dict object at 0x7da1b11c2230>, <ast.Dict object at 0x7da1b11c3400>, <ast.Dict object at 0x7da1b11c2200>, <ast.Dict object at 0x7da1b11c1e10>, <ast.Dict object at 0x7da1b11c1fc0>, <ast.Dict object at 0x7da1b11c14e0>, <ast.Dict object at 0x7da1b11c3820>, <ast.Dict object at 0x7da1b11c1e40>, <ast.Dict object at 0x7da1b11c0100>, <ast.Dict object at 0x7da1b11c3040>]]
variable[template] assign[=] call[name[templates].get, parameter[name[template], call[name[templates]][constant[default]]]]
variable[head] assign[=] call[constant[{}{}
].format, parameter[binary_operation[constant[ ] * binary_operation[call[name[padding_offsets]][binary_operation[name[position] <ast.Mod object at 0x7da2590d6920> name[padding_suite_length]]] + name[head_padding_extra_offset]]], <ast.IfExp object at 0x7da1b1026110>]]
variable[body_lines] assign[=] <ast.ListComp object at 0x7da1b1024a60>
if name[reverse] begin[:]
call[name[body_lines].reverse, parameter[]]
variable[body] assign[=] call[constant[].join, parameter[name[body_lines]]]
variable[output] assign[=] constant[]
if name[reverse] begin[:]
<ast.AugAssign object at 0x7da1b11a8b80>
if name[comment] begin[:]
<ast.AugAssign object at 0x7da1b1025720>
return[name[output]] | keyword[def] identifier[millipede] ( identifier[size] , identifier[comment] = keyword[None] , identifier[reverse] = keyword[False] , identifier[template] = literal[string] , identifier[position] = literal[int] , identifier[opposite] = keyword[False] ):
literal[string]
identifier[padding_offsets] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[padding_suite_length] = identifier[len] ( identifier[padding_offsets] )
identifier[head_padding_extra_offset] = literal[int]
keyword[if] identifier[opposite] :
identifier[padding_offsets] . identifier[reverse] ()
identifier[position] = identifier[position] keyword[or] literal[int]
identifier[templates] ={
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : literal[string] },
}
identifier[template] = identifier[templates] . identifier[get] ( identifier[template] , identifier[templates] [ literal[string] ])
identifier[head] = literal[string] . identifier[format] (
literal[string] *( identifier[padding_offsets] [ identifier[position] % identifier[padding_suite_length] ]+ identifier[head_padding_extra_offset] ),
identifier[template] [ literal[string] ] keyword[if] identifier[reverse] keyword[else] identifier[template] [ literal[string] ]
)
identifier[body_lines] =[
literal[string] . identifier[format] (
literal[string] * identifier[padding_offsets] [( identifier[x] + identifier[position] )% identifier[padding_suite_length] ],
identifier[template] [ literal[string] ] keyword[if] identifier[reverse] keyword[else] identifier[template] [ literal[string] ]
)
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[size] )
]
keyword[if] identifier[reverse] :
identifier[body_lines] . identifier[reverse] ()
identifier[body] = literal[string] . identifier[join] ( identifier[body_lines] )
identifier[output] = literal[string]
keyword[if] identifier[reverse] :
identifier[output] += identifier[body] + identifier[head]
keyword[if] identifier[comment] :
identifier[output] += literal[string] + identifier[comment] + literal[string]
keyword[else] :
keyword[if] identifier[comment] :
identifier[output] += identifier[comment] + literal[string]
identifier[output] += identifier[head] + identifier[body]
keyword[return] identifier[output] | def millipede(size, comment=None, reverse=False, template='default', position=0, opposite=False):
"""
Output the millipede
"""
padding_offsets = [2, 1, 0, 1, 2, 3, 4, 4, 3]
padding_suite_length = len(padding_offsets)
head_padding_extra_offset = 2
if opposite:
padding_offsets.reverse() # depends on [control=['if'], data=[]]
position = position or 0
templates = {'frozen': {'bodyr': '╔═(❄❄❄)═╗', 'body': '╚═(❄❄❄)═╝', 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'}, 'love': {'bodyr': '╔═(♥♥♥)═╗', 'body': '╚═(♥♥♥)═╝', 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'}, 'corporate': {'bodyr': '╔═(©©©)═╗', 'body': '╚═(©©©)═╝', 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'}, 'musician': {'bodyr': '╔═(♫♩♬)═╗', 'body': '╚═(♫♩♬)═╝', 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'}, 'bocal': {'bodyr': '╔═(🐟🐟🐟)═╗', 'body': '╚═(🐟🐟🐟)═╝', 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'}, 'ascii': {'bodyr': '|=(###)=|', 'body': '|=(###)=|', 'headr': '/⊙ ⊙\\', 'head': '\\⊙ ⊙/'}, 'default': {'bodyr': '╔═(███)═╗', 'body': '╚═(███)═╝', 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'}, 'inception': {'bodyr': '╔═(🐛🐛🐛)═╗', 'body': '╚═(🐛🐛🐛)═╝', 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'}, 'humancentipede': {'bodyr': '╔═(😷😷😷)═╗', 'body': '╚═(😷😷😷)═╝', 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'}, 'heart': {'bodyr': '╔═(❤️❤️❤️)═╗', 'body': '╚═(❤️❤️❤️)═╝', 'headr': '╔⊙ ⊙╗', 'head': '╚⊙ ⊙╝'}}
template = templates.get(template, templates['default'])
head = '{}{}\n'.format(' ' * (padding_offsets[position % padding_suite_length] + head_padding_extra_offset), template['headr'] if reverse else template['head'])
body_lines = ['{}{}\n'.format(' ' * padding_offsets[(x + position) % padding_suite_length], template['bodyr'] if reverse else template['body']) for x in range(size)]
if reverse:
body_lines.reverse() # depends on [control=['if'], data=[]]
body = ''.join(body_lines)
output = ''
if reverse:
output += body + head
if comment:
output += '\n' + comment + '\n' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if comment:
output += comment + '\n\n' # depends on [control=['if'], data=[]]
output += head + body
return output |
def parse_patients(job, patient_dict, skip_fusions=False):
    """
    Parse a dict of patient entries to retain only the useful entries (The user may provide more
    than we need and we don't want to download redundant things)
    :param job: The Toil job context (forwarded to get_fastq_2 when a mate fastq must be located)
    :param dict patient_dict: The dict of patient entries parsed from the input config
    :param bool skip_fusions: A flag to identify if we're skipping fusions
    :return: A parsed dict of items
    :rtype: dict
    """
    # NOTE(review): skip_fusions is accepted but never read in this body --
    # confirm whether the fusion branches below were meant to consult it.
    # Boolean flags may arrive as real bools or as the strings 'True'/'true' from the config.
    output_dict = {'ssec_encrypted': patient_dict.get('ssec_encrypted') in (True, 'True', 'true'),
                   'patient_id': patient_dict['patient_id'],
                   'tumor_type': patient_dict['tumor_type'],
                   'filter_for_OxoG': patient_dict.get('filter_for_OxoG') in (True, 'True', 'true')}
    # out_keys accumulates the names of the patient_dict entries worth keeping.
    out_keys = set()
    if 'hla_haplotype_files' not in patient_dict:
        # If we don't have the haplotype, we necessarily need all the fastqs
        for stype in 'tumor_dna', 'normal_dna', 'tumor_rna':
            out_keys.update([x for x in patient_dict if x.startswith(stype + '_fastq')])
    else:
        out_keys.add('hla_haplotype_files')
        if 'mutation_vcf' in patient_dict:
            out_keys.add('mutation_vcf')
            # We either need a genome mapped RNA bam or fastqs for this to work
            if 'tumor_rna_bam' in patient_dict:
                out_keys.add('tumor_rna_bam')
                if 'tumor_rna_bai' in patient_dict:
                    out_keys.add('tumor_rna_bai')
            else:
                out_keys.update([x for x in patient_dict if x.startswith('tumor_rna_fastq')])
        else:
            if 'fusion_bedpe' not in patient_dict:
                # We are not looking at just fusions so we either need 3 bams/fastqs to run ProTECT
                for stype in 'tumor_dna', 'normal_dna', 'tumor_rna':
                    if stype + '_bam' in patient_dict:
                        out_keys.add(stype + '_bam')
                    else:
                        out_keys.update([x for x in patient_dict if x.startswith(stype + '_fastq')])
                if 'expression_files' in patient_dict:
                    out_keys.add('expression_files')
                else:
                    # We need the transcriptome mapped RNA bam or fastqs
                    if 'tumor_rna_transcriptome_bam' in patient_dict:
                        out_keys.add('tumor_rna_transcriptome_bam')
                    else:
                        out_keys.update([x for x in patient_dict if x.startswith('tumor_rna_fastq')])
    # A provided fusion bedpe is always kept, whichever path was taken above.
    if 'fusion_bedpe' in patient_dict:
        out_keys.add('fusion_bedpe')
    for key in out_keys:
        output_dict[key] = patient_dict[key]
    # For every retained *_fastq_1, make sure the mate *_fastq_2 is present as well.
    fastq1s = [x for x in output_dict if x.endswith('fastq_1')]
    for f in fastq1s:
        f = f[:-8]  # strip the trailing '_fastq_1' to recover the sample-type prefix
        if f + '_fastq_2' not in output_dict:
            output_dict[f + '_fastq_2'] = get_fastq_2(job, patient_dict['patient_id'], f,
                                                      output_dict[f + '_fastq_1'])
    # Values that point at the GDC (strings starting with 'gdc') need special download handling.
    output_dict['gdc_inputs'] = [k for k, v in output_dict.items() if str(v).startswith('gdc')]
    if not any('dna' in k for k in output_dict.keys()):
        # There are no input DNA files so we cannot filter for oxog
        output_dict['filter_for_OxoG'] = False
return output_dict | def function[parse_patients, parameter[job, patient_dict, skip_fusions]]:
constant[
Parse a dict of patient entries to retain only the useful entries (The user may provide more
than we need and we don't want to download redundant things)
:param dict patient_dict: The dict of patient entries parsed from the input config
:param bool skip_fusions: A flag to identify if we're skipping fusions
:return: A parsed dict of items
:rtype: dict
]
variable[output_dict] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b3c70>, <ast.Constant object at 0x7da20e9b3be0>, <ast.Constant object at 0x7da20e9b30d0>, <ast.Constant object at 0x7da18fe90be0>], [<ast.Compare object at 0x7da18fe93a60>, <ast.Subscript object at 0x7da18fe92260>, <ast.Subscript object at 0x7da18fe92ce0>, <ast.Compare object at 0x7da18fe92e00>]]
variable[out_keys] assign[=] call[name[set], parameter[]]
if compare[constant[hla_haplotype_files] <ast.NotIn object at 0x7da2590d7190> name[patient_dict]] begin[:]
for taget[name[stype]] in starred[tuple[[<ast.Constant object at 0x7da18fe939d0>, <ast.Constant object at 0x7da18fe92350>, <ast.Constant object at 0x7da18fe90fa0>]]] begin[:]
call[name[out_keys].update, parameter[<ast.ListComp object at 0x7da18fe92920>]]
if compare[constant[mutation_vcf] in name[patient_dict]] begin[:]
call[name[out_keys].add, parameter[constant[mutation_vcf]]]
if compare[constant[tumor_rna_bam] in name[patient_dict]] begin[:]
call[name[out_keys].add, parameter[constant[tumor_rna_bam]]]
if compare[constant[tumor_rna_bai] in name[patient_dict]] begin[:]
call[name[out_keys].add, parameter[constant[tumor_rna_bai]]]
if compare[constant[expression_files] in name[patient_dict]] begin[:]
call[name[out_keys].add, parameter[constant[expression_files]]]
if compare[constant[fusion_bedpe] in name[patient_dict]] begin[:]
call[name[out_keys].add, parameter[constant[fusion_bedpe]]]
for taget[name[key]] in starred[name[out_keys]] begin[:]
call[name[output_dict]][name[key]] assign[=] call[name[patient_dict]][name[key]]
variable[fastq1s] assign[=] <ast.ListComp object at 0x7da20e9b0cd0>
for taget[name[f]] in starred[name[fastq1s]] begin[:]
variable[f] assign[=] call[name[f]][<ast.Slice object at 0x7da18bc71510>]
if compare[binary_operation[name[f] + constant[_fastq_2]] <ast.NotIn object at 0x7da2590d7190> name[output_dict]] begin[:]
call[name[output_dict]][binary_operation[name[f] + constant[_fastq_2]]] assign[=] call[name[get_fastq_2], parameter[name[job], call[name[patient_dict]][constant[patient_id]], name[f], call[name[output_dict]][binary_operation[name[f] + constant[_fastq_1]]]]]
call[name[output_dict]][constant[gdc_inputs]] assign[=] <ast.ListComp object at 0x7da18bc72500>
if <ast.UnaryOp object at 0x7da18bc71cf0> begin[:]
call[name[output_dict]][constant[filter_for_OxoG]] assign[=] constant[False]
return[name[output_dict]] | keyword[def] identifier[parse_patients] ( identifier[job] , identifier[patient_dict] , identifier[skip_fusions] = keyword[False] ):
literal[string]
identifier[output_dict] ={ literal[string] : identifier[patient_dict] . identifier[get] ( literal[string] ) keyword[in] ( keyword[True] , literal[string] , literal[string] ),
literal[string] : identifier[patient_dict] [ literal[string] ],
literal[string] : identifier[patient_dict] [ literal[string] ],
literal[string] : identifier[patient_dict] . identifier[get] ( literal[string] ) keyword[in] ( keyword[True] , literal[string] , literal[string] )}
identifier[out_keys] = identifier[set] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[patient_dict] :
keyword[for] identifier[stype] keyword[in] literal[string] , literal[string] , literal[string] :
identifier[out_keys] . identifier[update] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[patient_dict] keyword[if] identifier[x] . identifier[startswith] ( identifier[stype] + literal[string] )])
keyword[else] :
identifier[out_keys] . identifier[add] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[patient_dict] :
identifier[out_keys] . identifier[add] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[patient_dict] :
identifier[out_keys] . identifier[add] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[patient_dict] :
identifier[out_keys] . identifier[add] ( literal[string] )
keyword[else] :
identifier[out_keys] . identifier[update] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[patient_dict] keyword[if] identifier[x] . identifier[startswith] ( literal[string] )])
keyword[else] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[patient_dict] :
keyword[for] identifier[stype] keyword[in] literal[string] , literal[string] , literal[string] :
keyword[if] identifier[stype] + literal[string] keyword[in] identifier[patient_dict] :
identifier[out_keys] . identifier[add] ( identifier[stype] + literal[string] )
keyword[else] :
identifier[out_keys] . identifier[update] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[patient_dict] keyword[if] identifier[x] . identifier[startswith] ( identifier[stype] + literal[string] )])
keyword[if] literal[string] keyword[in] identifier[patient_dict] :
identifier[out_keys] . identifier[add] ( literal[string] )
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[patient_dict] :
identifier[out_keys] . identifier[add] ( literal[string] )
keyword[else] :
identifier[out_keys] . identifier[update] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[patient_dict] keyword[if] identifier[x] . identifier[startswith] ( literal[string] )])
keyword[if] literal[string] keyword[in] identifier[patient_dict] :
identifier[out_keys] . identifier[add] ( literal[string] )
keyword[for] identifier[key] keyword[in] identifier[out_keys] :
identifier[output_dict] [ identifier[key] ]= identifier[patient_dict] [ identifier[key] ]
identifier[fastq1s] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[output_dict] keyword[if] identifier[x] . identifier[endswith] ( literal[string] )]
keyword[for] identifier[f] keyword[in] identifier[fastq1s] :
identifier[f] = identifier[f] [:- literal[int] ]
keyword[if] identifier[f] + literal[string] keyword[not] keyword[in] identifier[output_dict] :
identifier[output_dict] [ identifier[f] + literal[string] ]= identifier[get_fastq_2] ( identifier[job] , identifier[patient_dict] [ literal[string] ], identifier[f] ,
identifier[output_dict] [ identifier[f] + literal[string] ])
identifier[output_dict] [ literal[string] ]=[ identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[output_dict] . identifier[items] () keyword[if] identifier[str] ( identifier[v] ). identifier[startswith] ( literal[string] )]
keyword[if] keyword[not] identifier[any] ( literal[string] keyword[in] identifier[k] keyword[for] identifier[k] keyword[in] identifier[output_dict] . identifier[keys] ()):
identifier[output_dict] [ literal[string] ]= keyword[False]
keyword[return] identifier[output_dict] | def parse_patients(job, patient_dict, skip_fusions=False):
"""
Parse a dict of patient entries to retain only the useful entries (The user may provide more
than we need and we don't want to download redundant things)
:param dict patient_dict: The dict of patient entries parsed from the input config
:param bool skip_fusions: A flag to identify if we're skipping fusions
:return: A parsed dict of items
:rtype: dict
"""
output_dict = {'ssec_encrypted': patient_dict.get('ssec_encrypted') in (True, 'True', 'true'), 'patient_id': patient_dict['patient_id'], 'tumor_type': patient_dict['tumor_type'], 'filter_for_OxoG': patient_dict.get('filter_for_OxoG') in (True, 'True', 'true')}
out_keys = set()
if 'hla_haplotype_files' not in patient_dict:
# If we don't have the haplotype, we necessarily need all the fastqs
for stype in ('tumor_dna', 'normal_dna', 'tumor_rna'):
out_keys.update([x for x in patient_dict if x.startswith(stype + '_fastq')]) # depends on [control=['for'], data=['stype']] # depends on [control=['if'], data=['patient_dict']]
else:
out_keys.add('hla_haplotype_files')
if 'mutation_vcf' in patient_dict:
out_keys.add('mutation_vcf')
# We either need a genome mapped RNA bam or fastqs for this to work
if 'tumor_rna_bam' in patient_dict:
out_keys.add('tumor_rna_bam')
if 'tumor_rna_bai' in patient_dict:
out_keys.add('tumor_rna_bai') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['patient_dict']]
else:
out_keys.update([x for x in patient_dict if x.startswith('tumor_rna_fastq')]) # depends on [control=['if'], data=['patient_dict']]
elif 'fusion_bedpe' not in patient_dict:
# We are not looking at just fusions so we either need 3 bams/fastqs to run ProTECT
for stype in ('tumor_dna', 'normal_dna', 'tumor_rna'):
if stype + '_bam' in patient_dict:
out_keys.add(stype + '_bam') # depends on [control=['if'], data=[]]
else:
out_keys.update([x for x in patient_dict if x.startswith(stype + '_fastq')]) # depends on [control=['for'], data=['stype']] # depends on [control=['if'], data=['patient_dict']]
if 'expression_files' in patient_dict:
out_keys.add('expression_files') # depends on [control=['if'], data=[]]
# We need the transcriptome mapped RNA bam or fastqs
elif 'tumor_rna_transcriptome_bam' in patient_dict:
out_keys.add('tumor_rna_transcriptome_bam') # depends on [control=['if'], data=[]]
else:
out_keys.update([x for x in patient_dict if x.startswith('tumor_rna_fastq')])
if 'fusion_bedpe' in patient_dict:
out_keys.add('fusion_bedpe') # depends on [control=['if'], data=[]]
for key in out_keys:
output_dict[key] = patient_dict[key] # depends on [control=['for'], data=['key']]
fastq1s = [x for x in output_dict if x.endswith('fastq_1')]
for f in fastq1s:
f = f[:-8]
if f + '_fastq_2' not in output_dict:
output_dict[f + '_fastq_2'] = get_fastq_2(job, patient_dict['patient_id'], f, output_dict[f + '_fastq_1']) # depends on [control=['if'], data=['output_dict']] # depends on [control=['for'], data=['f']]
output_dict['gdc_inputs'] = [k for (k, v) in output_dict.items() if str(v).startswith('gdc')]
if not any(('dna' in k for k in output_dict.keys())):
# There are no input DNA files so we cannot filter for oxog
output_dict['filter_for_OxoG'] = False # depends on [control=['if'], data=[]]
return output_dict |
def delete_config(self, media):
    """Deletes the files associated with this machine from disk. If medium objects are passed
    in with the @a aMedia argument, they are closed and, if closing was successful, their
    storage files are deleted as well. For convenience, this array of media files can be
    the same as the one returned from a previous :py:func:`unregister` call.
    This method must only be called on machines which are either write-locked (i.e. on instances
    returned by :py:func:`ISession.machine` ) or on unregistered machines (i.e. not yet
    registered machines created by :py:func:`IVirtualBox.create_machine` or opened by
    :py:func:`IVirtualBox.open_machine` , or after having called :py:func:`unregister` ).
    The following files will be deleted by this method:
    If :py:func:`unregister` had been previously called with a @a cleanupMode
    argument other than "UnregisterOnly", this will delete all saved state files that
    the machine had in use; possibly one if the machine was in "Saved" state and one
    for each online snapshot that the machine had.
    On each medium object passed in the @a aMedia array, this will call
    :py:func:`IMedium.close` . If that succeeds, this will attempt to delete the
    medium's storage on disk. Since the :py:func:`IMedium.close` call will fail if the medium is still
    in use, e.g. because it is still attached to a second machine; in that case the
    storage will not be deleted.
    Finally, the machine's own XML file will be deleted.
    Since deleting large disk image files can be a time-consuming I/O operation, this
    method operates asynchronously and returns an IProgress object to allow the caller
    to monitor the progress. There will be one sub-operation for each file that is
    being deleted (saved state or medium storage file).
    :py:func:`settings_modified` will return @c true after this
    method successfully returns.
    in media of type :class:`IMedium`
        List of media to be closed and whose storage files will be deleted.
    return progress of type :class:`IProgress`
        Progress object to track the operation completion.
    raises :class:`VBoxErrorInvalidVmState`
        Machine is registered but not write-locked.
    raises :class:`VBoxErrorIprtError`
        Could not delete the settings file.
    """
    if not isinstance(media, list):
        raise TypeError("media can only be an instance of type list")
    # Bug fix: validate *every* entry. The previous code only checked the first
    # ten items (media[:10]), so an invalid object at index >= 10 would slip
    # through type validation and fail later inside the API call instead.
    for a in media:
        if not isinstance(a, IMedium):
            raise TypeError(
                "array can only contain objects of type IMedium")
    # Kick off the asynchronous deleteConfig call and wrap the raw result.
    progress = self._call("deleteConfig",
                          in_p=[media])
    progress = IProgress(progress)
return progress | def function[delete_config, parameter[self, media]]:
constant[Deletes the files associated with this machine from disk. If medium objects are passed
in with the @a aMedia argument, they are closed and, if closing was successful, their
storage files are deleted as well. For convenience, this array of media files can be
the same as the one returned from a previous :py:func:`unregister` call.
This method must only be called on machines which are either write-locked (i.e. on instances
returned by :py:func:`ISession.machine` ) or on unregistered machines (i.e. not yet
registered machines created by :py:func:`IVirtualBox.create_machine` or opened by
:py:func:`IVirtualBox.open_machine` , or after having called :py:func:`unregister` ).
The following files will be deleted by this method:
If :py:func:`unregister` had been previously called with a @a cleanupMode
argument other than "UnregisterOnly", this will delete all saved state files that
the machine had in use; possibly one if the machine was in "Saved" state and one
for each online snapshot that the machine had.
On each medium object passed in the @a aMedia array, this will call
:py:func:`IMedium.close` . If that succeeds, this will attempt to delete the
medium's storage on disk. Since the :py:func:`IMedium.close` call will fail if the medium is still
in use, e.g. because it is still attached to a second machine; in that case the
storage will not be deleted.
Finally, the machine's own XML file will be deleted.
Since deleting large disk image files can be a time-consuming I/O operation, this
method operates asynchronously and returns an IProgress object to allow the caller
to monitor the progress. There will be one sub-operation for each file that is
being deleted (saved state or medium storage file).
:py:func:`settings_modified` will return @c true after this
method successfully returns.
in media of type :class:`IMedium`
List of media to be closed and whose storage files will be deleted.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorInvalidVmState`
Machine is registered but not write-locked.
raises :class:`VBoxErrorIprtError`
Could not delete the settings file.
]
if <ast.UnaryOp object at 0x7da20c7caa40> begin[:]
<ast.Raise object at 0x7da20c7cbe50>
for taget[name[a]] in starred[call[name[media]][<ast.Slice object at 0x7da20c7c9060>]] begin[:]
if <ast.UnaryOp object at 0x7da18eb57490> begin[:]
<ast.Raise object at 0x7da20c6c7c70>
variable[progress] assign[=] call[name[self]._call, parameter[constant[deleteConfig]]]
variable[progress] assign[=] call[name[IProgress], parameter[name[progress]]]
return[name[progress]] | keyword[def] identifier[delete_config] ( identifier[self] , identifier[media] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[media] , identifier[list] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[for] identifier[a] keyword[in] identifier[media] [: literal[int] ]:
keyword[if] keyword[not] identifier[isinstance] ( identifier[a] , identifier[IMedium] ):
keyword[raise] identifier[TypeError] (
literal[string] )
identifier[progress] = identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[media] ])
identifier[progress] = identifier[IProgress] ( identifier[progress] )
keyword[return] identifier[progress] | def delete_config(self, media):
"""Deletes the files associated with this machine from disk. If medium objects are passed
in with the @a aMedia argument, they are closed and, if closing was successful, their
storage files are deleted as well. For convenience, this array of media files can be
the same as the one returned from a previous :py:func:`unregister` call.
This method must only be called on machines which are either write-locked (i.e. on instances
returned by :py:func:`ISession.machine` ) or on unregistered machines (i.e. not yet
registered machines created by :py:func:`IVirtualBox.create_machine` or opened by
:py:func:`IVirtualBox.open_machine` , or after having called :py:func:`unregister` ).
The following files will be deleted by this method:
If :py:func:`unregister` had been previously called with a @a cleanupMode
argument other than "UnregisterOnly", this will delete all saved state files that
the machine had in use; possibly one if the machine was in "Saved" state and one
for each online snapshot that the machine had.
On each medium object passed in the @a aMedia array, this will call
:py:func:`IMedium.close` . If that succeeds, this will attempt to delete the
medium's storage on disk. Since the :py:func:`IMedium.close` call will fail if the medium is still
in use, e.g. because it is still attached to a second machine; in that case the
storage will not be deleted.
Finally, the machine's own XML file will be deleted.
Since deleting large disk image files can be a time-consuming I/O operation, this
method operates asynchronously and returns an IProgress object to allow the caller
to monitor the progress. There will be one sub-operation for each file that is
being deleted (saved state or medium storage file).
:py:func:`settings_modified` will return @c true after this
method successfully returns.
in media of type :class:`IMedium`
List of media to be closed and whose storage files will be deleted.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorInvalidVmState`
Machine is registered but not write-locked.
raises :class:`VBoxErrorIprtError`
Could not delete the settings file.
"""
if not isinstance(media, list):
raise TypeError('media can only be an instance of type list') # depends on [control=['if'], data=[]]
for a in media[:10]:
if not isinstance(a, IMedium):
raise TypeError('array can only contain objects of type IMedium') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
progress = self._call('deleteConfig', in_p=[media])
progress = IProgress(progress)
return progress |
def get_group(group, flags=FLAGS.BASE | FLAGS.INLINE_POLICIES | FLAGS.MANAGED_POLICIES, **conn):
    """
    Orchestrates all the calls required to fully build out an IAM Group in the following format:

    {
        "Arn": ...,
        "GroupName": ...,
        "Path": ...,
        "GroupId": ...,
        "CreateDate": ...,  # str
        "InlinePolicies": ...,
        "ManagedPolicies": ...,  # These are just the names of the Managed Policies.
        "Users": ...,  # False by default -- these are just the names of the users.
        "_version": 1
    }

    :param flags: By default, Users is disabled. This is somewhat expensive as it has to call the
        `get_group` call multiple times.
    :param group: dict MUST contain the GroupName and also a combination of either the ARN or the
        account_number.
    :param conn: dict containing enough information to make a connection to the desired account.
        Must at least have 'assume_role' key.
    :return: dict containing fully built out Group.
    """
    # GroupName is the only strictly required input field.
    if not group.get('GroupName'):
        raise MissingFieldException('Must include GroupName.')
    camelized_group = modify(group, output='camelized')
    # Derive connection details (account number / ARN) from the group dict.
    _conn_from_args(camelized_group, conn)
    return registry.build_out(flags, start_with=camelized_group,
                              pass_datastructure=True, **conn)
constant[
Orchestrates all the calls required to fully build out an IAM Group in the following format:
{
"Arn": ...,
"GroupName": ...,
"Path": ...,
"GroupId": ...,
"CreateDate": ..., # str
"InlinePolicies": ...,
"ManagedPolicies": ..., # These are just the names of the Managed Policies.
"Users": ..., # False by default -- these are just the names of the users.
"_version": 1
}
:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the `get_group` call
multiple times.
:param group: dict MUST contain the GroupName and also a combination of either the ARN or the account_number.
:param output: Determines whether keys should be returned camelized or underscored.
:param conn: dict containing enough information to make a connection to the desired account.
Must at least have 'assume_role' key.
:return: dict containing fully built out Group.
]
if <ast.UnaryOp object at 0x7da1b0150460> begin[:]
<ast.Raise object at 0x7da1b0152800>
variable[group] assign[=] call[name[modify], parameter[name[group]]]
call[name[_conn_from_args], parameter[name[group], name[conn]]]
return[call[name[registry].build_out, parameter[name[flags]]]] | keyword[def] identifier[get_group] ( identifier[group] , identifier[flags] = identifier[FLAGS] . identifier[BASE] | identifier[FLAGS] . identifier[INLINE_POLICIES] | identifier[FLAGS] . identifier[MANAGED_POLICIES] ,** identifier[conn] ):
literal[string]
keyword[if] keyword[not] identifier[group] . identifier[get] ( literal[string] ):
keyword[raise] identifier[MissingFieldException] ( literal[string] )
identifier[group] = identifier[modify] ( identifier[group] , identifier[output] = literal[string] )
identifier[_conn_from_args] ( identifier[group] , identifier[conn] )
keyword[return] identifier[registry] . identifier[build_out] ( identifier[flags] , identifier[start_with] = identifier[group] , identifier[pass_datastructure] = keyword[True] ,** identifier[conn] ) | def get_group(group, flags=FLAGS.BASE | FLAGS.INLINE_POLICIES | FLAGS.MANAGED_POLICIES, **conn):
"""
Orchestrates all the calls required to fully build out an IAM Group in the following format:
{
"Arn": ...,
"GroupName": ...,
"Path": ...,
"GroupId": ...,
"CreateDate": ..., # str
"InlinePolicies": ...,
"ManagedPolicies": ..., # These are just the names of the Managed Policies.
"Users": ..., # False by default -- these are just the names of the users.
"_version": 1
}
:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the `get_group` call
multiple times.
:param group: dict MUST contain the GroupName and also a combination of either the ARN or the account_number.
:param output: Determines whether keys should be returned camelized or underscored.
:param conn: dict containing enough information to make a connection to the desired account.
Must at least have 'assume_role' key.
:return: dict containing fully built out Group.
"""
if not group.get('GroupName'):
raise MissingFieldException('Must include GroupName.') # depends on [control=['if'], data=[]]
group = modify(group, output='camelized')
_conn_from_args(group, conn)
return registry.build_out(flags, start_with=group, pass_datastructure=True, **conn) |
def subset_bed_by_chrom(in_file, chrom, data, out_dir=None):
    """Subset a BED file to only have items from the specified chromosome.
    """
    # Default to writing the subset next to the input file.
    target_dir = os.path.dirname(in_file) if out_dir is None else out_dir
    stem, suffix = os.path.splitext(os.path.basename(in_file))
    out_file = os.path.join(target_dir, "%s-%s%s" % (stem, chrom, suffix))
    # Only regenerate when the subset is missing or older than its source.
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            _rewrite_bed_with_chrom(in_file, tx_out_file, chrom)
    return out_file
constant[Subset a BED file to only have items from the specified chromosome.
]
if compare[name[out_dir] is constant[None]] begin[:]
variable[out_dir] assign[=] call[name[os].path.dirname, parameter[name[in_file]]]
<ast.Tuple object at 0x7da1b178c6d0> assign[=] call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[in_file]]]]]
variable[out_file] assign[=] call[name[os].path.join, parameter[name[out_dir], binary_operation[constant[%s-%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b17a8220>, <ast.Name object at 0x7da1b17aa2c0>, <ast.Name object at 0x7da1b17aa2f0>]]]]]
if <ast.UnaryOp object at 0x7da1b17aa350> begin[:]
with call[name[file_transaction], parameter[name[data], name[out_file]]] begin[:]
call[name[_rewrite_bed_with_chrom], parameter[name[in_file], name[tx_out_file], name[chrom]]]
return[name[out_file]] | keyword[def] identifier[subset_bed_by_chrom] ( identifier[in_file] , identifier[chrom] , identifier[data] , identifier[out_dir] = keyword[None] ):
literal[string]
keyword[if] identifier[out_dir] keyword[is] keyword[None] :
identifier[out_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[in_file] )
identifier[base] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[in_file] ))
identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] %( identifier[base] , identifier[chrom] , identifier[ext] ))
keyword[if] keyword[not] identifier[utils] . identifier[file_uptodate] ( identifier[out_file] , identifier[in_file] ):
keyword[with] identifier[file_transaction] ( identifier[data] , identifier[out_file] ) keyword[as] identifier[tx_out_file] :
identifier[_rewrite_bed_with_chrom] ( identifier[in_file] , identifier[tx_out_file] , identifier[chrom] )
keyword[return] identifier[out_file] | def subset_bed_by_chrom(in_file, chrom, data, out_dir=None):
"""Subset a BED file to only have items from the specified chromosome.
"""
if out_dir is None:
out_dir = os.path.dirname(in_file) # depends on [control=['if'], data=['out_dir']]
(base, ext) = os.path.splitext(os.path.basename(in_file))
out_file = os.path.join(out_dir, '%s-%s%s' % (base, chrom, ext))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
_rewrite_bed_with_chrom(in_file, tx_out_file, chrom) # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
return out_file |
def histogram1d(x, bins, range, weights=None):
    """
    Compute a 1D histogram assuming equally spaced bins.

    Parameters
    ----------
    x : `~numpy.ndarray`
        The position of the points to bin in the 1D histogram
    bins : int
        The number of bins
    range : iterable
        The range as a tuple of (xmin, xmax)
    weights : `~numpy.ndarray`
        The weights of the points in the 1D histogram

    Returns
    -------
    array : `~numpy.ndarray`
        The 1D histogram array
    """
    # Validate the bin count before unpacking the range bounds.
    if not np.isscalar(bins):
        raise TypeError('bins should be an integer')
    nx = bins
    xmin, xmax = range
    # Both range edges must be finite and strictly ordered.
    if not np.isfinite(xmin):
        raise ValueError("xmin should be finite")
    if not np.isfinite(xmax):
        raise ValueError("xmax should be finite")
    if xmin >= xmax:
        raise ValueError("xmax should be greater than xmin")
    if nx <= 0:
        raise ValueError("nx should be strictly positive")
    # Dispatch to the weighted or unweighted implementation.
    if weights is not None:
        return _histogram1d_weighted(x, weights, nx, xmin, xmax)
    return _histogram1d(x, nx, xmin, xmax)
constant[
Compute a 1D histogram assuming equally spaced bins.
Parameters
----------
x : `~numpy.ndarray`
The position of the points to bin in the 1D histogram
bins : int
The number of bins
range : iterable
The range as a tuple of (xmin, xmax)
weights : `~numpy.ndarray`
The weights of the points in the 1D histogram
Returns
-------
array : `~numpy.ndarray`
The 1D histogram array
]
variable[nx] assign[=] name[bins]
if <ast.UnaryOp object at 0x7da18fe93100> begin[:]
<ast.Raise object at 0x7da18fe91ed0>
<ast.Tuple object at 0x7da18fe935e0> assign[=] name[range]
if <ast.UnaryOp object at 0x7da18fe927d0> begin[:]
<ast.Raise object at 0x7da18fe91480>
if <ast.UnaryOp object at 0x7da18fe91240> begin[:]
<ast.Raise object at 0x7da18fe914b0>
if compare[name[xmax] less_or_equal[<=] name[xmin]] begin[:]
<ast.Raise object at 0x7da18fe93c70>
if compare[name[nx] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da18fe90ca0>
if compare[name[weights] is constant[None]] begin[:]
return[call[name[_histogram1d], parameter[name[x], name[nx], name[xmin], name[xmax]]]] | keyword[def] identifier[histogram1d] ( identifier[x] , identifier[bins] , identifier[range] , identifier[weights] = keyword[None] ):
literal[string]
identifier[nx] = identifier[bins]
keyword[if] keyword[not] identifier[np] . identifier[isscalar] ( identifier[bins] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[xmin] , identifier[xmax] = identifier[range]
keyword[if] keyword[not] identifier[np] . identifier[isfinite] ( identifier[xmin] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[np] . identifier[isfinite] ( identifier[xmax] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[xmax] <= identifier[xmin] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[nx] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[weights] keyword[is] keyword[None] :
keyword[return] identifier[_histogram1d] ( identifier[x] , identifier[nx] , identifier[xmin] , identifier[xmax] )
keyword[else] :
keyword[return] identifier[_histogram1d_weighted] ( identifier[x] , identifier[weights] , identifier[nx] , identifier[xmin] , identifier[xmax] ) | def histogram1d(x, bins, range, weights=None):
"""
Compute a 1D histogram assuming equally spaced bins.
Parameters
----------
x : `~numpy.ndarray`
The position of the points to bin in the 1D histogram
bins : int
The number of bins
range : iterable
The range as a tuple of (xmin, xmax)
weights : `~numpy.ndarray`
The weights of the points in the 1D histogram
Returns
-------
array : `~numpy.ndarray`
The 1D histogram array
"""
nx = bins
if not np.isscalar(bins):
raise TypeError('bins should be an integer') # depends on [control=['if'], data=[]]
(xmin, xmax) = range
if not np.isfinite(xmin):
raise ValueError('xmin should be finite') # depends on [control=['if'], data=[]]
if not np.isfinite(xmax):
raise ValueError('xmax should be finite') # depends on [control=['if'], data=[]]
if xmax <= xmin:
raise ValueError('xmax should be greater than xmin') # depends on [control=['if'], data=[]]
if nx <= 0:
raise ValueError('nx should be strictly positive') # depends on [control=['if'], data=[]]
if weights is None:
return _histogram1d(x, nx, xmin, xmax) # depends on [control=['if'], data=[]]
else:
return _histogram1d_weighted(x, weights, nx, xmin, xmax) |
def _group_tokens(cls, token_stream, range_list):
    """
    Group tokens into snippet ranges.

    `token_stream` is a generator that produces
    `(token_type, value)` tuples,
    `range_list` is a list of `(start, end)` tuples representing
    the (inclusive) range of line numbers for each snippet.

    Assumes that `range_list` is an ascending order by start value.

    Returns a dict mapping ranges to lists of tokens:
    {
        (4, 10): [(ttype_1, val_1), (ttype_2, val_2), ...],
        (29, 39): [(ttype_3, val_3), ...],
        ...
    }

    The algorithm is slightly complicated because a single token
    can contain multiple line breaks.
    """
    # Create a map from ranges (start/end tuples) to tokens
    token_map = {rng: [] for rng in range_list}
    # Keep track of the current line number; we will
    # increment this as we encounter newlines in token values
    line_num = 1
    for ttype, val in token_stream:
        # If there are newlines in this token, we need to split it up
        # and check whether each line within the token is within one
        # of our ranges.
        if '\n' in val:
            val_lines = val.split('\n')
            # Check if the token's lines fall into each range.
            for (start, end), filtered_tokens in token_map.items():
                # Keep only the lines whose absolute line number
                # (offset within the token + current line) is in range.
                # Direct bound comparison avoids materializing a
                # `range` object per token/range pair.
                include_vals = [
                    line for offset, line in enumerate(val_lines)
                    if start <= offset + line_num <= end
                ]
                # If we found any lines, store them as a single token
                # with the original token type.
                if include_vals:
                    filtered_tokens.append((ttype, '\n'.join(include_vals)))
            # Advance the line counter by the number of line breaks
            # contained in this token.
            line_num += len(val_lines) - 1
        # No newline in this token: include it verbatim in every
        # range that covers the current line.
        else:
            for (start, end), filtered_tokens in token_map.items():
                if start <= line_num <= end:
                    filtered_tokens.append((ttype, val))
            # Otherwise, ignore the token
    return token_map
constant[
Group tokens into snippet ranges.
`token_stream` is a generator that produces
`(token_type, value)` tuples,
`range_list` is a list of `(start, end)` tuples representing
the (inclusive) range of line numbers for each snippet.
Assumes that `range_list` is an ascending order by start value.
Returns a dict mapping ranges to lists of tokens:
{
(4, 10): [(ttype_1, val_1), (ttype_2, val_2), ...],
(29, 39): [(ttype_3, val_3), ...],
...
}
The algorithm is slightly complicated because a single token
can contain multiple line breaks.
]
variable[token_map] assign[=] <ast.DictComp object at 0x7da20c990a30>
variable[line_num] assign[=] constant[1]
for taget[tuple[[<ast.Name object at 0x7da20c990d90>, <ast.Name object at 0x7da20c993a00>]]] in starred[name[token_stream]] begin[:]
if compare[constant[
] in name[val]] begin[:]
variable[val_lines] assign[=] call[name[val].split, parameter[constant[
]]]
for taget[tuple[[<ast.Tuple object at 0x7da20c991450>, <ast.Name object at 0x7da20c9929b0>]]] in starred[call[name[six].iteritems, parameter[name[token_map]]]] begin[:]
variable[include_vals] assign[=] <ast.ListComp object at 0x7da20c993e20>
if compare[call[name[len], parameter[name[include_vals]]] greater[>] constant[0]] begin[:]
variable[token] assign[=] tuple[[<ast.Name object at 0x7da20c993430>, <ast.Call object at 0x7da20c992080>]]
call[name[filtered_tokens].append, parameter[name[token]]]
<ast.AugAssign object at 0x7da20c9912d0>
return[name[token_map]] | keyword[def] identifier[_group_tokens] ( identifier[cls] , identifier[token_stream] , identifier[range_list] ):
literal[string]
identifier[token_map] ={ identifier[rng] :[] keyword[for] identifier[rng] keyword[in] identifier[range_list] }
identifier[line_num] = literal[int]
keyword[for] identifier[ttype] , identifier[val] keyword[in] identifier[token_stream] :
keyword[if] literal[string] keyword[in] identifier[val] :
identifier[val_lines] = identifier[val] . identifier[split] ( literal[string] )
keyword[for] ( identifier[start] , identifier[end] ), identifier[filtered_tokens] keyword[in] identifier[six] . identifier[iteritems] ( identifier[token_map] ):
identifier[include_vals] =[
identifier[val_lines] [ identifier[i] ] keyword[for] identifier[i] keyword[in]
identifier[range] ( literal[int] , identifier[len] ( identifier[val_lines] ))
keyword[if] identifier[i] + identifier[line_num] keyword[in] identifier[range] ( identifier[start] , identifier[end] + literal[int] )
]
keyword[if] identifier[len] ( identifier[include_vals] )> literal[int] :
identifier[token] =( identifier[ttype] , literal[string] . identifier[join] ( identifier[include_vals] ))
identifier[filtered_tokens] . identifier[append] ( identifier[token] )
identifier[line_num] += identifier[len] ( identifier[val_lines] )- literal[int]
keyword[else] :
keyword[for] ( identifier[start] , identifier[end] ), identifier[filtered_tokens] keyword[in] identifier[six] . identifier[iteritems] ( identifier[token_map] ):
keyword[if] identifier[line_num] keyword[in] identifier[range] ( identifier[start] , identifier[end] + literal[int] ):
identifier[filtered_tokens] . identifier[append] (( identifier[ttype] , identifier[val] ))
keyword[return] identifier[token_map] | def _group_tokens(cls, token_stream, range_list):
"""
Group tokens into snippet ranges.
`token_stream` is a generator that produces
`(token_type, value)` tuples,
`range_list` is a list of `(start, end)` tuples representing
the (inclusive) range of line numbers for each snippet.
Assumes that `range_list` is an ascending order by start value.
Returns a dict mapping ranges to lists of tokens:
{
(4, 10): [(ttype_1, val_1), (ttype_2, val_2), ...],
(29, 39): [(ttype_3, val_3), ...],
...
}
The algorithm is slightly complicated because a single token
can contain multiple line breaks.
"""
# Create a map from ranges (start/end tuples) to tokens
token_map = {rng: [] for rng in range_list}
# Keep track of the current line number; we will
# increment this as we encounter newlines in token values
line_num = 1
for (ttype, val) in token_stream:
# If there are newlines in this token,
# we need to split it up and check whether
# each line within the token is within one
# of our ranges.
if '\n' in val:
val_lines = val.split('\n')
# Check if the tokens match each range
for ((start, end), filtered_tokens) in six.iteritems(token_map):
# Filter out lines that are not in this range
include_vals = [val_lines[i] for i in range(0, len(val_lines)) if i + line_num in range(start, end + 1)]
# If we found any lines, store the tokens
if len(include_vals) > 0:
token = (ttype, '\n'.join(include_vals))
filtered_tokens.append(token) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Increment the line number
# by the number of lines we found
line_num += len(val_lines) - 1 # depends on [control=['if'], data=['val']]
else:
# No newline in this token
# If we're in the line range, add it
# Check if the tokens match each range
for ((start, end), filtered_tokens) in six.iteritems(token_map):
# If we got a match, store the token
if line_num in range(start, end + 1):
filtered_tokens.append((ttype, val)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
# Otherwise, ignore the token
return token_map |
def tax_class_based_on(self, tax_class_based_on):
    """Sets the tax_class_based_on of this TaxSettings.

    :param tax_class_based_on: The tax_class_based_on of this TaxSettings.
    :type: str
    """
    allowed_values = ["shippingAddress", "billingAddress"]  # noqa: E501
    # None is always accepted; any other value must be one of the allowed ones.
    is_valid = tax_class_based_on is None or tax_class_based_on in allowed_values
    if not is_valid:
        raise ValueError(
            "Invalid value for `tax_class_based_on` ({0}), must be one of {1}"  # noqa: E501
            .format(tax_class_based_on, allowed_values)
        )
    self._tax_class_based_on = tax_class_based_on
constant[Sets the tax_class_based_on of this TaxSettings.
:param tax_class_based_on: The tax_class_based_on of this TaxSettings.
:type: str
]
variable[allowed_values] assign[=] list[[<ast.Constant object at 0x7da18fe937c0>, <ast.Constant object at 0x7da18fe92620>]]
if <ast.BoolOp object at 0x7da18fe93fd0> begin[:]
<ast.Raise object at 0x7da18fe904f0>
name[self]._tax_class_based_on assign[=] name[tax_class_based_on] | keyword[def] identifier[tax_class_based_on] ( identifier[self] , identifier[tax_class_based_on] ):
literal[string]
identifier[allowed_values] =[ literal[string] , literal[string] ]
keyword[if] identifier[tax_class_based_on] keyword[is] keyword[not] keyword[None] keyword[and] identifier[tax_class_based_on] keyword[not] keyword[in] identifier[allowed_values] :
keyword[raise] identifier[ValueError] (
literal[string]
. identifier[format] ( identifier[tax_class_based_on] , identifier[allowed_values] )
)
identifier[self] . identifier[_tax_class_based_on] = identifier[tax_class_based_on] | def tax_class_based_on(self, tax_class_based_on):
"""Sets the tax_class_based_on of this TaxSettings.
:param tax_class_based_on: The tax_class_based_on of this TaxSettings.
:type: str
"""
allowed_values = ['shippingAddress', 'billingAddress'] # noqa: E501
if tax_class_based_on is not None and tax_class_based_on not in allowed_values: # noqa: E501
raise ValueError('Invalid value for `tax_class_based_on` ({0}), must be one of {1}'.format(tax_class_based_on, allowed_values)) # depends on [control=['if'], data=[]]
self._tax_class_based_on = tax_class_based_on |
def setup_network_agents(self):
    """Initializes agents on nodes of graph and registers them to the SimPy environment"""
    graph = self.env.G
    for node_id in graph.nodes():
        # Deep-copy the initial state so each agent owns its own mutable copy.
        initial_state = deepcopy(self.initial_states[node_id])
        graph.node[node_id]['agent'] = self.agent_type(
            environment=self.env, agent_id=node_id, state=initial_state)
constant[Initializes agents on nodes of graph and registers them to the SimPy environment]
for taget[name[i]] in starred[call[name[self].env.G.nodes, parameter[]]] begin[:]
call[call[name[self].env.G.node][name[i]]][constant[agent]] assign[=] call[name[self].agent_type, parameter[]] | keyword[def] identifier[setup_network_agents] ( identifier[self] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[env] . identifier[G] . identifier[nodes] ():
identifier[self] . identifier[env] . identifier[G] . identifier[node] [ identifier[i] ][ literal[string] ]= identifier[self] . identifier[agent_type] ( identifier[environment] = identifier[self] . identifier[env] , identifier[agent_id] = identifier[i] ,
identifier[state] = identifier[deepcopy] ( identifier[self] . identifier[initial_states] [ identifier[i] ])) | def setup_network_agents(self):
"""Initializes agents on nodes of graph and registers them to the SimPy environment"""
for i in self.env.G.nodes():
self.env.G.node[i]['agent'] = self.agent_type(environment=self.env, agent_id=i, state=deepcopy(self.initial_states[i])) # depends on [control=['for'], data=['i']] |
def func_on_enter(func):
    """
    Register the `func` as a callback reacting only to ENTER.

    Note:
        This function doesn't bind the key to the element, just creates sort of
        filter, which ignores all other events.
    """
    def _enter_only_handler(ev):
        ev.stopPropagation()
        # Ignore every key except ENTER (key code 13).
        if ev.keyCode != 13:
            return
        func(ev)
    return _enter_only_handler
constant[
Register the `func` as a callback reacting only to ENTER.
Note:
This function doesn't bind the key to the element, just creates sort of
filter, which ignores all other events.
]
def function[function_after_enter_pressed, parameter[ev]]:
call[name[ev].stopPropagation, parameter[]]
if compare[name[ev].keyCode equal[==] constant[13]] begin[:]
call[name[func], parameter[name[ev]]]
return[name[function_after_enter_pressed]] | keyword[def] identifier[func_on_enter] ( identifier[func] ):
literal[string]
keyword[def] identifier[function_after_enter_pressed] ( identifier[ev] ):
identifier[ev] . identifier[stopPropagation] ()
keyword[if] identifier[ev] . identifier[keyCode] == literal[int] :
identifier[func] ( identifier[ev] )
keyword[return] identifier[function_after_enter_pressed] | def func_on_enter(func):
"""
Register the `func` as a callback reacting only to ENTER.
Note:
This function doesn't bind the key to the element, just creates sort of
filter, which ignores all other events.
"""
def function_after_enter_pressed(ev):
ev.stopPropagation()
# if the key was `enter` ..
if ev.keyCode == 13:
func(ev) # depends on [control=['if'], data=[]]
return function_after_enter_pressed |
def photeq(files='*_flt.fits', sciext='SCI', errext='ERR',
ref_phot=None, ref_phot_ext=None,
phot_kwd='PHOTFLAM', aux_phot_kwd='PHOTFNU',
search_primary=True,
readonly=True, clobber=False, logfile='photeq.log'):
"""
Adjust data values of images by equalizing each chip's PHOTFLAM value
to a single common value so that all chips can be treated equally
by ``AstroDrizzle``.
Parameters
----------
files : str (Default = ``'*_flt.fits'``)
A string containing one of the following:
* a comma-separated list of valid science image file names,
e.g.: ``'j1234567q_flt.fits, j1234568q_flt.fits'``;
* an @-file name, e.g., ``'@files_to_match.txt'``. See notes
section for details on the format of the @-files.
.. note::
**Valid science image file names** are:
* file names of existing FITS, GEIS, or WAIVER FITS files;
* partial file names containing wildcard characters, e.g.,
``'*_flt.fits'``;
* Association (ASN) tables (must have ``_asn``, or ``_asc``
suffix), e.g., ``'j12345670_asn.fits'``.
sciext : str (Default = 'SCI')
Extension *name* of extensions whose data and/or headers should
be corrected.
errext : str (Default = 'ERR')
Extension *name* of the extensions containing corresponding error
arrays. Error arrays are corrected in the same way as science data.
ref_phot : float, None (Default = None)
A number indicating the new value of PHOTFLAM or PHOTFNU
(set by 'phot_kwd') to which the data should be adjusted.
ref_phot_ext : int, str, tuple, None (Default = None)
Extension from which the `photeq` should get the reference photometric
value specified by the `phot_kwd` parameter. This parameter is ignored
if `ref_phot` **is not** `None`. When `ref_phot_ext` is `None`, then
the reference inverse sensitivity value will be picked from the
first `sciext` of the first input image containing `phot_kwd`.
phot_kwd : str (Default = 'PHOTFLAM')
Specifies the primary keyword which contains inverse sensitivity
(e.g., PHOTFLAM). It is used to compute conversion factors by
which data should be rescaled.
aux_phot_kwd : str, None, list of str (Default = 'PHOTFNU')
Same as `phot_kwd` but describes *other* photometric keyword(s)
that should be corrected by inverse of the scale factor used to correct
data. These keywords are *not* used to compute conversion factors.
Multiple keywords can be specified as a Python list of strings:
``['PHOTFNU', 'PHOTOHMY']``.
.. note::
If specifying multiple secondary photometric keywords in the TEAL
interface, use a comma-separated list of keywords.
search_primary : bool (Default = True)
Specifies whether to first search the primary header for the
presence of `phot_kwd` keyword and compute conversion factor based on
that value. This is (partially) ignored when `ref_phot` is not `None` in
the sense that the value specified by `ref_phot` will be used as the
reference *but* in all images primary will be searched for `phot_kwd`
and `aux_phot_kwd` and those values will be corrected
(if ``search_primary=True``).
readonly : bool (Default = True)
If `True`, `photeq` will not modify input files (nevertheless, it will
convert input GEIS or WAVERED FITS files to MEF and could overwrite
existing MEF files if `clobber` is set to `True`).
The (console or log file) output however will be identical to the case
when ``readonly=False`` and it can be examined before applying these
changes to input files.
clobber : bool (Default = False)
Overwrite existing MEF files when converting input WAVERED FITS or GEIS
to MEF.
logfile : str, None (Default = 'photeq.log')
File name of the log file.
Notes
-----
By default, `photeq` will search for the first inverse sensitivity
value (given by the header keyword specified by the `phot_kwd` parameter,
e.g., PHOTFLAM or PHOTFNU) found in the input images and it will equalize
all other images to this reference value.
It is possible to tell `photeq` to look for the reference inverse
sensitivity value only in a specific extension of input images, e.g.: 3,
('sci',3), etc. This can be done by setting `ref_phot_ext` to a specific
extension. This may be useful, for example, for WFPC2 images: WF3 chip was
one of the better calibrated chips, and so, if one prefers to have
inverse sensitivities equalized to the inverse sensitivity of the WF3 chip,
one can set ``ref_phot_ext=3``.
Alternatively, one can provide their own reference inverse sensitivity
value to which all other images should be "equalized" through the
parameter `ref_phot`.
.. note::
Default parameter values (except for `files`, `readonly`, and `clobber`)
should be acceptable for most HST images.
.. warning::
If images are intended to be used with ``AstroDrizzle``, it is
recommended that sky background measurement be performed on "equalized"
images as the `photeq` is not aware of sky user keyword in the image
headers and thus it cannot correct sky values already recorded in the
headers.
Examples
--------
#. In most cases the default parameters should suffice:
>>> from drizzlepac import photeq
>>> photeq.photeq(files='*_flt.fits', readonly=False)
#. If the re-calibration needs to be done on PHOTFNU rather than
PHOTFLAM, then:
>>> photeq.photeq(files='*_flt.fits', ref_phot='PHOTFNU',
... aux_phot_kwd='PHOTFLAM')
#. If for WFPC2 data one desires that PHOTFLAM from WF3 be used as the
reference in WFPC2 images, then:
>>> photeq.photeq(files='*_flt.fits', ref_phot_ext=3) # or ('sci',3)
"""
# Time it
runtime_begin = datetime.now()
# check that input file name is a string:
if not isinstance(files, str):
raise TypeError("Argument 'files' must be a comma-separated list of "
" file names")
# Set-up log files:
if isinstance(logfile, str):
# first, in case there are any "leftover" file handlers,
# close and remove them:
for h in _log.handlers:
if h is not _sh_log and isinstance(h, logging.FileHandler):
h.close()
_log.removeHandler(h)
# create file handler:
log_formatter = logging.Formatter('[%(levelname)s:] %(message)s')
log_file_handler = logging.FileHandler(logfile)
log_file_handler.setFormatter(log_formatter)
# add log_file_handler to logger
_log.addHandler(log_file_handler)
elif logfile is not None:
raise TypeError("Unsupported 'logfile' type")
# BEGIN:
_mlinfo("***** {0} started on {1}".format(__taskname__, runtime_begin))
_mlinfo(" Version {0} ({1})".format(__version__, __version_date__))
# check that extension names are strings (or None for error ext):
if sciext is None:
sci_ext4parse = '*'
ext2get = None
else:
if not isinstance(sciext, str):
raise TypeError("Argument 'sciext' must be a string or None")
sciext = sciext.strip()
if sciext.upper() == 'PRIMARY':
sciext = sciext.upper()
ext2get = (sciext, 1)
else:
ext2get = (sciext, '*')
sci_ext4parse = ext2get
if errext is not None and not isinstance(errext, str):
raise TypeError("Argument 'errext' must be a string or None")
# check that phot_kwd is supported:
if not isinstance(phot_kwd, str):
raise TypeError("Argument 'phot_kwd' must be a string")
phot_kwd = phot_kwd.strip().upper()
# check that ref_phot_ext has correct type:
if ref_phot_ext is not None and not \
(isinstance(ref_phot_ext, int) or isinstance(ref_phot_ext, str) \
or (isinstance(ref_phot_ext, tuple) and len(ref_phot_ext) == 2 \
and isinstance(ref_phot_ext[0], str) and \
isinstance(ref_phot_ext[1], int))):
raise TypeError("Unsupported 'ref_phot_ext' type")
if isinstance(ref_phot_ext, str):
ref_phot_ext = (ref_phot_ext, 1)
if aux_phot_kwd is None:
aux_phot_kwd = []
elif isinstance(aux_phot_kwd, str):
aux_phot_kwd = [aux_phot_kwd.strip().upper()]
if phot_kwd == aux_phot_kwd:
raise ValueError("Auxiliary photometric keyword must be different "
"from the main photometric keyword 'phot_kwd'.")
elif hasattr(aux_phot_kwd, '__iter__'):
if not all([isinstance(phot, str) for phot in aux_phot_kwd]):
raise TypeError("Argument 'aux_phot_kwd' must be a string, list of "
"strings, or None")
aux_phot_kwd = [phot.strip().upper() for phot in aux_phot_kwd]
if ref_phot in aux_phot_kwd:
raise ValueError("Auxiliary photometric keyword(s) must be "
"different from the main photometric keyword "
"'phot_kwd'.")
else:
raise TypeError("Argument 'aux_phot_kwd' must be a string, list of "
"strings, or None")
# read input file list:
fl = parseat.parse_cs_line(csline=files, default_ext=sci_ext4parse,
im_fmode='readonly' if readonly else 'update',
clobber=clobber, fnamesOnly=True,
doNotOpenDQ=True)
# check if user supplied file extensions, set them to the sciext,
# and warn that they will be ignored:
for f in fl:
if f.count > 1 or f.fext[0] != sci_ext4parse:
_mlwarn("WARNING: Extension specifications for file {:s} "
"will be ignored. Using all {:s} extensions instead."
.format(f.image, 'image-like' if sciext is None else \
"{:s}".format(utils.ext2str(sciext,
default_extver=None))))
# find the reference PHOTFLAM/PHOTNU:
flc = fl[:]
ref_hdu = None
ref_ext = None
ref_user = True
if ref_phot is None:
ref_user = False
for f in flc:
f.convert2ImageRef()
# get primary hdu:
pri_hdu = f.image.hdu[0]
# find all valid extensions:
if ref_phot_ext is None:
if sciext == 'PRIMARY':
extnum = [0]
else:
extnum = utils.get_ext_list(f.image, sciext)
is_pri_hdu = [f.image.hdu[ext] is pri_hdu for ext in extnum]
# if necessary, add primary header to the hdu list:
if search_primary:
try:
pri_index = is_pri_hdu.index(True)
extnum.insert(0, extnum.pop(pri_index))
except ValueError:
extnum.insert(0, 0)
else:
extnum = [ref_phot_ext]
for ext in extnum:
hdu = f.image.hdu[ext]
if phot_kwd in hdu.header:
ref_phot = hdu.header[phot_kwd]
ref_ext = ext
ref_hdu = hdu
break
if ref_phot is None:
_mlwarn("WARNING: Could not find specified inverse "
" sensitivity keyword '{:s}'\n"
" in any of the {} extensions of file '{}'.\n"
" This input file will be ignored."
.format(phot_kwd, 'image-like' if sciext is None else \
"{:s}".format(utils.ext2str(sciext,
default_extver=None)),
os.path.basename(f.image.original_fname)))
f.release_all_images()
fl.remove(f)
else:
break
if ref_phot is None:
raise RuntimeError("Could not find the inverse sensitivity keyword "
"'{:s}' in the specified headers of "
"the input image(s).\nCannot continue."
.format(phot_kwd))
aux_phot_kwd_list = ','.join(aux_phot_kwd)
_mlinfo("\nPRIMARY PHOTOMETRIC KEYWORD: {:s}".format(phot_kwd))
_mlinfo("SECONDARY PHOTOMETRIC KEYWORD(S): {:s}"
.format(aux_phot_kwd_list if aux_phot_kwd_list else 'None'))
if ref_user:
_mlinfo("REFERENCE VALUE PROVIDED BY USER: '{:s}'={}\n"
.format(phot_kwd, ref_phot))
else:
_mlinfo("REFERENCE VALUE FROM FILE: '{:s}[{:s}]'\n"
.format(os.path.basename(f.image.original_fname),
utils.ext2str(ref_ext)))
_mlinfo("REFERENCE '{:s}' VALUE IS: {}".format(phot_kwd, ref_phot))
# equalize PHOTFLAM/PHOTNU
for f in fl:
# open the file if necessary:
if f.fnamesOnly:
_mlinfo("\nProcessing file '{:s}'".format(f.image))
f.convert2ImageRef()
else:
_mlinfo("\nProcessing file '{:s}'".format(f.image.original_fname))
# first, see if photflam is in the primary header and save this value:
pri_conv = None
if search_primary:
whdu = f.image.hdu[0]
if phot_kwd in whdu.header:
_mlinfo(" * Primary header:")
if whdu is ref_hdu:
pri_conv = 1.0
_mlinfo(" - '{}' = {} found in the primary header."
.format(phot_kwd, whdu.header[phot_kwd]))
_mlinfo(" - Data conversion factor based on primary "
"header: {}".format(pri_conv))
else:
_mlinfo(" - '{}' found in the primary header."
.format(phot_kwd))
pri_conv = whdu.header[phot_kwd] / ref_phot
_mlinfo(" - Setting {:s} in the primary header to {} "
"(old value was {})"
.format(phot_kwd, ref_phot, whdu.header[phot_kwd]))
_mlinfo(" - Data conversion factor based on primary "
"header: {}".format(pri_conv))
whdu.header[phot_kwd] = ref_phot
# correct the "other" photometric keyword, if present:
if pri_conv is not None and whdu is not ref_hdu:
for aux_kwd in aux_phot_kwd:
if aux_kwd in whdu.header:
old_aux_phot = whdu.header[aux_kwd]
new_aux_phot = old_aux_phot / pri_conv
whdu.header[aux_kwd] = new_aux_phot
_mlinfo(" - Setting {:s} in the primary header "
"to {} (old value was {})"
.format(aux_kwd, new_aux_phot, old_aux_phot))
# process data and error arrays when 'sciext' was specifically set to
# 'PRIMARY':
if sciext == 'PRIMARY' and pri_conv is not None:
has_data = (hasattr(whdu, 'data') and
whdu.data is not None)
# correct data:
if has_data:
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= pri_conv
_mlinfo(" - Data have been multiplied by {}"
.format(pri_conv))
else:
_mlwarn("WARNING: Data not converted because it is of "
"non-floating point type.")
# correct error array:
if errext is not None:
eext = (errext, 1)
try:
whdu = f.image.hdu[eext]
except KeyError:
_mlwarn(" - WARNING: Error extension {:s} not found."
.format(utils.ext2str(eext)))
f.release_all_images()
continue
if hasattr(whdu, 'data') and whdu.data is not None:
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= pri_conv
_mlinfo(" - Error array (ext={}) has been "
"multiplied by {}".format(eext, pri_conv))
else:
_mlinfo(" - Error array in extension {:s} "
"contains non-floating point data.\n"
" Skipping this extension"
.format(utils.ext2str(ext)))
f.release_all_images()
continue
# find all valid extensions:
extnum = utils.get_ext_list(f.image, sciext)
for ext in extnum:
whdu = f.image.hdu[ext]
conv = None
if whdu is ref_hdu:
_mlinfo(" * EXT: {} - This is the \"reference\" extension.\n"
" Nothing to do. Skipping this extension..."
.format(ext))
continue
has_data = (hasattr(whdu, 'data') and
whdu.data is not None)
if has_data and not np.issubdtype(whdu.data.dtype, np.floating):
_mlinfo(" * EXT: {} contains non-floating point data. "
"Skipping this extension".format(ext))
# find all auxiliary photometric keywords present in the header:
paux = [aux_kwd for aux_kwd in aux_phot_kwd if aux_kwd \
in whdu.header]
if phot_kwd in whdu.header:
_mlinfo(" * EXT: {}".format(ext))
old_phot = whdu.header[phot_kwd]
conv = old_phot / ref_phot
_mlinfo(" - Setting {:s} to {} (old value was {})"
.format(phot_kwd, ref_phot, old_phot))
whdu.header[phot_kwd] = ref_phot
_mlinfo(" - Computed conversion factor for data: {}"
.format(conv))
elif pri_conv is None:
_mlinfo(" * EXT: {}".format(ext))
_mlinfo(" - '{:s} not found. Skipping this extension..."
.format(phot_kwd))
continue
else:
_mlinfo(" * EXT: {}".format(ext))
# if paux:
# print("ERROR: Primary photometric keyword ('{:s}') is "
# "missing but\n the secondary keywords ('{:s}') "
# "are present. This extension cannot be processed."
# .format(phot_kwd, ','.join(paux)))
# continue
_mlinfo(" - '{:s} not found. Using conversion factor "
"based\n on the primary header: {}"
.format(phot_kwd, pri_conv))
conv = pri_conv
# correct the "other" photometric keyword, if present:
if conv is not None:
for aux_kwd in paux:
old_aux_phot = whdu.header[aux_kwd]
new_aux_phot = old_aux_phot / conv
whdu.header[aux_kwd] = new_aux_phot
_mlinfo(" - Setting {:s} to {} (old value was {})"
.format(aux_kwd, new_aux_phot, old_aux_phot))
# correct data:
if has_data:
if conv is None:
_mlinfo(" * EXT: {}".format(ext))
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= conv
_mlinfo(" - Data have been multiplied by {}"
.format(conv))
else:
_mlinfo("WARNING: Non-floating point data. Data cannot "
"be re-scaled.")
# correct error array:
if errext is not None and isinstance(ext, tuple) and len(ext) == 2:
eext = (errext, ext[1])
try:
whdu = f.image.hdu[eext]
except KeyError:
continue
if hasattr(whdu, 'data') and whdu.data is not None:
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= conv
_mlinfo(" - Error array (ext={}) has been "
"multiplied by {}".format(eext, conv))
else:
_mlinfo(" - Error array in extension {:s} "
"contains non-floating point data.\n"
" Skipping this extension"
.format(utils.ext2str(ext)))
f.release_all_images()
_mlinfo("\nDone.")
if readonly:
_mlinfo("\nNOTE: '{:s}' was run in READONLY mode\n"
" and input image(s)' content WAS NOT MODIFIED."
.format(__taskname__))
# close all log file handlers:
for h in _log.handlers:
if h is not _sh_log and isinstance(h, logging.FileHandler):
h.close()
_log.removeHandler(h) | def function[photeq, parameter[files, sciext, errext, ref_phot, ref_phot_ext, phot_kwd, aux_phot_kwd, search_primary, readonly, clobber, logfile]]:
constant[
Adjust data values of images by equalizing each chip's PHOTFLAM value
to a single common value so that all chips can be treated equally
by ``AstroDrizzle``.
Parameters
----------
files : str (Default = ``'*_flt.fits'``)
A string containing one of the following:
* a comma-separated list of valid science image file names,
e.g.: ``'j1234567q_flt.fits, j1234568q_flt.fits'``;
* an @-file name, e.g., ``'@files_to_match.txt'``. See notes
section for details on the format of the @-files.
.. note::
**Valid science image file names** are:
* file names of existing FITS, GEIS, or WAIVER FITS files;
* partial file names containing wildcard characters, e.g.,
``'*_flt.fits'``;
* Association (ASN) tables (must have ``_asn``, or ``_asc``
suffix), e.g., ``'j12345670_asn.fits'``.
sciext : str (Default = 'SCI')
Extension *name* of extensions whose data and/or headers should
be corrected.
errext : str (Default = 'ERR')
Extension *name* of the extensions containing corresponding error
arrays. Error arrays are corrected in the same way as science data.
ref_phot : float, None (Default = None)
A number indicating the new value of PHOTFLAM or PHOTFNU
(set by 'phot_kwd') to which the data should be adjusted.
ref_phot_ext : int, str, tuple, None (Default = None)
Extension from which the `photeq` should get the reference photometric
value specified by the `phot_kwd` parameter. This parameter is ignored
if `ref_phot` **is not** `None`. When `ref_phot_ext` is `None`, then
the reference inverse sensitivity value will be picked from the
first `sciext` of the first input image containing `phot_kwd`.
phot_kwd : str (Default = 'PHOTFLAM')
Specifies the primary keyword which contains inverse sensitivity
(e.g., PHOTFLAM). It is used to compute conversion factors by
which data should be rescaled.
aux_phot_kwd : str, None, list of str (Default = 'PHOTFNU')
Same as `phot_kwd` but describes *other* photometric keyword(s)
that should be corrected by inverse of the scale factor used to correct
data. These keywords are *not* used to compute conversion factors.
Multiple keywords can be specified as a Python list of strings:
``['PHOTFNU', 'PHOTOHMY']``.
.. note::
If specifying multiple secondary photometric keywords in the TEAL
interface, use a comma-separated list of keywords.
search_primary : bool (Default = True)
Specifies whether to first search the primary header for the
presence of `phot_kwd` keyword and compute conversion factor based on
that value. This is (partially) ignored when `ref_phot` is not `None` in
the sense that the value specified by `ref_phot` will be used as the
reference *but* in all images primary will be searched for `phot_kwd`
and `aux_phot_kwd` and those values will be corrected
(if ``search_primary=True``).
readonly : bool (Default = True)
If `True`, `photeq` will not modify input files (nevertheless, it will
convert input GEIS or WAVERED FITS files to MEF and could overwrite
existing MEF files if `clobber` is set to `True`).
The (console or log file) output however will be identical to the case
when ``readonly=False`` and it can be examined before applying these
changes to input files.
clobber : bool (Default = False)
Overwrite existing MEF files when converting input WAVERED FITS or GEIS
to MEF.
logfile : str, None (Default = 'photeq.log')
File name of the log file.
Notes
-----
By default, `photeq` will search for the first inverse sensitivity
value (given by the header keyword specified by the `phot_kwd` parameter,
e.g., PHOTFLAM or PHOTFNU) found in the input images and it will equalize
all other images to this reference value.
It is possible to tell `photeq` to look for the reference inverse
sensitivity value only in a specific extension of input images, e.g.: 3,
('sci',3), etc. This can be done by setting `ref_phot_ext` to a specific
extension. This may be useful, for example, for WFPC2 images: WF3 chip was
one of the better calibrated chips, and so, if one prefers to have
inverse sensitivities equalized to the inverse sensitivity of the WF3 chip,
one can set ``ref_phot_ext=3``.
Alternatively, one can provide their own reference inverse sensitivity
value to which all other images should be "equalized" through the
parameter `ref_phot`.
.. note::
Default parameter values (except for `files`, `readonly`, and `clobber`)
should be acceptable for most HST images.
.. warning::
If images are intended to be used with ``AstroDrizzle``, it is
recommended that sky background measurement be performed on "equalized"
images as the `photeq` is not aware of sky user keyword in the image
headers and thus it cannot correct sky values already recorded in the
headers.
Examples
--------
#. In most cases the default parameters should suffice:
>>> from drizzlepac import photeq
>>> photeq.photeq(files='*_flt.fits', readonly=False)
#. If the re-calibration needs to be done on PHOTFNU rather than
PHOTFLAM, then:
>>> photeq.photeq(files='*_flt.fits', ref_phot='PHOTFNU',
... aux_phot_kwd='PHOTFLAM')
#. If for WFPC2 data one desires that PHOTFLAM from WF3 be used as the
reference in WFPC2 images, then:
>>> photeq.photeq(files='*_flt.fits', ref_phot_ext=3) # or ('sci',3)
]
variable[runtime_begin] assign[=] call[name[datetime].now, parameter[]]
if <ast.UnaryOp object at 0x7da1b1a85a50> begin[:]
<ast.Raise object at 0x7da1b1a85960>
if call[name[isinstance], parameter[name[logfile], name[str]]] begin[:]
for taget[name[h]] in starred[name[_log].handlers] begin[:]
if <ast.BoolOp object at 0x7da1b1a85600> begin[:]
call[name[h].close, parameter[]]
call[name[_log].removeHandler, parameter[name[h]]]
variable[log_formatter] assign[=] call[name[logging].Formatter, parameter[constant[[%(levelname)s:] %(message)s]]]
variable[log_file_handler] assign[=] call[name[logging].FileHandler, parameter[name[logfile]]]
call[name[log_file_handler].setFormatter, parameter[name[log_formatter]]]
call[name[_log].addHandler, parameter[name[log_file_handler]]]
call[name[_mlinfo], parameter[call[constant[***** {0} started on {1}].format, parameter[name[__taskname__], name[runtime_begin]]]]]
call[name[_mlinfo], parameter[call[constant[ Version {0} ({1})].format, parameter[name[__version__], name[__version_date__]]]]]
if compare[name[sciext] is constant[None]] begin[:]
variable[sci_ext4parse] assign[=] constant[*]
variable[ext2get] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b1b09120> begin[:]
<ast.Raise object at 0x7da1b1b08f70>
if <ast.UnaryOp object at 0x7da1b1b08e80> begin[:]
<ast.Raise object at 0x7da1b1b08d90>
variable[phot_kwd] assign[=] call[call[name[phot_kwd].strip, parameter[]].upper, parameter[]]
if <ast.BoolOp object at 0x7da1b1b08b50> begin[:]
<ast.Raise object at 0x7da1b1b08490>
if call[name[isinstance], parameter[name[ref_phot_ext], name[str]]] begin[:]
variable[ref_phot_ext] assign[=] tuple[[<ast.Name object at 0x7da1b1b08250>, <ast.Constant object at 0x7da1b1b08220>]]
if compare[name[aux_phot_kwd] is constant[None]] begin[:]
variable[aux_phot_kwd] assign[=] list[[]]
variable[fl] assign[=] call[name[parseat].parse_cs_line, parameter[]]
for taget[name[f]] in starred[name[fl]] begin[:]
if <ast.BoolOp object at 0x7da1b1b7cb80> begin[:]
call[name[_mlwarn], parameter[call[constant[WARNING: Extension specifications for file {:s} will be ignored. Using all {:s} extensions instead.].format, parameter[name[f].image, <ast.IfExp object at 0x7da1b1b7f040>]]]]
variable[flc] assign[=] call[name[fl]][<ast.Slice object at 0x7da1b1b7c850>]
variable[ref_hdu] assign[=] constant[None]
variable[ref_ext] assign[=] constant[None]
variable[ref_user] assign[=] constant[True]
if compare[name[ref_phot] is constant[None]] begin[:]
variable[ref_user] assign[=] constant[False]
for taget[name[f]] in starred[name[flc]] begin[:]
call[name[f].convert2ImageRef, parameter[]]
variable[pri_hdu] assign[=] call[name[f].image.hdu][constant[0]]
if compare[name[ref_phot_ext] is constant[None]] begin[:]
if compare[name[sciext] equal[==] constant[PRIMARY]] begin[:]
variable[extnum] assign[=] list[[<ast.Constant object at 0x7da1b1b7fb50>]]
variable[is_pri_hdu] assign[=] <ast.ListComp object at 0x7da1b1b7f940>
if name[search_primary] begin[:]
<ast.Try object at 0x7da1b1b7de70>
for taget[name[ext]] in starred[name[extnum]] begin[:]
variable[hdu] assign[=] call[name[f].image.hdu][name[ext]]
if compare[name[phot_kwd] in name[hdu].header] begin[:]
variable[ref_phot] assign[=] call[name[hdu].header][name[phot_kwd]]
variable[ref_ext] assign[=] name[ext]
variable[ref_hdu] assign[=] name[hdu]
break
if compare[name[ref_phot] is constant[None]] begin[:]
call[name[_mlwarn], parameter[call[constant[WARNING: Could not find specified inverse sensitivity keyword '{:s}'
in any of the {} extensions of file '{}'.
This input file will be ignored.].format, parameter[name[phot_kwd], <ast.IfExp object at 0x7da1b1b7f8e0>, call[name[os].path.basename, parameter[name[f].image.original_fname]]]]]]
call[name[f].release_all_images, parameter[]]
call[name[fl].remove, parameter[name[f]]]
if compare[name[ref_phot] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1a7d870>
variable[aux_phot_kwd_list] assign[=] call[constant[,].join, parameter[name[aux_phot_kwd]]]
call[name[_mlinfo], parameter[call[constant[
PRIMARY PHOTOMETRIC KEYWORD: {:s}].format, parameter[name[phot_kwd]]]]]
call[name[_mlinfo], parameter[call[constant[SECONDARY PHOTOMETRIC KEYWORD(S): {:s}].format, parameter[<ast.IfExp object at 0x7da1b1a7c430>]]]]
if name[ref_user] begin[:]
call[name[_mlinfo], parameter[call[constant[REFERENCE VALUE PROVIDED BY USER: '{:s}'={}
].format, parameter[name[phot_kwd], name[ref_phot]]]]]
for taget[name[f]] in starred[name[fl]] begin[:]
if name[f].fnamesOnly begin[:]
call[name[_mlinfo], parameter[call[constant[
Processing file '{:s}'].format, parameter[name[f].image]]]]
call[name[f].convert2ImageRef, parameter[]]
variable[pri_conv] assign[=] constant[None]
if name[search_primary] begin[:]
variable[whdu] assign[=] call[name[f].image.hdu][constant[0]]
if compare[name[phot_kwd] in name[whdu].header] begin[:]
call[name[_mlinfo], parameter[constant[ * Primary header:]]]
if compare[name[whdu] is name[ref_hdu]] begin[:]
variable[pri_conv] assign[=] constant[1.0]
call[name[_mlinfo], parameter[call[constant[ - '{}' = {} found in the primary header.].format, parameter[name[phot_kwd], call[name[whdu].header][name[phot_kwd]]]]]]
call[name[_mlinfo], parameter[call[constant[ - Data conversion factor based on primary header: {}].format, parameter[name[pri_conv]]]]]
if <ast.BoolOp object at 0x7da1b1a7eef0> begin[:]
for taget[name[aux_kwd]] in starred[name[aux_phot_kwd]] begin[:]
if compare[name[aux_kwd] in name[whdu].header] begin[:]
variable[old_aux_phot] assign[=] call[name[whdu].header][name[aux_kwd]]
variable[new_aux_phot] assign[=] binary_operation[name[old_aux_phot] / name[pri_conv]]
call[name[whdu].header][name[aux_kwd]] assign[=] name[new_aux_phot]
call[name[_mlinfo], parameter[call[constant[ - Setting {:s} in the primary header to {} (old value was {})].format, parameter[name[aux_kwd], name[new_aux_phot], name[old_aux_phot]]]]]
if <ast.BoolOp object at 0x7da1b1a7cb80> begin[:]
variable[has_data] assign[=] <ast.BoolOp object at 0x7da1b1a7e470>
if name[has_data] begin[:]
if call[name[np].issubdtype, parameter[name[whdu].data.dtype, name[np].floating]] begin[:]
<ast.AugAssign object at 0x7da1b1a7c4c0>
call[name[_mlinfo], parameter[call[constant[ - Data have been multiplied by {}].format, parameter[name[pri_conv]]]]]
if compare[name[errext] is_not constant[None]] begin[:]
variable[eext] assign[=] tuple[[<ast.Name object at 0x7da1b1a7df00>, <ast.Constant object at 0x7da1b1a7e110>]]
<ast.Try object at 0x7da1b1a7d540>
if <ast.BoolOp object at 0x7da1b1b84490> begin[:]
if call[name[np].issubdtype, parameter[name[whdu].data.dtype, name[np].floating]] begin[:]
<ast.AugAssign object at 0x7da1b1b844f0>
call[name[_mlinfo], parameter[call[constant[ - Error array (ext={}) has been multiplied by {}].format, parameter[name[eext], name[pri_conv]]]]]
call[name[f].release_all_images, parameter[]]
continue
variable[extnum] assign[=] call[name[utils].get_ext_list, parameter[name[f].image, name[sciext]]]
for taget[name[ext]] in starred[name[extnum]] begin[:]
variable[whdu] assign[=] call[name[f].image.hdu][name[ext]]
variable[conv] assign[=] constant[None]
if compare[name[whdu] is name[ref_hdu]] begin[:]
call[name[_mlinfo], parameter[call[constant[ * EXT: {} - This is the "reference" extension.
Nothing to do. Skipping this extension...].format, parameter[name[ext]]]]]
continue
variable[has_data] assign[=] <ast.BoolOp object at 0x7da1b1b86620>
if <ast.BoolOp object at 0x7da1b1b86290> begin[:]
call[name[_mlinfo], parameter[call[constant[ * EXT: {} contains non-floating point data. Skipping this extension].format, parameter[name[ext]]]]]
variable[paux] assign[=] <ast.ListComp object at 0x7da1b1b85f60>
if compare[name[phot_kwd] in name[whdu].header] begin[:]
call[name[_mlinfo], parameter[call[constant[ * EXT: {}].format, parameter[name[ext]]]]]
variable[old_phot] assign[=] call[name[whdu].header][name[phot_kwd]]
variable[conv] assign[=] binary_operation[name[old_phot] / name[ref_phot]]
call[name[_mlinfo], parameter[call[constant[ - Setting {:s} to {} (old value was {})].format, parameter[name[phot_kwd], name[ref_phot], name[old_phot]]]]]
call[name[whdu].header][name[phot_kwd]] assign[=] name[ref_phot]
call[name[_mlinfo], parameter[call[constant[ - Computed conversion factor for data: {}].format, parameter[name[conv]]]]]
if compare[name[conv] is_not constant[None]] begin[:]
for taget[name[aux_kwd]] in starred[name[paux]] begin[:]
variable[old_aux_phot] assign[=] call[name[whdu].header][name[aux_kwd]]
variable[new_aux_phot] assign[=] binary_operation[name[old_aux_phot] / name[conv]]
call[name[whdu].header][name[aux_kwd]] assign[=] name[new_aux_phot]
call[name[_mlinfo], parameter[call[constant[ - Setting {:s} to {} (old value was {})].format, parameter[name[aux_kwd], name[new_aux_phot], name[old_aux_phot]]]]]
if name[has_data] begin[:]
if compare[name[conv] is constant[None]] begin[:]
call[name[_mlinfo], parameter[call[constant[ * EXT: {}].format, parameter[name[ext]]]]]
if call[name[np].issubdtype, parameter[name[whdu].data.dtype, name[np].floating]] begin[:]
<ast.AugAssign object at 0x7da1b1c21780>
call[name[_mlinfo], parameter[call[constant[ - Data have been multiplied by {}].format, parameter[name[conv]]]]]
if <ast.BoolOp object at 0x7da1b1c22260> begin[:]
variable[eext] assign[=] tuple[[<ast.Name object at 0x7da1b1c217e0>, <ast.Subscript object at 0x7da1b1c23d60>]]
<ast.Try object at 0x7da1b1be7e80>
if <ast.BoolOp object at 0x7da1b1be7160> begin[:]
if call[name[np].issubdtype, parameter[name[whdu].data.dtype, name[np].floating]] begin[:]
<ast.AugAssign object at 0x7da1b1be7ee0>
call[name[_mlinfo], parameter[call[constant[ - Error array (ext={}) has been multiplied by {}].format, parameter[name[eext], name[conv]]]]]
call[name[f].release_all_images, parameter[]]
call[name[_mlinfo], parameter[constant[
Done.]]]
if name[readonly] begin[:]
call[name[_mlinfo], parameter[call[constant[
NOTE: '{:s}' was run in READONLY mode
and input image(s)' content WAS NOT MODIFIED.].format, parameter[name[__taskname__]]]]]
for taget[name[h]] in starred[name[_log].handlers] begin[:]
if <ast.BoolOp object at 0x7da1b1be5f30> begin[:]
call[name[h].close, parameter[]]
call[name[_log].removeHandler, parameter[name[h]]] | keyword[def] identifier[photeq] ( identifier[files] = literal[string] , identifier[sciext] = literal[string] , identifier[errext] = literal[string] ,
identifier[ref_phot] = keyword[None] , identifier[ref_phot_ext] = keyword[None] ,
identifier[phot_kwd] = literal[string] , identifier[aux_phot_kwd] = literal[string] ,
identifier[search_primary] = keyword[True] ,
identifier[readonly] = keyword[True] , identifier[clobber] = keyword[False] , identifier[logfile] = literal[string] ):
literal[string]
identifier[runtime_begin] = identifier[datetime] . identifier[now] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[files] , identifier[str] ):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
keyword[if] identifier[isinstance] ( identifier[logfile] , identifier[str] ):
keyword[for] identifier[h] keyword[in] identifier[_log] . identifier[handlers] :
keyword[if] identifier[h] keyword[is] keyword[not] identifier[_sh_log] keyword[and] identifier[isinstance] ( identifier[h] , identifier[logging] . identifier[FileHandler] ):
identifier[h] . identifier[close] ()
identifier[_log] . identifier[removeHandler] ( identifier[h] )
identifier[log_formatter] = identifier[logging] . identifier[Formatter] ( literal[string] )
identifier[log_file_handler] = identifier[logging] . identifier[FileHandler] ( identifier[logfile] )
identifier[log_file_handler] . identifier[setFormatter] ( identifier[log_formatter] )
identifier[_log] . identifier[addHandler] ( identifier[log_file_handler] )
keyword[elif] identifier[logfile] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[__taskname__] , identifier[runtime_begin] ))
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[__version__] , identifier[__version_date__] ))
keyword[if] identifier[sciext] keyword[is] keyword[None] :
identifier[sci_ext4parse] = literal[string]
identifier[ext2get] = keyword[None]
keyword[else] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[sciext] , identifier[str] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[sciext] = identifier[sciext] . identifier[strip] ()
keyword[if] identifier[sciext] . identifier[upper] ()== literal[string] :
identifier[sciext] = identifier[sciext] . identifier[upper] ()
identifier[ext2get] =( identifier[sciext] , literal[int] )
keyword[else] :
identifier[ext2get] =( identifier[sciext] , literal[string] )
identifier[sci_ext4parse] = identifier[ext2get]
keyword[if] identifier[errext] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[errext] , identifier[str] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[phot_kwd] , identifier[str] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[phot_kwd] = identifier[phot_kwd] . identifier[strip] (). identifier[upper] ()
keyword[if] identifier[ref_phot_ext] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] ( identifier[isinstance] ( identifier[ref_phot_ext] , identifier[int] ) keyword[or] identifier[isinstance] ( identifier[ref_phot_ext] , identifier[str] ) keyword[or] ( identifier[isinstance] ( identifier[ref_phot_ext] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[ref_phot_ext] )== literal[int] keyword[and] identifier[isinstance] ( identifier[ref_phot_ext] [ literal[int] ], identifier[str] ) keyword[and] identifier[isinstance] ( identifier[ref_phot_ext] [ literal[int] ], identifier[int] ))):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[ref_phot_ext] , identifier[str] ):
identifier[ref_phot_ext] =( identifier[ref_phot_ext] , literal[int] )
keyword[if] identifier[aux_phot_kwd] keyword[is] keyword[None] :
identifier[aux_phot_kwd] =[]
keyword[elif] identifier[isinstance] ( identifier[aux_phot_kwd] , identifier[str] ):
identifier[aux_phot_kwd] =[ identifier[aux_phot_kwd] . identifier[strip] (). identifier[upper] ()]
keyword[if] identifier[phot_kwd] == identifier[aux_phot_kwd] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[elif] identifier[hasattr] ( identifier[aux_phot_kwd] , literal[string] ):
keyword[if] keyword[not] identifier[all] ([ identifier[isinstance] ( identifier[phot] , identifier[str] ) keyword[for] identifier[phot] keyword[in] identifier[aux_phot_kwd] ]):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
identifier[aux_phot_kwd] =[ identifier[phot] . identifier[strip] (). identifier[upper] () keyword[for] identifier[phot] keyword[in] identifier[aux_phot_kwd] ]
keyword[if] identifier[ref_phot] keyword[in] identifier[aux_phot_kwd] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
identifier[fl] = identifier[parseat] . identifier[parse_cs_line] ( identifier[csline] = identifier[files] , identifier[default_ext] = identifier[sci_ext4parse] ,
identifier[im_fmode] = literal[string] keyword[if] identifier[readonly] keyword[else] literal[string] ,
identifier[clobber] = identifier[clobber] , identifier[fnamesOnly] = keyword[True] ,
identifier[doNotOpenDQ] = keyword[True] )
keyword[for] identifier[f] keyword[in] identifier[fl] :
keyword[if] identifier[f] . identifier[count] > literal[int] keyword[or] identifier[f] . identifier[fext] [ literal[int] ]!= identifier[sci_ext4parse] :
identifier[_mlwarn] ( literal[string]
literal[string]
. identifier[format] ( identifier[f] . identifier[image] , literal[string] keyword[if] identifier[sciext] keyword[is] keyword[None] keyword[else] literal[string] . identifier[format] ( identifier[utils] . identifier[ext2str] ( identifier[sciext] ,
identifier[default_extver] = keyword[None] ))))
identifier[flc] = identifier[fl] [:]
identifier[ref_hdu] = keyword[None]
identifier[ref_ext] = keyword[None]
identifier[ref_user] = keyword[True]
keyword[if] identifier[ref_phot] keyword[is] keyword[None] :
identifier[ref_user] = keyword[False]
keyword[for] identifier[f] keyword[in] identifier[flc] :
identifier[f] . identifier[convert2ImageRef] ()
identifier[pri_hdu] = identifier[f] . identifier[image] . identifier[hdu] [ literal[int] ]
keyword[if] identifier[ref_phot_ext] keyword[is] keyword[None] :
keyword[if] identifier[sciext] == literal[string] :
identifier[extnum] =[ literal[int] ]
keyword[else] :
identifier[extnum] = identifier[utils] . identifier[get_ext_list] ( identifier[f] . identifier[image] , identifier[sciext] )
identifier[is_pri_hdu] =[ identifier[f] . identifier[image] . identifier[hdu] [ identifier[ext] ] keyword[is] identifier[pri_hdu] keyword[for] identifier[ext] keyword[in] identifier[extnum] ]
keyword[if] identifier[search_primary] :
keyword[try] :
identifier[pri_index] = identifier[is_pri_hdu] . identifier[index] ( keyword[True] )
identifier[extnum] . identifier[insert] ( literal[int] , identifier[extnum] . identifier[pop] ( identifier[pri_index] ))
keyword[except] identifier[ValueError] :
identifier[extnum] . identifier[insert] ( literal[int] , literal[int] )
keyword[else] :
identifier[extnum] =[ identifier[ref_phot_ext] ]
keyword[for] identifier[ext] keyword[in] identifier[extnum] :
identifier[hdu] = identifier[f] . identifier[image] . identifier[hdu] [ identifier[ext] ]
keyword[if] identifier[phot_kwd] keyword[in] identifier[hdu] . identifier[header] :
identifier[ref_phot] = identifier[hdu] . identifier[header] [ identifier[phot_kwd] ]
identifier[ref_ext] = identifier[ext]
identifier[ref_hdu] = identifier[hdu]
keyword[break]
keyword[if] identifier[ref_phot] keyword[is] keyword[None] :
identifier[_mlwarn] ( literal[string]
literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[phot_kwd] , literal[string] keyword[if] identifier[sciext] keyword[is] keyword[None] keyword[else] literal[string] . identifier[format] ( identifier[utils] . identifier[ext2str] ( identifier[sciext] ,
identifier[default_extver] = keyword[None] )),
identifier[os] . identifier[path] . identifier[basename] ( identifier[f] . identifier[image] . identifier[original_fname] )))
identifier[f] . identifier[release_all_images] ()
identifier[fl] . identifier[remove] ( identifier[f] )
keyword[else] :
keyword[break]
keyword[if] identifier[ref_phot] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[phot_kwd] ))
identifier[aux_phot_kwd_list] = literal[string] . identifier[join] ( identifier[aux_phot_kwd] )
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[phot_kwd] ))
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[aux_phot_kwd_list] keyword[if] identifier[aux_phot_kwd_list] keyword[else] literal[string] ))
keyword[if] identifier[ref_user] :
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[phot_kwd] , identifier[ref_phot] ))
keyword[else] :
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[f] . identifier[image] . identifier[original_fname] ),
identifier[utils] . identifier[ext2str] ( identifier[ref_ext] )))
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[phot_kwd] , identifier[ref_phot] ))
keyword[for] identifier[f] keyword[in] identifier[fl] :
keyword[if] identifier[f] . identifier[fnamesOnly] :
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[f] . identifier[image] ))
identifier[f] . identifier[convert2ImageRef] ()
keyword[else] :
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[f] . identifier[image] . identifier[original_fname] ))
identifier[pri_conv] = keyword[None]
keyword[if] identifier[search_primary] :
identifier[whdu] = identifier[f] . identifier[image] . identifier[hdu] [ literal[int] ]
keyword[if] identifier[phot_kwd] keyword[in] identifier[whdu] . identifier[header] :
identifier[_mlinfo] ( literal[string] )
keyword[if] identifier[whdu] keyword[is] identifier[ref_hdu] :
identifier[pri_conv] = literal[int]
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[phot_kwd] , identifier[whdu] . identifier[header] [ identifier[phot_kwd] ]))
identifier[_mlinfo] ( literal[string]
literal[string] . identifier[format] ( identifier[pri_conv] ))
keyword[else] :
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[phot_kwd] ))
identifier[pri_conv] = identifier[whdu] . identifier[header] [ identifier[phot_kwd] ]/ identifier[ref_phot]
identifier[_mlinfo] ( literal[string]
literal[string]
. identifier[format] ( identifier[phot_kwd] , identifier[ref_phot] , identifier[whdu] . identifier[header] [ identifier[phot_kwd] ]))
identifier[_mlinfo] ( literal[string]
literal[string] . identifier[format] ( identifier[pri_conv] ))
identifier[whdu] . identifier[header] [ identifier[phot_kwd] ]= identifier[ref_phot]
keyword[if] identifier[pri_conv] keyword[is] keyword[not] keyword[None] keyword[and] identifier[whdu] keyword[is] keyword[not] identifier[ref_hdu] :
keyword[for] identifier[aux_kwd] keyword[in] identifier[aux_phot_kwd] :
keyword[if] identifier[aux_kwd] keyword[in] identifier[whdu] . identifier[header] :
identifier[old_aux_phot] = identifier[whdu] . identifier[header] [ identifier[aux_kwd] ]
identifier[new_aux_phot] = identifier[old_aux_phot] / identifier[pri_conv]
identifier[whdu] . identifier[header] [ identifier[aux_kwd] ]= identifier[new_aux_phot]
identifier[_mlinfo] ( literal[string]
literal[string]
. identifier[format] ( identifier[aux_kwd] , identifier[new_aux_phot] , identifier[old_aux_phot] ))
keyword[if] identifier[sciext] == literal[string] keyword[and] identifier[pri_conv] keyword[is] keyword[not] keyword[None] :
identifier[has_data] =( identifier[hasattr] ( identifier[whdu] , literal[string] ) keyword[and]
identifier[whdu] . identifier[data] keyword[is] keyword[not] keyword[None] )
keyword[if] identifier[has_data] :
keyword[if] identifier[np] . identifier[issubdtype] ( identifier[whdu] . identifier[data] . identifier[dtype] , identifier[np] . identifier[floating] ):
identifier[whdu] . identifier[data] *= identifier[pri_conv]
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[pri_conv] ))
keyword[else] :
identifier[_mlwarn] ( literal[string]
literal[string] )
keyword[if] identifier[errext] keyword[is] keyword[not] keyword[None] :
identifier[eext] =( identifier[errext] , literal[int] )
keyword[try] :
identifier[whdu] = identifier[f] . identifier[image] . identifier[hdu] [ identifier[eext] ]
keyword[except] identifier[KeyError] :
identifier[_mlwarn] ( literal[string]
. identifier[format] ( identifier[utils] . identifier[ext2str] ( identifier[eext] )))
identifier[f] . identifier[release_all_images] ()
keyword[continue]
keyword[if] identifier[hasattr] ( identifier[whdu] , literal[string] ) keyword[and] identifier[whdu] . identifier[data] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[np] . identifier[issubdtype] ( identifier[whdu] . identifier[data] . identifier[dtype] , identifier[np] . identifier[floating] ):
identifier[whdu] . identifier[data] *= identifier[pri_conv]
identifier[_mlinfo] ( literal[string]
literal[string] . identifier[format] ( identifier[eext] , identifier[pri_conv] ))
keyword[else] :
identifier[_mlinfo] ( literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[utils] . identifier[ext2str] ( identifier[ext] )))
identifier[f] . identifier[release_all_images] ()
keyword[continue]
identifier[extnum] = identifier[utils] . identifier[get_ext_list] ( identifier[f] . identifier[image] , identifier[sciext] )
keyword[for] identifier[ext] keyword[in] identifier[extnum] :
identifier[whdu] = identifier[f] . identifier[image] . identifier[hdu] [ identifier[ext] ]
identifier[conv] = keyword[None]
keyword[if] identifier[whdu] keyword[is] identifier[ref_hdu] :
identifier[_mlinfo] ( literal[string]
literal[string]
. identifier[format] ( identifier[ext] ))
keyword[continue]
identifier[has_data] =( identifier[hasattr] ( identifier[whdu] , literal[string] ) keyword[and]
identifier[whdu] . identifier[data] keyword[is] keyword[not] keyword[None] )
keyword[if] identifier[has_data] keyword[and] keyword[not] identifier[np] . identifier[issubdtype] ( identifier[whdu] . identifier[data] . identifier[dtype] , identifier[np] . identifier[floating] ):
identifier[_mlinfo] ( literal[string]
literal[string] . identifier[format] ( identifier[ext] ))
identifier[paux] =[ identifier[aux_kwd] keyword[for] identifier[aux_kwd] keyword[in] identifier[aux_phot_kwd] keyword[if] identifier[aux_kwd] keyword[in] identifier[whdu] . identifier[header] ]
keyword[if] identifier[phot_kwd] keyword[in] identifier[whdu] . identifier[header] :
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[ext] ))
identifier[old_phot] = identifier[whdu] . identifier[header] [ identifier[phot_kwd] ]
identifier[conv] = identifier[old_phot] / identifier[ref_phot]
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[phot_kwd] , identifier[ref_phot] , identifier[old_phot] ))
identifier[whdu] . identifier[header] [ identifier[phot_kwd] ]= identifier[ref_phot]
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[conv] ))
keyword[elif] identifier[pri_conv] keyword[is] keyword[None] :
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[ext] ))
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[phot_kwd] ))
keyword[continue]
keyword[else] :
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[ext] ))
identifier[_mlinfo] ( literal[string]
literal[string]
. identifier[format] ( identifier[phot_kwd] , identifier[pri_conv] ))
identifier[conv] = identifier[pri_conv]
keyword[if] identifier[conv] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[aux_kwd] keyword[in] identifier[paux] :
identifier[old_aux_phot] = identifier[whdu] . identifier[header] [ identifier[aux_kwd] ]
identifier[new_aux_phot] = identifier[old_aux_phot] / identifier[conv]
identifier[whdu] . identifier[header] [ identifier[aux_kwd] ]= identifier[new_aux_phot]
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[aux_kwd] , identifier[new_aux_phot] , identifier[old_aux_phot] ))
keyword[if] identifier[has_data] :
keyword[if] identifier[conv] keyword[is] keyword[None] :
identifier[_mlinfo] ( literal[string] . identifier[format] ( identifier[ext] ))
keyword[if] identifier[np] . identifier[issubdtype] ( identifier[whdu] . identifier[data] . identifier[dtype] , identifier[np] . identifier[floating] ):
identifier[whdu] . identifier[data] *= identifier[conv]
identifier[_mlinfo] ( literal[string]
. identifier[format] ( identifier[conv] ))
keyword[else] :
identifier[_mlinfo] ( literal[string]
literal[string] )
keyword[if] identifier[errext] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[ext] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[ext] )== literal[int] :
identifier[eext] =( identifier[errext] , identifier[ext] [ literal[int] ])
keyword[try] :
identifier[whdu] = identifier[f] . identifier[image] . identifier[hdu] [ identifier[eext] ]
keyword[except] identifier[KeyError] :
keyword[continue]
keyword[if] identifier[hasattr] ( identifier[whdu] , literal[string] ) keyword[and] identifier[whdu] . identifier[data] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[np] . identifier[issubdtype] ( identifier[whdu] . identifier[data] . identifier[dtype] , identifier[np] . identifier[floating] ):
identifier[whdu] . identifier[data] *= identifier[conv]
identifier[_mlinfo] ( literal[string]
literal[string] . identifier[format] ( identifier[eext] , identifier[conv] ))
keyword[else] :
identifier[_mlinfo] ( literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[utils] . identifier[ext2str] ( identifier[ext] )))
identifier[f] . identifier[release_all_images] ()
identifier[_mlinfo] ( literal[string] )
keyword[if] identifier[readonly] :
identifier[_mlinfo] ( literal[string]
literal[string]
. identifier[format] ( identifier[__taskname__] ))
keyword[for] identifier[h] keyword[in] identifier[_log] . identifier[handlers] :
keyword[if] identifier[h] keyword[is] keyword[not] identifier[_sh_log] keyword[and] identifier[isinstance] ( identifier[h] , identifier[logging] . identifier[FileHandler] ):
identifier[h] . identifier[close] ()
identifier[_log] . identifier[removeHandler] ( identifier[h] ) | def photeq(files='*_flt.fits', sciext='SCI', errext='ERR', ref_phot=None, ref_phot_ext=None, phot_kwd='PHOTFLAM', aux_phot_kwd='PHOTFNU', search_primary=True, readonly=True, clobber=False, logfile='photeq.log'):
"""
Adjust data values of images by equalizing each chip's PHOTFLAM value
to a single common value so that all chips can be treated equally
by ``AstroDrizzle``.
Parameters
----------
files : str (Default = ``'*_flt.fits'``)
A string containing one of the following:
* a comma-separated list of valid science image file names,
e.g.: ``'j1234567q_flt.fits, j1234568q_flt.fits'``;
* an @-file name, e.g., ``'@files_to_match.txt'``. See notes
section for details on the format of the @-files.
.. note::
**Valid science image file names** are:
* file names of existing FITS, GEIS, or WAIVER FITS files;
* partial file names containing wildcard characters, e.g.,
``'*_flt.fits'``;
* Association (ASN) tables (must have ``_asn``, or ``_asc``
suffix), e.g., ``'j12345670_asn.fits'``.
sciext : str (Default = 'SCI')
Extension *name* of extensions whose data and/or headers should
be corrected.
errext : str (Default = 'ERR')
Extension *name* of the extensions containing corresponding error
arrays. Error arrays are corrected in the same way as science data.
ref_phot : float, None (Default = None)
A number indicating the new value of PHOTFLAM or PHOTFNU
(set by 'phot_kwd') to which the data should be adjusted.
ref_phot_ext : int, str, tuple, None (Default = None)
Extension from which the `photeq` should get the reference photometric
value specified by the `phot_kwd` parameter. This parameter is ignored
if `ref_phot` **is not** `None`. When `ref_phot_ext` is `None`, then
the reference inverse sensitivity value will be picked from the
first `sciext` of the first input image containing `phot_kwd`.
phot_kwd : str (Default = 'PHOTFLAM')
Specifies the primary keyword which contains inverse sensitivity
(e.g., PHOTFLAM). It is used to compute conversion factors by
which data should be rescaled.
aux_phot_kwd : str, None, list of str (Default = 'PHOTFNU')
Same as `phot_kwd` but describes *other* photometric keyword(s)
that should be corrected by inverse of the scale factor used to correct
data. These keywords are *not* used to compute conversion factors.
Multiple keywords can be specified as a Python list of strings:
``['PHOTFNU', 'PHOTOHMY']``.
.. note::
If specifying multiple secondary photometric keywords in the TEAL
interface, use a comma-separated list of keywords.
search_primary : bool (Default = True)
Specifies whether to first search the primary header for the
presence of `phot_kwd` keyword and compute conversion factor based on
that value. This is (partially) ignored when `ref_phot` is not `None` in
the sense that the value specified by `ref_phot` will be used as the
reference *but* in all images primary will be searched for `phot_kwd`
and `aux_phot_kwd` and those values will be corrected
(if ``search_primary=True``).
readonly : bool (Default = True)
If `True`, `photeq` will not modify input files (nevertheless, it will
convert input GEIS or WAVERED FITS files to MEF and could overwrite
existing MEF files if `clobber` is set to `True`).
The (console or log file) output however will be identical to the case
when ``readonly=False`` and it can be examined before applying these
changes to input files.
clobber : bool (Default = False)
Overwrite existing MEF files when converting input WAVERED FITS or GEIS
to MEF.
logfile : str, None (Default = 'photeq.log')
File name of the log file.
Notes
-----
By default, `photeq` will search for the first inverse sensitivity
value (given by the header keyword specified by the `phot_kwd` parameter,
e.g., PHOTFLAM or PHOTFNU) found in the input images and it will equalize
all other images to this reference value.
It is possible to tell `photeq` to look for the reference inverse
sensitivity value only in a specific extension of input images, e.g.: 3,
('sci',3), etc. This can be done by setting `ref_phot_ext` to a specific
extension. This may be useful, for example, for WFPC2 images: WF3 chip was
one of the better calibrated chips, and so, if one prefers to have
inverse sensitivities equalized to the inverse sensitivity of the WF3 chip,
one can set ``ref_phot_ext=3``.
Alternatively, one can provide their own reference inverse sensitivity
value to which all other images should be "equalized" through the
parameter `ref_phot`.
.. note::
Default parameter values (except for `files`, `readonly`, and `clobber`)
should be acceptable for most HST images.
.. warning::
If images are intended to be used with ``AstroDrizzle``, it is
recommended that sky background measurement be performed on "equalized"
images as the `photeq` is not aware of sky user keyword in the image
headers and thus it cannot correct sky values already recorded in the
headers.
Examples
--------
#. In most cases the default parameters should suffice:
>>> from drizzlepac import photeq
>>> photeq.photeq(files='*_flt.fits', readonly=False)
#. If the re-calibration needs to be done on PHOTFNU rather than
PHOTFLAM, then:
>>> photeq.photeq(files='*_flt.fits', ref_phot='PHOTFNU',
... aux_phot_kwd='PHOTFLAM')
#. If for WFPC2 data one desires that PHOTFLAM from WF3 be used as the
reference in WFPC2 images, then:
>>> photeq.photeq(files='*_flt.fits', ref_phot_ext=3) # or ('sci',3)
"""
# Time it
runtime_begin = datetime.now()
# check that input file name is a string:
if not isinstance(files, str):
raise TypeError("Argument 'files' must be a comma-separated list of file names") # depends on [control=['if'], data=[]]
# Set-up log files:
if isinstance(logfile, str):
# first, in case there are any "leftover" file handlers,
# close and remove them:
for h in _log.handlers:
if h is not _sh_log and isinstance(h, logging.FileHandler):
h.close()
_log.removeHandler(h) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['h']]
# create file handler:
log_formatter = logging.Formatter('[%(levelname)s:] %(message)s')
log_file_handler = logging.FileHandler(logfile)
log_file_handler.setFormatter(log_formatter)
# add log_file_handler to logger
_log.addHandler(log_file_handler) # depends on [control=['if'], data=[]]
elif logfile is not None:
raise TypeError("Unsupported 'logfile' type") # depends on [control=['if'], data=[]]
# BEGIN:
_mlinfo('***** {0} started on {1}'.format(__taskname__, runtime_begin))
_mlinfo(' Version {0} ({1})'.format(__version__, __version_date__))
# check that extension names are strings (or None for error ext):
if sciext is None:
sci_ext4parse = '*'
ext2get = None # depends on [control=['if'], data=[]]
else:
if not isinstance(sciext, str):
raise TypeError("Argument 'sciext' must be a string or None") # depends on [control=['if'], data=[]]
sciext = sciext.strip()
if sciext.upper() == 'PRIMARY':
sciext = sciext.upper()
ext2get = (sciext, 1) # depends on [control=['if'], data=[]]
else:
ext2get = (sciext, '*')
sci_ext4parse = ext2get
if errext is not None and (not isinstance(errext, str)):
raise TypeError("Argument 'errext' must be a string or None") # depends on [control=['if'], data=[]]
# check that phot_kwd is supported:
if not isinstance(phot_kwd, str):
raise TypeError("Argument 'phot_kwd' must be a string") # depends on [control=['if'], data=[]]
phot_kwd = phot_kwd.strip().upper()
# check that ref_phot_ext has correct type:
if ref_phot_ext is not None and (not (isinstance(ref_phot_ext, int) or isinstance(ref_phot_ext, str) or (isinstance(ref_phot_ext, tuple) and len(ref_phot_ext) == 2 and isinstance(ref_phot_ext[0], str) and isinstance(ref_phot_ext[1], int)))):
raise TypeError("Unsupported 'ref_phot_ext' type") # depends on [control=['if'], data=[]]
if isinstance(ref_phot_ext, str):
ref_phot_ext = (ref_phot_ext, 1) # depends on [control=['if'], data=[]]
if aux_phot_kwd is None:
aux_phot_kwd = [] # depends on [control=['if'], data=['aux_phot_kwd']]
elif isinstance(aux_phot_kwd, str):
aux_phot_kwd = [aux_phot_kwd.strip().upper()]
if phot_kwd == aux_phot_kwd:
raise ValueError("Auxiliary photometric keyword must be different from the main photometric keyword 'phot_kwd'.") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif hasattr(aux_phot_kwd, '__iter__'):
if not all([isinstance(phot, str) for phot in aux_phot_kwd]):
raise TypeError("Argument 'aux_phot_kwd' must be a string, list of strings, or None") # depends on [control=['if'], data=[]]
aux_phot_kwd = [phot.strip().upper() for phot in aux_phot_kwd]
if ref_phot in aux_phot_kwd:
raise ValueError("Auxiliary photometric keyword(s) must be different from the main photometric keyword 'phot_kwd'.") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise TypeError("Argument 'aux_phot_kwd' must be a string, list of strings, or None")
# read input file list:
fl = parseat.parse_cs_line(csline=files, default_ext=sci_ext4parse, im_fmode='readonly' if readonly else 'update', clobber=clobber, fnamesOnly=True, doNotOpenDQ=True)
# check if user supplied file extensions, set them to the sciext,
# and warn that they will be ignored:
for f in fl:
if f.count > 1 or f.fext[0] != sci_ext4parse:
_mlwarn('WARNING: Extension specifications for file {:s} will be ignored. Using all {:s} extensions instead.'.format(f.image, 'image-like' if sciext is None else '{:s}'.format(utils.ext2str(sciext, default_extver=None)))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
# find the reference PHOTFLAM/PHOTNU:
flc = fl[:]
ref_hdu = None
ref_ext = None
ref_user = True
if ref_phot is None:
ref_user = False
for f in flc:
f.convert2ImageRef()
# get primary hdu:
pri_hdu = f.image.hdu[0]
# find all valid extensions:
if ref_phot_ext is None:
if sciext == 'PRIMARY':
extnum = [0] # depends on [control=['if'], data=[]]
else:
extnum = utils.get_ext_list(f.image, sciext)
is_pri_hdu = [f.image.hdu[ext] is pri_hdu for ext in extnum]
# if necessary, add primary header to the hdu list:
if search_primary:
try:
pri_index = is_pri_hdu.index(True)
extnum.insert(0, extnum.pop(pri_index)) # depends on [control=['try'], data=[]]
except ValueError:
extnum.insert(0, 0) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
extnum = [ref_phot_ext]
for ext in extnum:
hdu = f.image.hdu[ext]
if phot_kwd in hdu.header:
ref_phot = hdu.header[phot_kwd]
ref_ext = ext
ref_hdu = hdu
break # depends on [control=['if'], data=['phot_kwd']] # depends on [control=['for'], data=['ext']]
if ref_phot is None:
_mlwarn("WARNING: Could not find specified inverse sensitivity keyword '{:s}'\n in any of the {} extensions of file '{}'.\n This input file will be ignored.".format(phot_kwd, 'image-like' if sciext is None else '{:s}'.format(utils.ext2str(sciext, default_extver=None)), os.path.basename(f.image.original_fname)))
f.release_all_images()
fl.remove(f) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=['f']] # depends on [control=['if'], data=['ref_phot']]
if ref_phot is None:
raise RuntimeError("Could not find the inverse sensitivity keyword '{:s}' in the specified headers of the input image(s).\nCannot continue.".format(phot_kwd)) # depends on [control=['if'], data=[]]
aux_phot_kwd_list = ','.join(aux_phot_kwd)
_mlinfo('\nPRIMARY PHOTOMETRIC KEYWORD: {:s}'.format(phot_kwd))
_mlinfo('SECONDARY PHOTOMETRIC KEYWORD(S): {:s}'.format(aux_phot_kwd_list if aux_phot_kwd_list else 'None'))
if ref_user:
_mlinfo("REFERENCE VALUE PROVIDED BY USER: '{:s}'={}\n".format(phot_kwd, ref_phot)) # depends on [control=['if'], data=[]]
else:
_mlinfo("REFERENCE VALUE FROM FILE: '{:s}[{:s}]'\n".format(os.path.basename(f.image.original_fname), utils.ext2str(ref_ext)))
_mlinfo("REFERENCE '{:s}' VALUE IS: {}".format(phot_kwd, ref_phot))
# equalize PHOTFLAM/PHOTNU
for f in fl:
# open the file if necessary:
if f.fnamesOnly:
_mlinfo("\nProcessing file '{:s}'".format(f.image))
f.convert2ImageRef() # depends on [control=['if'], data=[]]
else:
_mlinfo("\nProcessing file '{:s}'".format(f.image.original_fname))
# first, see if photflam is in the primary header and save this value:
pri_conv = None
if search_primary:
whdu = f.image.hdu[0]
if phot_kwd in whdu.header:
_mlinfo(' * Primary header:')
if whdu is ref_hdu:
pri_conv = 1.0
_mlinfo(" - '{}' = {} found in the primary header.".format(phot_kwd, whdu.header[phot_kwd]))
_mlinfo(' - Data conversion factor based on primary header: {}'.format(pri_conv)) # depends on [control=['if'], data=['whdu']]
else:
_mlinfo(" - '{}' found in the primary header.".format(phot_kwd))
pri_conv = whdu.header[phot_kwd] / ref_phot
_mlinfo(' - Setting {:s} in the primary header to {} (old value was {})'.format(phot_kwd, ref_phot, whdu.header[phot_kwd]))
_mlinfo(' - Data conversion factor based on primary header: {}'.format(pri_conv))
whdu.header[phot_kwd] = ref_phot # depends on [control=['if'], data=['phot_kwd']]
# correct the "other" photometric keyword, if present:
if pri_conv is not None and whdu is not ref_hdu:
for aux_kwd in aux_phot_kwd:
if aux_kwd in whdu.header:
old_aux_phot = whdu.header[aux_kwd]
new_aux_phot = old_aux_phot / pri_conv
whdu.header[aux_kwd] = new_aux_phot
_mlinfo(' - Setting {:s} in the primary header to {} (old value was {})'.format(aux_kwd, new_aux_phot, old_aux_phot)) # depends on [control=['if'], data=['aux_kwd']] # depends on [control=['for'], data=['aux_kwd']] # depends on [control=['if'], data=[]]
# process data and error arrays when 'sciext' was specifically set to
# 'PRIMARY':
if sciext == 'PRIMARY' and pri_conv is not None:
has_data = hasattr(whdu, 'data') and whdu.data is not None
# correct data:
if has_data:
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= pri_conv
_mlinfo(' - Data have been multiplied by {}'.format(pri_conv)) # depends on [control=['if'], data=[]]
else:
_mlwarn('WARNING: Data not converted because it is of non-floating point type.') # depends on [control=['if'], data=[]]
# correct error array:
if errext is not None:
eext = (errext, 1)
try:
whdu = f.image.hdu[eext] # depends on [control=['try'], data=[]]
except KeyError:
_mlwarn(' - WARNING: Error extension {:s} not found.'.format(utils.ext2str(eext)))
f.release_all_images()
continue # depends on [control=['except'], data=[]]
if hasattr(whdu, 'data') and whdu.data is not None:
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= pri_conv
_mlinfo(' - Error array (ext={}) has been multiplied by {}'.format(eext, pri_conv)) # depends on [control=['if'], data=[]]
else:
_mlinfo(' - Error array in extension {:s} contains non-floating point data.\n Skipping this extension'.format(utils.ext2str(ext))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['errext']]
f.release_all_images()
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# find all valid extensions:
extnum = utils.get_ext_list(f.image, sciext)
for ext in extnum:
whdu = f.image.hdu[ext]
conv = None
if whdu is ref_hdu:
_mlinfo(' * EXT: {} - This is the "reference" extension.\n Nothing to do. Skipping this extension...'.format(ext))
continue # depends on [control=['if'], data=[]]
has_data = hasattr(whdu, 'data') and whdu.data is not None
if has_data and (not np.issubdtype(whdu.data.dtype, np.floating)):
_mlinfo(' * EXT: {} contains non-floating point data. Skipping this extension'.format(ext)) # depends on [control=['if'], data=[]]
# find all auxiliary photometric keywords present in the header:
paux = [aux_kwd for aux_kwd in aux_phot_kwd if aux_kwd in whdu.header]
if phot_kwd in whdu.header:
_mlinfo(' * EXT: {}'.format(ext))
old_phot = whdu.header[phot_kwd]
conv = old_phot / ref_phot
_mlinfo(' - Setting {:s} to {} (old value was {})'.format(phot_kwd, ref_phot, old_phot))
whdu.header[phot_kwd] = ref_phot
_mlinfo(' - Computed conversion factor for data: {}'.format(conv)) # depends on [control=['if'], data=['phot_kwd']]
elif pri_conv is None:
_mlinfo(' * EXT: {}'.format(ext))
_mlinfo(" - '{:s} not found. Skipping this extension...".format(phot_kwd))
continue # depends on [control=['if'], data=[]]
else:
_mlinfo(' * EXT: {}'.format(ext))
# if paux:
# print("ERROR: Primary photometric keyword ('{:s}') is "
# "missing but\n the secondary keywords ('{:s}') "
# "are present. This extension cannot be processed."
# .format(phot_kwd, ','.join(paux)))
# continue
_mlinfo(" - '{:s} not found. Using conversion factor based\n on the primary header: {}".format(phot_kwd, pri_conv))
conv = pri_conv
# correct the "other" photometric keyword, if present:
if conv is not None:
for aux_kwd in paux:
old_aux_phot = whdu.header[aux_kwd]
new_aux_phot = old_aux_phot / conv
whdu.header[aux_kwd] = new_aux_phot
_mlinfo(' - Setting {:s} to {} (old value was {})'.format(aux_kwd, new_aux_phot, old_aux_phot)) # depends on [control=['for'], data=['aux_kwd']] # depends on [control=['if'], data=['conv']]
# correct data:
if has_data:
if conv is None:
_mlinfo(' * EXT: {}'.format(ext)) # depends on [control=['if'], data=[]]
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= conv
_mlinfo(' - Data have been multiplied by {}'.format(conv)) # depends on [control=['if'], data=[]]
else:
_mlinfo('WARNING: Non-floating point data. Data cannot be re-scaled.') # depends on [control=['if'], data=[]]
# correct error array:
if errext is not None and isinstance(ext, tuple) and (len(ext) == 2):
eext = (errext, ext[1])
try:
whdu = f.image.hdu[eext] # depends on [control=['try'], data=[]]
except KeyError:
continue # depends on [control=['except'], data=[]]
if hasattr(whdu, 'data') and whdu.data is not None:
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= conv
_mlinfo(' - Error array (ext={}) has been multiplied by {}'.format(eext, conv)) # depends on [control=['if'], data=[]]
else:
_mlinfo(' - Error array in extension {:s} contains non-floating point data.\n Skipping this extension'.format(utils.ext2str(ext))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ext']]
f.release_all_images() # depends on [control=['for'], data=['f']]
_mlinfo('\nDone.')
if readonly:
_mlinfo("\nNOTE: '{:s}' was run in READONLY mode\n and input image(s)' content WAS NOT MODIFIED.".format(__taskname__)) # depends on [control=['if'], data=[]]
# close all log file handlers:
for h in _log.handlers:
if h is not _sh_log and isinstance(h, logging.FileHandler):
h.close()
_log.removeHandler(h) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['h']] |
def _sync(original, processed):
"""
Add output to data if run sucessfully.
For now only macs2 is available, so no need
to consider multiple callers.
"""
for original_sample in original:
original_sample[0]["peaks_files"] = {}
for process_sample in processed:
if dd.get_sample_name(original_sample[0]) == dd.get_sample_name(process_sample[0]):
for key in ["peaks_files"]:
if process_sample[0].get(key):
original_sample[0][key] = process_sample[0][key]
return original | def function[_sync, parameter[original, processed]]:
constant[
Add output to data if run sucessfully.
For now only macs2 is available, so no need
to consider multiple callers.
]
for taget[name[original_sample]] in starred[name[original]] begin[:]
call[call[name[original_sample]][constant[0]]][constant[peaks_files]] assign[=] dictionary[[], []]
for taget[name[process_sample]] in starred[name[processed]] begin[:]
if compare[call[name[dd].get_sample_name, parameter[call[name[original_sample]][constant[0]]]] equal[==] call[name[dd].get_sample_name, parameter[call[name[process_sample]][constant[0]]]]] begin[:]
for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da1b18d8eb0>]]] begin[:]
if call[call[name[process_sample]][constant[0]].get, parameter[name[key]]] begin[:]
call[call[name[original_sample]][constant[0]]][name[key]] assign[=] call[call[name[process_sample]][constant[0]]][name[key]]
return[name[original]] | keyword[def] identifier[_sync] ( identifier[original] , identifier[processed] ):
literal[string]
keyword[for] identifier[original_sample] keyword[in] identifier[original] :
identifier[original_sample] [ literal[int] ][ literal[string] ]={}
keyword[for] identifier[process_sample] keyword[in] identifier[processed] :
keyword[if] identifier[dd] . identifier[get_sample_name] ( identifier[original_sample] [ literal[int] ])== identifier[dd] . identifier[get_sample_name] ( identifier[process_sample] [ literal[int] ]):
keyword[for] identifier[key] keyword[in] [ literal[string] ]:
keyword[if] identifier[process_sample] [ literal[int] ]. identifier[get] ( identifier[key] ):
identifier[original_sample] [ literal[int] ][ identifier[key] ]= identifier[process_sample] [ literal[int] ][ identifier[key] ]
keyword[return] identifier[original] | def _sync(original, processed):
"""
Add output to data if run sucessfully.
For now only macs2 is available, so no need
to consider multiple callers.
"""
for original_sample in original:
original_sample[0]['peaks_files'] = {}
for process_sample in processed:
if dd.get_sample_name(original_sample[0]) == dd.get_sample_name(process_sample[0]):
for key in ['peaks_files']:
if process_sample[0].get(key):
original_sample[0][key] = process_sample[0][key] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['process_sample']] # depends on [control=['for'], data=['original_sample']]
return original |
def normalize_choices(db_values, field_name, app=DEFAULT_APP, model_name='', human_readable=True, none_value='Null',
                      blank_value='Unknown', missing_value='Unknown DB Code'):
    '''Output the human-readable strings associated with the list of database values for a model field.

    Uses the translation dictionary `CHOICES_<FIELD_NAME>` attribute for the given `model_name`.
    In addition, translate `None` into 'Null', or whatever string is indicated by `none_value`.

    :param db_values: mapping (or dict-convertible sequence of pairs) holding the field values
    :param field_name: key in `db_values` whose list of values is normalized in place
    :param app: app label string or app module providing the `CHOICES_*` dicts
    :param model_name: unused; kept for backward compatibility
    :param human_readable: must be True; only code-to-name translation is implemented
    :param none_value: replacement for `None`/'None' values
    :param blank_value: replacement for values that normalize to an empty name
    :param missing_value: name used when a code is absent from the choices dict
    :returns: `db_values` with `db_values[field_name]` entries replaced by human-readable
        strings, or None when `db_values` is falsy
    :raises NotImplementedError: if `db_values` cannot be converted to a dict, or
        `human_readable` is False
    '''
    if app and isinstance(app, basestring):
        app = get_app(app)
    if not db_values:
        return
    try:
        db_values = dict(db_values)
    except (TypeError, ValueError):
        # Lists/querysets from django `.values().aggregate()` are not dict-convertible.
        # Fixed: the original raised `NotImplemented`, which is not an exception class
        # (calling it raises TypeError); `NotImplementedError` is the intended type.
        raise NotImplementedError("This function can only handle objects that can be converted to a dict, not lists or querysets returned by django `.values().aggregate()`.")
    if field_name not in db_values:
        return db_values
    if human_readable:
        for i, db_value in enumerate(db_values[field_name]):
            if db_value in (None, 'None') or app in (None, 'None'):
                db_values[field_name][i] = none_value
                continue
            if isinstance(db_value, basestring):
                normalized_code = str(db_value).strip().upper()
                # the app is actually the models.py module, NOT the app_name package
                # so don't look in app.models, you'll only find django.db.models there (app_name.models.models)
                choices = getattr(app, 'CHOICES_%s' % field_name.upper(), [])
                normalized_name = None
                if choices:
                    normalized_name = str(choices.get(normalized_code, missing_value)).strip()
                elif normalized_code:
                    normalized_name = 'DB Code: "%s"' % normalized_code
                db_values[field_name][i] = normalized_name or blank_value
    else:
        raise NotImplementedError("This function can only convert database choices to human-readable strings.")
    return db_values
constant[Output the human-readable strings associated with the list of database values for a model field.
Uses the translation dictionary `CHOICES_<FIELD_NAME>` attribute for the given `model_name`.
In addition, translate `None` into 'Null', or whatever string is indicated by `none_value`.
]
if <ast.BoolOp object at 0x7da1b2346860> begin[:]
variable[app] assign[=] call[name[get_app], parameter[name[app]]]
if <ast.UnaryOp object at 0x7da1b2347040> begin[:]
return[None]
<ast.Try object at 0x7da1b23440d0>
if <ast.UnaryOp object at 0x7da1b2347400> begin[:]
return[name[db_values]]
if name[human_readable] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b2346e30>, <ast.Name object at 0x7da1b23463b0>]]] in starred[call[name[enumerate], parameter[call[name[db_values]][name[field_name]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b2345000> begin[:]
call[call[name[db_values]][name[field_name]]][name[i]] assign[=] name[none_value]
continue
if call[name[isinstance], parameter[name[db_value], name[basestring]]] begin[:]
variable[normalized_code] assign[=] call[call[call[name[str], parameter[name[db_value]]].strip, parameter[]].upper, parameter[]]
variable[choices] assign[=] call[name[getattr], parameter[name[app], binary_operation[constant[CHOICES_%s] <ast.Mod object at 0x7da2590d6920> call[name[field_name].upper, parameter[]]], list[[]]]]
variable[normalized_name] assign[=] constant[None]
if name[choices] begin[:]
variable[normalized_name] assign[=] call[call[name[str], parameter[call[name[choices].get, parameter[name[normalized_code], name[missing_value]]]]].strip, parameter[]]
call[call[name[db_values]][name[field_name]]][name[i]] assign[=] <ast.BoolOp object at 0x7da1b2344dc0>
return[name[db_values]] | keyword[def] identifier[normalize_choices] ( identifier[db_values] , identifier[field_name] , identifier[app] = identifier[DEFAULT_APP] , identifier[model_name] = literal[string] , identifier[human_readable] = keyword[True] , identifier[none_value] = literal[string] ,
identifier[blank_value] = literal[string] , identifier[missing_value] = literal[string] ):
literal[string]
keyword[if] identifier[app] keyword[and] identifier[isinstance] ( identifier[app] , identifier[basestring] ):
identifier[app] = identifier[get_app] ( identifier[app] )
keyword[if] keyword[not] identifier[db_values] :
keyword[return]
keyword[try] :
identifier[db_values] = identifier[dict] ( identifier[db_values] )
keyword[except] :
keyword[raise] identifier[NotImplemented] ( literal[string] )
keyword[if] keyword[not] identifier[field_name] keyword[in] identifier[db_values] :
keyword[return] identifier[db_values]
keyword[if] identifier[human_readable] :
keyword[for] identifier[i] , identifier[db_value] keyword[in] identifier[enumerate] ( identifier[db_values] [ identifier[field_name] ]):
keyword[if] identifier[db_value] keyword[in] ( keyword[None] , literal[string] ) keyword[or] identifier[app] keyword[in] ( keyword[None] , literal[string] ):
identifier[db_values] [ identifier[field_name] ][ identifier[i] ]= identifier[none_value]
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[db_value] , identifier[basestring] ):
identifier[normalized_code] = identifier[str] ( identifier[db_value] ). identifier[strip] (). identifier[upper] ()
identifier[choices] = identifier[getattr] ( identifier[app] , literal[string] % identifier[field_name] . identifier[upper] (),[])
identifier[normalized_name] = keyword[None]
keyword[if] identifier[choices] :
identifier[normalized_name] = identifier[str] ( identifier[choices] . identifier[get] ( identifier[normalized_code] , identifier[missing_value] )). identifier[strip] ()
keyword[elif] identifier[normalized_code] :
identifier[normalized_name] = literal[string] % identifier[normalized_code]
identifier[db_values] [ identifier[field_name] ][ identifier[i] ]= identifier[normalized_name] keyword[or] identifier[blank_value]
keyword[else] :
keyword[raise] identifier[NotImplemented] ( literal[string] )
keyword[return] identifier[db_values] | def normalize_choices(db_values, field_name, app=DEFAULT_APP, model_name='', human_readable=True, none_value='Null', blank_value='Unknown', missing_value='Unknown DB Code'):
"""Output the human-readable strings associated with the list of database values for a model field.
Uses the translation dictionary `CHOICES_<FIELD_NAME>` attribute for the given `model_name`.
In addition, translate `None` into 'Null', or whatever string is indicated by `none_value`.
"""
if app and isinstance(app, basestring):
app = get_app(app) # depends on [control=['if'], data=[]]
if not db_values:
return # depends on [control=['if'], data=[]]
try:
db_values = dict(db_values) # depends on [control=['try'], data=[]]
except:
raise NotImplemented('This function can only handle objects that can be converted to a dict, not lists or querysets returned by django `.values().aggregate()`.') # depends on [control=['except'], data=[]]
if not field_name in db_values:
return db_values # depends on [control=['if'], data=[]]
if human_readable:
for (i, db_value) in enumerate(db_values[field_name]):
if db_value in (None, 'None') or app in (None, 'None'):
db_values[field_name][i] = none_value
continue # depends on [control=['if'], data=[]]
if isinstance(db_value, basestring):
normalized_code = str(db_value).strip().upper() # depends on [control=['if'], data=[]]
# the app is actually the models.py module, NOT the app_name package
# so don't look in app.models, you'll only find django.db.models there (app_name.models.models)
choices = getattr(app, 'CHOICES_%s' % field_name.upper(), [])
normalized_name = None
if choices:
normalized_name = str(choices.get(normalized_code, missing_value)).strip() # depends on [control=['if'], data=[]]
elif normalized_code:
normalized_name = 'DB Code: "%s"' % normalized_code # depends on [control=['if'], data=[]]
db_values[field_name][i] = normalized_name or blank_value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
raise NotImplemented('This function can only convert database choices to human-readable strings.')
return db_values |
def match_query_to_corpus(self):
    """Return the index of the corpus document best matching the user query.

    Scores every document by summed TF-IDF over the query terms it contains;
    ties and all-zero scores fall back to index 0.
    """
    scored = []
    for doc_index, document in enumerate(self.processed_corpus):
        tf = self.term_frequencies[doc_index]
        idf = self.inverse_document_frequencies
        score = sum(tf[term] * idf[term]
                    for term in self.processed_query if term in document)
        scored.append((score, doc_index))
    best_index = 0
    best_score = 0
    for score, doc_index in scored:
        if score > best_score:
            best_index = doc_index
            best_score = score
    return best_index
constant[Q.match_query_to_corpus() -> index -- return the matched corpus
index of the user query
]
variable[ranking] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18ede6e30>, <ast.Name object at 0x7da18ede7970>]]] in starred[call[name[enumerate], parameter[name[self].processed_corpus]]] begin[:]
variable[rank] assign[=] constant[0.0]
for taget[name[word]] in starred[name[self].processed_query] begin[:]
if compare[name[word] in name[doc]] begin[:]
<ast.AugAssign object at 0x7da18ede7e20>
call[name[ranking].append, parameter[tuple[[<ast.Name object at 0x7da18ede6200>, <ast.Name object at 0x7da18ede4a30>]]]]
variable[matching_corpus_index] assign[=] constant[0]
variable[max_rank] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da18ede4ac0>, <ast.Name object at 0x7da18ede76a0>]]] in starred[name[ranking]] begin[:]
if compare[name[rank] greater[>] name[max_rank]] begin[:]
variable[matching_corpus_index] assign[=] name[index]
variable[max_rank] assign[=] name[rank]
return[name[matching_corpus_index]] | keyword[def] identifier[match_query_to_corpus] ( identifier[self] ):
literal[string]
identifier[ranking] =[]
keyword[for] identifier[i] , identifier[doc] keyword[in] identifier[enumerate] ( identifier[self] . identifier[processed_corpus] ):
identifier[rank] = literal[int]
keyword[for] identifier[word] keyword[in] identifier[self] . identifier[processed_query] :
keyword[if] identifier[word] keyword[in] identifier[doc] :
identifier[rank] += identifier[self] . identifier[term_frequencies] [ identifier[i] ][ identifier[word] ]* identifier[self] . identifier[inverse_document_frequencies] [ identifier[word] ]
identifier[ranking] . identifier[append] (( identifier[rank] , identifier[i] ))
identifier[matching_corpus_index] = literal[int]
identifier[max_rank] = literal[int]
keyword[for] identifier[rank] , identifier[index] keyword[in] identifier[ranking] :
keyword[if] identifier[rank] > identifier[max_rank] :
identifier[matching_corpus_index] = identifier[index]
identifier[max_rank] = identifier[rank]
keyword[return] identifier[matching_corpus_index] | def match_query_to_corpus(self):
"""Q.match_query_to_corpus() -> index -- return the matched corpus
index of the user query
"""
ranking = []
for (i, doc) in enumerate(self.processed_corpus):
rank = 0.0
for word in self.processed_query:
if word in doc:
rank += self.term_frequencies[i][word] * self.inverse_document_frequencies[word] # depends on [control=['if'], data=['word']] # depends on [control=['for'], data=['word']]
ranking.append((rank, i)) # depends on [control=['for'], data=[]]
matching_corpus_index = 0
max_rank = 0
for (rank, index) in ranking:
if rank > max_rank:
matching_corpus_index = index
max_rank = rank # depends on [control=['if'], data=['rank', 'max_rank']] # depends on [control=['for'], data=[]]
return matching_corpus_index |
def from_url(cls, url):
    "Construct a (possibly null) ContentChecker from a URL"
    # The fragment is the last component of the parsed URL tuple.
    fragment = urlparse(url)[-1]
    if fragment:
        match = cls.pattern.search(fragment)
        if match:
            return cls(**match.groupdict())
    # No fragment, or the fragment does not match: fall back to a no-op checker.
    return ContentChecker()
constant[Construct a (possibly null) ContentChecker from a URL]
variable[fragment] assign[=] call[call[name[urlparse], parameter[name[url]]]][<ast.UnaryOp object at 0x7da20c6aa6b0>]
if <ast.UnaryOp object at 0x7da20c6aa050> begin[:]
return[call[name[ContentChecker], parameter[]]]
variable[match] assign[=] call[name[cls].pattern.search, parameter[name[fragment]]]
if <ast.UnaryOp object at 0x7da18bc72ec0> begin[:]
return[call[name[ContentChecker], parameter[]]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_url] ( identifier[cls] , identifier[url] ):
literal[string]
identifier[fragment] = identifier[urlparse] ( identifier[url] )[- literal[int] ]
keyword[if] keyword[not] identifier[fragment] :
keyword[return] identifier[ContentChecker] ()
identifier[match] = identifier[cls] . identifier[pattern] . identifier[search] ( identifier[fragment] )
keyword[if] keyword[not] identifier[match] :
keyword[return] identifier[ContentChecker] ()
keyword[return] identifier[cls] (** identifier[match] . identifier[groupdict] ()) | def from_url(cls, url):
"""Construct a (possibly null) ContentChecker from a URL"""
fragment = urlparse(url)[-1]
if not fragment:
return ContentChecker() # depends on [control=['if'], data=[]]
match = cls.pattern.search(fragment)
if not match:
return ContentChecker() # depends on [control=['if'], data=[]]
return cls(**match.groupdict()) |
def model_page(self, request, app_label, model_name, rest_of_url=None):
    """
    Handles the model-specific functionality of the databrowse site,
    delegating to the appropriate ModelDatabrowse class.
    """
    model = None
    try:
        model = get_model(app_label, model_name)
    except LookupError:
        pass
    if model is None:
        raise http.Http404("App %r, model %r, not found." %
                           (app_label, model_name))
    databrowse_class = self.registry.get(model)
    if databrowse_class is None:
        raise http.Http404("This model exists but has not been registered "
                           "with databrowse.")
    return databrowse_class(model, self).root(request, rest_of_url)
constant[
Handles the model-specific functionality of the databrowse site,
delegating<to the appropriate ModelDatabrowse class.
]
<ast.Try object at 0x7da204567340>
if compare[name[model] is constant[None]] begin[:]
<ast.Raise object at 0x7da204566ad0>
<ast.Try object at 0x7da2045666e0>
return[call[call[name[databrowse_class], parameter[name[model], name[self]]].root, parameter[name[request], name[rest_of_url]]]] | keyword[def] identifier[model_page] ( identifier[self] , identifier[request] , identifier[app_label] , identifier[model_name] , identifier[rest_of_url] = keyword[None] ):
literal[string]
keyword[try] :
identifier[model] = identifier[get_model] ( identifier[app_label] , identifier[model_name] )
keyword[except] identifier[LookupError] :
identifier[model] = keyword[None]
keyword[if] identifier[model] keyword[is] keyword[None] :
keyword[raise] identifier[http] . identifier[Http404] ( literal[string] %
( identifier[app_label] , identifier[model_name] ))
keyword[try] :
identifier[databrowse_class] = identifier[self] . identifier[registry] [ identifier[model] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[http] . identifier[Http404] ( literal[string]
literal[string] )
keyword[return] identifier[databrowse_class] ( identifier[model] , identifier[self] ). identifier[root] ( identifier[request] , identifier[rest_of_url] ) | def model_page(self, request, app_label, model_name, rest_of_url=None):
"""
Handles the model-specific functionality of the databrowse site,
delegating<to the appropriate ModelDatabrowse class.
"""
try:
model = get_model(app_label, model_name) # depends on [control=['try'], data=[]]
except LookupError:
model = None # depends on [control=['except'], data=[]]
if model is None:
raise http.Http404('App %r, model %r, not found.' % (app_label, model_name)) # depends on [control=['if'], data=[]]
try:
databrowse_class = self.registry[model] # depends on [control=['try'], data=[]]
except KeyError:
raise http.Http404('This model exists but has not been registered with databrowse.') # depends on [control=['except'], data=[]]
return databrowse_class(model, self).root(request, rest_of_url) |
def download_to_file(self, local_file, headers=None, cookies=None,
                     chunk_size=1024):
    """Stream the link target to a local file.

    :param local_file: Path the URL contents are saved to.
    :param headers: HTTP headers for the request (module-level HEADERS when falsy).
    :param cookies: Cookies for the request (empty dict when falsy).
    :param chunk_size: Size in bytes of each streamed chunk.
    """
    response = requests.get(self.url, headers=headers or HEADERS,
                            cookies=cookies or {}, stream=True)
    with open(local_file, "wb") as destination:
        for piece in response.iter_content(chunk_size):
            # Skip keep-alive chunks, which arrive as empty bytes.
            if piece:
                destination.write(piece)
constant[Downloads link to local file
:param local_file: Save url as this path
:param headers: Headers to fetch url
:param cookies: Cookies to fetch url
:param chunk_size: int
]
if <ast.UnaryOp object at 0x7da18bcca800> begin[:]
variable[headers] assign[=] name[HEADERS]
if <ast.UnaryOp object at 0x7da18bcc99f0> begin[:]
variable[cookies] assign[=] dictionary[[], []]
variable[req] assign[=] call[name[requests].get, parameter[name[self].url]]
with call[name[open], parameter[name[local_file], constant[wb]]] begin[:]
for taget[name[chunk]] in starred[call[name[req].iter_content, parameter[name[chunk_size]]]] begin[:]
if name[chunk] begin[:]
call[name[local_download].write, parameter[name[chunk]]] | keyword[def] identifier[download_to_file] ( identifier[self] , identifier[local_file] , identifier[headers] = keyword[None] , identifier[cookies] = keyword[None] ,
identifier[chunk_size] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[headers] :
identifier[headers] = identifier[HEADERS]
keyword[if] keyword[not] identifier[cookies] :
identifier[cookies] ={}
identifier[req] = identifier[requests] . identifier[get] ( identifier[self] . identifier[url] , identifier[headers] = identifier[headers] , identifier[cookies] = identifier[cookies] ,
identifier[stream] = keyword[True] )
keyword[with] identifier[open] ( identifier[local_file] , literal[string] ) keyword[as] identifier[local_download] :
keyword[for] identifier[chunk] keyword[in] identifier[req] . identifier[iter_content] ( identifier[chunk_size] ):
keyword[if] identifier[chunk] :
identifier[local_download] . identifier[write] ( identifier[chunk] ) | def download_to_file(self, local_file, headers=None, cookies=None, chunk_size=1024):
"""Downloads link to local file
:param local_file: Save url as this path
:param headers: Headers to fetch url
:param cookies: Cookies to fetch url
:param chunk_size: int
"""
if not headers:
headers = HEADERS # depends on [control=['if'], data=[]]
if not cookies:
cookies = {} # depends on [control=['if'], data=[]]
req = requests.get(self.url, headers=headers, cookies=cookies, stream=True)
with open(local_file, 'wb') as local_download:
for chunk in req.iter_content(chunk_size):
if chunk:
local_download.write(chunk) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk']] # depends on [control=['with'], data=['local_download']] |
def escape(pattern):
    "Escape all non-alphanumeric characters in pattern."
    escaped = []
    for ch in pattern:
        if ch in _alphanum:
            escaped.append(ch)
        elif ch == "\000":
            # NUL cannot appear literally after a backslash; spell it out octal.
            escaped.append("\\000")
        else:
            escaped.append("\\" + ch)
    # pattern[:0] is an empty string of the same type as the input.
    return pattern[:0].join(escaped)
constant[Escape all non-alphanumeric characters in pattern.]
variable[s] assign[=] call[name[list], parameter[name[pattern]]]
variable[alphanum] assign[=] name[_alphanum]
for taget[tuple[[<ast.Name object at 0x7da18bcc8340>, <ast.Name object at 0x7da18bcca710>]]] in starred[call[name[enumerate], parameter[name[pattern]]]] begin[:]
if compare[name[c] <ast.NotIn object at 0x7da2590d7190> name[alphanum]] begin[:]
if compare[name[c] equal[==] constant[ ]] begin[:]
call[name[s]][name[i]] assign[=] constant[\000]
return[call[call[name[pattern]][<ast.Slice object at 0x7da20e9b0fd0>].join, parameter[name[s]]]] | keyword[def] identifier[escape] ( identifier[pattern] ):
literal[string]
identifier[s] = identifier[list] ( identifier[pattern] )
identifier[alphanum] = identifier[_alphanum]
keyword[for] identifier[i] , identifier[c] keyword[in] identifier[enumerate] ( identifier[pattern] ):
keyword[if] identifier[c] keyword[not] keyword[in] identifier[alphanum] :
keyword[if] identifier[c] == literal[string] :
identifier[s] [ identifier[i] ]= literal[string]
keyword[else] :
identifier[s] [ identifier[i] ]= literal[string] + identifier[c]
keyword[return] identifier[pattern] [: literal[int] ]. identifier[join] ( identifier[s] ) | def escape(pattern):
"""Escape all non-alphanumeric characters in pattern."""
s = list(pattern)
alphanum = _alphanum
for (i, c) in enumerate(pattern):
if c not in alphanum:
if c == '\x00':
s[i] = '\\000' # depends on [control=['if'], data=[]]
else:
s[i] = '\\' + c # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=[]]
return pattern[:0].join(s) |
def ccmodmd_setcoef(k):
    """Set the coefficient maps for the ccmod stage. The only parameter is
    the slice index `k` and there are no return values; all inputs and
    outputs are from and to global variables.

    NOTE(review): `mp_Zf`, `mp_Z_Y1`, `mp_cri`, and `sl` are module-level
    shared state — presumably arrays shared between worker processes given
    the `mp_` prefix; confirm against the module's initialization code.
    """
    # Set working coefficient maps for ccmod step and compute DFT of
    # coefficient maps Z: real-input FFT of slice k over the spatial axes
    # (mp_cri.Nv sizes, mp_cri.axisN axes), stored into the shared output.
    mp_Zf[k] = sl.rfftn(mp_Z_Y1[k], mp_cri.Nv, mp_cri.axisN)
constant[Set the coefficient maps for the ccmod stage. The only parameter is
the slice index `k` and there are no return values; all inputs and
outputs are from and to global variables.
]
call[name[mp_Zf]][name[k]] assign[=] call[name[sl].rfftn, parameter[call[name[mp_Z_Y1]][name[k]], name[mp_cri].Nv, name[mp_cri].axisN]] | keyword[def] identifier[ccmodmd_setcoef] ( identifier[k] ):
literal[string]
identifier[mp_Zf] [ identifier[k] ]= identifier[sl] . identifier[rfftn] ( identifier[mp_Z_Y1] [ identifier[k] ], identifier[mp_cri] . identifier[Nv] , identifier[mp_cri] . identifier[axisN] ) | def ccmodmd_setcoef(k):
"""Set the coefficient maps for the ccmod stage. The only parameter is
the slice index `k` and there are no return values; all inputs and
outputs are from and to global variables.
"""
# Set working coefficient maps for ccmod step and compute DFT of
# coefficient maps Z
mp_Zf[k] = sl.rfftn(mp_Z_Y1[k], mp_cri.Nv, mp_cri.axisN) |
def frequency(self, mapping):
    """
    Returns frequency of a given :class:`caspo.core.mapping.Mapping`

    Parameters
    ----------
    mapping : :class:`caspo.core.mapping.Mapping`
        A logical conjunction mapping

    Returns
    -------
    float
        Frequency of the given mapping over all logical networks

    Raises
    ------
    ValueError
        If the given mapping is not found in the mappings of the underlying hypergraph of this list
    """
    column_index = self.hg.mappings[mapping]
    column = self.__matrix[:, column_index]
    return column.mean()
constant[
Returns frequency of a given :class:`caspo.core.mapping.Mapping`
Parameters
----------
mapping : :class:`caspo.core.mapping.Mapping`
A logical conjuntion mapping
Returns
-------
float
Frequency of the given mapping over all logical networks
Raises
------
ValueError
If the given mapping is not found in the mappings of the underlying hypergraph of this list
]
return[call[call[name[self].__matrix][tuple[[<ast.Slice object at 0x7da18f09caf0>, <ast.Subscript object at 0x7da18f09dbd0>]]].mean, parameter[]]] | keyword[def] identifier[frequency] ( identifier[self] , identifier[mapping] ):
literal[string]
keyword[return] identifier[self] . identifier[__matrix] [:, identifier[self] . identifier[hg] . identifier[mappings] [ identifier[mapping] ]]. identifier[mean] () | def frequency(self, mapping):
"""
Returns frequency of a given :class:`caspo.core.mapping.Mapping`
Parameters
----------
mapping : :class:`caspo.core.mapping.Mapping`
A logical conjuntion mapping
Returns
-------
float
Frequency of the given mapping over all logical networks
Raises
------
ValueError
If the given mapping is not found in the mappings of the underlying hypergraph of this list
"""
return self.__matrix[:, self.hg.mappings[mapping]].mean() |
def publish(self, message):
    """
    Publishes the message to all subscribers of this topic

    :param message: (object), the message to be published.
    """
    # Serialize and dispatch in one step via the topic-publish codec.
    self._encode_invoke(topic_publish_codec, message=self._to_data(message))
constant[
Publishes the message to all subscribers of this topic
:param message: (object), the message to be published.
]
variable[message_data] assign[=] call[name[self]._to_data, parameter[name[message]]]
call[name[self]._encode_invoke, parameter[name[topic_publish_codec]]] | keyword[def] identifier[publish] ( identifier[self] , identifier[message] ):
literal[string]
identifier[message_data] = identifier[self] . identifier[_to_data] ( identifier[message] )
identifier[self] . identifier[_encode_invoke] ( identifier[topic_publish_codec] , identifier[message] = identifier[message_data] ) | def publish(self, message):
"""
Publishes the message to all subscribers of this topic
:param message: (object), the message to be published.
"""
message_data = self._to_data(message)
self._encode_invoke(topic_publish_codec, message=message_data) |
def _curl_bitmex(self, api, query=None, postdict=None, timeout=3, verb=None):
    """Send a request to BitMEX Servers.

    Builds the full URL from ``self.base_url + api`` and issues the request
    through ``self.session``, retrying (by recursing into itself) on auth
    expiry (401), rate limiting (429), temporary downtime (503), timeouts
    and connection errors.

    :param api: endpoint path appended to ``self.base_url``
    :param query: optional dict of URL query parameters
    :param postdict: optional dict of form data; its presence makes the
        default verb POST instead of GET
    :param timeout: per-request timeout in seconds
    :param verb: HTTP method; derived from ``postdict`` when omitted
    :returns: the decoded JSON response body, or None for a DELETE whose
        order was not found (404)

    NOTE(review): the retry branches recurse with no retry cap — a
    persistently failing endpoint recurses until the interpreter's
    recursion limit; consider bounding retries.
    """
    # Handle URL
    url = self.base_url + api
    # Default to POST if data is attached, GET otherwise
    if not verb:
        verb = 'POST' if postdict else 'GET'
    # Auth: Use Access Token by default, API Key/Secret if provided
    auth = AccessTokenAuth(self.token)
    if self.apiKey:
        auth = APIKeyAuthWithExpires(self.apiKey, self.apiSecret)
    # Make the request
    try:
        # url = "http://httpbin.org/post"
        req = requests.Request(verb, url, data=postdict, auth=auth, params=query)
        prepped = self.session.prepare_request(req)
        response = self.session.send(prepped, timeout=timeout)
        # Make non-200s throw
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # `response` is always bound here: HTTPError can only come from
        # raise_for_status() after session.send() succeeded.
        # 401 - Auth error. Re-auth and re-run this request.
        if response.status_code == 401:
            if self.token is None:
                self.logger.error("Login information or API Key incorrect, please check and restart.")
                self.logger.error("Error: " + response.text)
                if postdict:
                    self.logger.error(postdict)
            self.logger.warning("Token expired, reauthenticating...")
            sleep(1)
            self.authenticate()
            return self._curl_bitmex(api, query, postdict, timeout, verb)
        # 404, can be thrown if order canceled does not exist.
        elif response.status_code == 404:
            if verb == 'DELETE':
                self.logger.error("Order not found: %s" % postdict['orderID'])
                return
            self.logger.error("Unable to contact the BitMEX API (404). " +
                              "Request: %s \n %s" % (url, json.dumps(postdict)))
        # 429, ratelimit
        elif response.status_code == 429:
            self.logger.error("Ratelimited on current request. Sleeping, then trying again. Try fewer " +
                              "order pairs or contact support@bitmex.com to raise your limits. " +
                              "Request: %s \n %s" % (url, json.dumps(postdict)))
            sleep(1)
            return self._curl_bitmex(api, query, postdict, timeout, verb)
        # 503 - BitMEX temporary downtime, likely due to a deploy. Try again
        elif response.status_code == 503:
            self.logger.warning("Unable to contact the BitMEX API (503), retrying. " +
                                "Request: %s \n %s" % (url, json.dumps(postdict)))
            sleep(1)
            return self._curl_bitmex(api, query, postdict, timeout, verb)
        # Unknown Error
        else:
            self.logger.error("Unhandled Error: %s: %s %s" % (e, response.text, json.dumps(response.json(), indent=4)))
            self.logger.error("Endpoint was: %s %s" % (verb, api))
        # NOTE(review): the 404 (non-DELETE) and unhandled-error branches fall
        # through to `response.json()` below, which may raise on a non-JSON
        # error body — confirm this is intended.
    except requests.exceptions.Timeout as e:
        # Timeout, re-run this request
        self.logger.warning("Timed out, retrying...")
        return self._curl_bitmex(api, query, postdict, timeout, verb)
    except requests.exceptions.ConnectionError as e:
        self.logger.warning("Unable to contact the BitMEX API (ConnectionError). Please check the URL. Retrying. " +
                            "Request: %s \n %s" % (url, json.dumps(postdict)))
        sleep(1)
        return self._curl_bitmex(api, query, postdict, timeout, verb)
    return response.json()
constant[Send a request to BitMEX Servers.]
variable[url] assign[=] binary_operation[name[self].base_url + name[api]]
if <ast.UnaryOp object at 0x7da18dc07d00> begin[:]
variable[verb] assign[=] <ast.IfExp object at 0x7da18dc05330>
variable[auth] assign[=] call[name[AccessTokenAuth], parameter[name[self].token]]
if name[self].apiKey begin[:]
variable[auth] assign[=] call[name[APIKeyAuthWithExpires], parameter[name[self].apiKey, name[self].apiSecret]]
<ast.Try object at 0x7da18dc04e80>
return[call[name[response].json, parameter[]]] | keyword[def] identifier[_curl_bitmex] ( identifier[self] , identifier[api] , identifier[query] = keyword[None] , identifier[postdict] = keyword[None] , identifier[timeout] = literal[int] , identifier[verb] = keyword[None] ):
literal[string]
identifier[url] = identifier[self] . identifier[base_url] + identifier[api]
keyword[if] keyword[not] identifier[verb] :
identifier[verb] = literal[string] keyword[if] identifier[postdict] keyword[else] literal[string]
identifier[auth] = identifier[AccessTokenAuth] ( identifier[self] . identifier[token] )
keyword[if] identifier[self] . identifier[apiKey] :
identifier[auth] = identifier[APIKeyAuthWithExpires] ( identifier[self] . identifier[apiKey] , identifier[self] . identifier[apiSecret] )
keyword[try] :
identifier[req] = identifier[requests] . identifier[Request] ( identifier[verb] , identifier[url] , identifier[data] = identifier[postdict] , identifier[auth] = identifier[auth] , identifier[params] = identifier[query] )
identifier[prepped] = identifier[self] . identifier[session] . identifier[prepare_request] ( identifier[req] )
identifier[response] = identifier[self] . identifier[session] . identifier[send] ( identifier[prepped] , identifier[timeout] = identifier[timeout] )
identifier[response] . identifier[raise_for_status] ()
keyword[except] identifier[requests] . identifier[exceptions] . identifier[HTTPError] keyword[as] identifier[e] :
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[if] identifier[self] . identifier[token] keyword[is] keyword[None] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] )
identifier[self] . identifier[logger] . identifier[error] ( literal[string] + identifier[response] . identifier[text] )
keyword[if] identifier[postdict] :
identifier[self] . identifier[logger] . identifier[error] ( identifier[postdict] )
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] )
identifier[sleep] ( literal[int] )
identifier[self] . identifier[authenticate] ()
keyword[return] identifier[self] . identifier[_curl_bitmex] ( identifier[api] , identifier[query] , identifier[postdict] , identifier[timeout] , identifier[verb] )
keyword[elif] identifier[response] . identifier[status_code] == literal[int] :
keyword[if] identifier[verb] == literal[string] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] % identifier[postdict] [ literal[string] ])
keyword[return]
identifier[self] . identifier[logger] . identifier[error] ( literal[string] +
literal[string] %( identifier[url] , identifier[json] . identifier[dumps] ( identifier[postdict] )))
keyword[elif] identifier[response] . identifier[status_code] == literal[int] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] +
literal[string] +
literal[string] %( identifier[url] , identifier[json] . identifier[dumps] ( identifier[postdict] )))
identifier[sleep] ( literal[int] )
keyword[return] identifier[self] . identifier[_curl_bitmex] ( identifier[api] , identifier[query] , identifier[postdict] , identifier[timeout] , identifier[verb] )
keyword[elif] identifier[response] . identifier[status_code] == literal[int] :
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] +
literal[string] %( identifier[url] , identifier[json] . identifier[dumps] ( identifier[postdict] )))
identifier[sleep] ( literal[int] )
keyword[return] identifier[self] . identifier[_curl_bitmex] ( identifier[api] , identifier[query] , identifier[postdict] , identifier[timeout] , identifier[verb] )
keyword[else] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] %( identifier[e] , identifier[response] . identifier[text] , identifier[json] . identifier[dumps] ( identifier[response] . identifier[json] (), identifier[indent] = literal[int] )))
identifier[self] . identifier[logger] . identifier[error] ( literal[string] %( identifier[verb] , identifier[api] ))
keyword[except] identifier[requests] . identifier[exceptions] . identifier[Timeout] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] identifier[self] . identifier[_curl_bitmex] ( identifier[api] , identifier[query] , identifier[postdict] , identifier[timeout] , identifier[verb] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] +
literal[string] %( identifier[url] , identifier[json] . identifier[dumps] ( identifier[postdict] )))
identifier[sleep] ( literal[int] )
keyword[return] identifier[self] . identifier[_curl_bitmex] ( identifier[api] , identifier[query] , identifier[postdict] , identifier[timeout] , identifier[verb] )
keyword[return] identifier[response] . identifier[json] () | def _curl_bitmex(self, api, query=None, postdict=None, timeout=3, verb=None):
"""Send a request to BitMEX Servers."""
# Handle URL
url = self.base_url + api
# Default to POST if data is attached, GET otherwise
if not verb:
verb = 'POST' if postdict else 'GET' # depends on [control=['if'], data=[]]
# Auth: Use Access Token by default, API Key/Secret if provided
auth = AccessTokenAuth(self.token)
if self.apiKey:
auth = APIKeyAuthWithExpires(self.apiKey, self.apiSecret) # depends on [control=['if'], data=[]]
# Make the request
try:
# url = "http://httpbin.org/post"
req = requests.Request(verb, url, data=postdict, auth=auth, params=query)
prepped = self.session.prepare_request(req)
response = self.session.send(prepped, timeout=timeout)
# Make non-200s throw
response.raise_for_status() # depends on [control=['try'], data=[]]
except requests.exceptions.HTTPError as e:
# 401 - Auth error. Re-auth and re-run this request.
if response.status_code == 401:
if self.token is None:
self.logger.error('Login information or API Key incorrect, please check and restart.')
self.logger.error('Error: ' + response.text)
if postdict:
self.logger.error(postdict) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.logger.warning('Token expired, reauthenticating...')
sleep(1)
self.authenticate()
return self._curl_bitmex(api, query, postdict, timeout, verb) # depends on [control=['if'], data=[]]
# 404, can be thrown if order canceled does not exist.
elif response.status_code == 404:
if verb == 'DELETE':
self.logger.error('Order not found: %s' % postdict['orderID'])
return # depends on [control=['if'], data=[]]
self.logger.error('Unable to contact the BitMEX API (404). ' + 'Request: %s \n %s' % (url, json.dumps(postdict))) # depends on [control=['if'], data=[]]
# 429, ratelimit
elif response.status_code == 429:
self.logger.error('Ratelimited on current request. Sleeping, then trying again. Try fewer ' + 'order pairs or contact support@bitmex.com to raise your limits. ' + 'Request: %s \n %s' % (url, json.dumps(postdict)))
sleep(1)
return self._curl_bitmex(api, query, postdict, timeout, verb) # depends on [control=['if'], data=[]]
# 503 - BitMEX temporary downtime, likely due to a deploy. Try again
elif response.status_code == 503:
self.logger.warning('Unable to contact the BitMEX API (503), retrying. ' + 'Request: %s \n %s' % (url, json.dumps(postdict)))
sleep(1)
return self._curl_bitmex(api, query, postdict, timeout, verb) # depends on [control=['if'], data=[]]
else:
# Unknown Error
self.logger.error('Unhandled Error: %s: %s %s' % (e, response.text, json.dumps(response.json(), indent=4)))
self.logger.error('Endpoint was: %s %s' % (verb, api)) # depends on [control=['except'], data=['e']]
except requests.exceptions.Timeout as e:
# Timeout, re-run this request
self.logger.warning('Timed out, retrying...')
return self._curl_bitmex(api, query, postdict, timeout, verb) # depends on [control=['except'], data=[]]
except requests.exceptions.ConnectionError as e:
self.logger.warning('Unable to contact the BitMEX API (ConnectionError). Please check the URL. Retrying. ' + 'Request: %s \n %s' % (url, json.dumps(postdict)))
sleep(1)
return self._curl_bitmex(api, query, postdict, timeout, verb) # depends on [control=['except'], data=[]]
return response.json() |
def clear_stats(self):
"""Reset server stat counters.."""
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, 'qsize', None),
'Threads': lambda s: len(getattr(self.requests, '_threads', [])),
'Threads Idle': lambda s: getattr(self.requests, 'idle', None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and -1 or sum(
[w['Requests'](w) for w in s['Worker Threads'].values()], 0,
),
'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0,
),
'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) for w in s['Worker Threads'].values()],
0,
),
'Work Time': lambda s: (not s['Enabled']) and -1 or sum(
[w['Work Time'](w) for w in s['Worker Threads'].values()], 0,
),
'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0,
),
'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0,
),
'Worker Threads': {},
}
logging.statistics['Cheroot HTTPServer %d' % id(self)] = self.stats | def function[clear_stats, parameter[self]]:
constant[Reset server stat counters..]
name[self]._start_time assign[=] constant[None]
name[self]._run_time assign[=] constant[0]
name[self].stats assign[=] dictionary[[<ast.Constant object at 0x7da18c4ce8c0>, <ast.Constant object at 0x7da18c4cd330>, <ast.Constant object at 0x7da18c4cce80>, <ast.Constant object at 0x7da18c4cc790>, <ast.Constant object at 0x7da18c4cf7f0>, <ast.Constant object at 0x7da18c4cf520>, <ast.Constant object at 0x7da18c4cc340>, <ast.Constant object at 0x7da18c4cf160>, <ast.Constant object at 0x7da18c4ce9e0>, <ast.Constant object at 0x7da18c4cecb0>, <ast.Constant object at 0x7da18c4cd000>, <ast.Constant object at 0x7da18c4ccee0>, <ast.Constant object at 0x7da18c4cc8b0>, <ast.Constant object at 0x7da18c4ce110>, <ast.Constant object at 0x7da18c4ce380>, <ast.Constant object at 0x7da18c4ce440>], [<ast.Constant object at 0x7da18c4cd570>, <ast.Lambda object at 0x7da18c4cec80>, <ast.Lambda object at 0x7da18c4cf6a0>, <ast.Constant object at 0x7da18c4cf1c0>, <ast.Lambda object at 0x7da18c4cdfc0>, <ast.Lambda object at 0x7da18c4cc730>, <ast.Lambda object at 0x7da18c4cf250>, <ast.Lambda object at 0x7da18c4cdde0>, <ast.Constant object at 0x7da18c4cc3d0>, <ast.Lambda object at 0x7da18c4cfee0>, <ast.Lambda object at 0x7da204623130>, <ast.Lambda object at 0x7da204622dd0>, <ast.Lambda object at 0x7da204623280>, <ast.Lambda object at 0x7da204623010>, <ast.Lambda object at 0x7da18c4cd4b0>, <ast.Dict object at 0x7da18c4cf490>]]
call[name[logging].statistics][binary_operation[constant[Cheroot HTTPServer %d] <ast.Mod object at 0x7da2590d6920> call[name[id], parameter[name[self]]]]] assign[=] name[self].stats | keyword[def] identifier[clear_stats] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_start_time] = keyword[None]
identifier[self] . identifier[_run_time] = literal[int]
identifier[self] . identifier[stats] ={
literal[string] : keyword[False] ,
literal[string] : keyword[lambda] identifier[s] : identifier[repr] ( identifier[self] . identifier[bind_addr] ),
literal[string] : keyword[lambda] identifier[s] :( keyword[not] identifier[s] [ literal[string] ]) keyword[and] - literal[int] keyword[or] identifier[self] . identifier[runtime] (),
literal[string] : literal[int] ,
literal[string] : keyword[lambda] identifier[s] : identifier[s] [ literal[string] ]/ identifier[self] . identifier[runtime] (),
literal[string] : keyword[lambda] identifier[s] : identifier[getattr] ( identifier[self] . identifier[requests] , literal[string] , keyword[None] ),
literal[string] : keyword[lambda] identifier[s] : identifier[len] ( identifier[getattr] ( identifier[self] . identifier[requests] , literal[string] ,[])),
literal[string] : keyword[lambda] identifier[s] : identifier[getattr] ( identifier[self] . identifier[requests] , literal[string] , keyword[None] ),
literal[string] : literal[int] ,
literal[string] : keyword[lambda] identifier[s] :( keyword[not] identifier[s] [ literal[string] ]) keyword[and] - literal[int] keyword[or] identifier[sum] (
[ identifier[w] [ literal[string] ]( identifier[w] ) keyword[for] identifier[w] keyword[in] identifier[s] [ literal[string] ]. identifier[values] ()], literal[int] ,
),
literal[string] : keyword[lambda] identifier[s] :( keyword[not] identifier[s] [ literal[string] ]) keyword[and] - literal[int] keyword[or] identifier[sum] (
[ identifier[w] [ literal[string] ]( identifier[w] ) keyword[for] identifier[w] keyword[in] identifier[s] [ literal[string] ]. identifier[values] ()], literal[int] ,
),
literal[string] : keyword[lambda] identifier[s] :( keyword[not] identifier[s] [ literal[string] ]) keyword[and] - literal[int] keyword[or] identifier[sum] (
[ identifier[w] [ literal[string] ]( identifier[w] ) keyword[for] identifier[w] keyword[in] identifier[s] [ literal[string] ]. identifier[values] ()],
literal[int] ,
),
literal[string] : keyword[lambda] identifier[s] :( keyword[not] identifier[s] [ literal[string] ]) keyword[and] - literal[int] keyword[or] identifier[sum] (
[ identifier[w] [ literal[string] ]( identifier[w] ) keyword[for] identifier[w] keyword[in] identifier[s] [ literal[string] ]. identifier[values] ()], literal[int] ,
),
literal[string] : keyword[lambda] identifier[s] :( keyword[not] identifier[s] [ literal[string] ]) keyword[and] - literal[int] keyword[or] identifier[sum] (
[ identifier[w] [ literal[string] ]( identifier[w] )/( identifier[w] [ literal[string] ]( identifier[w] ) keyword[or] literal[int] )
keyword[for] identifier[w] keyword[in] identifier[s] [ literal[string] ]. identifier[values] ()], literal[int] ,
),
literal[string] : keyword[lambda] identifier[s] :( keyword[not] identifier[s] [ literal[string] ]) keyword[and] - literal[int] keyword[or] identifier[sum] (
[ identifier[w] [ literal[string] ]( identifier[w] )/( identifier[w] [ literal[string] ]( identifier[w] ) keyword[or] literal[int] )
keyword[for] identifier[w] keyword[in] identifier[s] [ literal[string] ]. identifier[values] ()], literal[int] ,
),
literal[string] :{},
}
identifier[logging] . identifier[statistics] [ literal[string] % identifier[id] ( identifier[self] )]= identifier[self] . identifier[stats] | def clear_stats(self):
"""Reset server stat counters.."""
self._start_time = None
self._run_time = 0
self.stats = {'Enabled': False, 'Bind Address': lambda s: repr(self.bind_addr), 'Run time': lambda s: not s['Enabled'] and -1 or self.runtime(), 'Accepts': 0, 'Accepts/sec': lambda s: s['Accepts'] / self.runtime(), 'Queue': lambda s: getattr(self.requests, 'qsize', None), 'Threads': lambda s: len(getattr(self.requests, '_threads', [])), 'Threads Idle': lambda s: getattr(self.requests, 'idle', None), 'Socket Errors': 0, 'Requests': lambda s: not s['Enabled'] and -1 or sum([w['Requests'](w) for w in s['Worker Threads'].values()], 0), 'Bytes Read': lambda s: not s['Enabled'] and -1 or sum([w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0), 'Bytes Written': lambda s: not s['Enabled'] and -1 or sum([w['Bytes Written'](w) for w in s['Worker Threads'].values()], 0), 'Work Time': lambda s: not s['Enabled'] and -1 or sum([w['Work Time'](w) for w in s['Worker Threads'].values()], 0), 'Read Throughput': lambda s: not s['Enabled'] and -1 or sum([w['Bytes Read'](w) / (w['Work Time'](w) or 1e-06) for w in s['Worker Threads'].values()], 0), 'Write Throughput': lambda s: not s['Enabled'] and -1 or sum([w['Bytes Written'](w) / (w['Work Time'](w) or 1e-06) for w in s['Worker Threads'].values()], 0), 'Worker Threads': {}}
logging.statistics['Cheroot HTTPServer %d' % id(self)] = self.stats |
def parse_options(self, option_values):
""" Set the options (with parsing) and returns a dict of all options values
"""
self.set_options_values(option_values, parse=True)
return self.get_options_values(hidden=False) | def function[parse_options, parameter[self, option_values]]:
constant[ Set the options (with parsing) and returns a dict of all options values
]
call[name[self].set_options_values, parameter[name[option_values]]]
return[call[name[self].get_options_values, parameter[]]] | keyword[def] identifier[parse_options] ( identifier[self] , identifier[option_values] ):
literal[string]
identifier[self] . identifier[set_options_values] ( identifier[option_values] , identifier[parse] = keyword[True] )
keyword[return] identifier[self] . identifier[get_options_values] ( identifier[hidden] = keyword[False] ) | def parse_options(self, option_values):
""" Set the options (with parsing) and returns a dict of all options values
"""
self.set_options_values(option_values, parse=True)
return self.get_options_values(hidden=False) |
def except_keyword(source, start, keyword):
""" Returns position after keyword if found else None
Note: skips white space"""
start = pass_white(source, start)
kl = len(keyword) #keyword len
if kl + start > len(source):
return None
if source[start:start + kl] != keyword:
return None
if kl + start < len(source) and source[start + kl] in IDENTIFIER_PART:
return None
return start + kl | def function[except_keyword, parameter[source, start, keyword]]:
constant[ Returns position after keyword if found else None
Note: skips white space]
variable[start] assign[=] call[name[pass_white], parameter[name[source], name[start]]]
variable[kl] assign[=] call[name[len], parameter[name[keyword]]]
if compare[binary_operation[name[kl] + name[start]] greater[>] call[name[len], parameter[name[source]]]] begin[:]
return[constant[None]]
if compare[call[name[source]][<ast.Slice object at 0x7da20e956ec0>] not_equal[!=] name[keyword]] begin[:]
return[constant[None]]
if <ast.BoolOp object at 0x7da20e954af0> begin[:]
return[constant[None]]
return[binary_operation[name[start] + name[kl]]] | keyword[def] identifier[except_keyword] ( identifier[source] , identifier[start] , identifier[keyword] ):
literal[string]
identifier[start] = identifier[pass_white] ( identifier[source] , identifier[start] )
identifier[kl] = identifier[len] ( identifier[keyword] )
keyword[if] identifier[kl] + identifier[start] > identifier[len] ( identifier[source] ):
keyword[return] keyword[None]
keyword[if] identifier[source] [ identifier[start] : identifier[start] + identifier[kl] ]!= identifier[keyword] :
keyword[return] keyword[None]
keyword[if] identifier[kl] + identifier[start] < identifier[len] ( identifier[source] ) keyword[and] identifier[source] [ identifier[start] + identifier[kl] ] keyword[in] identifier[IDENTIFIER_PART] :
keyword[return] keyword[None]
keyword[return] identifier[start] + identifier[kl] | def except_keyword(source, start, keyword):
""" Returns position after keyword if found else None
Note: skips white space"""
start = pass_white(source, start)
kl = len(keyword) #keyword len
if kl + start > len(source):
return None # depends on [control=['if'], data=[]]
if source[start:start + kl] != keyword:
return None # depends on [control=['if'], data=[]]
if kl + start < len(source) and source[start + kl] in IDENTIFIER_PART:
return None # depends on [control=['if'], data=[]]
return start + kl |
def is_success(self, check_timeout=True):
'''
Check if Webpay response ``TBK_RESPUESTA`` is equal to ``0`` and if the lapse between initialization
and this call is less than ``self.timeout`` when ``check_timeout`` is ``True`` (default).
:param check_timeout: When ``True``, check time between initialization and call.
'''
if check_timeout and self.is_timeout():
return False
return self.payload.response == self.payload.SUCCESS_RESPONSE_CODE | def function[is_success, parameter[self, check_timeout]]:
constant[
Check if Webpay response ``TBK_RESPUESTA`` is equal to ``0`` and if the lapse between initialization
and this call is less than ``self.timeout`` when ``check_timeout`` is ``True`` (default).
:param check_timeout: When ``True``, check time between initialization and call.
]
if <ast.BoolOp object at 0x7da20c6c45b0> begin[:]
return[constant[False]]
return[compare[name[self].payload.response equal[==] name[self].payload.SUCCESS_RESPONSE_CODE]] | keyword[def] identifier[is_success] ( identifier[self] , identifier[check_timeout] = keyword[True] ):
literal[string]
keyword[if] identifier[check_timeout] keyword[and] identifier[self] . identifier[is_timeout] ():
keyword[return] keyword[False]
keyword[return] identifier[self] . identifier[payload] . identifier[response] == identifier[self] . identifier[payload] . identifier[SUCCESS_RESPONSE_CODE] | def is_success(self, check_timeout=True):
"""
Check if Webpay response ``TBK_RESPUESTA`` is equal to ``0`` and if the lapse between initialization
and this call is less than ``self.timeout`` when ``check_timeout`` is ``True`` (default).
:param check_timeout: When ``True``, check time between initialization and call.
"""
if check_timeout and self.is_timeout():
return False # depends on [control=['if'], data=[]]
return self.payload.response == self.payload.SUCCESS_RESPONSE_CODE |
def run(self, statement):
'''Execute the sql in the database and return the results.
The results are a list of tuples. Each tuple has 4 values
(title, rows, headers, status).
'''
# Remove spaces and EOL
statement = statement.strip()
if not statement: # Empty string
yield (None, None, None, None)
# Split the sql into separate queries and run each one.
components = sqlparse.split(statement)
for sql in components:
# Remove spaces, eol and semi-colons.
sql = sql.rstrip(';')
# \G is treated specially since we have to set the expanded output.
if sql.endswith('\\G'):
special.set_expanded_output(True)
sql = sql[:-2].strip()
cur = self.conn.cursor()
try:
for result in special.execute(cur, sql):
yield result
except special.CommandNotFound: # Regular SQL
cur.execute(sql)
yield self.get_result(cur) | def function[run, parameter[self, statement]]:
constant[Execute the sql in the database and return the results.
The results are a list of tuples. Each tuple has 4 values
(title, rows, headers, status).
]
variable[statement] assign[=] call[name[statement].strip, parameter[]]
if <ast.UnaryOp object at 0x7da20c76efb0> begin[:]
<ast.Yield object at 0x7da20c76d270>
variable[components] assign[=] call[name[sqlparse].split, parameter[name[statement]]]
for taget[name[sql]] in starred[name[components]] begin[:]
variable[sql] assign[=] call[name[sql].rstrip, parameter[constant[;]]]
if call[name[sql].endswith, parameter[constant[\G]]] begin[:]
call[name[special].set_expanded_output, parameter[constant[True]]]
variable[sql] assign[=] call[call[name[sql]][<ast.Slice object at 0x7da20c7ca9b0>].strip, parameter[]]
variable[cur] assign[=] call[name[self].conn.cursor, parameter[]]
<ast.Try object at 0x7da20c7c8070> | keyword[def] identifier[run] ( identifier[self] , identifier[statement] ):
literal[string]
identifier[statement] = identifier[statement] . identifier[strip] ()
keyword[if] keyword[not] identifier[statement] :
keyword[yield] ( keyword[None] , keyword[None] , keyword[None] , keyword[None] )
identifier[components] = identifier[sqlparse] . identifier[split] ( identifier[statement] )
keyword[for] identifier[sql] keyword[in] identifier[components] :
identifier[sql] = identifier[sql] . identifier[rstrip] ( literal[string] )
keyword[if] identifier[sql] . identifier[endswith] ( literal[string] ):
identifier[special] . identifier[set_expanded_output] ( keyword[True] )
identifier[sql] = identifier[sql] [:- literal[int] ]. identifier[strip] ()
identifier[cur] = identifier[self] . identifier[conn] . identifier[cursor] ()
keyword[try] :
keyword[for] identifier[result] keyword[in] identifier[special] . identifier[execute] ( identifier[cur] , identifier[sql] ):
keyword[yield] identifier[result]
keyword[except] identifier[special] . identifier[CommandNotFound] :
identifier[cur] . identifier[execute] ( identifier[sql] )
keyword[yield] identifier[self] . identifier[get_result] ( identifier[cur] ) | def run(self, statement):
"""Execute the sql in the database and return the results.
The results are a list of tuples. Each tuple has 4 values
(title, rows, headers, status).
"""
# Remove spaces and EOL
statement = statement.strip()
if not statement: # Empty string
yield (None, None, None, None) # depends on [control=['if'], data=[]]
# Split the sql into separate queries and run each one.
components = sqlparse.split(statement)
for sql in components:
# Remove spaces, eol and semi-colons.
sql = sql.rstrip(';')
# \G is treated specially since we have to set the expanded output.
if sql.endswith('\\G'):
special.set_expanded_output(True)
sql = sql[:-2].strip() # depends on [control=['if'], data=[]]
cur = self.conn.cursor()
try:
for result in special.execute(cur, sql):
yield result # depends on [control=['for'], data=['result']] # depends on [control=['try'], data=[]]
except special.CommandNotFound: # Regular SQL
cur.execute(sql)
yield self.get_result(cur) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['sql']] |
def _dataset_qa(self, dataset):
"""Chequea si el dataset tiene una calidad mínima para cosechar."""
# 1. VALIDACIONES
# chequea que haya por lo menos algún formato de datos reconocido
has_data_format = helpers.dataset_has_data_distributions(dataset)
# chequea que algunos campos tengan longitudes mínimas
has_title = "title" in dataset
has_description = "description" in dataset
if has_title:
has_min_title = len(dataset["title"]) >= MIN_DATASET_TITLE
else:
has_min_title = False
if has_description:
has_min_desc = len(
dataset["description"]) >= MIN_DATASET_DESCRIPTION
else:
has_min_desc = False
# 2. EVALUACION DE COSECHA: evalua si se cosecha o no el dataset
harvest = (has_title and has_description and
has_data_format and has_min_title and has_min_desc)
# 3. NOTAS: genera notas de validación
notes = []
if not has_data_format:
notes.append("No tiene distribuciones con datos.")
if not has_title:
notes.append("Dataset sin titulo {}".format(dataset))
else:
if not has_min_title:
notes.append("Titulo tiene menos de {} caracteres".format(
MIN_DATASET_TITLE))
if not has_description:
notes.append("Dataset sin descripcion {}".format(dataset))
else:
if not has_min_desc:
notes.append("Descripcion tiene menos de {} caracteres".format(
MIN_DATASET_DESCRIPTION))
return harvest, notes | def function[_dataset_qa, parameter[self, dataset]]:
constant[Chequea si el dataset tiene una calidad mínima para cosechar.]
variable[has_data_format] assign[=] call[name[helpers].dataset_has_data_distributions, parameter[name[dataset]]]
variable[has_title] assign[=] compare[constant[title] in name[dataset]]
variable[has_description] assign[=] compare[constant[description] in name[dataset]]
if name[has_title] begin[:]
variable[has_min_title] assign[=] compare[call[name[len], parameter[call[name[dataset]][constant[title]]]] greater_or_equal[>=] name[MIN_DATASET_TITLE]]
if name[has_description] begin[:]
variable[has_min_desc] assign[=] compare[call[name[len], parameter[call[name[dataset]][constant[description]]]] greater_or_equal[>=] name[MIN_DATASET_DESCRIPTION]]
variable[harvest] assign[=] <ast.BoolOp object at 0x7da1b02e77c0>
variable[notes] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da1b02e5f60> begin[:]
call[name[notes].append, parameter[constant[No tiene distribuciones con datos.]]]
if <ast.UnaryOp object at 0x7da1b044d9c0> begin[:]
call[name[notes].append, parameter[call[constant[Dataset sin titulo {}].format, parameter[name[dataset]]]]]
if <ast.UnaryOp object at 0x7da1b044f2b0> begin[:]
call[name[notes].append, parameter[call[constant[Dataset sin descripcion {}].format, parameter[name[dataset]]]]]
return[tuple[[<ast.Name object at 0x7da1b044ce50>, <ast.Name object at 0x7da1b044ce80>]]] | keyword[def] identifier[_dataset_qa] ( identifier[self] , identifier[dataset] ):
literal[string]
identifier[has_data_format] = identifier[helpers] . identifier[dataset_has_data_distributions] ( identifier[dataset] )
identifier[has_title] = literal[string] keyword[in] identifier[dataset]
identifier[has_description] = literal[string] keyword[in] identifier[dataset]
keyword[if] identifier[has_title] :
identifier[has_min_title] = identifier[len] ( identifier[dataset] [ literal[string] ])>= identifier[MIN_DATASET_TITLE]
keyword[else] :
identifier[has_min_title] = keyword[False]
keyword[if] identifier[has_description] :
identifier[has_min_desc] = identifier[len] (
identifier[dataset] [ literal[string] ])>= identifier[MIN_DATASET_DESCRIPTION]
keyword[else] :
identifier[has_min_desc] = keyword[False]
identifier[harvest] =( identifier[has_title] keyword[and] identifier[has_description] keyword[and]
identifier[has_data_format] keyword[and] identifier[has_min_title] keyword[and] identifier[has_min_desc] )
identifier[notes] =[]
keyword[if] keyword[not] identifier[has_data_format] :
identifier[notes] . identifier[append] ( literal[string] )
keyword[if] keyword[not] identifier[has_title] :
identifier[notes] . identifier[append] ( literal[string] . identifier[format] ( identifier[dataset] ))
keyword[else] :
keyword[if] keyword[not] identifier[has_min_title] :
identifier[notes] . identifier[append] ( literal[string] . identifier[format] (
identifier[MIN_DATASET_TITLE] ))
keyword[if] keyword[not] identifier[has_description] :
identifier[notes] . identifier[append] ( literal[string] . identifier[format] ( identifier[dataset] ))
keyword[else] :
keyword[if] keyword[not] identifier[has_min_desc] :
identifier[notes] . identifier[append] ( literal[string] . identifier[format] (
identifier[MIN_DATASET_DESCRIPTION] ))
keyword[return] identifier[harvest] , identifier[notes] | def _dataset_qa(self, dataset):
"""Chequea si el dataset tiene una calidad mínima para cosechar."""
# 1. VALIDACIONES
# chequea que haya por lo menos algún formato de datos reconocido
has_data_format = helpers.dataset_has_data_distributions(dataset)
# chequea que algunos campos tengan longitudes mínimas
has_title = 'title' in dataset
has_description = 'description' in dataset
if has_title:
has_min_title = len(dataset['title']) >= MIN_DATASET_TITLE # depends on [control=['if'], data=[]]
else:
has_min_title = False
if has_description:
has_min_desc = len(dataset['description']) >= MIN_DATASET_DESCRIPTION # depends on [control=['if'], data=[]]
else:
has_min_desc = False
# 2. EVALUACION DE COSECHA: evalua si se cosecha o no el dataset
harvest = has_title and has_description and has_data_format and has_min_title and has_min_desc
# 3. NOTAS: genera notas de validación
notes = []
if not has_data_format:
notes.append('No tiene distribuciones con datos.') # depends on [control=['if'], data=[]]
if not has_title:
notes.append('Dataset sin titulo {}'.format(dataset)) # depends on [control=['if'], data=[]]
elif not has_min_title:
notes.append('Titulo tiene menos de {} caracteres'.format(MIN_DATASET_TITLE)) # depends on [control=['if'], data=[]]
if not has_description:
notes.append('Dataset sin descripcion {}'.format(dataset)) # depends on [control=['if'], data=[]]
elif not has_min_desc:
notes.append('Descripcion tiene menos de {} caracteres'.format(MIN_DATASET_DESCRIPTION)) # depends on [control=['if'], data=[]]
return (harvest, notes) |
def signal_to_noise_map(self):
"""The signal-to-noise_map of the data and noise-map which are fitted."""
signal_to_noise_map = np.divide(self.data, self.noise_map)
signal_to_noise_map[signal_to_noise_map < 0] = 0
return signal_to_noise_map | def function[signal_to_noise_map, parameter[self]]:
constant[The signal-to-noise_map of the data and noise-map which are fitted.]
variable[signal_to_noise_map] assign[=] call[name[np].divide, parameter[name[self].data, name[self].noise_map]]
call[name[signal_to_noise_map]][compare[name[signal_to_noise_map] less[<] constant[0]]] assign[=] constant[0]
return[name[signal_to_noise_map]] | keyword[def] identifier[signal_to_noise_map] ( identifier[self] ):
literal[string]
identifier[signal_to_noise_map] = identifier[np] . identifier[divide] ( identifier[self] . identifier[data] , identifier[self] . identifier[noise_map] )
identifier[signal_to_noise_map] [ identifier[signal_to_noise_map] < literal[int] ]= literal[int]
keyword[return] identifier[signal_to_noise_map] | def signal_to_noise_map(self):
"""The signal-to-noise_map of the data and noise-map which are fitted."""
signal_to_noise_map = np.divide(self.data, self.noise_map)
signal_to_noise_map[signal_to_noise_map < 0] = 0
return signal_to_noise_map |
def _aload8(ins):
''' Loads an 8 bit value from a memory address
If 2nd arg. start with '*', it is always treated as
an indirect value.
'''
output = _addr(ins.quad[2])
output.append('ld a, (hl)')
output.append('push af')
return output | def function[_aload8, parameter[ins]]:
constant[ Loads an 8 bit value from a memory address
If 2nd arg. start with '*', it is always treated as
an indirect value.
]
variable[output] assign[=] call[name[_addr], parameter[call[name[ins].quad][constant[2]]]]
call[name[output].append, parameter[constant[ld a, (hl)]]]
call[name[output].append, parameter[constant[push af]]]
return[name[output]] | keyword[def] identifier[_aload8] ( identifier[ins] ):
literal[string]
identifier[output] = identifier[_addr] ( identifier[ins] . identifier[quad] [ literal[int] ])
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
keyword[return] identifier[output] | def _aload8(ins):
""" Loads an 8 bit value from a memory address
If 2nd arg. start with '*', it is always treated as
an indirect value.
"""
output = _addr(ins.quad[2])
output.append('ld a, (hl)')
output.append('push af')
return output |
def __envelope(x, hop):
'''Compute the max-envelope of x at a stride/frame length of h'''
return util.frame(x, hop_length=hop, frame_length=hop).max(axis=0) | def function[__envelope, parameter[x, hop]]:
constant[Compute the max-envelope of x at a stride/frame length of h]
return[call[call[name[util].frame, parameter[name[x]]].max, parameter[]]] | keyword[def] identifier[__envelope] ( identifier[x] , identifier[hop] ):
literal[string]
keyword[return] identifier[util] . identifier[frame] ( identifier[x] , identifier[hop_length] = identifier[hop] , identifier[frame_length] = identifier[hop] ). identifier[max] ( identifier[axis] = literal[int] ) | def __envelope(x, hop):
"""Compute the max-envelope of x at a stride/frame length of h"""
return util.frame(x, hop_length=hop, frame_length=hop).max(axis=0) |
def match(self, subject: Expression) -> Iterator[Tuple[Expression, Substitution]]:
"""Match the subject against all the matcher's patterns.
Args:
subject: The subject to match.
Yields:
For every match, a tuple of the matching pattern and the match substitution.
"""
return _MatchIter(self, subject) | def function[match, parameter[self, subject]]:
constant[Match the subject against all the matcher's patterns.
Args:
subject: The subject to match.
Yields:
For every match, a tuple of the matching pattern and the match substitution.
]
return[call[name[_MatchIter], parameter[name[self], name[subject]]]] | keyword[def] identifier[match] ( identifier[self] , identifier[subject] : identifier[Expression] )-> identifier[Iterator] [ identifier[Tuple] [ identifier[Expression] , identifier[Substitution] ]]:
literal[string]
keyword[return] identifier[_MatchIter] ( identifier[self] , identifier[subject] ) | def match(self, subject: Expression) -> Iterator[Tuple[Expression, Substitution]]:
"""Match the subject against all the matcher's patterns.
Args:
subject: The subject to match.
Yields:
For every match, a tuple of the matching pattern and the match substitution.
"""
return _MatchIter(self, subject) |
def fit(self, X, y, sample_weight=None):
    """Fit the Genetic Program according to X, y.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values.

    sample_weight : array-like, shape = [n_samples], optional
        Weights applied to individual samples.

    Returns
    -------
    self : object
        Returns self.

    Raises
    ------
    ValueError
        If any constructor parameters are invalid or mutually
        inconsistent.
    """
    random_state = check_random_state(self.random_state)

    # Check arrays
    if isinstance(self, ClassifierMixin):
        X, y = check_X_y(X, y, y_numeric=False)
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        # Classes whose total sample weight is zero are effectively
        # absent; exactly two must remain for the binary GP classifier.
        n_trim_classes = np.count_nonzero(np.bincount(y, sample_weight))
        if n_trim_classes != 2:
            raise ValueError("y contains %d class after sample_weight "
                             "trimmed classes with zero weights, while 2 "
                             "classes are required."
                             % n_trim_classes)
        self.n_classes_ = len(self.classes_)
    else:
        X, y = check_X_y(X, y, y_numeric=True)
    if sample_weight is not None:
        sample_weight = check_array(sample_weight, ensure_2d=False)

    _, self.n_features_ = X.shape

    # Resolve hall_of_fame / n_components defaults once.  The validated
    # *local* values are used from here on (including the error messages
    # and the transformer selection at the bottom), so leaving the
    # attributes as None cannot trigger a TypeError later.
    hall_of_fame = self.hall_of_fame
    if hall_of_fame is None:
        hall_of_fame = self.population_size
    if hall_of_fame > self.population_size or hall_of_fame < 1:
        raise ValueError('hall_of_fame (%d) must be less than or equal to '
                         'population_size (%d).' % (hall_of_fame,
                                                    self.population_size))
    n_components = self.n_components
    if n_components is None:
        n_components = hall_of_fame
    if n_components > hall_of_fame or n_components < 1:
        raise ValueError('n_components (%d) must be less than or equal to '
                         'hall_of_fame (%d).' % (n_components,
                                                 hall_of_fame))

    # Map function names to _Function objects, rejecting anything else.
    self._function_set = []
    for function in self.function_set:
        if isinstance(function, str):
            if function not in _function_map:
                raise ValueError('invalid function name %s found in '
                                 '`function_set`.' % function)
            self._function_set.append(_function_map[function])
        elif isinstance(function, _Function):
            self._function_set.append(function)
        else:
            raise ValueError('invalid type %s found in `function_set`.'
                             % type(function))
    if not self._function_set:
        raise ValueError('No valid functions found in `function_set`.')

    # For point-mutation to find a compatible replacement node
    self._arities = {}
    for function in self._function_set:
        arity = function.arity
        self._arities[arity] = self._arities.get(arity, [])
        self._arities[arity].append(function)

    # Resolve the fitness metric; the allowed metric names depend on the
    # estimator flavour (regressor / classifier / transformer).
    if isinstance(self.metric, _Fitness):
        self._metric = self.metric
    elif isinstance(self, RegressorMixin):
        if self.metric not in ('mean absolute error', 'mse', 'rmse',
                               'pearson', 'spearman'):
            raise ValueError('Unsupported metric: %s' % self.metric)
        self._metric = _fitness_map[self.metric]
    elif isinstance(self, ClassifierMixin):
        if self.metric != 'log loss':
            raise ValueError('Unsupported metric: %s' % self.metric)
        self._metric = _fitness_map[self.metric]
    elif isinstance(self, TransformerMixin):
        if self.metric not in ('pearson', 'spearman'):
            raise ValueError('Unsupported metric: %s' % self.metric)
        self._metric = _fitness_map[self.metric]

    # Cumulative probabilities of the genetic operations; any remainder
    # up to 1.0 is the implicit reproduction probability.
    self._method_probs = np.array([self.p_crossover,
                                   self.p_subtree_mutation,
                                   self.p_hoist_mutation,
                                   self.p_point_mutation])
    self._method_probs = np.cumsum(self._method_probs)
    if self._method_probs[-1] > 1:
        raise ValueError('The sum of p_crossover, p_subtree_mutation, '
                         'p_hoist_mutation and p_point_mutation should '
                         'total to 1.0 or less.')

    if self.init_method not in ('half and half', 'grow', 'full'):
        raise ValueError('Valid program initializations methods include '
                         '"grow", "full" and "half and half". Given %s.'
                         % self.init_method)

    if not((isinstance(self.const_range, tuple) and
            len(self.const_range) == 2) or self.const_range is None):
        raise ValueError('const_range should be a tuple with length two, '
                         'or None.')

    if (not isinstance(self.init_depth, tuple) or
            len(self.init_depth) != 2):
        raise ValueError('init_depth should be a tuple with length two.')
    if self.init_depth[0] > self.init_depth[1]:
        raise ValueError('init_depth should be in increasing numerical '
                         'order: (min_depth, max_depth).')

    if self.feature_names is not None:
        if self.n_features_ != len(self.feature_names):
            raise ValueError('The supplied `feature_names` has different '
                             'length to n_features. Expected %d, got %d.'
                             % (self.n_features_, len(self.feature_names)))
        for feature_name in self.feature_names:
            if not isinstance(feature_name, str):
                raise ValueError('invalid type %s found in '
                                 '`feature_names`.' % type(feature_name))

    if self.transformer is not None:
        if isinstance(self.transformer, _Function):
            self._transformer = self.transformer
        elif self.transformer == 'sigmoid':
            self._transformer = sigmoid
        else:
            raise ValueError('Invalid `transformer`. Expected either '
                             '"sigmoid" or _Function object, got %s' %
                             type(self.transformer))
        if self._transformer.arity != 1:
            raise ValueError('Invalid arity for `transformer`. Expected 1, '
                             'got %d.' % (self._transformer.arity))

    # Snapshot of parameters handed to the parallel evolve workers.
    params = self.get_params()
    params['_metric'] = self._metric
    if hasattr(self, '_transformer'):
        params['_transformer'] = self._transformer
    else:
        params['_transformer'] = None
    params['function_set'] = self._function_set
    params['arities'] = self._arities
    params['method_probs'] = self._method_probs

    if not self.warm_start or not hasattr(self, '_programs'):
        # Free allocated memory, if any
        self._programs = []
        self.run_details_ = {'generation': [],
                             'average_length': [],
                             'average_fitness': [],
                             'best_length': [],
                             'best_fitness': [],
                             'best_oob_fitness': [],
                             'generation_time': []}

    prior_generations = len(self._programs)
    n_more_generations = self.generations - prior_generations

    if n_more_generations < 0:
        raise ValueError('generations=%d must be larger or equal to '
                         'len(_programs)=%d when warm_start==True'
                         % (self.generations, len(self._programs)))
    elif n_more_generations == 0:
        # Nothing to evolve; keep the last generation's raw fitness so
        # the final best-program selection below still works.
        fitness = [program.raw_fitness_ for program in self._programs[-1]]
        warn('Warm-start fitting without increasing n_estimators does not '
             'fit new programs.')

    if self.warm_start:
        # Generate and discard seeds that would have been produced on the
        # initial fit call.
        for i in range(len(self._programs)):
            _ = random_state.randint(MAX_INT, size=self.population_size)

    if self.verbose:
        # Print header fields
        self._verbose_reporter()

    for gen in range(prior_generations, self.generations):

        start_time = time()

        if gen == 0:
            parents = None
        else:
            parents = self._programs[gen - 1]

        # Parallel loop
        n_jobs, n_programs, starts = _partition_estimators(
            self.population_size, self.n_jobs)
        seeds = random_state.randint(MAX_INT, size=self.population_size)

        population = Parallel(n_jobs=n_jobs,
                              verbose=int(self.verbose > 1))(
            delayed(_parallel_evolve)(n_programs[i],
                                      parents,
                                      X,
                                      y,
                                      sample_weight,
                                      seeds[starts[i]:starts[i + 1]],
                                      params)
            for i in range(n_jobs))

        # Reduce, maintaining order across different n_jobs
        population = list(itertools.chain.from_iterable(population))

        fitness = [program.raw_fitness_ for program in population]
        length = [program.length_ for program in population]

        # 'auto' parsimony ties bloat control to the covariance between
        # program length and raw fitness.
        parsimony_coefficient = None
        if self.parsimony_coefficient == 'auto':
            parsimony_coefficient = (np.cov(length, fitness)[1, 0] /
                                     np.var(length))
        for program in population:
            program.fitness_ = program.fitness(parsimony_coefficient)

        self._programs.append(population)

        # Remove old programs that didn't make it into the new population.
        if not self.low_memory:
            for old_gen in np.arange(gen, 0, -1):
                indices = []
                for program in self._programs[old_gen]:
                    if program is not None:
                        for idx in program.parents:
                            if 'idx' in idx:
                                indices.append(program.parents[idx])
                indices = set(indices)
                for idx in range(self.population_size):
                    if idx not in indices:
                        self._programs[old_gen - 1][idx] = None
        elif gen > 0:
            # Remove old generations
            self._programs[gen - 1] = None

        # Record run details
        if self._metric.greater_is_better:
            best_program = population[np.argmax(fitness)]
        else:
            best_program = population[np.argmin(fitness)]

        self.run_details_['generation'].append(gen)
        self.run_details_['average_length'].append(np.mean(length))
        self.run_details_['average_fitness'].append(np.mean(fitness))
        self.run_details_['best_length'].append(best_program.length_)
        self.run_details_['best_fitness'].append(best_program.raw_fitness_)
        oob_fitness = np.nan
        if self.max_samples < 1.0:
            # Out-of-bag fitness only exists when subsampling was used.
            oob_fitness = best_program.oob_fitness_
        self.run_details_['best_oob_fitness'].append(oob_fitness)
        generation_time = time() - start_time
        self.run_details_['generation_time'].append(generation_time)

        if self.verbose:
            self._verbose_reporter(self.run_details_)

        # Check for early stopping
        if self._metric.greater_is_better:
            best_fitness = fitness[np.argmax(fitness)]
            if best_fitness >= self.stopping_criteria:
                break
        else:
            best_fitness = fitness[np.argmin(fitness)]
            if best_fitness <= self.stopping_criteria:
                break

    if isinstance(self, TransformerMixin):
        # Find the best individuals in the final generation.  Use the
        # validated local hall_of_fame / n_components so that attributes
        # left as None cannot break the slicing/comparison below.
        fitness = np.array(fitness)
        if self._metric.greater_is_better:
            hall_of_fame_idx = fitness.argsort()[::-1][:hall_of_fame]
        else:
            hall_of_fame_idx = fitness.argsort()[:hall_of_fame]
        evaluation = np.array([gp.execute(X) for gp in
                               [self._programs[-1][i] for
                                i in hall_of_fame_idx]])
        if self.metric == 'spearman':
            evaluation = np.apply_along_axis(rankdata, 1, evaluation)

        with np.errstate(divide='ignore', invalid='ignore'):
            correlations = np.abs(np.corrcoef(evaluation))
        np.fill_diagonal(correlations, 0.)
        components = list(range(hall_of_fame))
        indices = list(range(hall_of_fame))
        # Iteratively remove least fit individual of most correlated pair
        while len(components) > n_components:
            most_correlated = np.unravel_index(np.argmax(correlations),
                                               correlations.shape)
            # The correlation matrix is sorted by fitness, so identifying
            # the least fit of the pair is simply getting the higher index
            worst = max(most_correlated)
            components.pop(worst)
            indices.remove(worst)
            correlations = correlations[:, indices][indices, :]
            indices = list(range(len(components)))
        self._best_programs = [self._programs[-1][i] for i in
                               hall_of_fame_idx[components]]

    else:
        # Find the best individual in the final generation
        if self._metric.greater_is_better:
            self._program = self._programs[-1][np.argmax(fitness)]
        else:
            self._program = self._programs[-1][np.argmin(fitness)]

    return self
constant[Fit the Genetic Program according to X, y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
Returns
-------
self : object
Returns self.
]
variable[random_state] assign[=] call[name[check_random_state], parameter[name[self].random_state]]
if call[name[isinstance], parameter[name[self], name[ClassifierMixin]]] begin[:]
<ast.Tuple object at 0x7da1b1ddf700> assign[=] call[name[check_X_y], parameter[name[X], name[y]]]
call[name[check_classification_targets], parameter[name[y]]]
<ast.Tuple object at 0x7da1b1ddf430> assign[=] call[name[np].unique, parameter[name[y]]]
variable[n_trim_classes] assign[=] call[name[np].count_nonzero, parameter[call[name[np].bincount, parameter[name[y], name[sample_weight]]]]]
if compare[name[n_trim_classes] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b1ddefb0>
name[self].n_classes_ assign[=] call[name[len], parameter[name[self].classes_]]
if compare[name[sample_weight] is_not constant[None]] begin[:]
variable[sample_weight] assign[=] call[name[check_array], parameter[name[sample_weight]]]
<ast.Tuple object at 0x7da1b1dde920> assign[=] name[X].shape
variable[hall_of_fame] assign[=] name[self].hall_of_fame
if compare[name[hall_of_fame] is constant[None]] begin[:]
variable[hall_of_fame] assign[=] name[self].population_size
if <ast.BoolOp object at 0x7da1b1d5edd0> begin[:]
<ast.Raise object at 0x7da1b1d5e050>
variable[n_components] assign[=] name[self].n_components
if compare[name[n_components] is constant[None]] begin[:]
variable[n_components] assign[=] name[hall_of_fame]
if <ast.BoolOp object at 0x7da1b1d5d6f0> begin[:]
<ast.Raise object at 0x7da1b1d5ebf0>
name[self]._function_set assign[=] list[[]]
for taget[name[function]] in starred[name[self].function_set] begin[:]
if call[name[isinstance], parameter[name[function], name[str]]] begin[:]
if compare[name[function] <ast.NotIn object at 0x7da2590d7190> name[_function_map]] begin[:]
<ast.Raise object at 0x7da1b1d5e290>
call[name[self]._function_set.append, parameter[call[name[_function_map]][name[function]]]]
if <ast.UnaryOp object at 0x7da1b1dde1a0> begin[:]
<ast.Raise object at 0x7da1b1dde110>
name[self]._arities assign[=] dictionary[[], []]
for taget[name[function]] in starred[name[self]._function_set] begin[:]
variable[arity] assign[=] name[function].arity
call[name[self]._arities][name[arity]] assign[=] call[name[self]._arities.get, parameter[name[arity], list[[]]]]
call[call[name[self]._arities][name[arity]].append, parameter[name[function]]]
if call[name[isinstance], parameter[name[self].metric, name[_Fitness]]] begin[:]
name[self]._metric assign[=] name[self].metric
name[self]._method_probs assign[=] call[name[np].array, parameter[list[[<ast.Attribute object at 0x7da1b1ddc7f0>, <ast.Attribute object at 0x7da1b1ddc790>, <ast.Attribute object at 0x7da1b1ddc730>, <ast.Attribute object at 0x7da1b1ddc6d0>]]]]
name[self]._method_probs assign[=] call[name[np].cumsum, parameter[name[self]._method_probs]]
if compare[call[name[self]._method_probs][<ast.UnaryOp object at 0x7da1b1ddc400>] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b1ddc370>
if compare[name[self].init_method <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b1ddc1c0>, <ast.Constant object at 0x7da1b1ddc190>, <ast.Constant object at 0x7da1b1ddc160>]]] begin[:]
<ast.Raise object at 0x7da1b1ddc130>
if <ast.UnaryOp object at 0x7da1b1d77f70> begin[:]
<ast.Raise object at 0x7da1b1d77c10>
if <ast.BoolOp object at 0x7da1b1d77b20> begin[:]
<ast.Raise object at 0x7da1b1d4f190>
if compare[call[name[self].init_depth][constant[0]] greater[>] call[name[self].init_depth][constant[1]]] begin[:]
<ast.Raise object at 0x7da1b1d4eef0>
if compare[name[self].feature_names is_not constant[None]] begin[:]
if compare[name[self].n_features_ not_equal[!=] call[name[len], parameter[name[self].feature_names]]] begin[:]
<ast.Raise object at 0x7da1b1d2a9b0>
for taget[name[feature_name]] in starred[name[self].feature_names] begin[:]
if <ast.UnaryOp object at 0x7da1b1d2ad40> begin[:]
<ast.Raise object at 0x7da1b1d2ac50>
if compare[name[self].transformer is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[self].transformer, name[_Function]]] begin[:]
name[self]._transformer assign[=] name[self].transformer
if compare[name[self]._transformer.arity not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da1b1d28160>
variable[params] assign[=] call[name[self].get_params, parameter[]]
call[name[params]][constant[_metric]] assign[=] name[self]._metric
if call[name[hasattr], parameter[name[self], constant[_transformer]]] begin[:]
call[name[params]][constant[_transformer]] assign[=] name[self]._transformer
call[name[params]][constant[function_set]] assign[=] name[self]._function_set
call[name[params]][constant[arities]] assign[=] name[self]._arities
call[name[params]][constant[method_probs]] assign[=] name[self]._method_probs
if <ast.BoolOp object at 0x7da1b1d2a950> begin[:]
name[self]._programs assign[=] list[[]]
name[self].run_details_ assign[=] dictionary[[<ast.Constant object at 0x7da1b1d296c0>, <ast.Constant object at 0x7da1b1d281c0>, <ast.Constant object at 0x7da1b1d2baf0>, <ast.Constant object at 0x7da1b1d2a350>, <ast.Constant object at 0x7da1b1d2b220>, <ast.Constant object at 0x7da1b1d2ab30>, <ast.Constant object at 0x7da1b1d28eb0>], [<ast.List object at 0x7da1b1d28430>, <ast.List object at 0x7da1b1d2add0>, <ast.List object at 0x7da1b1d2b340>, <ast.List object at 0x7da1b1d2a590>, <ast.List object at 0x7da1b1d2aec0>, <ast.List object at 0x7da1b1d293c0>, <ast.List object at 0x7da1b1d2b520>]]
variable[prior_generations] assign[=] call[name[len], parameter[name[self]._programs]]
variable[n_more_generations] assign[=] binary_operation[name[self].generations - name[prior_generations]]
if compare[name[n_more_generations] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1d28670>
if name[self].warm_start begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self]._programs]]]]] begin[:]
variable[_] assign[=] call[name[random_state].randint, parameter[name[MAX_INT]]]
if name[self].verbose begin[:]
call[name[self]._verbose_reporter, parameter[]]
for taget[name[gen]] in starred[call[name[range], parameter[name[prior_generations], name[self].generations]]] begin[:]
variable[start_time] assign[=] call[name[time], parameter[]]
if compare[name[gen] equal[==] constant[0]] begin[:]
variable[parents] assign[=] constant[None]
<ast.Tuple object at 0x7da1b22ac4c0> assign[=] call[name[_partition_estimators], parameter[name[self].population_size, name[self].n_jobs]]
variable[seeds] assign[=] call[name[random_state].randint, parameter[name[MAX_INT]]]
variable[population] assign[=] call[call[name[Parallel], parameter[]], parameter[<ast.GeneratorExp object at 0x7da1b22bb430>]]
variable[population] assign[=] call[name[list], parameter[call[name[itertools].chain.from_iterable, parameter[name[population]]]]]
variable[fitness] assign[=] <ast.ListComp object at 0x7da1b22b9c60>
variable[length] assign[=] <ast.ListComp object at 0x7da1b22b81c0>
variable[parsimony_coefficient] assign[=] constant[None]
if compare[name[self].parsimony_coefficient equal[==] constant[auto]] begin[:]
variable[parsimony_coefficient] assign[=] binary_operation[call[call[name[np].cov, parameter[name[length], name[fitness]]]][tuple[[<ast.Constant object at 0x7da1b22b9b70>, <ast.Constant object at 0x7da1b22b8e20>]]] / call[name[np].var, parameter[name[length]]]]
for taget[name[program]] in starred[name[population]] begin[:]
name[program].fitness_ assign[=] call[name[program].fitness, parameter[name[parsimony_coefficient]]]
call[name[self]._programs.append, parameter[name[population]]]
if <ast.UnaryOp object at 0x7da1b1d68730> begin[:]
for taget[name[old_gen]] in starred[call[name[np].arange, parameter[name[gen], constant[0], <ast.UnaryOp object at 0x7da1b1d68df0>]]] begin[:]
variable[indices] assign[=] list[[]]
for taget[name[program]] in starred[call[name[self]._programs][name[old_gen]]] begin[:]
if compare[name[program] is_not constant[None]] begin[:]
for taget[name[idx]] in starred[name[program].parents] begin[:]
if compare[constant[idx] in name[idx]] begin[:]
call[name[indices].append, parameter[call[name[program].parents][name[idx]]]]
variable[indices] assign[=] call[name[set], parameter[name[indices]]]
for taget[name[idx]] in starred[call[name[range], parameter[name[self].population_size]]] begin[:]
if compare[name[idx] <ast.NotIn object at 0x7da2590d7190> name[indices]] begin[:]
call[call[name[self]._programs][binary_operation[name[old_gen] - constant[1]]]][name[idx]] assign[=] constant[None]
if name[self]._metric.greater_is_better begin[:]
variable[best_program] assign[=] call[name[population]][call[name[np].argmax, parameter[name[fitness]]]]
call[call[name[self].run_details_][constant[generation]].append, parameter[name[gen]]]
call[call[name[self].run_details_][constant[average_length]].append, parameter[call[name[np].mean, parameter[name[length]]]]]
call[call[name[self].run_details_][constant[average_fitness]].append, parameter[call[name[np].mean, parameter[name[fitness]]]]]
call[call[name[self].run_details_][constant[best_length]].append, parameter[name[best_program].length_]]
call[call[name[self].run_details_][constant[best_fitness]].append, parameter[name[best_program].raw_fitness_]]
variable[oob_fitness] assign[=] name[np].nan
if compare[name[self].max_samples less[<] constant[1.0]] begin[:]
variable[oob_fitness] assign[=] name[best_program].oob_fitness_
call[call[name[self].run_details_][constant[best_oob_fitness]].append, parameter[name[oob_fitness]]]
variable[generation_time] assign[=] binary_operation[call[name[time], parameter[]] - name[start_time]]
call[call[name[self].run_details_][constant[generation_time]].append, parameter[name[generation_time]]]
if name[self].verbose begin[:]
call[name[self]._verbose_reporter, parameter[name[self].run_details_]]
if name[self]._metric.greater_is_better begin[:]
variable[best_fitness] assign[=] call[name[fitness]][call[name[np].argmax, parameter[name[fitness]]]]
if compare[name[best_fitness] greater_or_equal[>=] name[self].stopping_criteria] begin[:]
break
if call[name[isinstance], parameter[name[self], name[TransformerMixin]]] begin[:]
variable[fitness] assign[=] call[name[np].array, parameter[name[fitness]]]
if name[self]._metric.greater_is_better begin[:]
variable[hall_of_fame] assign[=] call[call[call[name[fitness].argsort, parameter[]]][<ast.Slice object at 0x7da1b1d6a4d0>]][<ast.Slice object at 0x7da1b1d69360>]
variable[evaluation] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b1d691b0>]]
if compare[name[self].metric equal[==] constant[spearman]] begin[:]
variable[evaluation] assign[=] call[name[np].apply_along_axis, parameter[name[rankdata], constant[1], name[evaluation]]]
with call[name[np].errstate, parameter[]] begin[:]
variable[correlations] assign[=] call[name[np].abs, parameter[call[name[np].corrcoef, parameter[name[evaluation]]]]]
call[name[np].fill_diagonal, parameter[name[correlations], constant[0.0]]]
variable[components] assign[=] call[name[list], parameter[call[name[range], parameter[name[self].hall_of_fame]]]]
variable[indices] assign[=] call[name[list], parameter[call[name[range], parameter[name[self].hall_of_fame]]]]
while compare[call[name[len], parameter[name[components]]] greater[>] name[self].n_components] begin[:]
variable[most_correlated] assign[=] call[name[np].unravel_index, parameter[call[name[np].argmax, parameter[name[correlations]]], name[correlations].shape]]
variable[worst] assign[=] call[name[max], parameter[name[most_correlated]]]
call[name[components].pop, parameter[name[worst]]]
call[name[indices].remove, parameter[name[worst]]]
variable[correlations] assign[=] call[call[name[correlations]][tuple[[<ast.Slice object at 0x7da1b1dc40d0>, <ast.Name object at 0x7da1b1dc4a90>]]]][tuple[[<ast.Name object at 0x7da1b1dc5b10>, <ast.Slice object at 0x7da1b1dc6da0>]]]
variable[indices] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[components]]]]]]]
name[self]._best_programs assign[=] <ast.ListComp object at 0x7da1b1dc6230>
return[name[self]] | keyword[def] identifier[fit] ( identifier[self] , identifier[X] , identifier[y] , identifier[sample_weight] = keyword[None] ):
literal[string]
identifier[random_state] = identifier[check_random_state] ( identifier[self] . identifier[random_state] )
keyword[if] identifier[isinstance] ( identifier[self] , identifier[ClassifierMixin] ):
identifier[X] , identifier[y] = identifier[check_X_y] ( identifier[X] , identifier[y] , identifier[y_numeric] = keyword[False] )
identifier[check_classification_targets] ( identifier[y] )
identifier[self] . identifier[classes_] , identifier[y] = identifier[np] . identifier[unique] ( identifier[y] , identifier[return_inverse] = keyword[True] )
identifier[n_trim_classes] = identifier[np] . identifier[count_nonzero] ( identifier[np] . identifier[bincount] ( identifier[y] , identifier[sample_weight] ))
keyword[if] identifier[n_trim_classes] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string]
% identifier[n_trim_classes] )
identifier[self] . identifier[n_classes_] = identifier[len] ( identifier[self] . identifier[classes_] )
keyword[else] :
identifier[X] , identifier[y] = identifier[check_X_y] ( identifier[X] , identifier[y] , identifier[y_numeric] = keyword[True] )
keyword[if] identifier[sample_weight] keyword[is] keyword[not] keyword[None] :
identifier[sample_weight] = identifier[check_array] ( identifier[sample_weight] , identifier[ensure_2d] = keyword[False] )
identifier[_] , identifier[self] . identifier[n_features_] = identifier[X] . identifier[shape]
identifier[hall_of_fame] = identifier[self] . identifier[hall_of_fame]
keyword[if] identifier[hall_of_fame] keyword[is] keyword[None] :
identifier[hall_of_fame] = identifier[self] . identifier[population_size]
keyword[if] identifier[hall_of_fame] > identifier[self] . identifier[population_size] keyword[or] identifier[hall_of_fame] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] %( identifier[self] . identifier[hall_of_fame] ,
identifier[self] . identifier[population_size] ))
identifier[n_components] = identifier[self] . identifier[n_components]
keyword[if] identifier[n_components] keyword[is] keyword[None] :
identifier[n_components] = identifier[hall_of_fame]
keyword[if] identifier[n_components] > identifier[hall_of_fame] keyword[or] identifier[n_components] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] %( identifier[self] . identifier[n_components] ,
identifier[self] . identifier[hall_of_fame] ))
identifier[self] . identifier[_function_set] =[]
keyword[for] identifier[function] keyword[in] identifier[self] . identifier[function_set] :
keyword[if] identifier[isinstance] ( identifier[function] , identifier[str] ):
keyword[if] identifier[function] keyword[not] keyword[in] identifier[_function_map] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[function] )
identifier[self] . identifier[_function_set] . identifier[append] ( identifier[_function_map] [ identifier[function] ])
keyword[elif] identifier[isinstance] ( identifier[function] , identifier[_Function] ):
identifier[self] . identifier[_function_set] . identifier[append] ( identifier[function] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
% identifier[type] ( identifier[function] ))
keyword[if] keyword[not] identifier[self] . identifier[_function_set] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_arities] ={}
keyword[for] identifier[function] keyword[in] identifier[self] . identifier[_function_set] :
identifier[arity] = identifier[function] . identifier[arity]
identifier[self] . identifier[_arities] [ identifier[arity] ]= identifier[self] . identifier[_arities] . identifier[get] ( identifier[arity] ,[])
identifier[self] . identifier[_arities] [ identifier[arity] ]. identifier[append] ( identifier[function] )
keyword[if] identifier[isinstance] ( identifier[self] . identifier[metric] , identifier[_Fitness] ):
identifier[self] . identifier[_metric] = identifier[self] . identifier[metric]
keyword[elif] identifier[isinstance] ( identifier[self] , identifier[RegressorMixin] ):
keyword[if] identifier[self] . identifier[metric] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[metric] )
identifier[self] . identifier[_metric] = identifier[_fitness_map] [ identifier[self] . identifier[metric] ]
keyword[elif] identifier[isinstance] ( identifier[self] , identifier[ClassifierMixin] ):
keyword[if] identifier[self] . identifier[metric] != literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[metric] )
identifier[self] . identifier[_metric] = identifier[_fitness_map] [ identifier[self] . identifier[metric] ]
keyword[elif] identifier[isinstance] ( identifier[self] , identifier[TransformerMixin] ):
keyword[if] identifier[self] . identifier[metric] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[metric] )
identifier[self] . identifier[_metric] = identifier[_fitness_map] [ identifier[self] . identifier[metric] ]
identifier[self] . identifier[_method_probs] = identifier[np] . identifier[array] ([ identifier[self] . identifier[p_crossover] ,
identifier[self] . identifier[p_subtree_mutation] ,
identifier[self] . identifier[p_hoist_mutation] ,
identifier[self] . identifier[p_point_mutation] ])
identifier[self] . identifier[_method_probs] = identifier[np] . identifier[cumsum] ( identifier[self] . identifier[_method_probs] )
keyword[if] identifier[self] . identifier[_method_probs] [- literal[int] ]> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[self] . identifier[init_method] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
% identifier[self] . identifier[init_method] )
keyword[if] keyword[not] (( identifier[isinstance] ( identifier[self] . identifier[const_range] , identifier[tuple] ) keyword[and]
identifier[len] ( identifier[self] . identifier[const_range] )== literal[int] ) keyword[or] identifier[self] . identifier[const_range] keyword[is] keyword[None] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[self] . identifier[init_depth] , identifier[tuple] ) keyword[or]
identifier[len] ( identifier[self] . identifier[init_depth] )!= literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[init_depth] [ literal[int] ]> identifier[self] . identifier[init_depth] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[self] . identifier[feature_names] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[n_features_] != identifier[len] ( identifier[self] . identifier[feature_names] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
%( identifier[self] . identifier[n_features_] , identifier[len] ( identifier[self] . identifier[feature_names] )))
keyword[for] identifier[feature_name] keyword[in] identifier[self] . identifier[feature_names] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[feature_name] , identifier[str] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[type] ( identifier[feature_name] ))
keyword[if] identifier[self] . identifier[transformer] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[transformer] , identifier[_Function] ):
identifier[self] . identifier[_transformer] = identifier[self] . identifier[transformer]
keyword[elif] identifier[self] . identifier[transformer] == literal[string] :
identifier[self] . identifier[_transformer] = identifier[sigmoid]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] %
identifier[type] ( identifier[self] . identifier[transformer] ))
keyword[if] identifier[self] . identifier[_transformer] . identifier[arity] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] %( identifier[self] . identifier[_transformer] . identifier[arity] ))
identifier[params] = identifier[self] . identifier[get_params] ()
identifier[params] [ literal[string] ]= identifier[self] . identifier[_metric]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[params] [ literal[string] ]= identifier[self] . identifier[_transformer]
keyword[else] :
identifier[params] [ literal[string] ]= keyword[None]
identifier[params] [ literal[string] ]= identifier[self] . identifier[_function_set]
identifier[params] [ literal[string] ]= identifier[self] . identifier[_arities]
identifier[params] [ literal[string] ]= identifier[self] . identifier[_method_probs]
keyword[if] keyword[not] identifier[self] . identifier[warm_start] keyword[or] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_programs] =[]
identifier[self] . identifier[run_details_] ={ literal[string] :[],
literal[string] :[],
literal[string] :[],
literal[string] :[],
literal[string] :[],
literal[string] :[],
literal[string] :[]}
identifier[prior_generations] = identifier[len] ( identifier[self] . identifier[_programs] )
identifier[n_more_generations] = identifier[self] . identifier[generations] - identifier[prior_generations]
keyword[if] identifier[n_more_generations] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
%( identifier[self] . identifier[generations] , identifier[len] ( identifier[self] . identifier[_programs] )))
keyword[elif] identifier[n_more_generations] == literal[int] :
identifier[fitness] =[ identifier[program] . identifier[raw_fitness_] keyword[for] identifier[program] keyword[in] identifier[self] . identifier[_programs] [- literal[int] ]]
identifier[warn] ( literal[string]
literal[string] )
keyword[if] identifier[self] . identifier[warm_start] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[_programs] )):
identifier[_] = identifier[random_state] . identifier[randint] ( identifier[MAX_INT] , identifier[size] = identifier[self] . identifier[population_size] )
keyword[if] identifier[self] . identifier[verbose] :
identifier[self] . identifier[_verbose_reporter] ()
keyword[for] identifier[gen] keyword[in] identifier[range] ( identifier[prior_generations] , identifier[self] . identifier[generations] ):
identifier[start_time] = identifier[time] ()
keyword[if] identifier[gen] == literal[int] :
identifier[parents] = keyword[None]
keyword[else] :
identifier[parents] = identifier[self] . identifier[_programs] [ identifier[gen] - literal[int] ]
identifier[n_jobs] , identifier[n_programs] , identifier[starts] = identifier[_partition_estimators] (
identifier[self] . identifier[population_size] , identifier[self] . identifier[n_jobs] )
identifier[seeds] = identifier[random_state] . identifier[randint] ( identifier[MAX_INT] , identifier[size] = identifier[self] . identifier[population_size] )
identifier[population] = identifier[Parallel] ( identifier[n_jobs] = identifier[n_jobs] ,
identifier[verbose] = identifier[int] ( identifier[self] . identifier[verbose] > literal[int] ))(
identifier[delayed] ( identifier[_parallel_evolve] )( identifier[n_programs] [ identifier[i] ],
identifier[parents] ,
identifier[X] ,
identifier[y] ,
identifier[sample_weight] ,
identifier[seeds] [ identifier[starts] [ identifier[i] ]: identifier[starts] [ identifier[i] + literal[int] ]],
identifier[params] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_jobs] ))
identifier[population] = identifier[list] ( identifier[itertools] . identifier[chain] . identifier[from_iterable] ( identifier[population] ))
identifier[fitness] =[ identifier[program] . identifier[raw_fitness_] keyword[for] identifier[program] keyword[in] identifier[population] ]
identifier[length] =[ identifier[program] . identifier[length_] keyword[for] identifier[program] keyword[in] identifier[population] ]
identifier[parsimony_coefficient] = keyword[None]
keyword[if] identifier[self] . identifier[parsimony_coefficient] == literal[string] :
identifier[parsimony_coefficient] =( identifier[np] . identifier[cov] ( identifier[length] , identifier[fitness] )[ literal[int] , literal[int] ]/
identifier[np] . identifier[var] ( identifier[length] ))
keyword[for] identifier[program] keyword[in] identifier[population] :
identifier[program] . identifier[fitness_] = identifier[program] . identifier[fitness] ( identifier[parsimony_coefficient] )
identifier[self] . identifier[_programs] . identifier[append] ( identifier[population] )
keyword[if] keyword[not] identifier[self] . identifier[low_memory] :
keyword[for] identifier[old_gen] keyword[in] identifier[np] . identifier[arange] ( identifier[gen] , literal[int] ,- literal[int] ):
identifier[indices] =[]
keyword[for] identifier[program] keyword[in] identifier[self] . identifier[_programs] [ identifier[old_gen] ]:
keyword[if] identifier[program] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[idx] keyword[in] identifier[program] . identifier[parents] :
keyword[if] literal[string] keyword[in] identifier[idx] :
identifier[indices] . identifier[append] ( identifier[program] . identifier[parents] [ identifier[idx] ])
identifier[indices] = identifier[set] ( identifier[indices] )
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[self] . identifier[population_size] ):
keyword[if] identifier[idx] keyword[not] keyword[in] identifier[indices] :
identifier[self] . identifier[_programs] [ identifier[old_gen] - literal[int] ][ identifier[idx] ]= keyword[None]
keyword[elif] identifier[gen] > literal[int] :
identifier[self] . identifier[_programs] [ identifier[gen] - literal[int] ]= keyword[None]
keyword[if] identifier[self] . identifier[_metric] . identifier[greater_is_better] :
identifier[best_program] = identifier[population] [ identifier[np] . identifier[argmax] ( identifier[fitness] )]
keyword[else] :
identifier[best_program] = identifier[population] [ identifier[np] . identifier[argmin] ( identifier[fitness] )]
identifier[self] . identifier[run_details_] [ literal[string] ]. identifier[append] ( identifier[gen] )
identifier[self] . identifier[run_details_] [ literal[string] ]. identifier[append] ( identifier[np] . identifier[mean] ( identifier[length] ))
identifier[self] . identifier[run_details_] [ literal[string] ]. identifier[append] ( identifier[np] . identifier[mean] ( identifier[fitness] ))
identifier[self] . identifier[run_details_] [ literal[string] ]. identifier[append] ( identifier[best_program] . identifier[length_] )
identifier[self] . identifier[run_details_] [ literal[string] ]. identifier[append] ( identifier[best_program] . identifier[raw_fitness_] )
identifier[oob_fitness] = identifier[np] . identifier[nan]
keyword[if] identifier[self] . identifier[max_samples] < literal[int] :
identifier[oob_fitness] = identifier[best_program] . identifier[oob_fitness_]
identifier[self] . identifier[run_details_] [ literal[string] ]. identifier[append] ( identifier[oob_fitness] )
identifier[generation_time] = identifier[time] ()- identifier[start_time]
identifier[self] . identifier[run_details_] [ literal[string] ]. identifier[append] ( identifier[generation_time] )
keyword[if] identifier[self] . identifier[verbose] :
identifier[self] . identifier[_verbose_reporter] ( identifier[self] . identifier[run_details_] )
keyword[if] identifier[self] . identifier[_metric] . identifier[greater_is_better] :
identifier[best_fitness] = identifier[fitness] [ identifier[np] . identifier[argmax] ( identifier[fitness] )]
keyword[if] identifier[best_fitness] >= identifier[self] . identifier[stopping_criteria] :
keyword[break]
keyword[else] :
identifier[best_fitness] = identifier[fitness] [ identifier[np] . identifier[argmin] ( identifier[fitness] )]
keyword[if] identifier[best_fitness] <= identifier[self] . identifier[stopping_criteria] :
keyword[break]
keyword[if] identifier[isinstance] ( identifier[self] , identifier[TransformerMixin] ):
identifier[fitness] = identifier[np] . identifier[array] ( identifier[fitness] )
keyword[if] identifier[self] . identifier[_metric] . identifier[greater_is_better] :
identifier[hall_of_fame] = identifier[fitness] . identifier[argsort] ()[::- literal[int] ][: identifier[self] . identifier[hall_of_fame] ]
keyword[else] :
identifier[hall_of_fame] = identifier[fitness] . identifier[argsort] ()[: identifier[self] . identifier[hall_of_fame] ]
identifier[evaluation] = identifier[np] . identifier[array] ([ identifier[gp] . identifier[execute] ( identifier[X] ) keyword[for] identifier[gp] keyword[in]
[ identifier[self] . identifier[_programs] [- literal[int] ][ identifier[i] ] keyword[for]
identifier[i] keyword[in] identifier[hall_of_fame] ]])
keyword[if] identifier[self] . identifier[metric] == literal[string] :
identifier[evaluation] = identifier[np] . identifier[apply_along_axis] ( identifier[rankdata] , literal[int] , identifier[evaluation] )
keyword[with] identifier[np] . identifier[errstate] ( identifier[divide] = literal[string] , identifier[invalid] = literal[string] ):
identifier[correlations] = identifier[np] . identifier[abs] ( identifier[np] . identifier[corrcoef] ( identifier[evaluation] ))
identifier[np] . identifier[fill_diagonal] ( identifier[correlations] , literal[int] )
identifier[components] = identifier[list] ( identifier[range] ( identifier[self] . identifier[hall_of_fame] ))
identifier[indices] = identifier[list] ( identifier[range] ( identifier[self] . identifier[hall_of_fame] ))
keyword[while] identifier[len] ( identifier[components] )> identifier[self] . identifier[n_components] :
identifier[most_correlated] = identifier[np] . identifier[unravel_index] ( identifier[np] . identifier[argmax] ( identifier[correlations] ),
identifier[correlations] . identifier[shape] )
identifier[worst] = identifier[max] ( identifier[most_correlated] )
identifier[components] . identifier[pop] ( identifier[worst] )
identifier[indices] . identifier[remove] ( identifier[worst] )
identifier[correlations] = identifier[correlations] [:, identifier[indices] ][ identifier[indices] ,:]
identifier[indices] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[components] )))
identifier[self] . identifier[_best_programs] =[ identifier[self] . identifier[_programs] [- literal[int] ][ identifier[i] ] keyword[for] identifier[i] keyword[in]
identifier[hall_of_fame] [ identifier[components] ]]
keyword[else] :
keyword[if] identifier[self] . identifier[_metric] . identifier[greater_is_better] :
identifier[self] . identifier[_program] = identifier[self] . identifier[_programs] [- literal[int] ][ identifier[np] . identifier[argmax] ( identifier[fitness] )]
keyword[else] :
identifier[self] . identifier[_program] = identifier[self] . identifier[_programs] [- literal[int] ][ identifier[np] . identifier[argmin] ( identifier[fitness] )]
keyword[return] identifier[self] | def fit(self, X, y, sample_weight=None):
"""Fit the Genetic Program according to X, y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Check arrays
if isinstance(self, ClassifierMixin):
(X, y) = check_X_y(X, y, y_numeric=False)
check_classification_targets(y)
(self.classes_, y) = np.unique(y, return_inverse=True)
n_trim_classes = np.count_nonzero(np.bincount(y, sample_weight))
if n_trim_classes != 2:
raise ValueError('y contains %d class after sample_weight trimmed classes with zero weights, while 2 classes are required.' % n_trim_classes) # depends on [control=['if'], data=['n_trim_classes']]
self.n_classes_ = len(self.classes_) # depends on [control=['if'], data=[]]
else:
(X, y) = check_X_y(X, y, y_numeric=True)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False) # depends on [control=['if'], data=['sample_weight']]
(_, self.n_features_) = X.shape
hall_of_fame = self.hall_of_fame
if hall_of_fame is None:
hall_of_fame = self.population_size # depends on [control=['if'], data=['hall_of_fame']]
if hall_of_fame > self.population_size or hall_of_fame < 1:
raise ValueError('hall_of_fame (%d) must be less than or equal to population_size (%d).' % (self.hall_of_fame, self.population_size)) # depends on [control=['if'], data=[]]
n_components = self.n_components
if n_components is None:
n_components = hall_of_fame # depends on [control=['if'], data=['n_components']]
if n_components > hall_of_fame or n_components < 1:
raise ValueError('n_components (%d) must be less than or equal to hall_of_fame (%d).' % (self.n_components, self.hall_of_fame)) # depends on [control=['if'], data=[]]
self._function_set = []
for function in self.function_set:
if isinstance(function, str):
if function not in _function_map:
raise ValueError('invalid function name %s found in `function_set`.' % function) # depends on [control=['if'], data=['function']]
self._function_set.append(_function_map[function]) # depends on [control=['if'], data=[]]
elif isinstance(function, _Function):
self._function_set.append(function) # depends on [control=['if'], data=[]]
else:
raise ValueError('invalid type %s found in `function_set`.' % type(function)) # depends on [control=['for'], data=['function']]
if not self._function_set:
raise ValueError('No valid functions found in `function_set`.') # depends on [control=['if'], data=[]]
# For point-mutation to find a compatible replacement node
self._arities = {}
for function in self._function_set:
arity = function.arity
self._arities[arity] = self._arities.get(arity, [])
self._arities[arity].append(function) # depends on [control=['for'], data=['function']]
if isinstance(self.metric, _Fitness):
self._metric = self.metric # depends on [control=['if'], data=[]]
elif isinstance(self, RegressorMixin):
if self.metric not in ('mean absolute error', 'mse', 'rmse', 'pearson', 'spearman'):
raise ValueError('Unsupported metric: %s' % self.metric) # depends on [control=['if'], data=[]]
self._metric = _fitness_map[self.metric] # depends on [control=['if'], data=[]]
elif isinstance(self, ClassifierMixin):
if self.metric != 'log loss':
raise ValueError('Unsupported metric: %s' % self.metric) # depends on [control=['if'], data=[]]
self._metric = _fitness_map[self.metric] # depends on [control=['if'], data=[]]
elif isinstance(self, TransformerMixin):
if self.metric not in ('pearson', 'spearman'):
raise ValueError('Unsupported metric: %s' % self.metric) # depends on [control=['if'], data=[]]
self._metric = _fitness_map[self.metric] # depends on [control=['if'], data=[]]
self._method_probs = np.array([self.p_crossover, self.p_subtree_mutation, self.p_hoist_mutation, self.p_point_mutation])
self._method_probs = np.cumsum(self._method_probs)
if self._method_probs[-1] > 1:
raise ValueError('The sum of p_crossover, p_subtree_mutation, p_hoist_mutation and p_point_mutation should total to 1.0 or less.') # depends on [control=['if'], data=[]]
if self.init_method not in ('half and half', 'grow', 'full'):
raise ValueError('Valid program initializations methods include "grow", "full" and "half and half". Given %s.' % self.init_method) # depends on [control=['if'], data=[]]
if not (isinstance(self.const_range, tuple) and len(self.const_range) == 2 or self.const_range is None):
raise ValueError('const_range should be a tuple with length two, or None.') # depends on [control=['if'], data=[]]
if not isinstance(self.init_depth, tuple) or len(self.init_depth) != 2:
raise ValueError('init_depth should be a tuple with length two.') # depends on [control=['if'], data=[]]
if self.init_depth[0] > self.init_depth[1]:
raise ValueError('init_depth should be in increasing numerical order: (min_depth, max_depth).') # depends on [control=['if'], data=[]]
if self.feature_names is not None:
if self.n_features_ != len(self.feature_names):
raise ValueError('The supplied `feature_names` has different length to n_features. Expected %d, got %d.' % (self.n_features_, len(self.feature_names))) # depends on [control=['if'], data=[]]
for feature_name in self.feature_names:
if not isinstance(feature_name, str):
raise ValueError('invalid type %s found in `feature_names`.' % type(feature_name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['feature_name']] # depends on [control=['if'], data=[]]
if self.transformer is not None:
if isinstance(self.transformer, _Function):
self._transformer = self.transformer # depends on [control=['if'], data=[]]
elif self.transformer == 'sigmoid':
self._transformer = sigmoid # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid `transformer`. Expected either "sigmoid" or _Function object, got %s' % type(self.transformer))
if self._transformer.arity != 1:
raise ValueError('Invalid arity for `transformer`. Expected 1, got %d.' % self._transformer.arity) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
params = self.get_params()
params['_metric'] = self._metric
if hasattr(self, '_transformer'):
params['_transformer'] = self._transformer # depends on [control=['if'], data=[]]
else:
params['_transformer'] = None
params['function_set'] = self._function_set
params['arities'] = self._arities
params['method_probs'] = self._method_probs
if not self.warm_start or not hasattr(self, '_programs'):
# Free allocated memory, if any
self._programs = []
self.run_details_ = {'generation': [], 'average_length': [], 'average_fitness': [], 'best_length': [], 'best_fitness': [], 'best_oob_fitness': [], 'generation_time': []} # depends on [control=['if'], data=[]]
prior_generations = len(self._programs)
n_more_generations = self.generations - prior_generations
if n_more_generations < 0:
raise ValueError('generations=%d must be larger or equal to len(_programs)=%d when warm_start==True' % (self.generations, len(self._programs))) # depends on [control=['if'], data=[]]
elif n_more_generations == 0:
fitness = [program.raw_fitness_ for program in self._programs[-1]]
warn('Warm-start fitting without increasing n_estimators does not fit new programs.') # depends on [control=['if'], data=[]]
if self.warm_start:
# Generate and discard seeds that would have been produced on the
# initial fit call.
for i in range(len(self._programs)):
_ = random_state.randint(MAX_INT, size=self.population_size) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if self.verbose:
# Print header fields
self._verbose_reporter() # depends on [control=['if'], data=[]]
for gen in range(prior_generations, self.generations):
start_time = time()
if gen == 0:
parents = None # depends on [control=['if'], data=[]]
else:
parents = self._programs[gen - 1]
# Parallel loop
(n_jobs, n_programs, starts) = _partition_estimators(self.population_size, self.n_jobs)
seeds = random_state.randint(MAX_INT, size=self.population_size)
population = Parallel(n_jobs=n_jobs, verbose=int(self.verbose > 1))((delayed(_parallel_evolve)(n_programs[i], parents, X, y, sample_weight, seeds[starts[i]:starts[i + 1]], params) for i in range(n_jobs)))
# Reduce, maintaining order across different n_jobs
population = list(itertools.chain.from_iterable(population))
fitness = [program.raw_fitness_ for program in population]
length = [program.length_ for program in population]
parsimony_coefficient = None
if self.parsimony_coefficient == 'auto':
parsimony_coefficient = np.cov(length, fitness)[1, 0] / np.var(length) # depends on [control=['if'], data=[]]
for program in population:
program.fitness_ = program.fitness(parsimony_coefficient) # depends on [control=['for'], data=['program']]
self._programs.append(population)
# Remove old programs that didn't make it into the new population.
if not self.low_memory:
for old_gen in np.arange(gen, 0, -1):
indices = []
for program in self._programs[old_gen]:
if program is not None:
for idx in program.parents:
if 'idx' in idx:
indices.append(program.parents[idx]) # depends on [control=['if'], data=['idx']] # depends on [control=['for'], data=['idx']] # depends on [control=['if'], data=['program']] # depends on [control=['for'], data=['program']]
indices = set(indices)
for idx in range(self.population_size):
if idx not in indices:
self._programs[old_gen - 1][idx] = None # depends on [control=['if'], data=['idx']] # depends on [control=['for'], data=['idx']] # depends on [control=['for'], data=['old_gen']] # depends on [control=['if'], data=[]]
elif gen > 0:
# Remove old generations
self._programs[gen - 1] = None # depends on [control=['if'], data=['gen']]
# Record run details
if self._metric.greater_is_better:
best_program = population[np.argmax(fitness)] # depends on [control=['if'], data=[]]
else:
best_program = population[np.argmin(fitness)]
self.run_details_['generation'].append(gen)
self.run_details_['average_length'].append(np.mean(length))
self.run_details_['average_fitness'].append(np.mean(fitness))
self.run_details_['best_length'].append(best_program.length_)
self.run_details_['best_fitness'].append(best_program.raw_fitness_)
oob_fitness = np.nan
if self.max_samples < 1.0:
oob_fitness = best_program.oob_fitness_ # depends on [control=['if'], data=[]]
self.run_details_['best_oob_fitness'].append(oob_fitness)
generation_time = time() - start_time
self.run_details_['generation_time'].append(generation_time)
if self.verbose:
self._verbose_reporter(self.run_details_) # depends on [control=['if'], data=[]]
# Check for early stopping
if self._metric.greater_is_better:
best_fitness = fitness[np.argmax(fitness)]
if best_fitness >= self.stopping_criteria:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
best_fitness = fitness[np.argmin(fitness)]
if best_fitness <= self.stopping_criteria:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['gen']]
if isinstance(self, TransformerMixin):
# Find the best individuals in the final generation
fitness = np.array(fitness)
if self._metric.greater_is_better:
hall_of_fame = fitness.argsort()[::-1][:self.hall_of_fame] # depends on [control=['if'], data=[]]
else:
hall_of_fame = fitness.argsort()[:self.hall_of_fame]
evaluation = np.array([gp.execute(X) for gp in [self._programs[-1][i] for i in hall_of_fame]])
if self.metric == 'spearman':
evaluation = np.apply_along_axis(rankdata, 1, evaluation) # depends on [control=['if'], data=[]]
with np.errstate(divide='ignore', invalid='ignore'):
correlations = np.abs(np.corrcoef(evaluation)) # depends on [control=['with'], data=[]]
np.fill_diagonal(correlations, 0.0)
components = list(range(self.hall_of_fame))
indices = list(range(self.hall_of_fame))
# Iteratively remove least fit individual of most correlated pair
while len(components) > self.n_components:
most_correlated = np.unravel_index(np.argmax(correlations), correlations.shape)
# The correlation matrix is sorted by fitness, so identifying
# the least fit of the pair is simply getting the higher index
worst = max(most_correlated)
components.pop(worst)
indices.remove(worst)
correlations = correlations[:, indices][indices, :]
indices = list(range(len(components))) # depends on [control=['while'], data=[]]
self._best_programs = [self._programs[-1][i] for i in hall_of_fame[components]] # depends on [control=['if'], data=[]]
# Find the best individual in the final generation
elif self._metric.greater_is_better:
self._program = self._programs[-1][np.argmax(fitness)] # depends on [control=['if'], data=[]]
else:
self._program = self._programs[-1][np.argmin(fitness)]
return self |
def filled_quantity(self):
"""
[int] 订单已成交数量
"""
if np.isnan(self._filled_quantity):
raise RuntimeError("Filled quantity of order {} is not supposed to be nan.".format(self.order_id))
return self._filled_quantity | def function[filled_quantity, parameter[self]]:
constant[
[int] 订单已成交数量
]
if call[name[np].isnan, parameter[name[self]._filled_quantity]] begin[:]
<ast.Raise object at 0x7da1b211d300>
return[name[self]._filled_quantity] | keyword[def] identifier[filled_quantity] ( identifier[self] ):
literal[string]
keyword[if] identifier[np] . identifier[isnan] ( identifier[self] . identifier[_filled_quantity] ):
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[self] . identifier[order_id] ))
keyword[return] identifier[self] . identifier[_filled_quantity] | def filled_quantity(self):
"""
[int] 订单已成交数量
"""
if np.isnan(self._filled_quantity):
raise RuntimeError('Filled quantity of order {} is not supposed to be nan.'.format(self.order_id)) # depends on [control=['if'], data=[]]
return self._filled_quantity |
def wait(self, time):
"""Pauses the thread for a specified time.
Returns False if interrupted by another thread and True if the
time runs out normally.
"""
self._wait = Event()
return not self._wait.wait(time) | def function[wait, parameter[self, time]]:
constant[Pauses the thread for a specified time.
Returns False if interrupted by another thread and True if the
time runs out normally.
]
name[self]._wait assign[=] call[name[Event], parameter[]]
return[<ast.UnaryOp object at 0x7da20e9b2c20>] | keyword[def] identifier[wait] ( identifier[self] , identifier[time] ):
literal[string]
identifier[self] . identifier[_wait] = identifier[Event] ()
keyword[return] keyword[not] identifier[self] . identifier[_wait] . identifier[wait] ( identifier[time] ) | def wait(self, time):
"""Pauses the thread for a specified time.
Returns False if interrupted by another thread and True if the
time runs out normally.
"""
self._wait = Event()
return not self._wait.wait(time) |
def list_outputs(db, job_id, full=True):
"""
List the outputs for a given
:class:`~openquake.server.db.models.OqJob`.
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param job_id:
ID of a calculation.
:param bool full:
If True produce a full listing, otherwise a short version
"""
outputs = get_outputs(db, job_id)
out = []
if len(outputs) > 0:
truncated = False
out.append(' id | name')
outs = sorted(outputs, key=operator.attrgetter('display_name'))
for i, o in enumerate(outs):
if not full and i >= 10:
out.append(' ... | %d additional output(s)' % (len(outs) - 10))
truncated = True
break
out.append('%4d | %s' % (o.id, o.display_name))
if truncated:
out.append('Some outputs where not shown. You can see the full '
'list with the command\n`oq engine --list-outputs`')
return out | def function[list_outputs, parameter[db, job_id, full]]:
constant[
List the outputs for a given
:class:`~openquake.server.db.models.OqJob`.
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param job_id:
ID of a calculation.
:param bool full:
If True produce a full listing, otherwise a short version
]
variable[outputs] assign[=] call[name[get_outputs], parameter[name[db], name[job_id]]]
variable[out] assign[=] list[[]]
if compare[call[name[len], parameter[name[outputs]]] greater[>] constant[0]] begin[:]
variable[truncated] assign[=] constant[False]
call[name[out].append, parameter[constant[ id | name]]]
variable[outs] assign[=] call[name[sorted], parameter[name[outputs]]]
for taget[tuple[[<ast.Name object at 0x7da18bccb160>, <ast.Name object at 0x7da18bcc8c40>]]] in starred[call[name[enumerate], parameter[name[outs]]]] begin[:]
if <ast.BoolOp object at 0x7da18bccbaf0> begin[:]
call[name[out].append, parameter[binary_operation[constant[ ... | %d additional output(s)] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[len], parameter[name[outs]]] - constant[10]]]]]
variable[truncated] assign[=] constant[True]
break
call[name[out].append, parameter[binary_operation[constant[%4d | %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1306f20>, <ast.Attribute object at 0x7da1b13068c0>]]]]]
if name[truncated] begin[:]
call[name[out].append, parameter[constant[Some outputs where not shown. You can see the full list with the command
`oq engine --list-outputs`]]]
return[name[out]] | keyword[def] identifier[list_outputs] ( identifier[db] , identifier[job_id] , identifier[full] = keyword[True] ):
literal[string]
identifier[outputs] = identifier[get_outputs] ( identifier[db] , identifier[job_id] )
identifier[out] =[]
keyword[if] identifier[len] ( identifier[outputs] )> literal[int] :
identifier[truncated] = keyword[False]
identifier[out] . identifier[append] ( literal[string] )
identifier[outs] = identifier[sorted] ( identifier[outputs] , identifier[key] = identifier[operator] . identifier[attrgetter] ( literal[string] ))
keyword[for] identifier[i] , identifier[o] keyword[in] identifier[enumerate] ( identifier[outs] ):
keyword[if] keyword[not] identifier[full] keyword[and] identifier[i] >= literal[int] :
identifier[out] . identifier[append] ( literal[string] %( identifier[len] ( identifier[outs] )- literal[int] ))
identifier[truncated] = keyword[True]
keyword[break]
identifier[out] . identifier[append] ( literal[string] %( identifier[o] . identifier[id] , identifier[o] . identifier[display_name] ))
keyword[if] identifier[truncated] :
identifier[out] . identifier[append] ( literal[string]
literal[string] )
keyword[return] identifier[out] | def list_outputs(db, job_id, full=True):
"""
List the outputs for a given
:class:`~openquake.server.db.models.OqJob`.
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param job_id:
ID of a calculation.
:param bool full:
If True produce a full listing, otherwise a short version
"""
outputs = get_outputs(db, job_id)
out = []
if len(outputs) > 0:
truncated = False
out.append(' id | name')
outs = sorted(outputs, key=operator.attrgetter('display_name'))
for (i, o) in enumerate(outs):
if not full and i >= 10:
out.append(' ... | %d additional output(s)' % (len(outs) - 10))
truncated = True
break # depends on [control=['if'], data=[]]
out.append('%4d | %s' % (o.id, o.display_name)) # depends on [control=['for'], data=[]]
if truncated:
out.append('Some outputs where not shown. You can see the full list with the command\n`oq engine --list-outputs`') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return out |
def paint(self, painter, option, widget):
"""
Draws this item with the inputed painter. This will call the
scene's renderer to draw this item.
"""
scene = self.scene()
if not scene:
return
scene.chart().renderer().drawItem(self, painter, option) | def function[paint, parameter[self, painter, option, widget]]:
constant[
Draws this item with the inputed painter. This will call the
scene's renderer to draw this item.
]
variable[scene] assign[=] call[name[self].scene, parameter[]]
if <ast.UnaryOp object at 0x7da2054a4b50> begin[:]
return[None]
call[call[call[name[scene].chart, parameter[]].renderer, parameter[]].drawItem, parameter[name[self], name[painter], name[option]]] | keyword[def] identifier[paint] ( identifier[self] , identifier[painter] , identifier[option] , identifier[widget] ):
literal[string]
identifier[scene] = identifier[self] . identifier[scene] ()
keyword[if] keyword[not] identifier[scene] :
keyword[return]
identifier[scene] . identifier[chart] (). identifier[renderer] (). identifier[drawItem] ( identifier[self] , identifier[painter] , identifier[option] ) | def paint(self, painter, option, widget):
"""
Draws this item with the inputed painter. This will call the
scene's renderer to draw this item.
"""
scene = self.scene()
if not scene:
return # depends on [control=['if'], data=[]]
scene.chart().renderer().drawItem(self, painter, option) |
def execute(self, argv=None, compile=None, preprocessor=None, compiler_factory=None):
"""Parse arguments and execute decorated function
argv: list of arguments
compile:
- None, pass args as keyword args to function
- True, pass args as single dictionary
- function, get args from parse_args() and return a pair of
tuple and dict to be passed as args and kwargs to function
"""
action, args, kwargs = self.compile_args(argv=argv, compile=compile, preprocessor=preprocessor, compiler_factory=compiler_factory)
return action(*args, **kwargs) | def function[execute, parameter[self, argv, compile, preprocessor, compiler_factory]]:
constant[Parse arguments and execute decorated function
argv: list of arguments
compile:
- None, pass args as keyword args to function
- True, pass args as single dictionary
- function, get args from parse_args() and return a pair of
tuple and dict to be passed as args and kwargs to function
]
<ast.Tuple object at 0x7da18bccb640> assign[=] call[name[self].compile_args, parameter[]]
return[call[name[action], parameter[<ast.Starred object at 0x7da2044c30a0>]]] | keyword[def] identifier[execute] ( identifier[self] , identifier[argv] = keyword[None] , identifier[compile] = keyword[None] , identifier[preprocessor] = keyword[None] , identifier[compiler_factory] = keyword[None] ):
literal[string]
identifier[action] , identifier[args] , identifier[kwargs] = identifier[self] . identifier[compile_args] ( identifier[argv] = identifier[argv] , identifier[compile] = identifier[compile] , identifier[preprocessor] = identifier[preprocessor] , identifier[compiler_factory] = identifier[compiler_factory] )
keyword[return] identifier[action] (* identifier[args] ,** identifier[kwargs] ) | def execute(self, argv=None, compile=None, preprocessor=None, compiler_factory=None):
"""Parse arguments and execute decorated function
argv: list of arguments
compile:
- None, pass args as keyword args to function
- True, pass args as single dictionary
- function, get args from parse_args() and return a pair of
tuple and dict to be passed as args and kwargs to function
"""
(action, args, kwargs) = self.compile_args(argv=argv, compile=compile, preprocessor=preprocessor, compiler_factory=compiler_factory)
return action(*args, **kwargs) |
def handle(self, message):
    '''Attempts to send a message to the specified destination in Slack.
    Extends Legobot.Lego.handle()

    Before sending, "@user" mentions and "#channel" references in the
    message text are rewritten into Slack link syntax ("<@user>" and
    "<#CHANNELID|channel>").  Messages whose metadata has no opts are
    silently dropped.

    Args:
        message (Legobot.Message): message w/ metadata to send.
    '''
    logger.debug(message)
    if not Utilities.isNotEmpty(message['metadata']['opts']):
        # No opts means no routing target; nothing can be sent.
        return
    target = message['metadata']['opts']['target']
    thread = message['metadata']['opts'].get('thread')
    # "@name" at the start of the text or after whitespace.  Raw string
    # so the regex engine (not the Python lexer) interprets the \s.
    mention_pattern = re.compile(r'^@([a-zA-Z0-9._-]+)|\s@([a-zA-Z0-9._-]+)')
    mentions = set(re.findall(mention_pattern, message['text']))
    logger.debug('MATCHES!!!! {}'.format(mentions))
    for match in mentions:
        if isinstance(match, tuple):
            # findall yields a two-group tuple per match; exactly one
            # of the two groups is non-empty.
            if match[0] != '':
                match = match[0]
            else:
                match = match[1]
        if not match.startswith('@'):
            match = '@' + match
        message['text'] = message['text'].replace(
            match,
            '<{}>'.format(match)
        )
    # "#channel" -> "<#CHANNELID|channel>" when the name resolves.
    channel_pattern = re.compile(r'#([A-Za-z0-9-]+)')
    channels = set(re.findall(channel_pattern, message['text']))
    for match in channels:
        channel_id = self.botThread.get_channel_id_by_name(match)
        if channel_id:
            message['text'] = message['text'].replace(
                '#' + match,
                '<#{}|{}>'.format(
                    channel_id,
                    match
                )
            )
    # Collapse double wrapping produced when a mention/channel was
    # already written in link form in the original text.
    if (message['text'].find('<<@') != -1
            or message['text'].find('<<#') != -1):
        message['text'] = message['text'].replace('<<', '<')
        message['text'] = message['text'].replace('>>', '>')
    if target.startswith('U'):
        # Target looks like a user ID; resolve it to a DM channel.
        target = self.botThread.get_dm_channel(target)
    attachment = message['metadata']['opts'].get('attachment')
    if attachment:
        text = message['metadata']['opts'].get('fallback')
        attachment = self.build_attachment(
            text, target, attachment, thread)
        self.botThread.post_attachment(attachment)
    else:
        self.botThread.slack_client.rtm_send_message(
            target, message['text'], thread=thread)
constant[Attempts to send a message to the specified destination in Slack.
Extends Legobot.Lego.handle()
Args:
message (Legobot.Message): message w/ metadata to send.
]
call[name[logger].debug, parameter[name[message]]]
if call[name[Utilities].isNotEmpty, parameter[call[call[name[message]][constant[metadata]]][constant[opts]]]] begin[:]
variable[target] assign[=] call[call[call[name[message]][constant[metadata]]][constant[opts]]][constant[target]]
variable[thread] assign[=] call[call[call[name[message]][constant[metadata]]][constant[opts]].get, parameter[constant[thread]]]
variable[pattern] assign[=] call[name[re].compile, parameter[constant[^@([a-zA-Z0-9._-]+)|\s@([a-zA-Z0-9._-]+)]]]
variable[matches] assign[=] call[name[re].findall, parameter[name[pattern], call[name[message]][constant[text]]]]
variable[matches] assign[=] call[name[set], parameter[name[matches]]]
call[name[logger].debug, parameter[call[constant[MATCHES!!!! {}].format, parameter[name[matches]]]]]
for taget[name[match]] in starred[name[matches]] begin[:]
if call[name[isinstance], parameter[name[match], name[tuple]]] begin[:]
if compare[call[name[match]][constant[0]] not_equal[!=] constant[]] begin[:]
variable[match] assign[=] call[name[match]][constant[0]]
if <ast.UnaryOp object at 0x7da20e9b3fd0> begin[:]
variable[match] assign[=] binary_operation[constant[@] + name[match]]
call[name[message]][constant[text]] assign[=] call[call[name[message]][constant[text]].replace, parameter[name[match], call[constant[<{}>].format, parameter[name[match]]]]]
variable[pattern] assign[=] call[name[re].compile, parameter[constant[#([A-Za-z0-9-]+)]]]
variable[matches] assign[=] call[name[re].findall, parameter[name[pattern], call[name[message]][constant[text]]]]
variable[matches] assign[=] call[name[set], parameter[name[matches]]]
for taget[name[match]] in starred[name[matches]] begin[:]
variable[channel_id] assign[=] call[name[self].botThread.get_channel_id_by_name, parameter[name[match]]]
if name[channel_id] begin[:]
call[name[message]][constant[text]] assign[=] call[call[name[message]][constant[text]].replace, parameter[binary_operation[constant[#] + name[match]], call[constant[<#{}|{}>].format, parameter[name[channel_id], name[match]]]]]
if <ast.BoolOp object at 0x7da20e9b0fa0> begin[:]
call[name[message]][constant[text]] assign[=] call[call[name[message]][constant[text]].replace, parameter[constant[<<], constant[<]]]
call[name[message]][constant[text]] assign[=] call[call[name[message]][constant[text]].replace, parameter[constant[>>], constant[>]]]
if call[name[target].startswith, parameter[constant[U]]] begin[:]
variable[target] assign[=] call[name[self].botThread.get_dm_channel, parameter[name[target]]]
variable[attachment] assign[=] call[call[call[name[message]][constant[metadata]]][constant[opts]].get, parameter[constant[attachment]]]
if name[attachment] begin[:]
variable[text] assign[=] call[call[call[name[message]][constant[metadata]]][constant[opts]].get, parameter[constant[fallback]]]
variable[attachment] assign[=] call[name[self].build_attachment, parameter[name[text], name[target], name[attachment], name[thread]]]
call[name[self].botThread.post_attachment, parameter[name[attachment]]] | keyword[def] identifier[handle] ( identifier[self] , identifier[message] ):
literal[string]
identifier[logger] . identifier[debug] ( identifier[message] )
keyword[if] identifier[Utilities] . identifier[isNotEmpty] ( identifier[message] [ literal[string] ][ literal[string] ]):
identifier[target] = identifier[message] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[thread] = identifier[message] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] )
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[matches] = identifier[re] . identifier[findall] ( identifier[pattern] , identifier[message] [ literal[string] ])
identifier[matches] = identifier[set] ( identifier[matches] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[matches] ))
keyword[for] identifier[match] keyword[in] identifier[matches] :
keyword[if] identifier[isinstance] ( identifier[match] , identifier[tuple] ):
keyword[if] identifier[match] [ literal[int] ]!= literal[string] :
identifier[match] = identifier[match] [ literal[int] ]
keyword[else] :
identifier[match] = identifier[match] [ literal[int] ]
keyword[if] keyword[not] identifier[match] . identifier[startswith] ( literal[string] ):
identifier[match] = literal[string] + identifier[match]
identifier[message] [ literal[string] ]= identifier[message] [ literal[string] ]. identifier[replace] (
identifier[match] ,
literal[string] . identifier[format] ( identifier[match] )
)
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[matches] = identifier[re] . identifier[findall] ( identifier[pattern] , identifier[message] [ literal[string] ])
identifier[matches] = identifier[set] ( identifier[matches] )
keyword[for] identifier[match] keyword[in] identifier[matches] :
identifier[channel_id] = identifier[self] . identifier[botThread] . identifier[get_channel_id_by_name] ( identifier[match] )
keyword[if] identifier[channel_id] :
identifier[message] [ literal[string] ]= identifier[message] [ literal[string] ]. identifier[replace] (
literal[string] + identifier[match] ,
literal[string] . identifier[format] (
identifier[channel_id] ,
identifier[match]
)
)
keyword[if] ( identifier[message] [ literal[string] ]. identifier[find] ( literal[string] )!=- literal[int]
keyword[or] identifier[message] [ literal[string] ]. identifier[find] ( literal[string] )!=- literal[int] ):
identifier[message] [ literal[string] ]= identifier[message] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] )
identifier[message] [ literal[string] ]= identifier[message] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[target] . identifier[startswith] ( literal[string] ):
identifier[target] = identifier[self] . identifier[botThread] . identifier[get_dm_channel] ( identifier[target] )
identifier[attachment] = identifier[message] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] )
keyword[if] identifier[attachment] :
identifier[text] = identifier[message] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] )
identifier[attachment] = identifier[self] . identifier[build_attachment] (
identifier[text] , identifier[target] , identifier[attachment] , identifier[thread] )
identifier[self] . identifier[botThread] . identifier[post_attachment] ( identifier[attachment] )
keyword[else] :
identifier[self] . identifier[botThread] . identifier[slack_client] . identifier[rtm_send_message] (
identifier[target] , identifier[message] [ literal[string] ], identifier[thread] = identifier[thread] ) | def handle(self, message):
"""Attempts to send a message to the specified destination in Slack.
Extends Legobot.Lego.handle()
Args:
message (Legobot.Message): message w/ metadata to send.
"""
logger.debug(message)
if Utilities.isNotEmpty(message['metadata']['opts']):
target = message['metadata']['opts']['target']
thread = message['metadata']['opts'].get('thread')
# pattern = re.compile('@([a-zA-Z0-9._-]+)')
pattern = re.compile('^@([a-zA-Z0-9._-]+)|\\s@([a-zA-Z0-9._-]+)')
matches = re.findall(pattern, message['text'])
matches = set(matches)
logger.debug('MATCHES!!!! {}'.format(matches))
for match in matches:
if isinstance(match, tuple):
if match[0] != '':
match = match[0] # depends on [control=['if'], data=[]]
else:
match = match[1] # depends on [control=['if'], data=[]]
if not match.startswith('@'):
match = '@' + match # depends on [control=['if'], data=[]]
message['text'] = message['text'].replace(match, '<{}>'.format(match)) # depends on [control=['for'], data=['match']]
pattern = re.compile('#([A-Za-z0-9-]+)')
matches = re.findall(pattern, message['text'])
matches = set(matches)
for match in matches:
channel_id = self.botThread.get_channel_id_by_name(match)
if channel_id:
message['text'] = message['text'].replace('#' + match, '<#{}|{}>'.format(channel_id, match)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['match']]
if message['text'].find('<<@') != -1 or message['text'].find('<<#') != -1:
message['text'] = message['text'].replace('<<', '<')
message['text'] = message['text'].replace('>>', '>') # depends on [control=['if'], data=[]]
if target.startswith('U'):
target = self.botThread.get_dm_channel(target) # depends on [control=['if'], data=[]]
attachment = message['metadata']['opts'].get('attachment')
if attachment:
text = message['metadata']['opts'].get('fallback')
attachment = self.build_attachment(text, target, attachment, thread)
self.botThread.post_attachment(attachment) # depends on [control=['if'], data=[]]
else:
self.botThread.slack_client.rtm_send_message(target, message['text'], thread=thread) # depends on [control=['if'], data=[]] |
def we_depend_on_them(decls):
    """Return the set of declarations that depend on the input.

    Every item in the returned set depends on at least one
    declaration from `decls`.
    """
    dependents = (dependency_info_t.i_depend_on_them(decl) for decl in decls)
    return set().union(*dependents)
constant[Returns set of declarations. every item in the returned set,
depends on a declaration from the input]
variable[to_be_included] assign[=] call[name[set], parameter[]]
for taget[name[decl]] in starred[name[decls]] begin[:]
call[name[to_be_included].update, parameter[call[name[dependency_info_t].i_depend_on_them, parameter[name[decl]]]]]
return[name[to_be_included]] | keyword[def] identifier[we_depend_on_them] ( identifier[decls] ):
literal[string]
identifier[to_be_included] = identifier[set] ()
keyword[for] identifier[decl] keyword[in] identifier[decls] :
identifier[to_be_included] . identifier[update] ( identifier[dependency_info_t] . identifier[i_depend_on_them] ( identifier[decl] ))
keyword[return] identifier[to_be_included] | def we_depend_on_them(decls):
"""Returns set of declarations. every item in the returned set,
depends on a declaration from the input"""
to_be_included = set()
for decl in decls:
to_be_included.update(dependency_info_t.i_depend_on_them(decl)) # depends on [control=['for'], data=['decl']]
return to_be_included |
def _spectrum(self, photon_energy):
    """
    Compute differential spectrum from pp interactions using the
    parametrization of Kafexhiu, E., Aharonian, F., Taylor, A.~M., and
    Vila, G.~S.\ 2014, `arXiv:1406.7369
    <http://www.arxiv.org/abs/1406.7369>`_.
    Parameters
    ----------
    photon_energy : :class:`~astropy.units.Quantity` instance
        Photon energy array.
    """
    # Load LUT if available, otherwise use self._diffsigma
    if self.useLUT:
        LUT_base = "PionDecayKafexhiu14_LUT_"
        if self.nuclear_enhancement:
            LUT_base += "NucEnh_"
        LUT_fname = LUT_base + "{0}.npz".format(self.hiEmodel)
        # only reload LUT if it has changed or hasn't been loaded yet
        try:
            if os.path.basename(self.diffsigma.fname) != LUT_fname:
                self._loadLUT(LUT_fname)
        except AttributeError:
            # self.diffsigma is not set yet (first call) or lacks a
            # `fname` attribute, so load the LUT from scratch.
            self._loadLUT(LUT_fname)
    else:
        self.diffsigma = self._diffsigma
    Egamma = _validate_ene(photon_energy).to("GeV")
    # NOTE(review): _Ep/_J appear to be the proton energy grid and the
    # particle distribution evaluated on it -- confirm against the
    # class initializer (not visible here).
    Ep = self._Ep * u.GeV
    J = self._J * u.Unit("1/GeV")
    specpp = []
    # Integrate diffsigma * J over proton energy for each photon energy
    # using a log-log trapezoidal rule.
    for Eg in Egamma:
        diffsigma = self.diffsigma(Ep.value, Eg.value) * u.Unit("cm2/GeV")
        specpp.append(trapz_loglog(diffsigma * J, Ep))
    self.specpp = u.Quantity(specpp)
    # Scale by target density (presumably hydrogen density `nh`) and
    # the speed of light in cgs units -- TODO confirm semantics.
    self.specpp *= self.nh * c.cgs
    return self.specpp.to("1/(s eV)")
try:
if os.path.basename(self.diffsigma.fname) != LUT_fname:
self._loadLUT(LUT_fname)
except AttributeError:
self._loadLUT(LUT_fname)
else:
self.diffsigma = self._diffsigma
Egamma = _validate_ene(photon_energy).to("GeV")
Ep = self._Ep * u.GeV
J = self._J * u.Unit("1/GeV")
specpp = []
for Eg in Egamma:
diffsigma = self.diffsigma(Ep.value, Eg.value) * u.Unit("cm2/GeV")
specpp.append(trapz_loglog(diffsigma * J, Ep))
self.specpp = u.Quantity(specpp)
self.specpp *= self.nh * c.cgs
return self.specpp.to("1/(s eV)") | def function[_spectrum, parameter[self, photon_energy]]:
constant[
Compute differential spectrum from pp interactions using the
parametrization of Kafexhiu, E., Aharonian, F., Taylor, A.~M., and
Vila, G.~S.\ 2014, `arXiv:1406.7369
<http://www.arxiv.org/abs/1406.7369>`_.
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` instance
Photon energy array.
]
if name[self].useLUT begin[:]
variable[LUT_base] assign[=] constant[PionDecayKafexhiu14_LUT_]
if name[self].nuclear_enhancement begin[:]
<ast.AugAssign object at 0x7da1b0e263e0>
variable[LUT_fname] assign[=] binary_operation[name[LUT_base] + call[constant[{0}.npz].format, parameter[name[self].hiEmodel]]]
<ast.Try object at 0x7da1b0e25270>
variable[Egamma] assign[=] call[call[name[_validate_ene], parameter[name[photon_energy]]].to, parameter[constant[GeV]]]
variable[Ep] assign[=] binary_operation[name[self]._Ep * name[u].GeV]
variable[J] assign[=] binary_operation[name[self]._J * call[name[u].Unit, parameter[constant[1/GeV]]]]
variable[specpp] assign[=] list[[]]
for taget[name[Eg]] in starred[name[Egamma]] begin[:]
variable[diffsigma] assign[=] binary_operation[call[name[self].diffsigma, parameter[name[Ep].value, name[Eg].value]] * call[name[u].Unit, parameter[constant[cm2/GeV]]]]
call[name[specpp].append, parameter[call[name[trapz_loglog], parameter[binary_operation[name[diffsigma] * name[J]], name[Ep]]]]]
name[self].specpp assign[=] call[name[u].Quantity, parameter[name[specpp]]]
<ast.AugAssign object at 0x7da1b0cbbbb0>
return[call[name[self].specpp.to, parameter[constant[1/(s eV)]]]] | keyword[def] identifier[_spectrum] ( identifier[self] , identifier[photon_energy] ):
literal[string]
keyword[if] identifier[self] . identifier[useLUT] :
identifier[LUT_base] = literal[string]
keyword[if] identifier[self] . identifier[nuclear_enhancement] :
identifier[LUT_base] += literal[string]
identifier[LUT_fname] = identifier[LUT_base] + literal[string] . identifier[format] ( identifier[self] . identifier[hiEmodel] )
keyword[try] :
keyword[if] identifier[os] . identifier[path] . identifier[basename] ( identifier[self] . identifier[diffsigma] . identifier[fname] )!= identifier[LUT_fname] :
identifier[self] . identifier[_loadLUT] ( identifier[LUT_fname] )
keyword[except] identifier[AttributeError] :
identifier[self] . identifier[_loadLUT] ( identifier[LUT_fname] )
keyword[else] :
identifier[self] . identifier[diffsigma] = identifier[self] . identifier[_diffsigma]
identifier[Egamma] = identifier[_validate_ene] ( identifier[photon_energy] ). identifier[to] ( literal[string] )
identifier[Ep] = identifier[self] . identifier[_Ep] * identifier[u] . identifier[GeV]
identifier[J] = identifier[self] . identifier[_J] * identifier[u] . identifier[Unit] ( literal[string] )
identifier[specpp] =[]
keyword[for] identifier[Eg] keyword[in] identifier[Egamma] :
identifier[diffsigma] = identifier[self] . identifier[diffsigma] ( identifier[Ep] . identifier[value] , identifier[Eg] . identifier[value] )* identifier[u] . identifier[Unit] ( literal[string] )
identifier[specpp] . identifier[append] ( identifier[trapz_loglog] ( identifier[diffsigma] * identifier[J] , identifier[Ep] ))
identifier[self] . identifier[specpp] = identifier[u] . identifier[Quantity] ( identifier[specpp] )
identifier[self] . identifier[specpp] *= identifier[self] . identifier[nh] * identifier[c] . identifier[cgs]
keyword[return] identifier[self] . identifier[specpp] . identifier[to] ( literal[string] ) | def _spectrum(self, photon_energy):
"""
Compute differential spectrum from pp interactions using the
parametrization of Kafexhiu, E., Aharonian, F., Taylor, A.~M., and
Vila, G.~S.\\ 2014, `arXiv:1406.7369
<http://www.arxiv.org/abs/1406.7369>`_.
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` instance
Photon energy array.
"""
# Load LUT if available, otherwise use self._diffsigma
if self.useLUT:
LUT_base = 'PionDecayKafexhiu14_LUT_'
if self.nuclear_enhancement:
LUT_base += 'NucEnh_' # depends on [control=['if'], data=[]]
LUT_fname = LUT_base + '{0}.npz'.format(self.hiEmodel)
# only reload LUT if it has changed or hasn't been loaded yet
try:
if os.path.basename(self.diffsigma.fname) != LUT_fname:
self._loadLUT(LUT_fname) # depends on [control=['if'], data=['LUT_fname']] # depends on [control=['try'], data=[]]
except AttributeError:
self._loadLUT(LUT_fname) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
self.diffsigma = self._diffsigma
Egamma = _validate_ene(photon_energy).to('GeV')
Ep = self._Ep * u.GeV
J = self._J * u.Unit('1/GeV')
specpp = []
for Eg in Egamma:
diffsigma = self.diffsigma(Ep.value, Eg.value) * u.Unit('cm2/GeV')
specpp.append(trapz_loglog(diffsigma * J, Ep)) # depends on [control=['for'], data=['Eg']]
self.specpp = u.Quantity(specpp)
self.specpp *= self.nh * c.cgs
return self.specpp.to('1/(s eV)') |
def _makedirs(name, mode=0o777, exist_ok=False):
"""Source: https://github.com/python/cpython/blob/
3ce3dea60646d8a5a1c952469a2eb65f937875b3/Lib/os.py#L196-L226
"""
head, tail = os.path.split(name)
if not tail:
head, tail = os.path.split(head)
if head and tail and not os.path.exists(head):
try:
_makedirs(head, exist_ok=exist_ok)
except OSError as e:
if e.errno != errno.EEXIST:
raise
cdir = os.curdir
if isinstance(tail, bytes):
cdir = bytes(os.curdir, "ASCII")
if tail == cdir:
return
try:
os.mkdir(name, mode)
except OSError:
if not exist_ok or not os.path.isdir(name):
raise | def function[_makedirs, parameter[name, mode, exist_ok]]:
constant[Source: https://github.com/python/cpython/blob/
3ce3dea60646d8a5a1c952469a2eb65f937875b3/Lib/os.py#L196-L226
]
<ast.Tuple object at 0x7da20c6a91b0> assign[=] call[name[os].path.split, parameter[name[name]]]
if <ast.UnaryOp object at 0x7da20c6abdc0> begin[:]
<ast.Tuple object at 0x7da20c6a9a20> assign[=] call[name[os].path.split, parameter[name[head]]]
if <ast.BoolOp object at 0x7da20c6ab880> begin[:]
<ast.Try object at 0x7da20c6aa410>
variable[cdir] assign[=] name[os].curdir
if call[name[isinstance], parameter[name[tail], name[bytes]]] begin[:]
variable[cdir] assign[=] call[name[bytes], parameter[name[os].curdir, constant[ASCII]]]
if compare[name[tail] equal[==] name[cdir]] begin[:]
return[None]
<ast.Try object at 0x7da20c6a8550> | keyword[def] identifier[_makedirs] ( identifier[name] , identifier[mode] = literal[int] , identifier[exist_ok] = keyword[False] ):
literal[string]
identifier[head] , identifier[tail] = identifier[os] . identifier[path] . identifier[split] ( identifier[name] )
keyword[if] keyword[not] identifier[tail] :
identifier[head] , identifier[tail] = identifier[os] . identifier[path] . identifier[split] ( identifier[head] )
keyword[if] identifier[head] keyword[and] identifier[tail] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[head] ):
keyword[try] :
identifier[_makedirs] ( identifier[head] , identifier[exist_ok] = identifier[exist_ok] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] != identifier[errno] . identifier[EEXIST] :
keyword[raise]
identifier[cdir] = identifier[os] . identifier[curdir]
keyword[if] identifier[isinstance] ( identifier[tail] , identifier[bytes] ):
identifier[cdir] = identifier[bytes] ( identifier[os] . identifier[curdir] , literal[string] )
keyword[if] identifier[tail] == identifier[cdir] :
keyword[return]
keyword[try] :
identifier[os] . identifier[mkdir] ( identifier[name] , identifier[mode] )
keyword[except] identifier[OSError] :
keyword[if] keyword[not] identifier[exist_ok] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[name] ):
keyword[raise] | def _makedirs(name, mode=511, exist_ok=False):
"""Source: https://github.com/python/cpython/blob/
3ce3dea60646d8a5a1c952469a2eb65f937875b3/Lib/os.py#L196-L226
"""
(head, tail) = os.path.split(name)
if not tail:
(head, tail) = os.path.split(head) # depends on [control=['if'], data=[]]
if head and tail and (not os.path.exists(head)):
try:
_makedirs(head, exist_ok=exist_ok) # depends on [control=['try'], data=[]]
except OSError as e:
if e.errno != errno.EEXIST:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
cdir = os.curdir
if isinstance(tail, bytes):
cdir = bytes(os.curdir, 'ASCII') # depends on [control=['if'], data=[]]
if tail == cdir:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
try:
os.mkdir(name, mode) # depends on [control=['try'], data=[]]
except OSError:
if not exist_ok or not os.path.isdir(name):
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] |
def count(self, minval, maxval, include_min=True, include_max=True):
    '''Returns the number of elements in the skiplist with a score
    between min and max.
    '''
    def normalize(raw_rank, bump_if_found):
        # A negative rank encodes "not found" as -(insertion point) - 1.
        if raw_rank < 0:
            return -raw_rank - 1
        return raw_rank + 1 if bump_if_found else raw_rank

    # Exclusive lower bound shifts past the matched element; inclusive
    # upper bound shifts past it so the subtraction counts it.
    lo = normalize(self.rank(minval), not include_min)
    hi = normalize(self.rank(maxval), include_max)
    return max(hi - lo, 0)
constant[Returns the number of elements in the skiplist with a score
between min and max.
]
variable[rank1] assign[=] call[name[self].rank, parameter[name[minval]]]
if compare[name[rank1] less[<] constant[0]] begin[:]
variable[rank1] assign[=] binary_operation[<ast.UnaryOp object at 0x7da204564460> - constant[1]]
variable[rank2] assign[=] call[name[self].rank, parameter[name[maxval]]]
if compare[name[rank2] less[<] constant[0]] begin[:]
variable[rank2] assign[=] binary_operation[<ast.UnaryOp object at 0x7da2045669e0> - constant[1]]
return[call[name[max], parameter[binary_operation[name[rank2] - name[rank1]], constant[0]]]] | keyword[def] identifier[count] ( identifier[self] , identifier[minval] , identifier[maxval] , identifier[include_min] = keyword[True] , identifier[include_max] = keyword[True] ):
literal[string]
identifier[rank1] = identifier[self] . identifier[rank] ( identifier[minval] )
keyword[if] identifier[rank1] < literal[int] :
identifier[rank1] =- identifier[rank1] - literal[int]
keyword[elif] keyword[not] identifier[include_min] :
identifier[rank1] += literal[int]
identifier[rank2] = identifier[self] . identifier[rank] ( identifier[maxval] )
keyword[if] identifier[rank2] < literal[int] :
identifier[rank2] =- identifier[rank2] - literal[int]
keyword[elif] identifier[include_max] :
identifier[rank2] += literal[int]
keyword[return] identifier[max] ( identifier[rank2] - identifier[rank1] , literal[int] ) | def count(self, minval, maxval, include_min=True, include_max=True):
"""Returns the number of elements in the skiplist with a score
between min and max.
"""
rank1 = self.rank(minval)
if rank1 < 0:
rank1 = -rank1 - 1 # depends on [control=['if'], data=['rank1']]
elif not include_min:
rank1 += 1 # depends on [control=['if'], data=[]]
rank2 = self.rank(maxval)
if rank2 < 0:
rank2 = -rank2 - 1 # depends on [control=['if'], data=['rank2']]
elif include_max:
rank2 += 1 # depends on [control=['if'], data=[]]
return max(rank2 - rank1, 0) |
def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False):
    """
    Return list of all dirs and files inside given dir.
    Also can filter contents to return only dirs or files.
    Args:
        - dir_name: Which directory we need to scan (relative)
        - get_dirs: Return dirs list
        - get_files: Return files list
        - hide_ignored: Exclude files and dirs with initial underscore
    """
    # When neither filter is requested, return everything.
    if get_dirs is None and get_files is None:
        get_dirs = get_files = True
    scan_root = os.path.join(settings.BASE_DIR, 'app', dir_name)
    entries = []
    for entry in os.listdir(scan_root):
        if hide_ignored and entry.startswith('_'):
            continue
        entry_is_dir = os.path.isdir(os.path.join(scan_root, entry))
        wanted = (entry_is_dir and get_dirs) or (not entry_is_dir and get_files)
        if wanted:
            entries.append(entry)
    return entries
constant[
Return list of all dirs and files inside given dir.
Also can filter contents to return only dirs or files.
Args:
- dir_name: Which directory we need to scan (relative)
- get_dirs: Return dirs list
- get_files: Return files list
- hide_ignored: Exclude files and dirs with initial underscore
]
if <ast.BoolOp object at 0x7da20c992bf0> begin[:]
variable[get_dirs] assign[=] constant[True]
variable[get_files] assign[=] constant[True]
variable[source_dir] assign[=] call[name[os].path.join, parameter[name[settings].BASE_DIR, constant[app], name[dir_name]]]
variable[dirs] assign[=] list[[]]
for taget[name[dir_or_file_name]] in starred[call[name[os].listdir, parameter[name[source_dir]]]] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[source_dir], name[dir_or_file_name]]]
if <ast.BoolOp object at 0x7da20c992320> begin[:]
continue
variable[is_dir] assign[=] call[name[os].path.isdir, parameter[name[path]]]
if <ast.BoolOp object at 0x7da20c993700> begin[:]
call[name[dirs].append, parameter[name[dir_or_file_name]]]
return[name[dirs]] | keyword[def] identifier[listdir] ( identifier[dir_name] , identifier[get_dirs] = keyword[None] , identifier[get_files] = keyword[None] , identifier[hide_ignored] = keyword[False] ):
literal[string]
keyword[if] identifier[get_dirs] keyword[is] keyword[None] keyword[and] identifier[get_files] keyword[is] keyword[None] :
identifier[get_dirs] = keyword[True]
identifier[get_files] = keyword[True]
identifier[source_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[settings] . identifier[BASE_DIR] , literal[string] , identifier[dir_name] )
identifier[dirs] =[]
keyword[for] identifier[dir_or_file_name] keyword[in] identifier[os] . identifier[listdir] ( identifier[source_dir] ):
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[source_dir] , identifier[dir_or_file_name] )
keyword[if] identifier[hide_ignored] keyword[and] identifier[dir_or_file_name] . identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[is_dir] = identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] )
keyword[if] identifier[get_dirs] keyword[and] identifier[is_dir] keyword[or] identifier[get_files] keyword[and] keyword[not] identifier[is_dir] :
identifier[dirs] . identifier[append] ( identifier[dir_or_file_name] )
keyword[return] identifier[dirs] | def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False):
"""
Return list of all dirs and files inside given dir.
Also can filter contents to return only dirs or files.
Args:
- dir_name: Which directory we need to scan (relative)
- get_dirs: Return dirs list
- get_files: Return files list
- hide_ignored: Exclude files and dirs with initial underscore
"""
if get_dirs is None and get_files is None:
get_dirs = True
get_files = True # depends on [control=['if'], data=[]]
source_dir = os.path.join(settings.BASE_DIR, 'app', dir_name)
dirs = []
for dir_or_file_name in os.listdir(source_dir):
path = os.path.join(source_dir, dir_or_file_name)
if hide_ignored and dir_or_file_name.startswith('_'):
continue # depends on [control=['if'], data=[]]
is_dir = os.path.isdir(path)
if get_dirs and is_dir or (get_files and (not is_dir)):
dirs.append(dir_or_file_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dir_or_file_name']]
return dirs |
def load_params(self, path, exclude_free_params=False):
    """
    Load parameters from the file at ``path`` into this block.

    The block is first compiled into a standalone computational graph,
    and the parameters are loaded through the graph's own
    ``load_params`` machinery.

    :param path: path of the saved-parameters file.
    :param exclude_free_params: forwarded to the graph's ``load_params``;
        presumably skips free parameters when True -- confirm in
        deepy.core.
    """
    # Imported lazily, as in the original code -- presumably to avoid a
    # circular import at module load time.  (The previous version also
    # imported ComputationalGraph here, but never used it, and placed
    # this docstring AFTER the import, turning it into a no-op string
    # expression instead of the function's docstring.)
    from deepy.core import graph
    model = graph.compile(blocks=[self])
    model.load_params(path, exclude_free_params=exclude_free_params)
from relative_module[deepy.core] import module[graph]
constant[
Load parameters to the block.
]
from relative_module[deepy.core.comp_graph] import module[ComputationalGraph]
variable[model] assign[=] call[name[graph].compile, parameter[]]
call[name[model].load_params, parameter[name[path]]] | keyword[def] identifier[load_params] ( identifier[self] , identifier[path] , identifier[exclude_free_params] = keyword[False] ):
keyword[from] identifier[deepy] . identifier[core] keyword[import] identifier[graph]
literal[string]
keyword[from] identifier[deepy] . identifier[core] . identifier[comp_graph] keyword[import] identifier[ComputationalGraph]
identifier[model] = identifier[graph] . identifier[compile] ( identifier[blocks] =[ identifier[self] ])
identifier[model] . identifier[load_params] ( identifier[path] , identifier[exclude_free_params] = identifier[exclude_free_params] ) | def load_params(self, path, exclude_free_params=False):
from deepy.core import graph
'\n Load parameters to the block.\n '
from deepy.core.comp_graph import ComputationalGraph
model = graph.compile(blocks=[self])
model.load_params(path, exclude_free_params=exclude_free_params) |
def _repair(record: Dict[str, Any]) -> Dict[str, Any]:
"""Repair a corrupted IterationRecord with a specific known issue."""
output_records = record.get("output_records")
if record.get("_type", None) == "IterationRecord" and output_records is not None:
birdsite_record = output_records.get("birdsite")
# check for the bug
if isinstance(birdsite_record, dict) and birdsite_record.get("_type") == "IterationRecord":
# get to the bottom of the corrupted record
failed = False
while birdsite_record.get("_type") == "IterationRecord":
sub_record = birdsite_record.get("output_records")
if sub_record is None:
failed = True
break
birdsite_record = sub_record.get("birdsite")
if birdsite_record is None:
failed = True
break
if failed:
return record
# add type
birdsite_record["_type"] = TweetRecord.__name__
# lift extra keys, just in case
if "extra_keys" in birdsite_record:
record_extra_values = record.get("extra_keys", {})
for key, value in birdsite_record["extra_keys"].items():
if key not in record_extra_values:
record_extra_values[key] = value
record["extra_keys"] = record_extra_values
del birdsite_record["extra_keys"]
output_records["birdsite"] = birdsite_record
# pull that correct record up to the top level, fixing corruption
record["output_records"] = output_records
return record | def function[_repair, parameter[record]]:
constant[Repair a corrupted IterationRecord with a specific known issue.]
variable[output_records] assign[=] call[name[record].get, parameter[constant[output_records]]]
if <ast.BoolOp object at 0x7da18f09f220> begin[:]
variable[birdsite_record] assign[=] call[name[output_records].get, parameter[constant[birdsite]]]
if <ast.BoolOp object at 0x7da2054a5960> begin[:]
variable[failed] assign[=] constant[False]
while compare[call[name[birdsite_record].get, parameter[constant[_type]]] equal[==] constant[IterationRecord]] begin[:]
variable[sub_record] assign[=] call[name[birdsite_record].get, parameter[constant[output_records]]]
if compare[name[sub_record] is constant[None]] begin[:]
variable[failed] assign[=] constant[True]
break
variable[birdsite_record] assign[=] call[name[sub_record].get, parameter[constant[birdsite]]]
if compare[name[birdsite_record] is constant[None]] begin[:]
variable[failed] assign[=] constant[True]
break
if name[failed] begin[:]
return[name[record]]
call[name[birdsite_record]][constant[_type]] assign[=] name[TweetRecord].__name__
if compare[constant[extra_keys] in name[birdsite_record]] begin[:]
variable[record_extra_values] assign[=] call[name[record].get, parameter[constant[extra_keys], dictionary[[], []]]]
for taget[tuple[[<ast.Name object at 0x7da18bcc9450>, <ast.Name object at 0x7da18bccba90>]]] in starred[call[call[name[birdsite_record]][constant[extra_keys]].items, parameter[]]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[record_extra_values]] begin[:]
call[name[record_extra_values]][name[key]] assign[=] name[value]
call[name[record]][constant[extra_keys]] assign[=] name[record_extra_values]
<ast.Delete object at 0x7da18bccbb50>
call[name[output_records]][constant[birdsite]] assign[=] name[birdsite_record]
call[name[record]][constant[output_records]] assign[=] name[output_records]
return[name[record]] | keyword[def] identifier[_repair] ( identifier[record] : identifier[Dict] [ identifier[str] , identifier[Any] ])-> identifier[Dict] [ identifier[str] , identifier[Any] ]:
literal[string]
identifier[output_records] = identifier[record] . identifier[get] ( literal[string] )
keyword[if] identifier[record] . identifier[get] ( literal[string] , keyword[None] )== literal[string] keyword[and] identifier[output_records] keyword[is] keyword[not] keyword[None] :
identifier[birdsite_record] = identifier[output_records] . identifier[get] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[birdsite_record] , identifier[dict] ) keyword[and] identifier[birdsite_record] . identifier[get] ( literal[string] )== literal[string] :
identifier[failed] = keyword[False]
keyword[while] identifier[birdsite_record] . identifier[get] ( literal[string] )== literal[string] :
identifier[sub_record] = identifier[birdsite_record] . identifier[get] ( literal[string] )
keyword[if] identifier[sub_record] keyword[is] keyword[None] :
identifier[failed] = keyword[True]
keyword[break]
identifier[birdsite_record] = identifier[sub_record] . identifier[get] ( literal[string] )
keyword[if] identifier[birdsite_record] keyword[is] keyword[None] :
identifier[failed] = keyword[True]
keyword[break]
keyword[if] identifier[failed] :
keyword[return] identifier[record]
identifier[birdsite_record] [ literal[string] ]= identifier[TweetRecord] . identifier[__name__]
keyword[if] literal[string] keyword[in] identifier[birdsite_record] :
identifier[record_extra_values] = identifier[record] . identifier[get] ( literal[string] ,{})
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[birdsite_record] [ literal[string] ]. identifier[items] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[record_extra_values] :
identifier[record_extra_values] [ identifier[key] ]= identifier[value]
identifier[record] [ literal[string] ]= identifier[record_extra_values]
keyword[del] identifier[birdsite_record] [ literal[string] ]
identifier[output_records] [ literal[string] ]= identifier[birdsite_record]
identifier[record] [ literal[string] ]= identifier[output_records]
keyword[return] identifier[record] | def _repair(record: Dict[str, Any]) -> Dict[str, Any]:
"""Repair a corrupted IterationRecord with a specific known issue."""
output_records = record.get('output_records')
if record.get('_type', None) == 'IterationRecord' and output_records is not None:
birdsite_record = output_records.get('birdsite')
# check for the bug
if isinstance(birdsite_record, dict) and birdsite_record.get('_type') == 'IterationRecord':
# get to the bottom of the corrupted record
failed = False
while birdsite_record.get('_type') == 'IterationRecord':
sub_record = birdsite_record.get('output_records')
if sub_record is None:
failed = True
break # depends on [control=['if'], data=[]]
birdsite_record = sub_record.get('birdsite')
if birdsite_record is None:
failed = True
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if failed:
return record # depends on [control=['if'], data=[]]
# add type
birdsite_record['_type'] = TweetRecord.__name__
# lift extra keys, just in case
if 'extra_keys' in birdsite_record:
record_extra_values = record.get('extra_keys', {})
for (key, value) in birdsite_record['extra_keys'].items():
if key not in record_extra_values:
record_extra_values[key] = value # depends on [control=['if'], data=['key', 'record_extra_values']] # depends on [control=['for'], data=[]]
record['extra_keys'] = record_extra_values
del birdsite_record['extra_keys'] # depends on [control=['if'], data=['birdsite_record']]
output_records['birdsite'] = birdsite_record # depends on [control=['if'], data=[]]
# pull that correct record up to the top level, fixing corruption
record['output_records'] = output_records # depends on [control=['if'], data=[]]
return record |
def team(self, name=None, id=None, is_hidden=False, **kwargs):
    """
    Team of KE-chain.

    Provides a team of :class:`Team` of KE-chain. You can filter on team name or provide id.

    :param name: (optional) team name to filter
    :type name: basestring or None
    :param id: (optional) id of the user to filter
    :type id: basestring or None
    :param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden)
    :type is_hidden: bool or None
    :param kwargs: Additional filtering keyword=value arguments
    :type kwargs: dict or None
    :return: a single :class:`Team`
    :raises NotFoundError: when a team could not be found
    :raises MultipleFoundError: when more than a single team can be found
    """
    # Fix: forward `is_hidden` to `teams()` -- previously the parameter was
    # accepted and documented but silently ignored.
    _teams = self.teams(name=name, id=id, is_hidden=is_hidden, **kwargs)
    if len(_teams) == 0:
        raise NotFoundError("No team criteria matches")
    if len(_teams) != 1:
        raise MultipleFoundError("Multiple teams fit criteria")
    return _teams[0]
constant[
Team of KE-chain.
Provides a team of :class:`Team` of KE-chain. You can filter on team name or provide id.
:param name: (optional) team name to filter
:type name: basestring or None
:param id: (optional) id of the user to filter
:type id: basestring or None
:param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden)
:type is_hidden: bool or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Team`
:raises NotFoundError: when a user could not be found
:raises MultipleFoundError: when more than a single user can be found
]
variable[_teams] assign[=] call[name[self].teams, parameter[]]
if compare[call[name[len], parameter[name[_teams]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da2049632e0>
if compare[call[name[len], parameter[name[_teams]]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da204963a60>
return[call[name[_teams]][constant[0]]] | keyword[def] identifier[team] ( identifier[self] , identifier[name] = keyword[None] , identifier[id] = keyword[None] , identifier[is_hidden] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[_teams] = identifier[self] . identifier[teams] ( identifier[name] = identifier[name] , identifier[id] = identifier[id] ,** identifier[kwargs] )
keyword[if] identifier[len] ( identifier[_teams] )== literal[int] :
keyword[raise] identifier[NotFoundError] ( literal[string] )
keyword[if] identifier[len] ( identifier[_teams] )!= literal[int] :
keyword[raise] identifier[MultipleFoundError] ( literal[string] )
keyword[return] identifier[_teams] [ literal[int] ] | def team(self, name=None, id=None, is_hidden=False, **kwargs):
"""
Team of KE-chain.
Provides a team of :class:`Team` of KE-chain. You can filter on team name or provide id.
:param name: (optional) team name to filter
:type name: basestring or None
:param id: (optional) id of the user to filter
:type id: basestring or None
:param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden)
:type is_hidden: bool or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Team`
:raises NotFoundError: when a user could not be found
:raises MultipleFoundError: when more than a single user can be found
"""
_teams = self.teams(name=name, id=id, **kwargs)
if len(_teams) == 0:
raise NotFoundError('No team criteria matches') # depends on [control=['if'], data=[]]
if len(_teams) != 1:
raise MultipleFoundError('Multiple teams fit criteria') # depends on [control=['if'], data=[]]
return _teams[0] |
def __hardparse(self,meterPos,pos_i=None,slot_i=None,num_slots=None,all_positions=None,parse=None):
    """Score one metrical position against this constraint.

    Returns a violation score for *meterPos*: 0 means no violation, and
    positive values are (multiples of) ``self.weight``.  Which branch runs
    is selected entirely by ``self.name``: dotted Kiparsky-style names
    ("promtype.s=>p"), ``initialstrong*``, ``functiontow*``, ``footmin*``,
    ``word-elision``, ``attridge-ss-not-by-ww``, and the post-hoc
    constraints (``headedness*``, ``number_feet*``, ``posthoc*``) that only
    fire on the line's final position.

    :param meterPos: the metrical position (has ``.slots`` and ``.meterVal``)
    :param pos_i: index of *meterPos* within *all_positions*
    :param slot_i: index of the current slot within the line
    :param num_slots: total number of slots in the line
    :param all_positions: all positions of the candidate parse so far
    :param parse: the parse object (read for ``constraintNames``; the
        attridge branch also mutates its ``constraintScores``)

    NOTE(review): the ``attridge-ss-not-by-ww`` branch scores *previous*
    positions as a side effect instead of returning a value -- confirm
    callers expect that.
    """
    import prosodic as p
    #if meterPos.slots[0].i<2:
    #	print meterPos.slots[0].word
    #print meterPos,pos_i,slot_i,num_slots,all_positions
    #prevpos=all_positions[pos_i-1]
    #print pos_i, meterPos, prevpos, pos_i,pos_i-1,all_positions, len(meterPos.slots)

    if '.' in self.name:	# kiparsky self.names
        ## load variables
        #exception for first foot
        #if 'skip_initial_foot' in parse.constraintNames:
        #	if meterPos.slots[0].i<2:
        #		return 0
        # Extrametrical openings: skip scoring the first position (or the
        # first two), depending on which meter option is active.
        if 'extrametrical-first-pos' in parse.constraintNames and pos_i==0:
            return 0
        elif 'skip_initial_foot' in parse.constraintNames and pos_i in [0,1]:
            return 0

        # Name format is "<promType>.<meterVal>=><prom>", e.g. "stress.s=>-u".
        promSite = self.name.split(".")[1]
        promType = self.name.split(".")[0]
        promSite_meter = promSite.split("=>")[0].strip()	# s/w
        promSite_prom = promSite.split("=>")[1].strip()	# +- u/p
        if meterPos.meterVal != promSite_meter:	# then this constraint does not apply
            return 0

        if promSite_prom[0:1] == "-":	# -u or -p: eg, if s=>-u, then NOT EVEN ONE s can be u(nprom)
            promSite_isneg = True
            promSite_prom = promSite_prom[1:]	# u or p
        else:
            promSite_isneg = False	# u or p: eg, if s=>p, then AT LEAST ONE s must be p(rom)
        """
        Removed 4/12/2017: apparently there was an option to restrict just 'P'rimary stresses
        But required using an uppercase P in the meter config. This was nowhere stated elsewhere
        and has never been used. I'm disabling it. Let's just use a separate prominence type
        if we want to restrict only primary stresses.
        if promSite_prom.lower()==promSite_prom:
            promSite_prom = (promSite_prom == 'p')	# string 2 boolean: p:True, u:False
        else:
            if promSite_prom=="P":
                promSite_prom=1.0
            #elif promSite_prom=="U":
            else:
                promSite_prom=0.0
        """
        promSite_prom = (promSite_prom == 'p')	# string 2 boolean: p:True, u:False

        # NOT EVEN ONE unit_prom can be promSite_prom:
        if promSite_isneg:
            numtrue=0
            for slot in meterPos.slots:
                slot_prom=slot.feature('prom.'+promType,True)
                if slot_prom==None: continue
                #if type(promSite_prom)==type(True):
                #	slot_prom=bool(slot_prom)

                # Phrasal stress is graded, so it is thresholded rather
                # than merely boolean-cast.
                pstress_thresh=self.meter.config.get('phrasal_stress_threshold',PSTRESS_THRESH_DEFAULT)
                try:
                    pstress_thresh=float(pstress_thresh)
                except ValueError:
                    pstress_thresh=PSTRESS_THRESH_DEFAULT
                bool_prom_type = bool(slot_prom) if promType!='phrasal_stress' else slot_prom<=pstress_thresh
                if bool_prom_type == promSite_prom:
                    #numtrue+=float(slot_prom)
                    return self.weight
            #return 2 if numtrue else 0
            #print self.weight, numtrue

            ## CHANGED 10/10/2016: This constraint returns its weight
            ## *times* the number of slots/syllables that violated it.
            ## CHANGED 4/12/2017: numtrue is actually float of the prominence
            ## so for phrasal stress is its p-stress value, for seconday stress is 0.5, etc.
            # NOTE(review): numtrue is never incremented (the increment is
            # commented out above), so this line always returns 0 -- confirm
            # whether the per-syllable weighting described above is intended.
            return self.weight * numtrue
            #return 0

        # AT LEAST ONE unit_prom must be promSite_prom (or else, violate):
        else:
            violated=True
            ran=False
            for slot in meterPos.slots:
                slot_prom=slot.feature('prom.'+promType,True)
                if slot_prom==None:
                    continue
                ran=True
                if bool(slot_prom)==promSite_prom:
                    violated=False
            # Only violate if at least one slot actually had this feature.
            if ran and violated:
                return self.weight
            else:
                return 0

    elif self.name.lower().startswith('initialstrong'):
        # First position of the line must not be strong.
        #if meterPos.slots[0].i==0:
        if pos_i==0:
            if meterPos.meterVal == 's':
                return self.weight
        return 0

    elif self.name.lower().startswith('functiontow'):
        # Function words should not occupy strong positions.
        #exception for first foot
        if p.config.get('skip_initial_foot',0):
            if meterPos.slots[0].i<2:
                return 0
        if meterPos.meterVal != 's':	# then this constraint does not apply
            return 0
        vio = 0
        for slot in meterPos.slots:
            if slot.word.feature('functionword'):
                vio += self.weight
        return vio

    elif self.name.lower().startswith('footmin'):
        # Constraints on disyllabic (two-slot) positions; 3+ slots are a
        # categorical violation, single slots never violate.
        if len(meterPos.slots) < 2:
            return 0
        elif len(meterPos.slots) > 2:
            return self.weight

        name=self.name.lower()
        a = meterPos.slots[0]
        b = meterPos.slots[1]

        ## should this apply to ALL foomin constraints?
        #if ( bool(a.feature('prom.stress',True)) and bool(b.feature('prom.stress',True))):
        #	return self.weight
        ##

        if name=='footmin-nohx':
            # First of the two syllables may not be heavy.
            if (bool(a.feature('prom.weight',True))):
                return self.weight

        if name=='footmin-w-resolution':
            if a.word != b.word: return 0	# only applies within word-boundaries
            firstsyll_islight=bool(a.feature('prom.weight',True)) == False
            firstsyll_isstressed=bool(a.feature('prom.stress',True)) == True
            if not (firstsyll_islight and firstsyll_isstressed):
                return self.weight

        if name=='footmin-f-resolution':
            if a.word == b.word: return 0	# only applies to word-boundaries
            if meterPos.meterVal=='s': return self.weight	# cannot apply to strong positions
            a_is_fw = bool(a.word.feature('functionword'))
            b_is_fw = bool(b.word.feature('functionword'))
            if not (a_is_fw and b_is_fw): return self.weight

        if name=='footmin-s-nohx':
            # In a strong position: no heavy first syllable, and both
            # syllables must belong to the same word.
            if meterPos.meterVal=='s':
                if bool(a.feature('prom.weight',True)) or a.word!=b.word:
                    return self.weight

        if "nolh" in name:
            # Second syllable may not be heavy.
            if ( (bool(b.feature('prom.weight',True))) ):
                return self.weight

        if "strongconstraint" in name:
            if bool(b.feature('prom.strength',True)):
                return self.weight
            if bool(a.feature('prom.strength',True)):
                # Resolution escape hatch: a light, word-internal strong
                # first syllable followed by an unstressed syllable is fine.
                if not bool(a.feature('prom.weight',True)):
                    if a.word==b.word and not a.wordpos[0]==a.wordpos[1]:
                        if not bool(b.feature('prom.stress',True)):
                            return 0
                return self.weight

        if name=='footmin-none':
            return self.weight
        if name=='footmin-none-unless-in-first-two-positions':
            if pos_i!=0 and pos_i!=1:
                return self.weight
        if name=='footmin-none-unless-in-second-position':
            if pos_i!=1:
                return self.weight
        if name=='footmin-no-s': return self.weight * int(meterPos.meterVal=='s')
        if name=='footmin-no-w': return self.weight * int(meterPos.meterVal=='w')

        if name=='footmin-no-s-unless-preceded-by-ww':
            # @TODO: bug when number of syllables in maxW is > 2 ?
            if meterPos.meterVal!='s': return 0
            if pos_i==0: return self.weight
            prevpos=all_positions[pos_i-1]
            #print pos_i, meterPos, prevpos, pos_i,pos_i-1,all_positions
            if len(prevpos.slots)>1 and prevpos.meterVal=='w':
                return 0
            return self.weight

        if "wordbound" in name:
            if name=='footmin-wordbound':
                if a.word!=b.word:
                    return self.weight
            if "nomono" in name:
                if (a.word.numSyll==1 or b.word.numSyll==1):
                    return self.weight
            if 'lexmono' in name:
                #if a.word.numSyll==1 and a.word.stress=="P"
                if a.word.isLexMono() or b.word.isLexMono():
                    return self.weight

            ## everyone is happy if both are function words
            if a.word.feature('functionword') and b.word.feature('functionword'):
                return 0

            if a.word!=b.word:
                if "bothnotfw" in name:
                    if not (a.word.feature('functionword') and b.word.feature('functionword')):
                        return self.weight
                # NOTE(review): the three bare-string tests below are always
                # truthy, so "neitherfw" is taken whenever "bothnotfw" is not
                # in the name -- likely meant `elif "neitherfw" in name:` etc.
                # Left as-is to preserve current behavior; confirm intent.
                elif "neitherfw":
                    if not (a.word.feature('functionword') or b.word.feature('functionword')):
                        return self.weight
                elif "leftfw":
                    if not (a.word.feature('functionword')):
                        return self.weight
                elif "rightfw":
                    if not (b.word.feature('functionword')):
                        return self.weight

            # only remaining possibilities:
            # i) slots a,b are from the same word
            # ii) slots a,b are from contiguous words which are the same (haPPY HAppy)
            if a.wordpos[0]==a.wordpos[1]:	# in the firs slot's (start,end) wordpos : if (start==end) : then poss. (ii) above
                if "bothnotfw" in name:
                    if not (a.word.feature('functionword') and b.word.feature('functionword')):
                        return self.weight
                # NOTE(review): same always-truthy bare-string tests as above.
                elif "neitherfw":
                    if not (a.word.feature('functionword') or b.word.feature('functionword')):
                        return self.weight
                elif "leftfw":
                    if not (a.word.feature('functionword')):
                        return self.weight
                elif "rightfw":
                    if not (b.word.feature('functionword')):
                        return self.weight

            # poss. (i) remains
            return 0

    ## Constraints about words
    if self.name=='word-elision':
        # Violate when this position contains the final syllable of a word
        # marked as an elision.
        words=set([slot.word for slot in meterPos.slots if hasattr(slot.word,'is_elision') and slot.word.is_elision])
        sylls=[]
        for slot in meterPos.slots: sylls+=slot.children
        for word in words:
            lastsyll=word.children[-1]
            if lastsyll in sylls:	# only break if this position contains the word's final syllable
                return self.weight

    # is this the end?
    is_end = slot_i+1==num_slots and meterPos.slots==all_positions[-1].slots

    ## CONSTRAINTS ON PREVIOUS POSITIONS
    """
    ABANDONED TEMPORARILY AS NOT POSSIBLE GIVEN THAT PARSES ARE BOUNDED AS PARSING GOES ON
    """
    if self.name=='attridge-ss-not-by-ww':
        # An 'ss' pair must be flanked by a 'ww' pair (before or after).
        #if meterPos.meterVal!='s': return 0
        #if not is_end and meterPos.meterVal2 == 'ss':
        #	parse.pauseComparisons=True
        if pos_i==0: return 0
        prevpos=all_positions[pos_i-1]
        prevprevpos=all_positions[pos_i-2] if (pos_i-2)>=0 else None
        #print prevprevpos,prevpos,meterPos
        #print prevprevpos.meterVal2 if prevprevpos else None,prevpos.meterVal2, meterPos.meterVal2
        #print prevprevpos,prevpos,meterPos
        #print prevprevpos.meterVal2 if prevprevpos else None,prevpos.meterVal2, meterPos.meterVal2
        #print dir(prevprevpos) if prevprevpos else None
        #print dir(prevpos) if prevprevpos else None
        #print dir(meterPos)
        #print
        if prevpos.meterVal2 == 'ss':
            # A 'ww' may only "serve" one 'ss'; the _flag_* attribute marks
            # it as used up.
            #if (prevprevpos and prevprevpos.meterVal2=='ww')
            if (prevprevpos and prevprevpos.meterVal2=='ww') and (not hasattr(prevprevpos,'_flag_already_served_as_ww')):
                prevprevpos._flag_already_served_as_ww=True
                pass
            elif meterPos.meterVal2=='ww' and (not hasattr(meterPos,'_flag_already_served_as_ww')):
                meterPos._flag_already_served_as_ww=True
                pass
            else:
                # Retroactively charge the violation to the previous
                # position (side effect on parse and prevpos).
                #print 'ERROR!'
                for cnstr in prevpos.constraintScores:
                    if cnstr.name==self.name:
                        prevpos.constraintScores[cnstr]=self.weight
                        parse.constraintScores[cnstr]+=self.weight
            #parse.pauseComparisons=False
        elif is_end and meterPos.meterVal2=='ss':
            #parse.pauseComparisons=False
            if prevpos.meterVal2=='ww':
                pass
            else:
                #print 'ERROR!'
                return self.weight
        #print
    #"""

    ## POST HOC CONSTRAINTS
    # These only fire on the final position of the line, once the whole
    # parse shape is known.
    if is_end:
        final_meter_str=''.join([''.join(pos.meterVal for slot in pos.slots) for pos in all_positions])
        #print final_meter_str

        # headedness
        if self.name.startswith('headedness'):
            shouldbe = self.name.split('!=')[-1]
            """
            Approach 1: This approach doesn't really work on individual lines:
            # binary or ternary?
            weak_pos = [pos for pos in all_positions if pos.meterVal=='w']
            if len(weak_pos)<2: return 0
            weak_pos_types = [''.join('w' for slot in pos.slots) for pos in weak_pos]
            if weak_pos_types.count('ww')>weak_pos_types.count('w'): # ternary
                if final_meter_str[3]=='w': # anapestic
                    headedness = 'rising'
                else: # dactylic
                    headedness = 'falling'
            else: # binary
                if final_meter_str[3]=='w':
                    headedness = 'falling' # trochaic
                else:
                    headedness = 'rising'
            if shouldbe != headedness:
                return self.weight
            """

            """
            Approach 2: count 'ws' vs 'sw' pairs and give categorical violation
            """
            quasi_feet=[''.join(x) for x in tools.slice([pos.meterVal for pos in all_positions],slice_length=2,runts=False)]
            headedness = 'rising' if quasi_feet.count('ws')>=quasi_feet.count('sw') else 'falling'
            #print final_meter_str
            #print quasi_feet
            #print headedness
            #print
            if shouldbe != headedness:
                return self.weight
            #"""

            """
            Approach 3: count 'ws' vs 'sw' pairs and give violation/num-pos per off foot
            quasi_feet=[''.join(x) for x in tools.slice([pos.meterVal for pos in all_positions],slice_length=2,runts=True)]
            if shouldbe == 'rising':
                num_not_rising = float(len([ft for ft in quasi_feet if ft!='ws']))
                return num_not_rising / float(len(all_positions)) * float(self.weight)
            elif shouldbe == 'falling':
                num_not_falling = float(len([ft for ft in quasi_feet if ft!='sw']))
                return num_not_falling / float(len(all_positions)) * float(self.weight)
            """

        # number of feet
        if self.name.startswith('number_feet'):
            shouldbe = int(self.name.split('!=')[-1])
            strong_pos = [pos for pos in all_positions if pos.meterVal=='s']
            num_feet = len(strong_pos)	# debatable
            if shouldbe != num_feet:
                return self.weight

        # other posthoc constraints
        if self.name.startswith('posthoc'):
            if self.name=='posthoc-no-final-ww':
                if len(all_positions[-1].slots)>1 and all_positions[-1].meterVal=='w':
                    return self.weight
            if self.name=='posthoc-no-final-w':
                if all_positions[-1].meterVal=='w':
                    return self.weight
            if self.name=='posthoc-standardize-weakpos':
                # Violate once per weak position that deviates from the
                # majority weak-position shape.
                weak_pos = [pos for pos in all_positions if pos.meterVal=='w']
                if len(weak_pos)<2: return 0
                weak_pos_types = [''.join('w' for slot in pos.slots) for pos in weak_pos]
                maxcount = max([weak_pos_types.count(wtype) for wtype in set(weak_pos_types)])
                diff = len(weak_pos) - maxcount
                return self.weight*diff

    # made it through this minefield, eh?
    return 0
import module[prosodic] as alias[p]
if compare[constant[.] in name[self].name] begin[:]
if <ast.BoolOp object at 0x7da20c6e7be0> begin[:]
return[constant[0]]
variable[promSite] assign[=] call[call[name[self].name.split, parameter[constant[.]]]][constant[1]]
variable[promType] assign[=] call[call[name[self].name.split, parameter[constant[.]]]][constant[0]]
variable[promSite_meter] assign[=] call[call[call[name[promSite].split, parameter[constant[=>]]]][constant[0]].strip, parameter[]]
variable[promSite_prom] assign[=] call[call[call[name[promSite].split, parameter[constant[=>]]]][constant[1]].strip, parameter[]]
if compare[name[meterPos].meterVal not_equal[!=] name[promSite_meter]] begin[:]
return[constant[0]]
if compare[call[name[promSite_prom]][<ast.Slice object at 0x7da18f811240>] equal[==] constant[-]] begin[:]
variable[promSite_isneg] assign[=] constant[True]
variable[promSite_prom] assign[=] call[name[promSite_prom]][<ast.Slice object at 0x7da18f8118a0>]
constant[
Removed 4/12/2017: apparently there was an option to restrict just 'P'rimary stresses
But required using an uppercase P in the meter config. This was nowhere stated elsewhere
and has never been used. I'm disabling it. Let's just use a separate prominence type
if we want to restrict only primary stresses.
if promSite_prom.lower()==promSite_prom:
promSite_prom = (promSite_prom == 'p') # string 2 boolean: p:True, u:False
else:
if promSite_prom=="P":
promSite_prom=1.0
#elif promSite_prom=="U":
else:
promSite_prom=0.0
]
variable[promSite_prom] assign[=] compare[name[promSite_prom] equal[==] constant[p]]
if name[promSite_isneg] begin[:]
variable[numtrue] assign[=] constant[0]
for taget[name[slot]] in starred[name[meterPos].slots] begin[:]
variable[slot_prom] assign[=] call[name[slot].feature, parameter[binary_operation[constant[prom.] + name[promType]], constant[True]]]
if compare[name[slot_prom] equal[==] constant[None]] begin[:]
continue
variable[pstress_thresh] assign[=] call[name[self].meter.config.get, parameter[constant[phrasal_stress_threshold], name[PSTRESS_THRESH_DEFAULT]]]
<ast.Try object at 0x7da18f811d20>
variable[bool_prom_type] assign[=] <ast.IfExp object at 0x7da18f810340>
if compare[name[bool_prom_type] equal[==] name[promSite_prom]] begin[:]
return[name[self].weight]
return[binary_operation[name[self].weight * name[numtrue]]]
if compare[name[self].name equal[==] constant[word-elision]] begin[:]
variable[words] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da207f9b340>]]
variable[sylls] assign[=] list[[]]
for taget[name[slot]] in starred[name[meterPos].slots] begin[:]
<ast.AugAssign object at 0x7da207f9b8b0>
for taget[name[word]] in starred[name[words]] begin[:]
variable[lastsyll] assign[=] call[name[word].children][<ast.UnaryOp object at 0x7da207f9a590>]
if compare[name[lastsyll] in name[sylls]] begin[:]
return[name[self].weight]
variable[is_end] assign[=] <ast.BoolOp object at 0x7da207f9ad70>
constant[
ABANDONED TEMPORARILY AS NOT POSSIBLE GIVEN THAT PARSES ARE BOUNDED AS PARSING GOES ON
]
if compare[name[self].name equal[==] constant[attridge-ss-not-by-ww]] begin[:]
if compare[name[pos_i] equal[==] constant[0]] begin[:]
return[constant[0]]
variable[prevpos] assign[=] call[name[all_positions]][binary_operation[name[pos_i] - constant[1]]]
variable[prevprevpos] assign[=] <ast.IfExp object at 0x7da207f98a90>
if compare[name[prevpos].meterVal2 equal[==] constant[ss]] begin[:]
if <ast.BoolOp object at 0x7da207f995a0> begin[:]
name[prevprevpos]._flag_already_served_as_ww assign[=] constant[True]
pass
if name[is_end] begin[:]
variable[final_meter_str] assign[=] call[constant[].join, parameter[<ast.ListComp object at 0x7da207f9b6d0>]]
if call[name[self].name.startswith, parameter[constant[headedness]]] begin[:]
variable[shouldbe] assign[=] call[call[name[self].name.split, parameter[constant[!=]]]][<ast.UnaryOp object at 0x7da18f09cbe0>]
constant[
Approach 1: This approach doesn't really work on individual lines:
# binary or ternary?
weak_pos = [pos for pos in all_positions if pos.meterVal=='w']
if len(weak_pos)<2: return 0
weak_pos_types = [''.join('w' for slot in pos.slots) for pos in weak_pos]
if weak_pos_types.count('ww')>weak_pos_types.count('w'): # ternary
if final_meter_str[3]=='w': # anapestic
headedness = 'rising'
else: # dactylic
headedness = 'falling'
else: # binary
if final_meter_str[3]=='w':
headedness = 'falling' # trochaic
else:
headedness = 'rising'
if shouldbe != headedness:
return self.weight
]
constant[
Approach 2: count 'ws' vs 'sw' pairs and give categorical violation
]
variable[quasi_feet] assign[=] <ast.ListComp object at 0x7da18f09c9d0>
variable[headedness] assign[=] <ast.IfExp object at 0x7da18f09e740>
if compare[name[shouldbe] not_equal[!=] name[headedness]] begin[:]
return[name[self].weight]
constant[
Approach 3: count 'ws' vs 'sw' pairs and give violation/num-pos per off foot
quasi_feet=[''.join(x) for x in tools.slice([pos.meterVal for pos in all_positions],slice_length=2,runts=True)]
if shouldbe == 'rising':
num_not_rising = float(len([ft for ft in quasi_feet if ft!='ws']))
return num_not_rising / float(len(all_positions)) * float(self.weight)
elif shouldbe == 'falling':
num_not_falling = float(len([ft for ft in quasi_feet if ft!='sw']))
return num_not_falling / float(len(all_positions)) * float(self.weight)
]
if call[name[self].name.startswith, parameter[constant[number_feet]]] begin[:]
variable[shouldbe] assign[=] call[name[int], parameter[call[call[name[self].name.split, parameter[constant[!=]]]][<ast.UnaryOp object at 0x7da18f09c3a0>]]]
variable[strong_pos] assign[=] <ast.ListComp object at 0x7da18f09f220>
variable[num_feet] assign[=] call[name[len], parameter[name[strong_pos]]]
if compare[name[shouldbe] not_equal[!=] name[num_feet]] begin[:]
return[name[self].weight]
if call[name[self].name.startswith, parameter[constant[posthoc]]] begin[:]
if compare[name[self].name equal[==] constant[posthoc-no-final-ww]] begin[:]
if <ast.BoolOp object at 0x7da18f09dbd0> begin[:]
return[name[self].weight]
if compare[name[self].name equal[==] constant[posthoc-no-final-w]] begin[:]
if compare[call[name[all_positions]][<ast.UnaryOp object at 0x7da18f09fa90>].meterVal equal[==] constant[w]] begin[:]
return[name[self].weight]
if compare[name[self].name equal[==] constant[posthoc-standardize-weakpos]] begin[:]
variable[weak_pos] assign[=] <ast.ListComp object at 0x7da18f09f7f0>
if compare[call[name[len], parameter[name[weak_pos]]] less[<] constant[2]] begin[:]
return[constant[0]]
variable[weak_pos_types] assign[=] <ast.ListComp object at 0x7da18f09ecb0>
variable[maxcount] assign[=] call[name[max], parameter[<ast.ListComp object at 0x7da18f09ff40>]]
variable[diff] assign[=] binary_operation[call[name[len], parameter[name[weak_pos]]] - name[maxcount]]
return[binary_operation[name[self].weight * name[diff]]]
return[constant[0]] | keyword[def] identifier[__hardparse] ( identifier[self] , identifier[meterPos] , identifier[pos_i] = keyword[None] , identifier[slot_i] = keyword[None] , identifier[num_slots] = keyword[None] , identifier[all_positions] = keyword[None] , identifier[parse] = keyword[None] ):
keyword[import] identifier[prosodic] keyword[as] identifier[p]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[name] :
keyword[if] literal[string] keyword[in] identifier[parse] . identifier[constraintNames] keyword[and] identifier[pos_i] == literal[int] :
keyword[return] literal[int]
keyword[elif] literal[string] keyword[in] identifier[parse] . identifier[constraintNames] keyword[and] identifier[pos_i] keyword[in] [ literal[int] , literal[int] ]:
keyword[return] literal[int]
identifier[promSite] = identifier[self] . identifier[name] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[promType] = identifier[self] . identifier[name] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[promSite_meter] = identifier[promSite] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
identifier[promSite_prom] = identifier[promSite] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[if] identifier[meterPos] . identifier[meterVal] != identifier[promSite_meter] :
keyword[return] literal[int]
keyword[if] identifier[promSite_prom] [ literal[int] : literal[int] ]== literal[string] :
identifier[promSite_isneg] = keyword[True]
identifier[promSite_prom] = identifier[promSite_prom] [ literal[int] :]
keyword[else] :
identifier[promSite_isneg] = keyword[False]
literal[string]
identifier[promSite_prom] =( identifier[promSite_prom] == literal[string] )
keyword[if] identifier[promSite_isneg] :
identifier[numtrue] = literal[int]
keyword[for] identifier[slot] keyword[in] identifier[meterPos] . identifier[slots] :
identifier[slot_prom] = identifier[slot] . identifier[feature] ( literal[string] + identifier[promType] , keyword[True] )
keyword[if] identifier[slot_prom] == keyword[None] : keyword[continue]
identifier[pstress_thresh] = identifier[self] . identifier[meter] . identifier[config] . identifier[get] ( literal[string] , identifier[PSTRESS_THRESH_DEFAULT] )
keyword[try] :
identifier[pstress_thresh] = identifier[float] ( identifier[pstress_thresh] )
keyword[except] identifier[ValueError] :
identifier[pstress_thresh] = identifier[PSTRESS_THRESH_DEFAULT]
identifier[bool_prom_type] = identifier[bool] ( identifier[slot_prom] ) keyword[if] identifier[promType] != literal[string] keyword[else] identifier[slot_prom] <= identifier[pstress_thresh]
keyword[if] identifier[bool_prom_type] == identifier[promSite_prom] :
keyword[return] identifier[self] . identifier[weight]
keyword[return] identifier[self] . identifier[weight] * identifier[numtrue]
keyword[else] :
identifier[violated] = keyword[True]
identifier[ran] = keyword[False]
keyword[for] identifier[slot] keyword[in] identifier[meterPos] . identifier[slots] :
identifier[slot_prom] = identifier[slot] . identifier[feature] ( literal[string] + identifier[promType] , keyword[True] )
keyword[if] identifier[slot_prom] == keyword[None] :
keyword[continue]
identifier[ran] = keyword[True]
keyword[if] identifier[bool] ( identifier[slot_prom] )== identifier[promSite_prom] :
identifier[violated] = keyword[False]
keyword[if] identifier[ran] keyword[and] identifier[violated] :
keyword[return] identifier[self] . identifier[weight]
keyword[else] :
keyword[return] literal[int]
keyword[elif] identifier[self] . identifier[name] . identifier[lower] (). identifier[startswith] ( literal[string] ):
keyword[if] identifier[pos_i] == literal[int] :
keyword[if] identifier[meterPos] . identifier[meterVal] == literal[string] :
keyword[return] identifier[self] . identifier[weight]
keyword[return] literal[int]
keyword[elif] identifier[self] . identifier[name] . identifier[lower] (). identifier[startswith] ( literal[string] ):
keyword[if] identifier[p] . identifier[config] . identifier[get] ( literal[string] , literal[int] ):
keyword[if] identifier[meterPos] . identifier[slots] [ literal[int] ]. identifier[i] < literal[int] :
keyword[return] literal[int]
keyword[if] identifier[meterPos] . identifier[meterVal] != literal[string] :
keyword[return] literal[int]
identifier[vio] = literal[int]
keyword[for] identifier[slot] keyword[in] identifier[meterPos] . identifier[slots] :
keyword[if] identifier[slot] . identifier[word] . identifier[feature] ( literal[string] ):
identifier[vio] += identifier[self] . identifier[weight]
keyword[return] identifier[vio]
keyword[elif] identifier[self] . identifier[name] . identifier[lower] (). identifier[startswith] ( literal[string] ):
keyword[if] identifier[len] ( identifier[meterPos] . identifier[slots] )< literal[int] :
keyword[return] literal[int]
keyword[elif] identifier[len] ( identifier[meterPos] . identifier[slots] )> literal[int] :
keyword[return] identifier[self] . identifier[weight]
identifier[name] = identifier[self] . identifier[name] . identifier[lower] ()
identifier[a] = identifier[meterPos] . identifier[slots] [ literal[int] ]
identifier[b] = identifier[meterPos] . identifier[slots] [ literal[int] ]
keyword[if] identifier[name] == literal[string] :
keyword[if] ( identifier[bool] ( identifier[a] . identifier[feature] ( literal[string] , keyword[True] ))):
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[name] == literal[string] :
keyword[if] identifier[a] . identifier[word] != identifier[b] . identifier[word] : keyword[return] literal[int]
identifier[firstsyll_islight] = identifier[bool] ( identifier[a] . identifier[feature] ( literal[string] , keyword[True] ))== keyword[False]
identifier[firstsyll_isstressed] = identifier[bool] ( identifier[a] . identifier[feature] ( literal[string] , keyword[True] ))== keyword[True]
keyword[if] keyword[not] ( identifier[firstsyll_islight] keyword[and] identifier[firstsyll_isstressed] ):
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[name] == literal[string] :
keyword[if] identifier[a] . identifier[word] == identifier[b] . identifier[word] : keyword[return] literal[int]
keyword[if] identifier[meterPos] . identifier[meterVal] == literal[string] : keyword[return] identifier[self] . identifier[weight]
identifier[a_is_fw] = identifier[bool] ( identifier[a] . identifier[word] . identifier[feature] ( literal[string] ))
identifier[b_is_fw] = identifier[bool] ( identifier[b] . identifier[word] . identifier[feature] ( literal[string] ))
keyword[if] keyword[not] ( identifier[a_is_fw] keyword[and] identifier[b_is_fw] ): keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[name] == literal[string] :
keyword[if] identifier[meterPos] . identifier[meterVal] == literal[string] :
keyword[if] identifier[bool] ( identifier[a] . identifier[feature] ( literal[string] , keyword[True] )) keyword[or] identifier[a] . identifier[word] != identifier[b] . identifier[word] :
keyword[return] identifier[self] . identifier[weight]
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[if] (( identifier[bool] ( identifier[b] . identifier[feature] ( literal[string] , keyword[True] )))):
keyword[return] identifier[self] . identifier[weight]
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[if] identifier[bool] ( identifier[b] . identifier[feature] ( literal[string] , keyword[True] )):
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[bool] ( identifier[a] . identifier[feature] ( literal[string] , keyword[True] )):
keyword[if] keyword[not] identifier[bool] ( identifier[a] . identifier[feature] ( literal[string] , keyword[True] )):
keyword[if] identifier[a] . identifier[word] == identifier[b] . identifier[word] keyword[and] keyword[not] identifier[a] . identifier[wordpos] [ literal[int] ]== identifier[a] . identifier[wordpos] [ literal[int] ]:
keyword[if] keyword[not] identifier[bool] ( identifier[b] . identifier[feature] ( literal[string] , keyword[True] )):
keyword[return] literal[int]
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[name] == literal[string] :
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[name] == literal[string] :
keyword[if] identifier[pos_i] != literal[int] keyword[and] identifier[pos_i] != literal[int] :
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[name] == literal[string] :
keyword[if] identifier[pos_i] != literal[int] :
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[name] == literal[string] : keyword[return] identifier[self] . identifier[weight] * identifier[int] ( identifier[meterPos] . identifier[meterVal] == literal[string] )
keyword[if] identifier[name] == literal[string] : keyword[return] identifier[self] . identifier[weight] * identifier[int] ( identifier[meterPos] . identifier[meterVal] == literal[string] )
keyword[if] identifier[name] == literal[string] :
keyword[if] identifier[meterPos] . identifier[meterVal] != literal[string] : keyword[return] literal[int]
keyword[if] identifier[pos_i] == literal[int] : keyword[return] identifier[self] . identifier[weight]
identifier[prevpos] = identifier[all_positions] [ identifier[pos_i] - literal[int] ]
keyword[if] identifier[len] ( identifier[prevpos] . identifier[slots] )> literal[int] keyword[and] identifier[prevpos] . identifier[meterVal] == literal[string] :
keyword[return] literal[int]
keyword[return] identifier[self] . identifier[weight]
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[if] identifier[name] == literal[string] :
keyword[if] identifier[a] . identifier[word] != identifier[b] . identifier[word] :
keyword[return] identifier[self] . identifier[weight]
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[if] ( identifier[a] . identifier[word] . identifier[numSyll] == literal[int] keyword[or] identifier[b] . identifier[word] . identifier[numSyll] == literal[int] ):
keyword[return] identifier[self] . identifier[weight]
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[if] identifier[a] . identifier[word] . identifier[isLexMono] () keyword[or] identifier[b] . identifier[word] . identifier[isLexMono] ():
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[a] . identifier[word] . identifier[feature] ( literal[string] ) keyword[and] identifier[b] . identifier[word] . identifier[feature] ( literal[string] ):
keyword[return] literal[int]
keyword[if] identifier[a] . identifier[word] != identifier[b] . identifier[word] :
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[if] keyword[not] ( identifier[a] . identifier[word] . identifier[feature] ( literal[string] ) keyword[and] identifier[b] . identifier[word] . identifier[feature] ( literal[string] )):
keyword[return] identifier[self] . identifier[weight]
keyword[elif] literal[string] :
keyword[if] keyword[not] ( identifier[a] . identifier[word] . identifier[feature] ( literal[string] ) keyword[or] identifier[b] . identifier[word] . identifier[feature] ( literal[string] )):
keyword[return] identifier[self] . identifier[weight]
keyword[elif] literal[string] :
keyword[if] keyword[not] ( identifier[a] . identifier[word] . identifier[feature] ( literal[string] )):
keyword[return] identifier[self] . identifier[weight]
keyword[elif] literal[string] :
keyword[if] keyword[not] ( identifier[b] . identifier[word] . identifier[feature] ( literal[string] )):
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[a] . identifier[wordpos] [ literal[int] ]== identifier[a] . identifier[wordpos] [ literal[int] ]:
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[if] keyword[not] ( identifier[a] . identifier[word] . identifier[feature] ( literal[string] ) keyword[and] identifier[b] . identifier[word] . identifier[feature] ( literal[string] )):
keyword[return] identifier[self] . identifier[weight]
keyword[elif] literal[string] :
keyword[if] keyword[not] ( identifier[a] . identifier[word] . identifier[feature] ( literal[string] ) keyword[or] identifier[b] . identifier[word] . identifier[feature] ( literal[string] )):
keyword[return] identifier[self] . identifier[weight]
keyword[elif] literal[string] :
keyword[if] keyword[not] ( identifier[a] . identifier[word] . identifier[feature] ( literal[string] )):
keyword[return] identifier[self] . identifier[weight]
keyword[elif] literal[string] :
keyword[if] keyword[not] ( identifier[b] . identifier[word] . identifier[feature] ( literal[string] )):
keyword[return] identifier[self] . identifier[weight]
keyword[return] literal[int]
keyword[if] identifier[self] . identifier[name] == literal[string] :
identifier[words] = identifier[set] ([ identifier[slot] . identifier[word] keyword[for] identifier[slot] keyword[in] identifier[meterPos] . identifier[slots] keyword[if] identifier[hasattr] ( identifier[slot] . identifier[word] , literal[string] ) keyword[and] identifier[slot] . identifier[word] . identifier[is_elision] ])
identifier[sylls] =[]
keyword[for] identifier[slot] keyword[in] identifier[meterPos] . identifier[slots] : identifier[sylls] += identifier[slot] . identifier[children]
keyword[for] identifier[word] keyword[in] identifier[words] :
identifier[lastsyll] = identifier[word] . identifier[children] [- literal[int] ]
keyword[if] identifier[lastsyll] keyword[in] identifier[sylls] :
keyword[return] identifier[self] . identifier[weight]
identifier[is_end] = identifier[slot_i] + literal[int] == identifier[num_slots] keyword[and] identifier[meterPos] . identifier[slots] == identifier[all_positions] [- literal[int] ]. identifier[slots]
literal[string]
keyword[if] identifier[self] . identifier[name] == literal[string] :
keyword[if] identifier[pos_i] == literal[int] : keyword[return] literal[int]
identifier[prevpos] = identifier[all_positions] [ identifier[pos_i] - literal[int] ]
identifier[prevprevpos] = identifier[all_positions] [ identifier[pos_i] - literal[int] ] keyword[if] ( identifier[pos_i] - literal[int] )>= literal[int] keyword[else] keyword[None]
keyword[if] identifier[prevpos] . identifier[meterVal2] == literal[string] :
keyword[if] ( identifier[prevprevpos] keyword[and] identifier[prevprevpos] . identifier[meterVal2] == literal[string] ) keyword[and] ( keyword[not] identifier[hasattr] ( identifier[prevprevpos] , literal[string] )):
identifier[prevprevpos] . identifier[_flag_already_served_as_ww] = keyword[True]
keyword[pass]
keyword[elif] identifier[meterPos] . identifier[meterVal2] == literal[string] keyword[and] ( keyword[not] identifier[hasattr] ( identifier[meterPos] , literal[string] )):
identifier[meterPos] . identifier[_flag_already_served_as_ww] = keyword[True]
keyword[pass]
keyword[else] :
keyword[for] identifier[cnstr] keyword[in] identifier[prevpos] . identifier[constraintScores] :
keyword[if] identifier[cnstr] . identifier[name] == identifier[self] . identifier[name] :
identifier[prevpos] . identifier[constraintScores] [ identifier[cnstr] ]= identifier[self] . identifier[weight]
identifier[parse] . identifier[constraintScores] [ identifier[cnstr] ]+= identifier[self] . identifier[weight]
keyword[elif] identifier[is_end] keyword[and] identifier[meterPos] . identifier[meterVal2] == literal[string] :
keyword[if] identifier[prevpos] . identifier[meterVal2] == literal[string] :
keyword[pass]
keyword[else] :
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[is_end] :
identifier[final_meter_str] = literal[string] . identifier[join] ([ literal[string] . identifier[join] ( identifier[pos] . identifier[meterVal] keyword[for] identifier[slot] keyword[in] identifier[pos] . identifier[slots] ) keyword[for] identifier[pos] keyword[in] identifier[all_positions] ])
keyword[if] identifier[self] . identifier[name] . identifier[startswith] ( literal[string] ):
identifier[shouldbe] = identifier[self] . identifier[name] . identifier[split] ( literal[string] )[- literal[int] ]
literal[string]
literal[string]
identifier[quasi_feet] =[ literal[string] . identifier[join] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[tools] . identifier[slice] ([ identifier[pos] . identifier[meterVal] keyword[for] identifier[pos] keyword[in] identifier[all_positions] ], identifier[slice_length] = literal[int] , identifier[runts] = keyword[False] )]
identifier[headedness] = literal[string] keyword[if] identifier[quasi_feet] . identifier[count] ( literal[string] )>= identifier[quasi_feet] . identifier[count] ( literal[string] ) keyword[else] literal[string]
keyword[if] identifier[shouldbe] != identifier[headedness] :
keyword[return] identifier[self] . identifier[weight]
literal[string]
keyword[if] identifier[self] . identifier[name] . identifier[startswith] ( literal[string] ):
identifier[shouldbe] = identifier[int] ( identifier[self] . identifier[name] . identifier[split] ( literal[string] )[- literal[int] ])
identifier[strong_pos] =[ identifier[pos] keyword[for] identifier[pos] keyword[in] identifier[all_positions] keyword[if] identifier[pos] . identifier[meterVal] == literal[string] ]
identifier[num_feet] = identifier[len] ( identifier[strong_pos] )
keyword[if] identifier[shouldbe] != identifier[num_feet] :
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[self] . identifier[name] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[self] . identifier[name] == literal[string] :
keyword[if] identifier[len] ( identifier[all_positions] [- literal[int] ]. identifier[slots] )> literal[int] keyword[and] identifier[all_positions] [- literal[int] ]. identifier[meterVal] == literal[string] :
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[self] . identifier[name] == literal[string] :
keyword[if] identifier[all_positions] [- literal[int] ]. identifier[meterVal] == literal[string] :
keyword[return] identifier[self] . identifier[weight]
keyword[if] identifier[self] . identifier[name] == literal[string] :
identifier[weak_pos] =[ identifier[pos] keyword[for] identifier[pos] keyword[in] identifier[all_positions] keyword[if] identifier[pos] . identifier[meterVal] == literal[string] ]
keyword[if] identifier[len] ( identifier[weak_pos] )< literal[int] : keyword[return] literal[int]
identifier[weak_pos_types] =[ literal[string] . identifier[join] ( literal[string] keyword[for] identifier[slot] keyword[in] identifier[pos] . identifier[slots] ) keyword[for] identifier[pos] keyword[in] identifier[weak_pos] ]
identifier[maxcount] = identifier[max] ([ identifier[weak_pos_types] . identifier[count] ( identifier[wtype] ) keyword[for] identifier[wtype] keyword[in] identifier[set] ( identifier[weak_pos_types] )])
identifier[diff] = identifier[len] ( identifier[weak_pos] )- identifier[maxcount]
keyword[return] identifier[self] . identifier[weight] * identifier[diff]
keyword[return] literal[int] | def __hardparse(self, meterPos, pos_i=None, slot_i=None, num_slots=None, all_positions=None, parse=None):
import prosodic as p #if meterPos.slots[0].i<2:
# print meterPos.slots[0].word
#print meterPos,pos_i,slot_i,num_slots,all_positions
#prevpos=all_positions[pos_i-1]
#print pos_i, meterPos, prevpos, pos_i,pos_i-1,all_positions, len(meterPos.slots)
if '.' in self.name: # kiparsky self.names
## load variables
#exception for first foot
#if 'skip_initial_foot' in parse.constraintNames:
# if meterPos.slots[0].i<2:
# return 0
if 'extrametrical-first-pos' in parse.constraintNames and pos_i == 0:
return 0 # depends on [control=['if'], data=[]]
elif 'skip_initial_foot' in parse.constraintNames and pos_i in [0, 1]:
return 0 # depends on [control=['if'], data=[]]
promSite = self.name.split('.')[1]
promType = self.name.split('.')[0]
promSite_meter = promSite.split('=>')[0].strip() # s/w
promSite_prom = promSite.split('=>')[1].strip() # +- u/p
if meterPos.meterVal != promSite_meter: # then this constraint does not apply
return 0 # depends on [control=['if'], data=[]]
if promSite_prom[0:1] == '-': # -u or -p: eg, if s=>-u, then NOT EVEN ONE s can be u(nprom)
promSite_isneg = True
promSite_prom = promSite_prom[1:] # u or p # depends on [control=['if'], data=[]]
else:
promSite_isneg = False # u or p: eg, if s=>p, then AT LEAST ONE s must be p(rom)
'\n\t\t\tRemoved 4/12/2017: apparently there was an option to restrict just \'P\'rimary stresses\n\t\t\tBut required using an uppercase P in the meter config. This was nowhere stated elsewhere\n\t\t\tand has never been used. I\'m disabling it. Let\'s just use a separate prominence type\n\t\t\tif we want to restrict only primary stresses.\n\n\t\t\tif promSite_prom.lower()==promSite_prom:\n\t\t\t\tpromSite_prom = (promSite_prom == \'p\')\t\t\t\t# string 2 boolean: p:True, u:False\n\t\t\telse:\n\t\t\t\tif promSite_prom=="P":\n\t\t\t\t\tpromSite_prom=1.0\n\t\t\t\t#elif promSite_prom=="U":\n\t\t\t\telse:\n\t\t\t\t\tpromSite_prom=0.0\n\t\t\t'
promSite_prom = promSite_prom == 'p' # string 2 boolean: p:True, u:False
# NOT EVEN ONE unit_prom can be promSite_prom:
if promSite_isneg:
numtrue = 0
for slot in meterPos.slots:
slot_prom = slot.feature('prom.' + promType, True)
if slot_prom == None:
continue # depends on [control=['if'], data=[]] #if type(promSite_prom)==type(True):
# slot_prom=bool(slot_prom)
pstress_thresh = self.meter.config.get('phrasal_stress_threshold', PSTRESS_THRESH_DEFAULT)
try:
pstress_thresh = float(pstress_thresh) # depends on [control=['try'], data=[]]
except ValueError:
pstress_thresh = PSTRESS_THRESH_DEFAULT # depends on [control=['except'], data=[]]
bool_prom_type = bool(slot_prom) if promType != 'phrasal_stress' else slot_prom <= pstress_thresh
if bool_prom_type == promSite_prom: #numtrue+=float(slot_prom)
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['slot']] #return 2 if numtrue else 0
#print self.weight, numtrue
## CHANGED 10/10/2016: This constraint returns its weight
## *times* the number of slots/syllables that violated it.
## CHANGED 4/12/2017: numtrue is actually float of the prominence
## so for phrasal stress is its p-stress value, for seconday stress is 0.5, etc.
return self.weight * numtrue # depends on [control=['if'], data=[]]
else: #return 0
# AT LEAST ONE unit_prom must be promSite_prom (or else, violate):
violated = True
ran = False
for slot in meterPos.slots:
slot_prom = slot.feature('prom.' + promType, True)
if slot_prom == None:
continue # depends on [control=['if'], data=[]]
ran = True
if bool(slot_prom) == promSite_prom:
violated = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['slot']]
if ran and violated:
return self.weight # depends on [control=['if'], data=[]]
else:
return 0 # depends on [control=['if'], data=[]]
elif self.name.lower().startswith('initialstrong'): #if meterPos.slots[0].i==0:
if pos_i == 0:
if meterPos.meterVal == 's':
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return 0 # depends on [control=['if'], data=[]]
elif self.name.lower().startswith('functiontow'): #exception for first foot
if p.config.get('skip_initial_foot', 0):
if meterPos.slots[0].i < 2:
return 0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if meterPos.meterVal != 's': # then this constraint does not apply
return 0 # depends on [control=['if'], data=[]]
vio = 0
for slot in meterPos.slots:
if slot.word.feature('functionword'):
vio += self.weight # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['slot']]
return vio # depends on [control=['if'], data=[]]
elif self.name.lower().startswith('footmin'):
if len(meterPos.slots) < 2:
return 0 # depends on [control=['if'], data=[]]
elif len(meterPos.slots) > 2:
return self.weight # depends on [control=['if'], data=[]]
name = self.name.lower()
a = meterPos.slots[0]
b = meterPos.slots[1] ## should this apply to ALL foomin constraints?
#if ( bool(a.feature('prom.stress',True)) and bool(b.feature('prom.stress',True))):
# return self.weight
##
if name == 'footmin-nohx':
if bool(a.feature('prom.weight', True)):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if name == 'footmin-w-resolution':
if a.word != b.word:
return 0 # only applies within word-boundaries # depends on [control=['if'], data=[]]
firstsyll_islight = bool(a.feature('prom.weight', True)) == False
firstsyll_isstressed = bool(a.feature('prom.stress', True)) == True
if not (firstsyll_islight and firstsyll_isstressed):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if name == 'footmin-f-resolution':
if a.word == b.word:
return 0 # only applies to word-boundaries # depends on [control=['if'], data=[]]
if meterPos.meterVal == 's':
return self.weight # cannot apply to strong positions # depends on [control=['if'], data=[]]
a_is_fw = bool(a.word.feature('functionword'))
b_is_fw = bool(b.word.feature('functionword'))
if not (a_is_fw and b_is_fw):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if name == 'footmin-s-nohx':
if meterPos.meterVal == 's':
if bool(a.feature('prom.weight', True)) or a.word != b.word:
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'nolh' in name:
if bool(b.feature('prom.weight', True)):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'strongconstraint' in name:
if bool(b.feature('prom.strength', True)):
return self.weight # depends on [control=['if'], data=[]]
if bool(a.feature('prom.strength', True)):
if not bool(a.feature('prom.weight', True)):
if a.word == b.word and (not a.wordpos[0] == a.wordpos[1]):
if not bool(b.feature('prom.stress', True)):
return 0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if name == 'footmin-none':
return self.weight # depends on [control=['if'], data=[]]
if name == 'footmin-none-unless-in-first-two-positions':
if pos_i != 0 and pos_i != 1:
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if name == 'footmin-none-unless-in-second-position':
if pos_i != 1:
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if name == 'footmin-no-s':
return self.weight * int(meterPos.meterVal == 's') # depends on [control=['if'], data=[]]
if name == 'footmin-no-w':
return self.weight * int(meterPos.meterVal == 'w') # depends on [control=['if'], data=[]]
if name == 'footmin-no-s-unless-preceded-by-ww': # @TODO: bug when number of syllables in maxW is > 2 ?
if meterPos.meterVal != 's':
return 0 # depends on [control=['if'], data=[]]
if pos_i == 0:
return self.weight # depends on [control=['if'], data=[]]
prevpos = all_positions[pos_i - 1] #print pos_i, meterPos, prevpos, pos_i,pos_i-1,all_positions
if len(prevpos.slots) > 1 and prevpos.meterVal == 'w':
return 0 # depends on [control=['if'], data=[]]
return self.weight # depends on [control=['if'], data=[]]
if 'wordbound' in name:
if name == 'footmin-wordbound':
if a.word != b.word:
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'nomono' in name:
if a.word.numSyll == 1 or b.word.numSyll == 1:
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'lexmono' in name: #if a.word.numSyll==1 and a.word.stress=="P"
if a.word.isLexMono() or b.word.isLexMono():
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] ## everyone is happy if both are function words
if a.word.feature('functionword') and b.word.feature('functionword'):
return 0 # depends on [control=['if'], data=[]]
if a.word != b.word:
if 'bothnotfw' in name:
if not (a.word.feature('functionword') and b.word.feature('functionword')):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'neitherfw':
if not (a.word.feature('functionword') or b.word.feature('functionword')):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'leftfw':
if not a.word.feature('functionword'):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'rightfw':
if not b.word.feature('functionword'):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # only remaining possibilities:
# i) slots a,b are from the same word
# ii) slots a,b are from contiguous words which are the same (haPPY HAppy)
if a.wordpos[0] == a.wordpos[1]: # in the firs slot's (start,end) wordpos : if (start==end) : then poss. (ii) above
if 'bothnotfw' in name:
if not (a.word.feature('functionword') and b.word.feature('functionword')):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'neitherfw':
if not (a.word.feature('functionword') or b.word.feature('functionword')):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'leftfw':
if not a.word.feature('functionword'):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'rightfw':
if not b.word.feature('functionword'):
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # poss. (i) remains
return 0 # depends on [control=['if'], data=['name']] # depends on [control=['if'], data=[]] ## Constraints about words
if self.name == 'word-elision':
words = set([slot.word for slot in meterPos.slots if hasattr(slot.word, 'is_elision') and slot.word.is_elision])
sylls = []
for slot in meterPos.slots:
sylls += slot.children # depends on [control=['for'], data=['slot']]
for word in words:
lastsyll = word.children[-1]
if lastsyll in sylls: # only break if this position contains the word's final syllable
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]] # is this the end?
is_end = slot_i + 1 == num_slots and meterPos.slots == all_positions[-1].slots ## CONSTRAINTS ON PREVIOUS POSITIONS
'\n\t\tABANDONED TEMPORARILY AS NOT POSSIBLE GIVEN THAT PARSES ARE BOUNDED AS PARSING GOES ON\n\t\t'
if self.name == 'attridge-ss-not-by-ww': #if meterPos.meterVal!='s': return 0
#if not is_end and meterPos.meterVal2 == 'ss':
# parse.pauseComparisons=True
if pos_i == 0:
return 0 # depends on [control=['if'], data=[]]
prevpos = all_positions[pos_i - 1]
prevprevpos = all_positions[pos_i - 2] if pos_i - 2 >= 0 else None #print prevprevpos,prevpos,meterPos
#print prevprevpos.meterVal2 if prevprevpos else None,prevpos.meterVal2, meterPos.meterVal2
#print prevprevpos,prevpos,meterPos
#print prevprevpos.meterVal2 if prevprevpos else None,prevpos.meterVal2, meterPos.meterVal2
#print dir(prevprevpos) if prevprevpos else None
#print dir(prevpos) if prevprevpos else None
#print dir(meterPos)
#print
if prevpos.meterVal2 == 'ss': #if (prevprevpos and prevprevpos.meterVal2=='ww')
if (prevprevpos and prevprevpos.meterVal2 == 'ww') and (not hasattr(prevprevpos, '_flag_already_served_as_ww')):
prevprevpos._flag_already_served_as_ww = True
pass # depends on [control=['if'], data=[]]
elif meterPos.meterVal2 == 'ww' and (not hasattr(meterPos, '_flag_already_served_as_ww')):
meterPos._flag_already_served_as_ww = True
pass # depends on [control=['if'], data=[]]
else: #print 'ERROR!'
for cnstr in prevpos.constraintScores:
if cnstr.name == self.name:
prevpos.constraintScores[cnstr] = self.weight
parse.constraintScores[cnstr] += self.weight # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cnstr']] # depends on [control=['if'], data=[]] #parse.pauseComparisons=False
elif is_end and meterPos.meterVal2 == 'ss': #parse.pauseComparisons=False
if prevpos.meterVal2 == 'ww':
pass # depends on [control=['if'], data=[]]
else: #print 'ERROR!'
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] #print
#"""
## POST HOC CONSTRAINTS
if is_end:
final_meter_str = ''.join([''.join((pos.meterVal for slot in pos.slots)) for pos in all_positions]) #print final_meter_str
# headedness
if self.name.startswith('headedness'):
shouldbe = self.name.split('!=')[-1]
"\n\t\t\t\tApproach 1: This approach doesn't really work on individual lines:\n\n\t\t\t\t# binary or ternary?\n\t\t\t\tweak_pos = [pos for pos in all_positions if pos.meterVal=='w']\n\t\t\t\tif len(weak_pos)<2: return 0\n\t\t\t\tweak_pos_types = [''.join('w' for slot in pos.slots) for pos in weak_pos]\n\n\t\t\t\tif weak_pos_types.count('ww')>weak_pos_types.count('w'): # ternary\n\t\t\t\t\tif final_meter_str[3]=='w': # anapestic\n\t\t\t\t\t\theadedness = 'rising'\n\t\t\t\t\telse: # dactylic\n\t\t\t\t\t\theadedness = 'falling'\n\t\t\t\telse: # binary\n\t\t\t\t\tif final_meter_str[3]=='w':\n\t\t\t\t\t\theadedness = 'falling' # trochaic\n\t\t\t\t\telse:\n\t\t\t\t\t\theadedness = 'rising'\n\n\t\t\t\tif shouldbe != headedness:\n\t\t\t\t\treturn self.weight\n\t\t\t\t"
"\n\t\t\t\tApproach 2: count 'ws' vs 'sw' pairs and give categorical violation\n\t\t\t\t"
quasi_feet = [''.join(x) for x in tools.slice([pos.meterVal for pos in all_positions], slice_length=2, runts=False)]
headedness = 'rising' if quasi_feet.count('ws') >= quasi_feet.count('sw') else 'falling' #print final_meter_str
#print quasi_feet
#print headedness
#print
if shouldbe != headedness:
return self.weight # depends on [control=['if'], data=[]] #"""
"\n\t\t\t\tApproach 3: count 'ws' vs 'sw' pairs and give violation/num-pos per off foot\n\n\t\t\t\tquasi_feet=[''.join(x) for x in tools.slice([pos.meterVal for pos in all_positions],slice_length=2,runts=True)]\n\t\t\t\tif shouldbe == 'rising':\n\t\t\t\t\tnum_not_rising = float(len([ft for ft in quasi_feet if ft!='ws']))\n\t\t\t\t\treturn num_not_rising / float(len(all_positions)) * float(self.weight)\n\t\t\t\telif shouldbe == 'falling':\n\t\t\t\t\tnum_not_falling = float(len([ft for ft in quasi_feet if ft!='sw']))\n\t\t\t\t\treturn num_not_falling / float(len(all_positions)) * float(self.weight)\n\t\t\t\t" # depends on [control=['if'], data=[]] # number of feet
if self.name.startswith('number_feet'):
shouldbe = int(self.name.split('!=')[-1])
strong_pos = [pos for pos in all_positions if pos.meterVal == 's']
num_feet = len(strong_pos) # debatable
if shouldbe != num_feet:
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # other posthoc constraints
if self.name.startswith('posthoc'):
if self.name == 'posthoc-no-final-ww':
if len(all_positions[-1].slots) > 1 and all_positions[-1].meterVal == 'w':
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.name == 'posthoc-no-final-w':
if all_positions[-1].meterVal == 'w':
return self.weight # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.name == 'posthoc-standardize-weakpos':
weak_pos = [pos for pos in all_positions if pos.meterVal == 'w']
if len(weak_pos) < 2:
return 0 # depends on [control=['if'], data=[]]
weak_pos_types = [''.join(('w' for slot in pos.slots)) for pos in weak_pos]
maxcount = max([weak_pos_types.count(wtype) for wtype in set(weak_pos_types)])
diff = len(weak_pos) - maxcount
return self.weight * diff # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # made it through this minefield, eh?
return 0 |
def latex(self, force=False):
        """
        Build PDF documentation.

        Runs the Sphinx ``latex`` builder, then compiles the output with
        ``pdflatex`` (when ``force``) or ``make``.

        Parameters
        ----------
        force : bool, default False
            If True, run ``pdflatex`` three times in non-stop mode (LaTeX
            needs multiple passes to resolve cross-references) and then
            exit, asking the user to inspect the resulting PDF manually.

        Returns
        -------
        int or None
            The Sphinx build return code, or ``None`` on Windows where the
            build is skipped (untested there).
        """
        if sys.platform == 'win32':
            sys.stderr.write('latex build has not been tested on windows\n')
            return None  # nothing was built; make the fall-through explicit
        ret_code = self._sphinx_build('latex')
        os.chdir(os.path.join(BUILD_PATH, 'latex'))
        if force:
            # pdflatex needs several passes to settle cross-references;
            # -interaction=nonstopmode keeps it from halting on errors.
            for _ in range(3):
                self._run_os('pdflatex',
                             '-interaction=nonstopmode',
                             'pandas.tex')
            raise SystemExit('You should check the file '
                             '"build/latex/pandas.pdf" for problems.')
        else:
            self._run_os('make')
        return ret_code
constant[
Build PDF documentation.
]
if compare[name[sys].platform equal[==] constant[win32]] begin[:]
call[name[sys].stderr.write, parameter[constant[latex build has not been tested on windows
]]] | keyword[def] identifier[latex] ( identifier[self] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] )
keyword[else] :
identifier[ret_code] = identifier[self] . identifier[_sphinx_build] ( literal[string] )
identifier[os] . identifier[chdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[BUILD_PATH] , literal[string] ))
keyword[if] identifier[force] :
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[self] . identifier[_run_os] ( literal[string] ,
literal[string] ,
literal[string] )
keyword[raise] identifier[SystemExit] ( literal[string]
literal[string] )
keyword[else] :
identifier[self] . identifier[_run_os] ( literal[string] )
keyword[return] identifier[ret_code] | def latex(self, force=False):
"""
Build PDF documentation.
"""
if sys.platform == 'win32':
sys.stderr.write('latex build has not been tested on windows\n') # depends on [control=['if'], data=[]]
else:
ret_code = self._sphinx_build('latex')
os.chdir(os.path.join(BUILD_PATH, 'latex'))
if force:
for i in range(3):
self._run_os('pdflatex', '-interaction=nonstopmode', 'pandas.tex') # depends on [control=['for'], data=[]]
raise SystemExit('You should check the file "build/latex/pandas.pdf" for problems.') # depends on [control=['if'], data=[]]
else:
self._run_os('make')
return ret_code |
def iter_successors(self, graph, orig, branch, turn, tick, *, forward=None):
        """Yield the successor nodes of ``orig`` in ``graph`` at the given
        (branch, turn, tick) moment.

        When the database runs without keyframe caching, successors are
        recomputed from the adds/deletes log; otherwise the destination
        cache is consulted.  ``forward`` defaults to the database-wide
        forward-caching setting.
        """
        if self.db._no_kc:
            added = self._adds_dels_sucpred(
                self.successors[graph, orig], branch, turn, tick)[0]
            yield from added
            return
        use_forward = forward if forward is not None else self.db._forward
        yield from self._get_destcache(
            graph, orig, branch, turn, tick, forward=use_forward)
constant[Iterate over successors of a given origin node at a given time.]
if name[self].db._no_kc begin[:]
<ast.YieldFrom object at 0x7da1b0ba7010>
return[None]
if compare[name[forward] is constant[None]] begin[:]
variable[forward] assign[=] name[self].db._forward
<ast.YieldFrom object at 0x7da1b0cb6d10> | keyword[def] identifier[iter_successors] ( identifier[self] , identifier[graph] , identifier[orig] , identifier[branch] , identifier[turn] , identifier[tick] ,*, identifier[forward] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[db] . identifier[_no_kc] :
keyword[yield] keyword[from] identifier[self] . identifier[_adds_dels_sucpred] ( identifier[self] . identifier[successors] [ identifier[graph] , identifier[orig] ], identifier[branch] , identifier[turn] , identifier[tick] )[ literal[int] ]
keyword[return]
keyword[if] identifier[forward] keyword[is] keyword[None] :
identifier[forward] = identifier[self] . identifier[db] . identifier[_forward]
keyword[yield] keyword[from] identifier[self] . identifier[_get_destcache] ( identifier[graph] , identifier[orig] , identifier[branch] , identifier[turn] , identifier[tick] , identifier[forward] = identifier[forward] ) | def iter_successors(self, graph, orig, branch, turn, tick, *, forward=None):
"""Iterate over successors of a given origin node at a given time."""
if self.db._no_kc:
yield from self._adds_dels_sucpred(self.successors[graph, orig], branch, turn, tick)[0]
return # depends on [control=['if'], data=[]]
if forward is None:
forward = self.db._forward # depends on [control=['if'], data=['forward']]
yield from self._get_destcache(graph, orig, branch, turn, tick, forward=forward) |
def load(fileobj):
    """Load a submission from a gzip-compressed file-like object.

    The first line of the archive holds the JSON-encoded metadata; every
    following line is a JSON ``[token_id, token]`` pair.

    :param fileobj: File-like object
    :return: the loaded submission
    """
    with gzip.GzipFile(fileobj=fileobj, mode='r') as archive:
        header = archive.readline()
        submission = Submission(metadata=json.loads(header))
        for raw_line in archive:
            token_id, token = json.loads(raw_line)
            submission['tokens'][token_id] = token
    return submission
return submission | def function[load, parameter[fileobj]]:
constant[Load the submission from a file-like object
:param fileobj: File-like object
:return: the loaded submission
]
with call[name[gzip].GzipFile, parameter[]] begin[:]
variable[submission] assign[=] call[name[Submission], parameter[]]
for taget[name[line]] in starred[name[z]] begin[:]
<ast.Tuple object at 0x7da18dc98d30> assign[=] call[name[json].loads, parameter[name[line]]]
call[call[name[submission]][constant[tokens]]][name[token_id]] assign[=] name[token]
return[name[submission]] | keyword[def] identifier[load] ( identifier[fileobj] ):
literal[string]
keyword[with] identifier[gzip] . identifier[GzipFile] ( identifier[fileobj] = identifier[fileobj] , identifier[mode] = literal[string] ) keyword[as] identifier[z] :
identifier[submission] = identifier[Submission] ( identifier[metadata] = identifier[json] . identifier[loads] ( identifier[z] . identifier[readline] ()))
keyword[for] identifier[line] keyword[in] identifier[z] :
identifier[token_id] , identifier[token] = identifier[json] . identifier[loads] ( identifier[line] )
identifier[submission] [ literal[string] ][ identifier[token_id] ]= identifier[token]
keyword[return] identifier[submission] | def load(fileobj):
"""Load the submission from a file-like object
:param fileobj: File-like object
:return: the loaded submission
"""
with gzip.GzipFile(fileobj=fileobj, mode='r') as z:
submission = Submission(metadata=json.loads(z.readline()))
for line in z:
(token_id, token) = json.loads(line)
submission['tokens'][token_id] = token # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['z']]
return submission |
def create_supervised_tbptt_trainer(
    model,
    optimizer,
    loss_fn,
    tbtt_step,
    dim=0,
    device=None,
    non_blocking=False,
    prepare_batch=_prepare_batch
):
    """Build an ``Engine`` that trains a recurrent model with truncated
    backpropagation through time (TBPTT).

    Each batch is cut along axis ``dim`` into chunks of ``tbtt_step`` time
    steps (the last chunk may be shorter).  For every chunk the engine
    fires ``Tbptt_Events.TIME_ITERATION_STARTED``, runs a full
    forward/backward/optimizer step -- detaching the hidden state carried
    over from the previous chunk so gradients stop at the chunk boundary --
    stores the chunk loss in ``engine.state.output``, and fires
    ``Tbptt_Events.TIME_ITERATION_COMPLETED``.  The update function
    returns the mean loss over the chunks of the batch.

    Args:
        model (`torch.nn.Module`): the model to train.
        optimizer (`torch.optim.Optimizer`): the optimizer to use.
        loss_fn (torch.nn loss function): the loss function to use.
        tbtt_step (int): the length of time chunks (last one may be smaller).
        dim (int): axis representing the time dimension.
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.
        non_blocking (bool, optional): if True and this copy is between CPU and GPU,
            the copy may occur asynchronously with respect to the host. For other cases,
            this argument has no effect.
        prepare_batch (callable, optional): function that receives `batch`, `device`,
            `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`.

    Returns:
        Engine: a trainer engine with supervised update function.
    """
    if device:
        model.to(device)

    def _update(engine, batch):
        chunk_losses = []
        hidden = None
        inputs, targets = batch
        input_chunks = inputs.split(tbtt_step, dim=dim)
        target_chunks = targets.split(tbtt_step, dim=dim)
        for chunk in zip(input_chunks, target_chunks):
            x_chunk, y_chunk = prepare_batch(
                chunk, device=device, non_blocking=non_blocking)

            # Announce the start of this time chunk.
            engine.fire_event(Tbptt_Events.TIME_ITERATION_STARTED)

            model.train()
            optimizer.zero_grad()
            if hidden is None:
                # First chunk: let the model initialize its own hidden state.
                y_pred, hidden = model(x_chunk)
            else:
                # Cut the autograd graph at the chunk boundary before reuse.
                hidden = _detach_hidden(hidden)
                y_pred, hidden = model(x_chunk, hidden)
            chunk_loss = loss_fn(y_pred, y_chunk)
            chunk_loss.backward()
            optimizer.step()

            # Expose the current chunk loss on the engine and record it.
            engine.state.output = chunk_loss.item()
            chunk_losses.append(chunk_loss.item())

            # Announce the end of this time chunk.
            engine.fire_event(Tbptt_Events.TIME_ITERATION_COMPLETED)

        # Mean loss over all time chunks of the batch.
        return sum(chunk_losses) / len(chunk_losses)

    engine = Engine(_update)
    engine.register_events(*Tbptt_Events)
    return engine
constant[Create a trainer for truncated backprop through time supervised models.
Training recurrent model on long sequences is computationally intensive as
it requires to process the whole sequence before getting a gradient.
However, when the training loss is computed over many outputs
(`X to many <https://karpathy.github.io/2015/05/21/rnn-effectiveness/>`_),
there is an opportunity to compute a gradient over a subsequence. This is
known as
`truncated backpropagation through time <https://machinelearningmastery.com/
gentle-introduction-backpropagation-time/>`_.
This supervised trainer apply gradient optimization step every `tbtt_step`
time steps of the sequence, while backpropagating through the same
`tbtt_step` time steps.
Args:
model (`torch.nn.Module`): the model to train.
optimizer (`torch.optim.Optimizer`): the optimizer to use.
loss_fn (torch.nn loss function): the loss function to use.
tbtt_step (int): the length of time chunks (last one may be smaller).
dim (int): axis representing the time dimension.
device (str, optional): device type specification (default: None).
Applies to both model and batches.
non_blocking (bool, optional): if True and this copy is between CPU and GPU,
the copy may occur asynchronously with respect to the host. For other cases,
this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`,
`non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`.
Returns:
Engine: a trainer engine with supervised update function.
]
if name[device] begin[:]
call[name[model].to, parameter[name[device]]]
def function[_update, parameter[engine, batch]]:
variable[loss_list] assign[=] list[[]]
variable[hidden] assign[=] constant[None]
<ast.Tuple object at 0x7da20c6a8ac0> assign[=] name[batch]
for taget[name[batch_t]] in starred[call[name[zip], parameter[call[name[x].split, parameter[name[tbtt_step]]], call[name[y].split, parameter[name[tbtt_step]]]]]] begin[:]
<ast.Tuple object at 0x7da20c6aaa70> assign[=] call[name[prepare_batch], parameter[name[batch_t]]]
call[name[engine].fire_event, parameter[name[Tbptt_Events].TIME_ITERATION_STARTED]]
call[name[model].train, parameter[]]
call[name[optimizer].zero_grad, parameter[]]
if compare[name[hidden] is constant[None]] begin[:]
<ast.Tuple object at 0x7da20c6ab8e0> assign[=] call[name[model], parameter[name[x_t]]]
variable[loss_t] assign[=] call[name[loss_fn], parameter[name[y_pred_t], name[y_t]]]
call[name[loss_t].backward, parameter[]]
call[name[optimizer].step, parameter[]]
name[engine].state.output assign[=] call[name[loss_t].item, parameter[]]
call[name[loss_list].append, parameter[call[name[loss_t].item, parameter[]]]]
call[name[engine].fire_event, parameter[name[Tbptt_Events].TIME_ITERATION_COMPLETED]]
return[binary_operation[call[name[sum], parameter[name[loss_list]]] / call[name[len], parameter[name[loss_list]]]]]
variable[engine] assign[=] call[name[Engine], parameter[name[_update]]]
call[name[engine].register_events, parameter[<ast.Starred object at 0x7da20c6a8e80>]]
return[name[engine]] | keyword[def] identifier[create_supervised_tbptt_trainer] (
identifier[model] ,
identifier[optimizer] ,
identifier[loss_fn] ,
identifier[tbtt_step] ,
identifier[dim] = literal[int] ,
identifier[device] = keyword[None] ,
identifier[non_blocking] = keyword[False] ,
identifier[prepare_batch] = identifier[_prepare_batch]
):
literal[string]
keyword[if] identifier[device] :
identifier[model] . identifier[to] ( identifier[device] )
keyword[def] identifier[_update] ( identifier[engine] , identifier[batch] ):
identifier[loss_list] =[]
identifier[hidden] = keyword[None]
identifier[x] , identifier[y] = identifier[batch]
keyword[for] identifier[batch_t] keyword[in] identifier[zip] ( identifier[x] . identifier[split] ( identifier[tbtt_step] , identifier[dim] = identifier[dim] ), identifier[y] . identifier[split] ( identifier[tbtt_step] , identifier[dim] = identifier[dim] )):
identifier[x_t] , identifier[y_t] = identifier[prepare_batch] ( identifier[batch_t] , identifier[device] = identifier[device] , identifier[non_blocking] = identifier[non_blocking] )
identifier[engine] . identifier[fire_event] ( identifier[Tbptt_Events] . identifier[TIME_ITERATION_STARTED] )
identifier[model] . identifier[train] ()
identifier[optimizer] . identifier[zero_grad] ()
keyword[if] identifier[hidden] keyword[is] keyword[None] :
identifier[y_pred_t] , identifier[hidden] = identifier[model] ( identifier[x_t] )
keyword[else] :
identifier[hidden] = identifier[_detach_hidden] ( identifier[hidden] )
identifier[y_pred_t] , identifier[hidden] = identifier[model] ( identifier[x_t] , identifier[hidden] )
identifier[loss_t] = identifier[loss_fn] ( identifier[y_pred_t] , identifier[y_t] )
identifier[loss_t] . identifier[backward] ()
identifier[optimizer] . identifier[step] ()
identifier[engine] . identifier[state] . identifier[output] = identifier[loss_t] . identifier[item] ()
identifier[loss_list] . identifier[append] ( identifier[loss_t] . identifier[item] ())
identifier[engine] . identifier[fire_event] ( identifier[Tbptt_Events] . identifier[TIME_ITERATION_COMPLETED] )
keyword[return] identifier[sum] ( identifier[loss_list] )/ identifier[len] ( identifier[loss_list] )
identifier[engine] = identifier[Engine] ( identifier[_update] )
identifier[engine] . identifier[register_events] (* identifier[Tbptt_Events] )
keyword[return] identifier[engine] | def create_supervised_tbptt_trainer(model, optimizer, loss_fn, tbtt_step, dim=0, device=None, non_blocking=False, prepare_batch=_prepare_batch):
"""Create a trainer for truncated backprop through time supervised models.
Training recurrent model on long sequences is computationally intensive as
it requires to process the whole sequence before getting a gradient.
However, when the training loss is computed over many outputs
(`X to many <https://karpathy.github.io/2015/05/21/rnn-effectiveness/>`_),
there is an opportunity to compute a gradient over a subsequence. This is
known as
`truncated backpropagation through time <https://machinelearningmastery.com/
gentle-introduction-backpropagation-time/>`_.
This supervised trainer apply gradient optimization step every `tbtt_step`
time steps of the sequence, while backpropagating through the same
`tbtt_step` time steps.
Args:
model (`torch.nn.Module`): the model to train.
optimizer (`torch.optim.Optimizer`): the optimizer to use.
loss_fn (torch.nn loss function): the loss function to use.
tbtt_step (int): the length of time chunks (last one may be smaller).
dim (int): axis representing the time dimension.
device (str, optional): device type specification (default: None).
Applies to both model and batches.
non_blocking (bool, optional): if True and this copy is between CPU and GPU,
the copy may occur asynchronously with respect to the host. For other cases,
this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`,
`non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`.
Returns:
Engine: a trainer engine with supervised update function.
"""
if device:
model.to(device) # depends on [control=['if'], data=[]]
def _update(engine, batch):
loss_list = []
hidden = None
(x, y) = batch
for batch_t in zip(x.split(tbtt_step, dim=dim), y.split(tbtt_step, dim=dim)):
(x_t, y_t) = prepare_batch(batch_t, device=device, non_blocking=non_blocking)
# Fire event for start of iteration
engine.fire_event(Tbptt_Events.TIME_ITERATION_STARTED)
# Forward, backward and
model.train()
optimizer.zero_grad()
if hidden is None:
(y_pred_t, hidden) = model(x_t) # depends on [control=['if'], data=['hidden']]
else:
hidden = _detach_hidden(hidden)
(y_pred_t, hidden) = model(x_t, hidden)
loss_t = loss_fn(y_pred_t, y_t)
loss_t.backward()
optimizer.step()
# Setting state of engine for consistent behaviour
engine.state.output = loss_t.item()
loss_list.append(loss_t.item())
# Fire event for end of iteration
engine.fire_event(Tbptt_Events.TIME_ITERATION_COMPLETED) # depends on [control=['for'], data=['batch_t']]
# return average loss over the time splits
return sum(loss_list) / len(loss_list)
engine = Engine(_update)
engine.register_events(*Tbptt_Events)
return engine |
def run_cell(self, cell):
        """Run the Cell code using the IPython globals and locals.

        Stdout produced while the code runs is redirected to
        ``self.stdout``.  On any error the ``code_error`` flag is set
        and, in debug mode, ``BdbQuit`` is raised; ``finalize()`` always
        runs regardless of outcome.

        Args:
            cell (str): Python code to be executed
        """
        # Renamed from ``globals``/``locals`` to stop shadowing the builtins.
        user_globals = self.ipy_shell.user_global_ns
        user_locals = self.ipy_shell.user_ns
        user_globals.update({
            "__ipy_scope__": None,
        })
        try:
            with redirect_stdout(self.stdout):
                self.run(cell, user_globals, user_locals)
        except BaseException:
            # Explicitly catch everything (including BdbQuit and
            # KeyboardInterrupt, like the original bare ``except:``) so
            # the debugger session can shut down cleanly.
            self.code_error = True
            if self.options.debug:
                raise BdbQuit
        finally:
            self.finalize()
constant[Run the Cell code using the IPython globals and locals
Args:
cell (str): Python code to be executed
]
variable[globals] assign[=] name[self].ipy_shell.user_global_ns
variable[locals] assign[=] name[self].ipy_shell.user_ns
call[name[globals].update, parameter[dictionary[[<ast.Constant object at 0x7da1b0066d10>], [<ast.Constant object at 0x7da1b00666e0>]]]]
<ast.Try object at 0x7da1b00663b0> | keyword[def] identifier[run_cell] ( identifier[self] , identifier[cell] ):
literal[string]
identifier[globals] = identifier[self] . identifier[ipy_shell] . identifier[user_global_ns]
identifier[locals] = identifier[self] . identifier[ipy_shell] . identifier[user_ns]
identifier[globals] . identifier[update] ({
literal[string] : keyword[None] ,
})
keyword[try] :
keyword[with] identifier[redirect_stdout] ( identifier[self] . identifier[stdout] ):
identifier[self] . identifier[run] ( identifier[cell] , identifier[globals] , identifier[locals] )
keyword[except] :
identifier[self] . identifier[code_error] = keyword[True]
keyword[if] identifier[self] . identifier[options] . identifier[debug] :
keyword[raise] identifier[BdbQuit]
keyword[finally] :
identifier[self] . identifier[finalize] () | def run_cell(self, cell):
"""Run the Cell code using the IPython globals and locals
Args:
cell (str): Python code to be executed
"""
globals = self.ipy_shell.user_global_ns
locals = self.ipy_shell.user_ns
globals.update({'__ipy_scope__': None})
try:
with redirect_stdout(self.stdout):
self.run(cell, globals, locals) # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except:
self.code_error = True
if self.options.debug:
raise BdbQuit # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
finally:
self.finalize() |
def css_files(self):
        ''' The CSS files in the BokehJS directory.

        Recursively walks the BokehJS directory and collects the full
        path of every ``*.css`` file found.

        Returns
        -------
        list of str
            Absolute/relative paths (as produced by ``os.walk``) of the
            CSS files.
        '''
        bokehjsdir = self.bokehjsdir()
        # Local was previously (misleadingly) named ``js_files`` -- these
        # are CSS files.
        css_paths = []
        for root, _, files in os.walk(bokehjsdir):
            css_paths.extend(join(root, fname)
                             for fname in files
                             if fname.endswith(".css"))
        return css_paths
constant[ The CSS files in the BokehJS directory.
]
variable[bokehjsdir] assign[=] call[name[self].bokehjsdir, parameter[]]
variable[js_files] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da207f9b910>, <ast.Name object at 0x7da207f98f10>, <ast.Name object at 0x7da207f98760>]]] in starred[call[name[os].walk, parameter[name[bokehjsdir]]]] begin[:]
for taget[name[fname]] in starred[name[files]] begin[:]
if call[name[fname].endswith, parameter[constant[.css]]] begin[:]
call[name[js_files].append, parameter[call[name[join], parameter[name[root], name[fname]]]]]
return[name[js_files]] | keyword[def] identifier[css_files] ( identifier[self] ):
literal[string]
identifier[bokehjsdir] = identifier[self] . identifier[bokehjsdir] ()
identifier[js_files] =[]
keyword[for] identifier[root] , identifier[dirnames] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[bokehjsdir] ):
keyword[for] identifier[fname] keyword[in] identifier[files] :
keyword[if] identifier[fname] . identifier[endswith] ( literal[string] ):
identifier[js_files] . identifier[append] ( identifier[join] ( identifier[root] , identifier[fname] ))
keyword[return] identifier[js_files] | def css_files(self):
""" The CSS files in the BokehJS directory.
"""
bokehjsdir = self.bokehjsdir()
js_files = []
for (root, dirnames, files) in os.walk(bokehjsdir):
for fname in files:
if fname.endswith('.css'):
js_files.append(join(root, fname)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fname']] # depends on [control=['for'], data=[]]
return js_files |
def import_global(node: Node, key: str, path: Any):
    """Resolve *path* (a full module/class/function name) via
    ``import_path`` and store the imported object in *node*'s globals
    under *key*."""
    imported = import_path(path)
    node.node_globals[key] = imported
constant[Import passed module, class, function full name and stores it to node's globals]
call[name[node].node_globals][name[key]] assign[=] call[name[import_path], parameter[name[path]]] | keyword[def] identifier[import_global] ( identifier[node] : identifier[Node] , identifier[key] : identifier[str] , identifier[path] : identifier[Any] ):
literal[string]
identifier[node] . identifier[node_globals] [ identifier[key] ]= identifier[import_path] ( identifier[path] ) | def import_global(node: Node, key: str, path: Any):
"""Import passed module, class, function full name and stores it to node's globals"""
node.node_globals[key] = import_path(path) |
def get_status_from_async(self, response):
        """Process the latest status update retrieved from an
        'azure-asyncoperation' header.

        Stores the parsed status on ``self.status`` and, when available,
        the deserialized resource on ``self.resource`` (``None`` when the
        body cannot be deserialized).

        :param requests.Response response: latest REST call response.
        :raises: BadResponse if response has no body, or body does not
         contain status.
        """
        self._raise_if_bad_http_status_and_method(response)
        if self._is_empty(response):
            raise BadResponse('The response from long running operation does not contain a body.')
        self.status = self._get_async_status(response)
        if not self.status:
            raise BadResponse("No status found in body")
        # Per the ARM spec
        # (https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#operation-resource-format)
        # a successful operation may carry a "properties" payload, so try
        # to deserialize it; anything unparsable means "no resource".
        try:
            resource = self._deserialize(response)
        except Exception:
            resource = None
        self.resource = resource
constant[Process the latest status update retrieved from a
'azure-asyncoperation' header.
:param requests.Response response: latest REST call response.
:raises: BadResponse if response has no body, or body does not
contain status.
]
call[name[self]._raise_if_bad_http_status_and_method, parameter[name[response]]]
if call[name[self]._is_empty, parameter[name[response]]] begin[:]
<ast.Raise object at 0x7da18bcc9e40>
name[self].status assign[=] call[name[self]._get_async_status, parameter[name[response]]]
if <ast.UnaryOp object at 0x7da18bcc9570> begin[:]
<ast.Raise object at 0x7da18bccbc40>
<ast.Try object at 0x7da18bcc85b0> | keyword[def] identifier[get_status_from_async] ( identifier[self] , identifier[response] ):
literal[string]
identifier[self] . identifier[_raise_if_bad_http_status_and_method] ( identifier[response] )
keyword[if] identifier[self] . identifier[_is_empty] ( identifier[response] ):
keyword[raise] identifier[BadResponse] ( literal[string]
literal[string] )
identifier[self] . identifier[status] = identifier[self] . identifier[_get_async_status] ( identifier[response] )
keyword[if] keyword[not] identifier[self] . identifier[status] :
keyword[raise] identifier[BadResponse] ( literal[string] )
keyword[try] :
identifier[self] . identifier[resource] = identifier[self] . identifier[_deserialize] ( identifier[response] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[resource] = keyword[None] | def get_status_from_async(self, response):
"""Process the latest status update retrieved from a
'azure-asyncoperation' header.
:param requests.Response response: latest REST call response.
:raises: BadResponse if response has no body, or body does not
contain status.
"""
self._raise_if_bad_http_status_and_method(response)
if self._is_empty(response):
raise BadResponse('The response from long running operation does not contain a body.') # depends on [control=['if'], data=[]]
self.status = self._get_async_status(response)
if not self.status:
raise BadResponse('No status found in body') # depends on [control=['if'], data=[]]
# Status can contains information, see ARM spec:
# https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#operation-resource-format
# "properties": {
# /\* The resource provider can choose the values here, but it should only be
# returned on a successful operation (status being "Succeeded"). \*/
#},
# So try to parse it
try:
self.resource = self._deserialize(response) # depends on [control=['try'], data=[]]
except Exception:
self.resource = None # depends on [control=['except'], data=[]] |
def check_assumptions(
        self, training_df, advice=True, show_plots=False, p_value_threshold=0.01, plot_n_bootstraps=10
    ):
        """
        Use this function to test the proportional hazards assumption. See usage example at
        https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html

        Parameters
        -----------
        training_df: DataFrame
            the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.
        advice: boolean, optional
            display advice as output to the user's screen
        show_plots: boolean, optional
            display plots of the scaled schoenfeld residuals and loess curves. This is an eyeball test for violations.
            This will slow down the function significantly.
        p_value_threshold: float, optional
            the threshold to use to alert the user of violations. See note below.
        plot_n_bootstraps:
            in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
            the function significantly.

        Examples
        ----------
        >>> from lifelines.datasets import load_rossi
        >>> from lifelines import CoxPHFitter
        >>>
        >>> rossi = load_rossi()
        >>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
        >>>
        >>> cph.check_assumptions(rossi)

        Notes
        -------
        The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
        will be below the threshold (i.e. by chance). This is compounded when there are many covariates.
        Similarly, when there are lots of observations, even minor deviances from the proportional hazard
        assumption will be flagged.

        With that in mind, it's best to use a combination of statistical tests and eyeball tests to
        determine the most serious violations.

        References
        -----------
        section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,
        http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,
        http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf
        """
        # A non-unique index would make joining the residuals back onto the
        # durations ambiguous below, so reject it up front.
        if not training_df.index.is_unique:
            raise IndexError(
                "`training_df` index should be unique for this exercise. Please make it unique or use `.reset_index(drop=True)` to force a unique index"
            )
        # Scaled Schoenfeld residuals: one column per covariate, one row per event.
        residuals = self.compute_residuals(training_df, kind="scaled_schoenfeld")
        # Run the proportional-hazards test under both the 'rank' and the
        # Kaplan-Meier ('km') time transforms.
        test_results = proportional_hazard_test(
            self, training_df, time_transform=["rank", "km"], precomputed_residuals=residuals
        )
        residuals_and_duration = residuals.join(training_df[self.duration_col])
        # counter: number of covariates flagged so far; n: number of residual rows.
        counter = 0
        n = residuals_and_duration.shape[0]
        for variable in self.hazards_.index:
            # Smallest p-value across the two time transforms for this covariate.
            # (sic: the variable name misspells "minimum".)
            minumum_observed_p_value = test_results.summary.loc[variable, "p"].min()
            # Covariate passes the test (rounded p-value above threshold): skip it.
            if np.round(minumum_observed_p_value, 2) > p_value_threshold:
                continue
            counter += 1
            # Print the preamble and the full test summary only once, just
            # before reporting the first flagged covariate.
            if counter == 1:
                if advice:
                    print(
                        fill(
                            """The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged."""
                            % p_value_threshold,
                            width=100,
                        )
                    )
                    print()
                    print(
                        fill(
                            """With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.""",
                            width=100,
                        )
                    )
                    print()
                test_results.print_summary()
                print()
            print()
            print(
                "%d. Variable '%s' failed the non-proportional test: p-value is %s."
                % (counter, variable, format_p_value(4)(minumum_observed_p_value)),
                end="\n\n",
            )
            if advice:
                values = training_df[variable]
                value_counts = values.value_counts()
                n_uniques = value_counts.shape[0]
                # Arbitrary chosen 10 and 4 to check for ability to use strata col.
                # This should capture dichotomous / low cardinality values.
                if n_uniques <= 10 and value_counts.min() >= 5:
                    print(
                        fill(
                            "   Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.".format(
                                n_uniques, variable
                            ),
                            width=100,
                        )
                    )
                else:
                    print(
                        fill(
                            """   Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.""".format(
                                var=variable
                            ),
                            width=100,
                        ),
                        end="\n\n",
                    )
                    print(
                        fill(
                            """   Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.""".format(
                                var=variable
                            ),
                            width=100,
                        ),
                        end="\n\n",
                    )
                    # NOTE(review): the .format(var=...) call below is a no-op --
                    # the string contains no placeholders.
                    print(
                        fill(
                            """   Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.""".format(
                                var=variable
                            ),
                            width=100,
                        ),
                        end="\n\n",
                    )
            if show_plots:
                # Imported lazily so matplotlib is only required when plotting.
                from matplotlib import pyplot as plt
                fig = plt.figure()
                # plot variable against all time transformations.
                for i, (transform_name, transformer) in enumerate(TimeTransformers().iter(["rank", "km"]), start=1):
                    p_value = test_results.summary.loc[(variable, transform_name), "p"]
                    ax = fig.add_subplot(1, 2, i)
                    y = residuals_and_duration[variable]
                    # Transformed event times, restricted to observed events.
                    tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
                    ax.scatter(tt, y, alpha=0.75)
                    # Loess smoother over the residuals; a non-constant curve is
                    # the visual signature of a violation.
                    y_lowess = lowess(tt.values, y.values)
                    ax.plot(tt, y_lowess, color="k", alpha=1.0, linewidth=2)
                    # bootstrap some possible other lowess lines. This is an approximation of the 100% confidence intervals
                    for _ in range(plot_n_bootstraps):
                        ix = sorted(np.random.choice(n, n))
                        tt_ = tt.values[ix]
                        y_lowess = lowess(tt_, y.values[ix])
                        ax.plot(tt_, y_lowess, color="k", alpha=0.30)
                    # Keep the x-limits from before drawing the zero reference line.
                    best_xlim = ax.get_xlim()
                    ax.hlines(0, 0, tt.max(), linestyles="dashed", linewidths=1)
                    ax.set_xlim(best_xlim)
                    ax.set_xlabel("%s-transformed time\n(p=%.4f)" % (transform_name, p_value), fontsize=10)
                fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
                plt.tight_layout()
                plt.subplots_adjust(top=0.90)
        # Footnote links referenced as [A]-[E] in the advice printed above.
        if advice and counter > 0:
            print(
                dedent(
                    r"""
                ---
                [A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
                [B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it
                [C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates
                [D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form
                [E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification
            """
                )
            )
        if counter == 0:
            print("Proportional hazard assumption looks okay.")
constant[
Use this function to test the proportional hazards assumption. See usage example at
https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
Parameters
-----------
training_df: DataFrame
the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.
advice: boolean, optional
display advice as output to the user's screen
show_plots: boolean, optional
display plots of the scaled schoenfeld residuals and loess curves. This is an eyeball test for violations.
This will slow down the function significantly.
p_value_threshold: float, optional
the threshold to use to alert the user of violations. See note below.
plot_n_bootstraps:
in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
the function significantly.
Examples
----------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>>
>>> rossi = load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>>
>>> cph.check_assumptions(rossi)
Notes
-------
The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
will be below the threshold (i.e. by chance). This is compounded when there are many covariates.
Similarly, when there are lots of observations, even minor deviances from the proportional hazard
assumption will be flagged.
With that in mind, it's best to use a combination of statistical tests and eyeball tests to
determine the most serious violations.
References
-----------
section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,
http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,
http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf
]
if <ast.UnaryOp object at 0x7da18f811660> begin[:]
<ast.Raise object at 0x7da18f8112d0>
variable[residuals] assign[=] call[name[self].compute_residuals, parameter[name[training_df]]]
variable[test_results] assign[=] call[name[proportional_hazard_test], parameter[name[self], name[training_df]]]
variable[residuals_and_duration] assign[=] call[name[residuals].join, parameter[call[name[training_df]][name[self].duration_col]]]
variable[counter] assign[=] constant[0]
variable[n] assign[=] call[name[residuals_and_duration].shape][constant[0]]
for taget[name[variable]] in starred[name[self].hazards_.index] begin[:]
variable[minumum_observed_p_value] assign[=] call[call[name[test_results].summary.loc][tuple[[<ast.Name object at 0x7da18f813a00>, <ast.Constant object at 0x7da18f813010>]]].min, parameter[]]
if compare[call[name[np].round, parameter[name[minumum_observed_p_value], constant[2]]] greater[>] name[p_value_threshold]] begin[:]
continue
<ast.AugAssign object at 0x7da18f812c20>
if compare[name[counter] equal[==] constant[1]] begin[:]
if name[advice] begin[:]
call[name[print], parameter[call[name[fill], parameter[binary_operation[constant[The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged.] <ast.Mod object at 0x7da2590d6920> name[p_value_threshold]]]]]]
call[name[print], parameter[]]
call[name[print], parameter[call[name[fill], parameter[constant[With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.]]]]]
call[name[print], parameter[]]
call[name[test_results].print_summary, parameter[]]
call[name[print], parameter[]]
call[name[print], parameter[]]
call[name[print], parameter[binary_operation[constant[%d. Variable '%s' failed the non-proportional test: p-value is %s.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f8122c0>, <ast.Name object at 0x7da18f8114e0>, <ast.Call object at 0x7da18f8134f0>]]]]]
if name[advice] begin[:]
variable[values] assign[=] call[name[training_df]][name[variable]]
variable[value_counts] assign[=] call[name[values].value_counts, parameter[]]
variable[n_uniques] assign[=] call[name[value_counts].shape][constant[0]]
if <ast.BoolOp object at 0x7da18f811d50> begin[:]
call[name[print], parameter[call[name[fill], parameter[call[constant[ Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.].format, parameter[name[n_uniques], name[variable]]]]]]]
if name[show_plots] begin[:]
from relative_module[matplotlib] import module[pyplot]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c6aae60>, <ast.Tuple object at 0x7da20c6ab790>]]] in starred[call[name[enumerate], parameter[call[call[name[TimeTransformers], parameter[]].iter, parameter[list[[<ast.Constant object at 0x7da20c6ab850>, <ast.Constant object at 0x7da20c6a8e50>]]]]]]] begin[:]
variable[p_value] assign[=] call[name[test_results].summary.loc][tuple[[<ast.Tuple object at 0x7da20c6ab100>, <ast.Constant object at 0x7da20c6abf10>]]]
variable[ax] assign[=] call[name[fig].add_subplot, parameter[constant[1], constant[2], name[i]]]
variable[y] assign[=] call[name[residuals_and_duration]][name[variable]]
variable[tt] assign[=] call[call[name[transformer], parameter[name[self].durations, name[self].event_observed, name[self].weights]]][name[self].event_observed.values]
call[name[ax].scatter, parameter[name[tt], name[y]]]
variable[y_lowess] assign[=] call[name[lowess], parameter[name[tt].values, name[y].values]]
call[name[ax].plot, parameter[name[tt], name[y_lowess]]]
for taget[name[_]] in starred[call[name[range], parameter[name[plot_n_bootstraps]]]] begin[:]
variable[ix] assign[=] call[name[sorted], parameter[call[name[np].random.choice, parameter[name[n], name[n]]]]]
variable[tt_] assign[=] call[name[tt].values][name[ix]]
variable[y_lowess] assign[=] call[name[lowess], parameter[name[tt_], call[name[y].values][name[ix]]]]
call[name[ax].plot, parameter[name[tt_], name[y_lowess]]]
variable[best_xlim] assign[=] call[name[ax].get_xlim, parameter[]]
call[name[ax].hlines, parameter[constant[0], constant[0], call[name[tt].max, parameter[]]]]
call[name[ax].set_xlim, parameter[name[best_xlim]]]
call[name[ax].set_xlabel, parameter[binary_operation[constant[%s-transformed time
(p=%.4f)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c7cab00>, <ast.Name object at 0x7da20c7cad10>]]]]]
call[name[fig].suptitle, parameter[binary_operation[constant[Scaled Schoenfeld residuals of '%s'] <ast.Mod object at 0x7da2590d6920> name[variable]]]]
call[name[plt].tight_layout, parameter[]]
call[name[plt].subplots_adjust, parameter[]]
if <ast.BoolOp object at 0x7da20c794550> begin[:]
call[name[print], parameter[call[name[dedent], parameter[constant[
---
[A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
[B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it
[C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates
[D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form
[E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification
]]]]]
if compare[name[counter] equal[==] constant[0]] begin[:]
call[name[print], parameter[constant[Proportional hazard assumption looks okay.]]] | keyword[def] identifier[check_assumptions] (
identifier[self] , identifier[training_df] , identifier[advice] = keyword[True] , identifier[show_plots] = keyword[False] , identifier[p_value_threshold] = literal[int] , identifier[plot_n_bootstraps] = literal[int]
):
literal[string]
keyword[if] keyword[not] identifier[training_df] . identifier[index] . identifier[is_unique] :
keyword[raise] identifier[IndexError] (
literal[string]
)
identifier[residuals] = identifier[self] . identifier[compute_residuals] ( identifier[training_df] , identifier[kind] = literal[string] )
identifier[test_results] = identifier[proportional_hazard_test] (
identifier[self] , identifier[training_df] , identifier[time_transform] =[ literal[string] , literal[string] ], identifier[precomputed_residuals] = identifier[residuals]
)
identifier[residuals_and_duration] = identifier[residuals] . identifier[join] ( identifier[training_df] [ identifier[self] . identifier[duration_col] ])
identifier[counter] = literal[int]
identifier[n] = identifier[residuals_and_duration] . identifier[shape] [ literal[int] ]
keyword[for] identifier[variable] keyword[in] identifier[self] . identifier[hazards_] . identifier[index] :
identifier[minumum_observed_p_value] = identifier[test_results] . identifier[summary] . identifier[loc] [ identifier[variable] , literal[string] ]. identifier[min] ()
keyword[if] identifier[np] . identifier[round] ( identifier[minumum_observed_p_value] , literal[int] )> identifier[p_value_threshold] :
keyword[continue]
identifier[counter] += literal[int]
keyword[if] identifier[counter] == literal[int] :
keyword[if] identifier[advice] :
identifier[print] (
identifier[fill] (
literal[string]
% identifier[p_value_threshold] ,
identifier[width] = literal[int] ,
)
)
identifier[print] ()
identifier[print] (
identifier[fill] (
literal[string] ,
identifier[width] = literal[int] ,
)
)
identifier[print] ()
identifier[test_results] . identifier[print_summary] ()
identifier[print] ()
identifier[print] ()
identifier[print] (
literal[string]
%( identifier[counter] , identifier[variable] , identifier[format_p_value] ( literal[int] )( identifier[minumum_observed_p_value] )),
identifier[end] = literal[string] ,
)
keyword[if] identifier[advice] :
identifier[values] = identifier[training_df] [ identifier[variable] ]
identifier[value_counts] = identifier[values] . identifier[value_counts] ()
identifier[n_uniques] = identifier[value_counts] . identifier[shape] [ literal[int] ]
keyword[if] identifier[n_uniques] <= literal[int] keyword[and] identifier[value_counts] . identifier[min] ()>= literal[int] :
identifier[print] (
identifier[fill] (
literal[string] . identifier[format] (
identifier[n_uniques] , identifier[variable]
),
identifier[width] = literal[int] ,
)
)
keyword[else] :
identifier[print] (
identifier[fill] (
literal[string] . identifier[format] (
identifier[var] = identifier[variable]
),
identifier[width] = literal[int] ,
),
identifier[end] = literal[string] ,
)
identifier[print] (
identifier[fill] (
literal[string] . identifier[format] (
identifier[var] = identifier[variable]
),
identifier[width] = literal[int] ,
),
identifier[end] = literal[string] ,
)
identifier[print] (
identifier[fill] (
literal[string] . identifier[format] (
identifier[var] = identifier[variable]
),
identifier[width] = literal[int] ,
),
identifier[end] = literal[string] ,
)
keyword[if] identifier[show_plots] :
keyword[from] identifier[matplotlib] keyword[import] identifier[pyplot] keyword[as] identifier[plt]
identifier[fig] = identifier[plt] . identifier[figure] ()
keyword[for] identifier[i] ,( identifier[transform_name] , identifier[transformer] ) keyword[in] identifier[enumerate] ( identifier[TimeTransformers] (). identifier[iter] ([ literal[string] , literal[string] ]), identifier[start] = literal[int] ):
identifier[p_value] = identifier[test_results] . identifier[summary] . identifier[loc] [( identifier[variable] , identifier[transform_name] ), literal[string] ]
identifier[ax] = identifier[fig] . identifier[add_subplot] ( literal[int] , literal[int] , identifier[i] )
identifier[y] = identifier[residuals_and_duration] [ identifier[variable] ]
identifier[tt] = identifier[transformer] ( identifier[self] . identifier[durations] , identifier[self] . identifier[event_observed] , identifier[self] . identifier[weights] )[ identifier[self] . identifier[event_observed] . identifier[values] ]
identifier[ax] . identifier[scatter] ( identifier[tt] , identifier[y] , identifier[alpha] = literal[int] )
identifier[y_lowess] = identifier[lowess] ( identifier[tt] . identifier[values] , identifier[y] . identifier[values] )
identifier[ax] . identifier[plot] ( identifier[tt] , identifier[y_lowess] , identifier[color] = literal[string] , identifier[alpha] = literal[int] , identifier[linewidth] = literal[int] )
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[plot_n_bootstraps] ):
identifier[ix] = identifier[sorted] ( identifier[np] . identifier[random] . identifier[choice] ( identifier[n] , identifier[n] ))
identifier[tt_] = identifier[tt] . identifier[values] [ identifier[ix] ]
identifier[y_lowess] = identifier[lowess] ( identifier[tt_] , identifier[y] . identifier[values] [ identifier[ix] ])
identifier[ax] . identifier[plot] ( identifier[tt_] , identifier[y_lowess] , identifier[color] = literal[string] , identifier[alpha] = literal[int] )
identifier[best_xlim] = identifier[ax] . identifier[get_xlim] ()
identifier[ax] . identifier[hlines] ( literal[int] , literal[int] , identifier[tt] . identifier[max] (), identifier[linestyles] = literal[string] , identifier[linewidths] = literal[int] )
identifier[ax] . identifier[set_xlim] ( identifier[best_xlim] )
identifier[ax] . identifier[set_xlabel] ( literal[string] %( identifier[transform_name] , identifier[p_value] ), identifier[fontsize] = literal[int] )
identifier[fig] . identifier[suptitle] ( literal[string] % identifier[variable] , identifier[fontsize] = literal[int] )
identifier[plt] . identifier[tight_layout] ()
identifier[plt] . identifier[subplots_adjust] ( identifier[top] = literal[int] )
keyword[if] identifier[advice] keyword[and] identifier[counter] > literal[int] :
identifier[print] (
identifier[dedent] (
literal[string]
)
)
keyword[if] identifier[counter] == literal[int] :
identifier[print] ( literal[string] ) | def check_assumptions(self, training_df, advice=True, show_plots=False, p_value_threshold=0.01, plot_n_bootstraps=10):
"""
Use this function to test the proportional hazards assumption. See usage example at
https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
Parameters
-----------
training_df: DataFrame
the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.
advice: boolean, optional
display advice as output to the user's screen
show_plots: boolean, optional
display plots of the scaled schoenfeld residuals and loess curves. This is an eyeball test for violations.
This will slow down the function significantly.
p_value_threshold: float, optional
the threshold to use to alert the user of violations. See note below.
plot_n_bootstraps:
in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
the function significantly.
Examples
----------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>>
>>> rossi = load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>>
>>> cph.check_assumptions(rossi)
Notes
-------
The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
will be below the threshold (i.e. by chance). This is compounded when there are many covariates.
Similarly, when there are lots of observations, even minor deviances from the proportional hazard
assumption will be flagged.
With that in mind, it's best to use a combination of statistical tests and eyeball tests to
determine the most serious violations.
References
-----------
section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,
http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,
http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf
"""
if not training_df.index.is_unique:
raise IndexError('`training_df` index should be unique for this exercise. Please make it unique or use `.reset_index(drop=True)` to force a unique index') # depends on [control=['if'], data=[]]
residuals = self.compute_residuals(training_df, kind='scaled_schoenfeld')
test_results = proportional_hazard_test(self, training_df, time_transform=['rank', 'km'], precomputed_residuals=residuals)
residuals_and_duration = residuals.join(training_df[self.duration_col])
counter = 0
n = residuals_and_duration.shape[0]
for variable in self.hazards_.index:
minumum_observed_p_value = test_results.summary.loc[variable, 'p'].min()
if np.round(minumum_observed_p_value, 2) > p_value_threshold:
continue # depends on [control=['if'], data=[]]
counter += 1
if counter == 1:
if advice:
print(fill('The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged.' % p_value_threshold, width=100))
print()
print(fill("With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.", width=100))
print() # depends on [control=['if'], data=[]]
test_results.print_summary()
print() # depends on [control=['if'], data=[]]
print()
print("%d. Variable '%s' failed the non-proportional test: p-value is %s." % (counter, variable, format_p_value(4)(minumum_observed_p_value)), end='\n\n')
if advice:
values = training_df[variable]
value_counts = values.value_counts()
n_uniques = value_counts.shape[0]
# Arbitrary chosen 10 and 4 to check for ability to use strata col.
# This should capture dichotomous / low cardinality values.
if n_uniques <= 10 and value_counts.min() >= 5:
print(fill(" Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.".format(n_uniques, variable), width=100)) # depends on [control=['if'], data=[]]
else:
print(fill(" Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.".format(var=variable), width=100), end='\n\n')
print(fill(" Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.".format(var=variable), width=100), end='\n\n')
print(fill(' Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.'.format(var=variable), width=100), end='\n\n') # depends on [control=['if'], data=[]]
if show_plots:
from matplotlib import pyplot as plt
fig = plt.figure()
# plot variable against all time transformations.
for (i, (transform_name, transformer)) in enumerate(TimeTransformers().iter(['rank', 'km']), start=1):
p_value = test_results.summary.loc[(variable, transform_name), 'p']
ax = fig.add_subplot(1, 2, i)
y = residuals_and_duration[variable]
tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
ax.scatter(tt, y, alpha=0.75)
y_lowess = lowess(tt.values, y.values)
ax.plot(tt, y_lowess, color='k', alpha=1.0, linewidth=2)
# bootstrap some possible other lowess lines. This is an approximation of the 100% confidence intervals
for _ in range(plot_n_bootstraps):
ix = sorted(np.random.choice(n, n))
tt_ = tt.values[ix]
y_lowess = lowess(tt_, y.values[ix])
ax.plot(tt_, y_lowess, color='k', alpha=0.3) # depends on [control=['for'], data=[]]
best_xlim = ax.get_xlim()
ax.hlines(0, 0, tt.max(), linestyles='dashed', linewidths=1)
ax.set_xlim(best_xlim)
ax.set_xlabel('%s-transformed time\n(p=%.4f)' % (transform_name, p_value), fontsize=10) # depends on [control=['for'], data=[]]
fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
plt.tight_layout()
plt.subplots_adjust(top=0.9) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['variable']]
if advice and counter > 0:
print(dedent('\n ---\n [A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html\n [B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it\n [C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates\n [D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form\n [E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification\n ')) # depends on [control=['if'], data=[]]
if counter == 0:
print('Proportional hazard assumption looks okay.') # depends on [control=['if'], data=[]] |
def yosys_area_delay(library, abc_cmd=None, block=None):
    """ Synthesize with Yosys and return estimate of area and delay.
    :param library: stdcell library file to target in liberty format
    :param abc_cmd: string of commands for yosys to pass to abc for synthesis
    :param block: pyrtl block to analyze
    :return: a tuple of numbers: area, delay
    The area and delay are returned in units as defined by the stdcell
    library.  In the standard vsc 130nm library, the area is in a number of
    "tracks", each of which is about 1.74 square um (see area estimation
    for more details) and the delay is in ps.
    http://www.vlsitechnology.org/html/vsc_description.html
    May raise `PyrtlError` if yosys is not configured correctly, or if the
    call to yosys did not complete successfully.
    """
    if abc_cmd is None:
        # default synthesis script, already ending in "print_stats"
        abc_cmd = 'strash;scorr;ifraig;retime;dch,-f;map;print_stats;'
    else:
        # first, replace whitespace with commas as per yosys requirements
        # (re.sub returns a new string -- the result must be kept)
        abc_cmd = re.sub(r"\s+", ',', abc_cmd)
        # then append with "print_stats" to generate the area and delay info
        abc_cmd = '%s;print_stats;' % abc_cmd

    def extract_area_delay_from_yosys_output(yosys_output):
        # abc reports its results on a line containing "ABC: netlist";
        # pull the "area = X" and "delay = Y" numbers out of that line
        report_lines = [line for line in yosys_output.split('\n') if 'ABC: netlist' in line]
        area = re.match(r'.*area\s*=\s*([0-9.]*)', report_lines[0]).group(1)
        delay = re.match(r'.*delay\s*=\s*([0-9.]*)', report_lines[0]).group(1)
        return float(area), float(delay)

    yosys_arg_template = """-p
    read_verilog %s;
    synth -top toplevel;
    dfflibmap -liberty %s;
    abc -liberty %s -script +%s
    """

    temp_d, temp_path = tempfile.mkstemp(suffix='.v')
    try:
        # write the verilog to a temp file
        with os.fdopen(temp_d, 'w') as f:
            OutputToVerilog(f, block=block)
        # call yosys on the temp file, and grab the output as text
        # (universal_newlines=True so check_output returns str, not bytes,
        # which the regex parsing below requires on Python 3)
        yosys_arg = yosys_arg_template % (temp_path, library, library, abc_cmd)
        yosys_output = subprocess.check_output(['yosys', yosys_arg], universal_newlines=True)
        area, delay = extract_area_delay_from_yosys_output(yosys_output)
    except (subprocess.CalledProcessError, ValueError) as e:
        print('Error with call to yosys...', file=sys.stderr)
        print('---------------------------------------------', file=sys.stderr)
        print(e.output, file=sys.stderr)
        print('---------------------------------------------', file=sys.stderr)
        raise PyrtlError('Yosys call failed')
    except OSError:
        # yosys binary itself could not be launched
        print('Error with call to yosys...', file=sys.stderr)
        raise PyrtlError('Call to yosys failed (not installed or on path?)')
    finally:
        # always clean up the temporary verilog file
        os.remove(temp_path)
    return area, delay
constant[ Synthesize with Yosys and return estimate of area and delay.
:param library: stdcell library file to target in liberty format
:param abc_cmd: string of commands for yosys to pass to abc for synthesis
:param block: pyrtl block to analyze
:return: a tuple of numbers: area, delay
The area and delay are returned in units as defined by the stdcell
library. In the standard vsc 130nm library, the area is in a number of
"tracks", each of which is about 1.74 square um (see area estimation
for more details) and the delay is in ps.
http://www.vlsitechnology.org/html/vsc_description.html
May raise `PyrtlError` if yosys is not configured correctly, and
`PyrtlInternalError` if the call to yosys was not able successfully
]
if compare[name[abc_cmd] is constant[None]] begin[:]
variable[abc_cmd] assign[=] constant[strash;scorr;ifraig;retime;dch,-f;map;print_stats;]
def function[extract_area_delay_from_yosys_output, parameter[yosys_output]]:
variable[report_lines] assign[=] <ast.ListComp object at 0x7da20e9b34c0>
variable[area] assign[=] call[call[name[re].match, parameter[constant[.*area\s*=\s*([0-9\.]*)], call[name[report_lines]][constant[0]]]].group, parameter[constant[1]]]
variable[delay] assign[=] call[call[name[re].match, parameter[constant[.*delay\s*=\s*([0-9\.]*)], call[name[report_lines]][constant[0]]]].group, parameter[constant[1]]]
return[tuple[[<ast.Call object at 0x7da20e9b0730>, <ast.Call object at 0x7da20e9b35b0>]]]
variable[yosys_arg_template] assign[=] constant[-p
read_verilog %s;
synth -top toplevel;
dfflibmap -liberty %s;
abc -liberty %s -script +%s
]
<ast.Tuple object at 0x7da20e9b3af0> assign[=] call[name[tempfile].mkstemp, parameter[]]
<ast.Try object at 0x7da20e9b2ce0>
return[tuple[[<ast.Name object at 0x7da20c6ab7f0>, <ast.Name object at 0x7da20c6a81c0>]]] | keyword[def] identifier[yosys_area_delay] ( identifier[library] , identifier[abc_cmd] = keyword[None] , identifier[block] = keyword[None] ):
literal[string]
keyword[if] identifier[abc_cmd] keyword[is] keyword[None] :
identifier[abc_cmd] = literal[string]
keyword[else] :
identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[abc_cmd] )
identifier[abc_cmd] = literal[string] % identifier[abc_cmd]
keyword[def] identifier[extract_area_delay_from_yosys_output] ( identifier[yosys_output] ):
identifier[report_lines] =[ identifier[line] keyword[for] identifier[line] keyword[in] identifier[yosys_output] . identifier[split] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[line] ]
identifier[area] = identifier[re] . identifier[match] ( literal[string] , identifier[report_lines] [ literal[int] ]). identifier[group] ( literal[int] )
identifier[delay] = identifier[re] . identifier[match] ( literal[string] , identifier[report_lines] [ literal[int] ]). identifier[group] ( literal[int] )
keyword[return] identifier[float] ( identifier[area] ), identifier[float] ( identifier[delay] )
identifier[yosys_arg_template] = literal[string]
identifier[temp_d] , identifier[temp_path] = identifier[tempfile] . identifier[mkstemp] ( identifier[suffix] = literal[string] )
keyword[try] :
keyword[with] identifier[os] . identifier[fdopen] ( identifier[temp_d] , literal[string] ) keyword[as] identifier[f] :
identifier[OutputToVerilog] ( identifier[f] , identifier[block] = identifier[block] )
identifier[yosys_arg] = identifier[yosys_arg_template] %( identifier[temp_path] , identifier[library] , identifier[library] , identifier[abc_cmd] )
identifier[yosys_output] = identifier[subprocess] . identifier[check_output] ([ literal[string] , identifier[yosys_arg] ])
identifier[area] , identifier[delay] = identifier[extract_area_delay_from_yosys_output] ( identifier[yosys_output] )
keyword[except] ( identifier[subprocess] . identifier[CalledProcessError] , identifier[ValueError] ) keyword[as] identifier[e] :
identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[print] ( identifier[e] . identifier[output] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[raise] identifier[PyrtlError] ( literal[string] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[raise] identifier[PyrtlError] ( literal[string] )
keyword[finally] :
identifier[os] . identifier[remove] ( identifier[temp_path] )
keyword[return] identifier[area] , identifier[delay] | def yosys_area_delay(library, abc_cmd=None, block=None):
""" Synthesize with Yosys and return estimate of area and delay.
:param library: stdcell library file to target in liberty format
:param abc_cmd: string of commands for yosys to pass to abc for synthesis
:param block: pyrtl block to analyze
:return: a tuple of numbers: area, delay
The area and delay are returned in units as defined by the stdcell
library. In the standard vsc 130nm library, the area is in a number of
"tracks", each of which is about 1.74 square um (see area estimation
for more details) and the delay is in ps.
http://www.vlsitechnology.org/html/vsc_description.html
May raise `PyrtlError` if yosys is not configured correctly, and
`PyrtlInternalError` if the call to yosys was not able successfully
"""
if abc_cmd is None:
abc_cmd = 'strash;scorr;ifraig;retime;dch,-f;map;print_stats;' # depends on [control=['if'], data=['abc_cmd']]
else:
# first, replace whitespace with commas as per yosys requirements
re.sub('\\s+', ',', abc_cmd)
# then append with "print_stats" to generate the area and delay info
abc_cmd = '%s;print_stats;' % abc_cmd
def extract_area_delay_from_yosys_output(yosys_output):
report_lines = [line for line in yosys_output.split('\n') if 'ABC: netlist' in line]
area = re.match('.*area\\s*=\\s*([0-9\\.]*)', report_lines[0]).group(1)
delay = re.match('.*delay\\s*=\\s*([0-9\\.]*)', report_lines[0]).group(1)
return (float(area), float(delay))
yosys_arg_template = '-p\n read_verilog %s;\n synth -top toplevel;\n dfflibmap -liberty %s;\n abc -liberty %s -script +%s\n '
(temp_d, temp_path) = tempfile.mkstemp(suffix='.v')
try:
# write the verilog to a temp
with os.fdopen(temp_d, 'w') as f:
OutputToVerilog(f, block=block) # depends on [control=['with'], data=['f']]
# call yosys on the temp, and grab the output
yosys_arg = yosys_arg_template % (temp_path, library, library, abc_cmd)
yosys_output = subprocess.check_output(['yosys', yosys_arg])
(area, delay) = extract_area_delay_from_yosys_output(yosys_output) # depends on [control=['try'], data=[]]
except (subprocess.CalledProcessError, ValueError) as e:
print('Error with call to yosys...', file=sys.stderr)
print('---------------------------------------------', file=sys.stderr)
print(e.output, file=sys.stderr)
print('---------------------------------------------', file=sys.stderr)
raise PyrtlError('Yosys callfailed') # depends on [control=['except'], data=['e']]
except OSError as e:
print('Error with call to yosys...', file=sys.stderr)
raise PyrtlError('Call to yosys failed (not installed or on path?)') # depends on [control=['except'], data=[]]
finally:
os.remove(temp_path)
return (area, delay) |
def check_mac(original_mac):
    '''
    Checks the format of a MAC address and returns it without double-colons and in capital letters, if it is correct. Otherwise it returns None.
    * it accepts the format of the double colons and a single hex string
    '''
    # Normalize first: uppercase and strip surrounding whitespace.
    mac = (original_mac.upper()).strip()
    parts = mac.split(':')
    if len(parts) == 6:
        # let's think that it is a : separated mac
        # Each colon-separated group must be exactly two characters.
        for p in parts:
            if len(p) != 2:
                return None
        # Collapse the six groups into a single 12-character string.
        mac = ''.join(parts)
    elif len(parts) > 1:
        # Some colons, but not the six groups of a valid MAC.
        return None
    # NOTE(review): a colon-free input skips any length check, so strings
    # shorter/longer than 12 hex digits pass through -- confirm intended.
    # Final pass: every remaining character must be an uppercase hex digit.
    for c in mac:
        if c not in '0123456789ABCDEF':
            return None
    return mac | def function[check_mac, parameter[original_mac]]:
constant[
Checks the format of a MAC address and returns it without double-colons and in capital letters, if it is correct. Otherwise it returns None.
* it accepts the format of the double colons and a single hex string
]
variable[mac] assign[=] call[call[name[original_mac].upper, parameter[]].strip, parameter[]]
variable[parts] assign[=] call[name[mac].split, parameter[constant[:]]]
if compare[call[name[len], parameter[name[parts]]] equal[==] constant[6]] begin[:]
for taget[name[p]] in starred[name[parts]] begin[:]
if compare[call[name[len], parameter[name[p]]] not_equal[!=] constant[2]] begin[:]
return[constant[None]]
variable[mac] assign[=] call[constant[].join, parameter[name[parts]]]
for taget[name[c]] in starred[name[mac]] begin[:]
if compare[name[c] <ast.NotIn object at 0x7da2590d7190> constant[0123456789ABCDEF]] begin[:]
return[constant[None]]
return[name[mac]] | keyword[def] identifier[check_mac] ( identifier[original_mac] ):
literal[string]
identifier[mac] =( identifier[original_mac] . identifier[upper] ()). identifier[strip] ()
identifier[parts] = identifier[mac] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )== literal[int] :
keyword[for] identifier[p] keyword[in] identifier[parts] :
keyword[if] identifier[len] ( identifier[p] )!= literal[int] :
keyword[return] keyword[None]
identifier[mac] = literal[string] . identifier[join] ( identifier[parts] )
keyword[elif] identifier[len] ( identifier[parts] )> literal[int] :
keyword[return] keyword[None]
keyword[for] identifier[c] keyword[in] identifier[mac] :
keyword[if] identifier[c] keyword[not] keyword[in] literal[string] :
keyword[return] keyword[None]
keyword[return] identifier[mac] | def check_mac(original_mac):
"""
Checks the format of a MAC address and returns it without double-colons and in capital letters, if it is correct. Otherwise it returns None.
* it accepts the format of the double colons and a single hex string
"""
mac = original_mac.upper().strip()
parts = mac.split(':')
if len(parts) == 6:
# let's think that it is a : separated mac
for p in parts:
if len(p) != 2:
return None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
mac = ''.join(parts) # depends on [control=['if'], data=[]]
elif len(parts) > 1:
return None # depends on [control=['if'], data=[]]
for c in mac:
if c not in '0123456789ABCDEF':
return None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
return mac |
def _generateFind(self, **kwargs):
    """Generator which yields matches on AXChildren.

    :kwargs: attribute criteria forwarded to each child's ``_match``;
             only children for which ``_match(**kwargs)`` is truthy
             are yielded.
    """
    # Lazily walk every child element and yield the ones matching the criteria.
    for needle in self._generateChildren():
        if needle._match(**kwargs):
            yield needle | def function[_generateFind, parameter[self]]:
constant[Generator which yields matches on AXChildren.]
for taget[name[needle]] in starred[call[name[self]._generateChildren, parameter[]]] begin[:]
if call[name[needle]._match, parameter[]] begin[:]
<ast.Yield object at 0x7da20c7cb5e0> | keyword[def] identifier[_generateFind] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[needle] keyword[in] identifier[self] . identifier[_generateChildren] ():
keyword[if] identifier[needle] . identifier[_match] (** identifier[kwargs] ):
keyword[yield] identifier[needle] | def _generateFind(self, **kwargs):
"""Generator which yields matches on AXChildren."""
for needle in self._generateChildren():
if needle._match(**kwargs):
yield needle # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['needle']] |
def get_new_locations(self, urls):
    """Get valid location header values for all given URLs.
    The returned values are new, that is: they do not repeat any
    value contained in the original input. Only unique values
    are yielded.
    :param urls: a list of URL addresses
    :returns: valid location header values from responses
        to the URLs
    """
    # Seed the seen-set with the inputs so no input URL is ever yielded back.
    seen = set(urls)
    for i in urls:
        for k in self.get_locations(i):
            # Yield each location value only the first time it appears.
            if k not in seen:
                seen.add(k)
                yield k | def function[get_new_locations, parameter[self, urls]]:
constant[Get valid location header values for all given URLs.
The returned values are new, that is: they do not repeat any
value contained in the original input. Only unique values
are yielded.
:param urls: a list of URL addresses
:returns: valid location header values from responses
to the URLs
]
variable[seen] assign[=] call[name[set], parameter[name[urls]]]
for taget[name[i]] in starred[name[urls]] begin[:]
for taget[name[k]] in starred[call[name[self].get_locations, parameter[name[i]]]] begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[seen]] begin[:]
call[name[seen].add, parameter[name[k]]]
<ast.Yield object at 0x7da1b25ea320> | keyword[def] identifier[get_new_locations] ( identifier[self] , identifier[urls] ):
literal[string]
identifier[seen] = identifier[set] ( identifier[urls] )
keyword[for] identifier[i] keyword[in] identifier[urls] :
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[get_locations] ( identifier[i] ):
keyword[if] identifier[k] keyword[not] keyword[in] identifier[seen] :
identifier[seen] . identifier[add] ( identifier[k] )
keyword[yield] identifier[k] | def get_new_locations(self, urls):
"""Get valid location header values for all given URLs.
The returned values are new, that is: they do not repeat any
value contained in the original input. Only unique values
are yielded.
:param urls: a list of URL addresses
:returns: valid location header values from responses
to the URLs
"""
seen = set(urls)
for i in urls:
for k in self.get_locations(i):
if k not in seen:
seen.add(k)
yield k # depends on [control=['if'], data=['k', 'seen']] # depends on [control=['for'], data=['k']] # depends on [control=['for'], data=['i']] |
def __shpFileLength(self):
    """Calculates the file length of the shp file.

    Sums the 100-byte file header plus the serialized size of every
    shape record, then returns the total expressed in 16-bit words
    (the unit the shapefile header requires).
    """
    # Start with header length
    size = 100
    # Calculate size of all shapes
    for s in self._shapes:
        # Add in record header and shape type fields
        size += 12
        # nParts and nPoints do not apply to all shapes
        #if self.shapeType not in (0,1):
        # nParts = len(s.parts)
        # nPoints = len(s.points)
        if hasattr(s,'parts'):
            nParts = len(s.parts)
        if hasattr(s,'points'):
            nPoints = len(s.points)
        # NOTE(review): if a shape lacks 'parts'/'points', nParts/nPoints keep
        # their values from a previous iteration (or are unbound on the first)
        # -- assumes all shapes share a compatible shapeType; verify.
        # All shape types capable of having a bounding box
        if self.shapeType in (3,5,8,13,15,18,23,25,28,31):
            size += 32
        # Shape types with parts
        if self.shapeType in (3,5,13,15,23,25,31):
            # Parts count
            size += 4
            # Parts index array
            size += nParts * 4
        # Shape types with points
        if self.shapeType in (3,5,8,13,15,23,25,31):
            # Points count
            size += 4
            # Points array
            size += 16 * nPoints
        # Calc size of part types for Multipatch (31)
        if self.shapeType == 31:
            size += nParts * 4
        # Calc z extremes and values
        if self.shapeType in (13,15,18,31):
            # z extremes
            size += 16
            # z array
            size += 8 * nPoints
        # Calc m extremes and values
        if self.shapeType in (23,25,31):
            # m extremes
            size += 16
            # m array
            size += 8 * nPoints
        # Calc a single point
        if self.shapeType in (1,11,21):
            size += 16
        # Calc a single Z value
        if self.shapeType == 11:
            size += 8
        # Calc a single M value
        if self.shapeType in (11,21):
            size += 8
    # Calculate size as 16-bit words
    size //= 2
    return size | def function[__shpFileLength, parameter[self]]:
constant[Calculates the file length of the shp file.]
variable[size] assign[=] constant[100]
for taget[name[s]] in starred[name[self]._shapes] begin[:]
<ast.AugAssign object at 0x7da1b084f340>
if call[name[hasattr], parameter[name[s], constant[parts]]] begin[:]
variable[nParts] assign[=] call[name[len], parameter[name[s].parts]]
if call[name[hasattr], parameter[name[s], constant[points]]] begin[:]
variable[nPoints] assign[=] call[name[len], parameter[name[s].points]]
if compare[name[self].shapeType in tuple[[<ast.Constant object at 0x7da1b084f010>, <ast.Constant object at 0x7da1b084ef20>, <ast.Constant object at 0x7da1b084db40>, <ast.Constant object at 0x7da1b07b4c70>, <ast.Constant object at 0x7da1b07b4940>, <ast.Constant object at 0x7da1b07b4ee0>, <ast.Constant object at 0x7da1b07b56c0>, <ast.Constant object at 0x7da1b07b5f30>, <ast.Constant object at 0x7da1b07b6a40>, <ast.Constant object at 0x7da1b07b5330>]]] begin[:]
<ast.AugAssign object at 0x7da1b07b70d0>
if compare[name[self].shapeType in tuple[[<ast.Constant object at 0x7da1b07b73a0>, <ast.Constant object at 0x7da1b07b6a70>, <ast.Constant object at 0x7da1b07b7730>, <ast.Constant object at 0x7da1b07b45e0>, <ast.Constant object at 0x7da1b07b71f0>, <ast.Constant object at 0x7da1b07b42b0>, <ast.Constant object at 0x7da1b07b6050>]]] begin[:]
<ast.AugAssign object at 0x7da1b07b4df0>
<ast.AugAssign object at 0x7da1b07b7220>
if compare[name[self].shapeType in tuple[[<ast.Constant object at 0x7da1b07b41c0>, <ast.Constant object at 0x7da1b07b7a90>, <ast.Constant object at 0x7da1b07b64a0>, <ast.Constant object at 0x7da1b07b4f40>, <ast.Constant object at 0x7da1b07b4a60>, <ast.Constant object at 0x7da1b07b6e30>, <ast.Constant object at 0x7da1b07b4fd0>, <ast.Constant object at 0x7da1b07b4ca0>]]] begin[:]
<ast.AugAssign object at 0x7da1b07b4250>
<ast.AugAssign object at 0x7da1b07b6590>
if compare[name[self].shapeType equal[==] constant[31]] begin[:]
<ast.AugAssign object at 0x7da1b07b7e80>
if compare[name[self].shapeType in tuple[[<ast.Constant object at 0x7da1b07b5ea0>, <ast.Constant object at 0x7da1b07b5720>, <ast.Constant object at 0x7da1b07b5690>, <ast.Constant object at 0x7da1b07b4460>]]] begin[:]
<ast.AugAssign object at 0x7da1b07b7e20>
<ast.AugAssign object at 0x7da1b07b5a80>
if compare[name[self].shapeType in tuple[[<ast.Constant object at 0x7da1b07b4220>, <ast.Constant object at 0x7da1b07b7010>, <ast.Constant object at 0x7da1b07b7cd0>]]] begin[:]
<ast.AugAssign object at 0x7da1b07b7310>
<ast.AugAssign object at 0x7da1b07b57e0>
if compare[name[self].shapeType in tuple[[<ast.Constant object at 0x7da1b07b5750>, <ast.Constant object at 0x7da1b07b5f90>, <ast.Constant object at 0x7da1b07b75b0>]]] begin[:]
<ast.AugAssign object at 0x7da1b07b5210>
if compare[name[self].shapeType equal[==] constant[11]] begin[:]
<ast.AugAssign object at 0x7da1b07a4610>
if compare[name[self].shapeType in tuple[[<ast.Constant object at 0x7da1b07a5480>, <ast.Constant object at 0x7da1b07a53f0>]]] begin[:]
<ast.AugAssign object at 0x7da1b07a54b0>
<ast.AugAssign object at 0x7da1b07a6770>
return[name[size]] | keyword[def] identifier[__shpFileLength] ( identifier[self] ):
literal[string]
identifier[size] = literal[int]
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[_shapes] :
identifier[size] += literal[int]
keyword[if] identifier[hasattr] ( identifier[s] , literal[string] ):
identifier[nParts] = identifier[len] ( identifier[s] . identifier[parts] )
keyword[if] identifier[hasattr] ( identifier[s] , literal[string] ):
identifier[nPoints] = identifier[len] ( identifier[s] . identifier[points] )
keyword[if] identifier[self] . identifier[shapeType] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[size] += literal[int]
keyword[if] identifier[self] . identifier[shapeType] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[size] += literal[int]
identifier[size] += identifier[nParts] * literal[int]
keyword[if] identifier[self] . identifier[shapeType] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[size] += literal[int]
identifier[size] += literal[int] * identifier[nPoints]
keyword[if] identifier[self] . identifier[shapeType] == literal[int] :
identifier[size] += identifier[nParts] * literal[int]
keyword[if] identifier[self] . identifier[shapeType] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[size] += literal[int]
identifier[size] += literal[int] * identifier[nPoints]
keyword[if] identifier[self] . identifier[shapeType] keyword[in] ( literal[int] , literal[int] , literal[int] ):
identifier[size] += literal[int]
identifier[size] += literal[int] * identifier[nPoints]
keyword[if] identifier[self] . identifier[shapeType] keyword[in] ( literal[int] , literal[int] , literal[int] ):
identifier[size] += literal[int]
keyword[if] identifier[self] . identifier[shapeType] == literal[int] :
identifier[size] += literal[int]
keyword[if] identifier[self] . identifier[shapeType] keyword[in] ( literal[int] , literal[int] ):
identifier[size] += literal[int]
identifier[size] //= literal[int]
keyword[return] identifier[size] | def __shpFileLength(self):
"""Calculates the file length of the shp file.""" # Start with header length
size = 100 # Calculate size of all shapes
for s in self._shapes: # Add in record header and shape type fields
size += 12 # nParts and nPoints do not apply to all shapes
#if self.shapeType not in (0,1):
# nParts = len(s.parts)
# nPoints = len(s.points)
if hasattr(s, 'parts'):
nParts = len(s.parts) # depends on [control=['if'], data=[]]
if hasattr(s, 'points'):
nPoints = len(s.points) # depends on [control=['if'], data=[]] # All shape types capable of having a bounding box
if self.shapeType in (3, 5, 8, 13, 15, 18, 23, 25, 28, 31):
size += 32 # depends on [control=['if'], data=[]] # Shape types with parts
if self.shapeType in (3, 5, 13, 15, 23, 25, 31): # Parts count
size += 4 # Parts index array
size += nParts * 4 # depends on [control=['if'], data=[]] # Shape types with points
if self.shapeType in (3, 5, 8, 13, 15, 23, 25, 31): # Points count
size += 4 # Points array
size += 16 * nPoints # depends on [control=['if'], data=[]] # Calc size of part types for Multipatch (31)
if self.shapeType == 31:
size += nParts * 4 # depends on [control=['if'], data=[]] # Calc z extremes and values
if self.shapeType in (13, 15, 18, 31): # z extremes
size += 16 # z array
size += 8 * nPoints # depends on [control=['if'], data=[]] # Calc m extremes and values
if self.shapeType in (23, 25, 31): # m extremes
size += 16 # m array
size += 8 * nPoints # depends on [control=['if'], data=[]] # Calc a single point
if self.shapeType in (1, 11, 21):
size += 16 # depends on [control=['if'], data=[]] # Calc a single Z value
if self.shapeType == 11:
size += 8 # depends on [control=['if'], data=[]] # Calc a single M value
if self.shapeType in (11, 21):
size += 8 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']] # Calculate size as 16-bit words
size //= 2
return size |
def infer(args):
    """
    %prog infer scaffolds.fasta genome.fasta
    Infer where the components are in the genome. This function is rarely used,
    but can be useful when distributor does not ship an AGP file.
    """
    from jcvi.apps.grid import WriteJobs
    from jcvi.formats.bed import sort
    p = OptionParser(infer.__doc__)
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    scaffoldsf, genomef = args
    inferbed = "infer-components.bed"
    # Map each scaffold onto the genome in parallel, but only when the
    # inputs are newer than any previously generated mapping file.
    if need_update((scaffoldsf, genomef), inferbed):
        scaffolds = Fasta(scaffoldsf, lazy=True)
        genome = Fasta(genomef)
        genome = genome.tostring()
        args = [(scaffold_name, scaffold, genome) \
            for scaffold_name, scaffold in scaffolds.iteritems_ordered()]
        pool = WriteJobs(map_one_scaffold, args, inferbed, cpus=opts.cpus)
        pool.run()
    sort([inferbed, "-i"])
    bed = Bed(inferbed)
    inferagpbed = "infer.bed"
    fw = open(inferagpbed, "w")
    seen = []
    for b in bed:
        r = (b.seqid, b.start, b.end)
        # Skip intervals flagged by check_seen (presumably already-covered
        # or duplicate ranges -- verify against check_seen's definition).
        if check_seen(r, seen):
            continue
        print("\t".join(str(x) for x in \
            (b.accn, 0, b.span, b.seqid, b.score, b.strand)), file=fw)
        seen.append(r)
    fw.close()
    # Convert the accepted BED intervals into the final AGP output.
    frombed([inferagpbed]) | def function[infer, parameter[args]]:
constant[
%prog infer scaffolds.fasta genome.fasta
Infer where the components are in the genome. This function is rarely used,
but can be useful when distributor does not ship an AGP file.
]
from relative_module[jcvi.apps.grid] import module[WriteJobs]
from relative_module[jcvi.formats.bed] import module[sort]
variable[p] assign[=] call[name[OptionParser], parameter[name[infer].__doc__]]
call[name[p].set_cpus, parameter[]]
<ast.Tuple object at 0x7da1b09be170> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b09be770>]]
<ast.Tuple object at 0x7da1b09bc310> assign[=] name[args]
variable[inferbed] assign[=] constant[infer-components.bed]
if call[name[need_update], parameter[tuple[[<ast.Name object at 0x7da1b09befe0>, <ast.Name object at 0x7da1b09bfdc0>]], name[inferbed]]] begin[:]
variable[scaffolds] assign[=] call[name[Fasta], parameter[name[scaffoldsf]]]
variable[genome] assign[=] call[name[Fasta], parameter[name[genomef]]]
variable[genome] assign[=] call[name[genome].tostring, parameter[]]
variable[args] assign[=] <ast.ListComp object at 0x7da18bccb9a0>
variable[pool] assign[=] call[name[WriteJobs], parameter[name[map_one_scaffold], name[args], name[inferbed]]]
call[name[pool].run, parameter[]]
call[name[sort], parameter[list[[<ast.Name object at 0x7da207f9bc10>, <ast.Constant object at 0x7da207f98cd0>]]]]
variable[bed] assign[=] call[name[Bed], parameter[name[inferbed]]]
variable[inferagpbed] assign[=] constant[infer.bed]
variable[fw] assign[=] call[name[open], parameter[name[inferagpbed], constant[w]]]
variable[seen] assign[=] list[[]]
for taget[name[b]] in starred[name[bed]] begin[:]
variable[r] assign[=] tuple[[<ast.Attribute object at 0x7da207f98640>, <ast.Attribute object at 0x7da207f9bfa0>, <ast.Attribute object at 0x7da207f987c0>]]
if call[name[check_seen], parameter[name[r], name[seen]]] begin[:]
continue
call[name[print], parameter[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da207f994e0>]]]]
call[name[seen].append, parameter[name[r]]]
call[name[fw].close, parameter[]]
call[name[frombed], parameter[list[[<ast.Name object at 0x7da207f9a1d0>]]]] | keyword[def] identifier[infer] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[apps] . identifier[grid] keyword[import] identifier[WriteJobs]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[bed] keyword[import] identifier[sort]
identifier[p] = identifier[OptionParser] ( identifier[infer] . identifier[__doc__] )
identifier[p] . identifier[set_cpus] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[scaffoldsf] , identifier[genomef] = identifier[args]
identifier[inferbed] = literal[string]
keyword[if] identifier[need_update] (( identifier[scaffoldsf] , identifier[genomef] ), identifier[inferbed] ):
identifier[scaffolds] = identifier[Fasta] ( identifier[scaffoldsf] , identifier[lazy] = keyword[True] )
identifier[genome] = identifier[Fasta] ( identifier[genomef] )
identifier[genome] = identifier[genome] . identifier[tostring] ()
identifier[args] =[( identifier[scaffold_name] , identifier[scaffold] , identifier[genome] ) keyword[for] identifier[scaffold_name] , identifier[scaffold] keyword[in] identifier[scaffolds] . identifier[iteritems_ordered] ()]
identifier[pool] = identifier[WriteJobs] ( identifier[map_one_scaffold] , identifier[args] , identifier[inferbed] , identifier[cpus] = identifier[opts] . identifier[cpus] )
identifier[pool] . identifier[run] ()
identifier[sort] ([ identifier[inferbed] , literal[string] ])
identifier[bed] = identifier[Bed] ( identifier[inferbed] )
identifier[inferagpbed] = literal[string]
identifier[fw] = identifier[open] ( identifier[inferagpbed] , literal[string] )
identifier[seen] =[]
keyword[for] identifier[b] keyword[in] identifier[bed] :
identifier[r] =( identifier[b] . identifier[seqid] , identifier[b] . identifier[start] , identifier[b] . identifier[end] )
keyword[if] identifier[check_seen] ( identifier[r] , identifier[seen] ):
keyword[continue]
identifier[print] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[b] . identifier[accn] , literal[int] , identifier[b] . identifier[span] , identifier[b] . identifier[seqid] , identifier[b] . identifier[score] , identifier[b] . identifier[strand] )), identifier[file] = identifier[fw] )
identifier[seen] . identifier[append] ( identifier[r] )
identifier[fw] . identifier[close] ()
identifier[frombed] ([ identifier[inferagpbed] ]) | def infer(args):
"""
%prog infer scaffolds.fasta genome.fasta
Infer where the components are in the genome. This function is rarely used,
but can be useful when distributor does not ship an AGP file.
"""
from jcvi.apps.grid import WriteJobs
from jcvi.formats.bed import sort
p = OptionParser(infer.__doc__)
p.set_cpus()
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(scaffoldsf, genomef) = args
inferbed = 'infer-components.bed'
if need_update((scaffoldsf, genomef), inferbed):
scaffolds = Fasta(scaffoldsf, lazy=True)
genome = Fasta(genomef)
genome = genome.tostring()
args = [(scaffold_name, scaffold, genome) for (scaffold_name, scaffold) in scaffolds.iteritems_ordered()]
pool = WriteJobs(map_one_scaffold, args, inferbed, cpus=opts.cpus)
pool.run() # depends on [control=['if'], data=[]]
sort([inferbed, '-i'])
bed = Bed(inferbed)
inferagpbed = 'infer.bed'
fw = open(inferagpbed, 'w')
seen = []
for b in bed:
r = (b.seqid, b.start, b.end)
if check_seen(r, seen):
continue # depends on [control=['if'], data=[]]
print('\t'.join((str(x) for x in (b.accn, 0, b.span, b.seqid, b.score, b.strand))), file=fw)
seen.append(r) # depends on [control=['for'], data=['b']]
fw.close()
frombed([inferagpbed]) |
def radianceSpectrum(Omegas,AbsorptionCoefficient,Environment={'l':100.,'T':296.},
                     File=None, Format='%e %e', Wavenumber=None):
    """
    INPUT PARAMETERS:
        Wavenumber/Omegas: wavenumber grid (required)
        AbsorptionCoefficient: absorption coefficient on grid (required)
        Environment: dictionary containing path length in cm.
                     and temperature in Kelvin.
                     Default={'l':100.,'T':296.}
        File: name of the output file (optional)
        Format: c format used in file output, default '%e %e' (optional)
    OUTPUT PARAMETERS:
        Wavenum: wavenumber grid
        Xsect: radiance spectrum calculated on the grid
    ---
    DESCRIPTION:
        Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based
        on previously calculated absorption coefficient.
        Radiance spectrum is calculated at an arbitrary
        optical path length 'l' (1 m by default) and
        temperature 'T' (296 K by default). For obtaining a
        physically meaningful result 'T' must be the same
        as a temperature which was used in absorption coefficient.
    ---
    EXAMPLE OF USAGE:
        nu,radi = radianceSpectrum(nu,coef)
    ---
    """
    # compatibility with older versions
    if Wavenumber: Omegas=Wavenumber
    l = Environment['l']
    T = Environment['T']
    # Absorptivity over path length l (Beer-Lambert): fraction of light absorbed.
    Alw = 1-exp(-AbsorptionCoefficient*l)
    # Planck blackbody spectral radiance at temperature T, wavenumber form
    # 2*h*c^2*nu^3 / (exp(h*c*nu/(k*T)) - 1); the 1.0E-7 factor is a unit
    # conversion (presumably CGS erg/s -> W scaling) -- TODO confirm.
    LBBTw = 2*hh*cc**2*Omegas**3 / (exp(hh*cc*Omegas/(cBolts*T)) - 1) * 1.0E-7
    Xsect = Alw*LBBTw # W/sr/cm**2/cm**-1
    if File: save_to_file(File,Format,Omegas,Xsect)
    return Omegas,Xsect | def function[radianceSpectrum, parameter[Omegas, AbsorptionCoefficient, Environment, File, Format, Wavenumber]]:
constant[
INPUT PARAMETERS:
Wavenumber/Omegas: wavenumber grid (required)
AbsorptionCoefficient: absorption coefficient on grid (required)
Environment: dictionary containing path length in cm.
and temperature in Kelvin.
Default={'l':100.,'T':296.}
File: name of the output file (optional)
Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
Xsect: radiance spectrum calculated on the grid
---
DESCRIPTION:
Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based
on previously calculated absorption coefficient.
Radiance spectrum is calculated at an arbitrary
optical path length 'l' (1 m by default) and
temperature 'T' (296 K by default). For obtaining a
physically meaningful result 'T' must be the same
as a temperature which was used in absorption coefficient.
---
EXAMPLE OF USAGE:
nu,radi = radianceSpectrum(nu,coef)
---
]
if name[Wavenumber] begin[:]
variable[Omegas] assign[=] name[Wavenumber]
variable[l] assign[=] call[name[Environment]][constant[l]]
variable[T] assign[=] call[name[Environment]][constant[T]]
variable[Alw] assign[=] binary_operation[constant[1] - call[name[exp], parameter[binary_operation[<ast.UnaryOp object at 0x7da18dc99c00> * name[l]]]]]
variable[LBBTw] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[2] * name[hh]] * binary_operation[name[cc] ** constant[2]]] * binary_operation[name[Omegas] ** constant[3]]] / binary_operation[call[name[exp], parameter[binary_operation[binary_operation[binary_operation[name[hh] * name[cc]] * name[Omegas]] / binary_operation[name[cBolts] * name[T]]]]] - constant[1]]] * constant[1e-07]]
variable[Xsect] assign[=] binary_operation[name[Alw] * name[LBBTw]]
if name[File] begin[:]
call[name[save_to_file], parameter[name[File], name[Format], name[Omegas], name[Xsect]]]
return[tuple[[<ast.Name object at 0x7da18dc9b040>, <ast.Name object at 0x7da18dc9b760>]]] | keyword[def] identifier[radianceSpectrum] ( identifier[Omegas] , identifier[AbsorptionCoefficient] , identifier[Environment] ={ literal[string] : literal[int] , literal[string] : literal[int] },
identifier[File] = keyword[None] , identifier[Format] = literal[string] , identifier[Wavenumber] = keyword[None] ):
literal[string]
keyword[if] identifier[Wavenumber] : identifier[Omegas] = identifier[Wavenumber]
identifier[l] = identifier[Environment] [ literal[string] ]
identifier[T] = identifier[Environment] [ literal[string] ]
identifier[Alw] = literal[int] - identifier[exp] (- identifier[AbsorptionCoefficient] * identifier[l] )
identifier[LBBTw] = literal[int] * identifier[hh] * identifier[cc] ** literal[int] * identifier[Omegas] ** literal[int] /( identifier[exp] ( identifier[hh] * identifier[cc] * identifier[Omegas] /( identifier[cBolts] * identifier[T] ))- literal[int] )* literal[int]
identifier[Xsect] = identifier[Alw] * identifier[LBBTw]
keyword[if] identifier[File] : identifier[save_to_file] ( identifier[File] , identifier[Format] , identifier[Omegas] , identifier[Xsect] )
keyword[return] identifier[Omegas] , identifier[Xsect] | def radianceSpectrum(Omegas, AbsorptionCoefficient, Environment={'l': 100.0, 'T': 296.0}, File=None, Format='%e %e', Wavenumber=None):
"""
INPUT PARAMETERS:
Wavenumber/Omegas: wavenumber grid (required)
AbsorptionCoefficient: absorption coefficient on grid (required)
Environment: dictionary containing path length in cm.
and temperature in Kelvin.
Default={'l':100.,'T':296.}
File: name of the output file (optional)
Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
Xsect: radiance spectrum calculated on the grid
---
DESCRIPTION:
Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based
on previously calculated absorption coefficient.
Radiance spectrum is calculated at an arbitrary
optical path length 'l' (1 m by default) and
temperature 'T' (296 K by default). For obtaining a
physically meaningful result 'T' must be the same
as a temperature which was used in absorption coefficient.
---
EXAMPLE OF USAGE:
nu,radi = radianceSpectrum(nu,coef)
---
"""
# compatibility with older versions
if Wavenumber:
Omegas = Wavenumber # depends on [control=['if'], data=[]]
l = Environment['l']
T = Environment['T']
Alw = 1 - exp(-AbsorptionCoefficient * l)
LBBTw = 2 * hh * cc ** 2 * Omegas ** 3 / (exp(hh * cc * Omegas / (cBolts * T)) - 1) * 1e-07
Xsect = Alw * LBBTw # W/sr/cm**2/cm**-1
if File:
save_to_file(File, Format, Omegas, Xsect) # depends on [control=['if'], data=[]]
return (Omegas, Xsect) |
def exit(code=0, text=''):
    """Exit and print text (if defined) to stderr if code > 0 or stdout
    otherwise.
    >>> exit(code=1, text='Invalid directory path')
    """
    # Coerce non-string payloads (e.g. exception objects) to text up front.
    if not isinstance(text, basestring_type):
        text = unicode_type(text)
    if code > 0:
        if text:
            # NOTE(review): this second isinstance/convert is redundant --
            # text was already converted above.
            if not isinstance(text, basestring_type):
                text = unicode_type(text)
            # Failure messages go to stderr before exiting non-zero.
            sys.stderr.write(text)
        sys.exit(code)
    else:
        # Success path: informational text goes to stdout.
        if text:
            print(text)
        sys.exit(0) | def function[exit, parameter[code, text]]:
constant[Exit and print text (if defined) to stderr if code > 0 or stdout
otherwise.
>>> exit(code=1, text='Invalid directory path')
]
if <ast.UnaryOp object at 0x7da20e9618a0> begin[:]
variable[text] assign[=] call[name[unicode_type], parameter[name[text]]]
if compare[name[code] greater[>] constant[0]] begin[:]
if name[text] begin[:]
if <ast.UnaryOp object at 0x7da20c6c4940> begin[:]
variable[text] assign[=] call[name[unicode_type], parameter[name[text]]]
call[name[sys].stderr.write, parameter[name[text]]]
call[name[sys].exit, parameter[name[code]]] | keyword[def] identifier[exit] ( identifier[code] = literal[int] , identifier[text] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[text] , identifier[basestring_type] ):
identifier[text] = identifier[unicode_type] ( identifier[text] )
keyword[if] identifier[code] > literal[int] :
keyword[if] identifier[text] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[text] , identifier[basestring_type] ):
identifier[text] = identifier[unicode_type] ( identifier[text] )
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[text] )
identifier[sys] . identifier[exit] ( identifier[code] )
keyword[else] :
keyword[if] identifier[text] :
identifier[print] ( identifier[text] )
identifier[sys] . identifier[exit] ( literal[int] ) | def exit(code=0, text=''):
"""Exit and print text (if defined) to stderr if code > 0 or stdout
otherwise.
>>> exit(code=1, text='Invalid directory path')
"""
if not isinstance(text, basestring_type):
text = unicode_type(text) # depends on [control=['if'], data=[]]
if code > 0:
if text:
if not isinstance(text, basestring_type):
text = unicode_type(text) # depends on [control=['if'], data=[]]
sys.stderr.write(text) # depends on [control=['if'], data=[]]
sys.exit(code) # depends on [control=['if'], data=['code']]
else:
if text:
print(text) # depends on [control=['if'], data=[]]
sys.exit(0) |
def generate_parameters_validator(api_path, path_definition, parameters,
                                  context, **kwargs):
    """
    Build a validator callable that checks an incoming request against the
    schema's declared parameters:
    - request.path against the path parameters.
    - request.query against the query parameters.
    - request.headers against the header parameters.
    - request.data against the body parameters.
    (Form-data validation is currently disabled.)
    """
    # TODO: figure out how to merge this with the same code in response
    # validation.
    # Path-level and operation-level parameter lists are dereferenced
    # separately, then merged into a single list.
    merged_parameters = merge_parameter_lists(
        dereference_parameter_list(
            path_definition.get('parameters', []),
            context,
        ),
        dereference_parameter_list(
            parameters,
            context,
        ),
    )
    field_validators = ValidationDict()
    # PATH
    field_validators.add_validator(
        'path',
        chain_reduce_partial(
            attrgetter('path'),
            generate_path_parameters_validator(
                api_path,
                filter_parameters(merged_parameters, in_=PATH),
                context,
            ),
        ),
    )
    # QUERY
    field_validators.add_validator(
        'query',
        chain_reduce_partial(
            attrgetter('query_data'),
            functools.partial(
                validate_query_parameters,
                query_parameters=filter_parameters(merged_parameters, in_=QUERY),
                context=context,
            ),
        ),
    )
    # HEADERS
    field_validators.add_validator(
        'headers',
        chain_reduce_partial(
            attrgetter('headers'),
            generate_header_validator(
                filter_parameters(merged_parameters, in_=HEADER),
                context,
            ),
        ),
    )
    # REQUEST_BODY
    field_validators.add_validator(
        'request_body',
        chain_reduce_partial(
            attrgetter('data'),
            generate_request_body_validator(
                filter_parameters(merged_parameters, in_=BODY),
                context,
            ),
        ),
    )
    return generate_object_validator(field_validators=field_validators)
constant[
Generates a validator function to validate.
- request.path against the path parameters.
- request.query against the query parameters.
- request.headers against the header parameters.
- TODO: request.body against the body parameters.
- TODO: request.formData against any form data.
]
variable[validators] assign[=] call[name[ValidationDict], parameter[]]
variable[path_level_parameters] assign[=] call[name[dereference_parameter_list], parameter[call[name[path_definition].get, parameter[constant[parameters], list[[]]]], name[context]]]
variable[operation_level_parameters] assign[=] call[name[dereference_parameter_list], parameter[name[parameters], name[context]]]
variable[all_parameters] assign[=] call[name[merge_parameter_lists], parameter[name[path_level_parameters], name[operation_level_parameters]]]
variable[in_path_parameters] assign[=] call[name[filter_parameters], parameter[name[all_parameters]]]
call[name[validators].add_validator, parameter[constant[path], call[name[chain_reduce_partial], parameter[call[name[attrgetter], parameter[constant[path]]], call[name[generate_path_parameters_validator], parameter[name[api_path], name[in_path_parameters], name[context]]]]]]]
variable[in_query_parameters] assign[=] call[name[filter_parameters], parameter[name[all_parameters]]]
call[name[validators].add_validator, parameter[constant[query], call[name[chain_reduce_partial], parameter[call[name[attrgetter], parameter[constant[query_data]]], call[name[functools].partial, parameter[name[validate_query_parameters]]]]]]]
variable[in_header_parameters] assign[=] call[name[filter_parameters], parameter[name[all_parameters]]]
call[name[validators].add_validator, parameter[constant[headers], call[name[chain_reduce_partial], parameter[call[name[attrgetter], parameter[constant[headers]]], call[name[generate_header_validator], parameter[name[in_header_parameters], name[context]]]]]]]
variable[in_request_body_parameters] assign[=] call[name[filter_parameters], parameter[name[all_parameters]]]
call[name[validators].add_validator, parameter[constant[request_body], call[name[chain_reduce_partial], parameter[call[name[attrgetter], parameter[constant[data]]], call[name[generate_request_body_validator], parameter[name[in_request_body_parameters], name[context]]]]]]]
return[call[name[generate_object_validator], parameter[]]] | keyword[def] identifier[generate_parameters_validator] ( identifier[api_path] , identifier[path_definition] , identifier[parameters] ,
identifier[context] ,** identifier[kwargs] ):
literal[string]
identifier[validators] = identifier[ValidationDict] ()
identifier[path_level_parameters] = identifier[dereference_parameter_list] (
identifier[path_definition] . identifier[get] ( literal[string] ,[]),
identifier[context] ,
)
identifier[operation_level_parameters] = identifier[dereference_parameter_list] (
identifier[parameters] ,
identifier[context] ,
)
identifier[all_parameters] = identifier[merge_parameter_lists] (
identifier[path_level_parameters] ,
identifier[operation_level_parameters] ,
)
identifier[in_path_parameters] = identifier[filter_parameters] ( identifier[all_parameters] , identifier[in_] = identifier[PATH] )
identifier[validators] . identifier[add_validator] (
literal[string] ,
identifier[chain_reduce_partial] (
identifier[attrgetter] ( literal[string] ),
identifier[generate_path_parameters_validator] ( identifier[api_path] , identifier[in_path_parameters] , identifier[context] ),
),
)
identifier[in_query_parameters] = identifier[filter_parameters] ( identifier[all_parameters] , identifier[in_] = identifier[QUERY] )
identifier[validators] . identifier[add_validator] (
literal[string] ,
identifier[chain_reduce_partial] (
identifier[attrgetter] ( literal[string] ),
identifier[functools] . identifier[partial] (
identifier[validate_query_parameters] ,
identifier[query_parameters] = identifier[in_query_parameters] ,
identifier[context] = identifier[context] ,
),
),
)
identifier[in_header_parameters] = identifier[filter_parameters] ( identifier[all_parameters] , identifier[in_] = identifier[HEADER] )
identifier[validators] . identifier[add_validator] (
literal[string] ,
identifier[chain_reduce_partial] (
identifier[attrgetter] ( literal[string] ),
identifier[generate_header_validator] ( identifier[in_header_parameters] , identifier[context] ),
),
)
identifier[in_request_body_parameters] = identifier[filter_parameters] ( identifier[all_parameters] , identifier[in_] = identifier[BODY] )
identifier[validators] . identifier[add_validator] (
literal[string] ,
identifier[chain_reduce_partial] (
identifier[attrgetter] ( literal[string] ),
identifier[generate_request_body_validator] ( identifier[in_request_body_parameters] , identifier[context] ),
)
)
keyword[return] identifier[generate_object_validator] ( identifier[field_validators] = identifier[validators] ) | def generate_parameters_validator(api_path, path_definition, parameters, context, **kwargs):
"""
Generates a validator function to validate.
- request.path against the path parameters.
- request.query against the query parameters.
- request.headers against the header parameters.
- TODO: request.body against the body parameters.
- TODO: request.formData against any form data.
"""
# TODO: figure out how to merge this with the same code in response
# validation.
validators = ValidationDict()
path_level_parameters = dereference_parameter_list(path_definition.get('parameters', []), context)
operation_level_parameters = dereference_parameter_list(parameters, context)
all_parameters = merge_parameter_lists(path_level_parameters, operation_level_parameters)
# PATH
in_path_parameters = filter_parameters(all_parameters, in_=PATH)
validators.add_validator('path', chain_reduce_partial(attrgetter('path'), generate_path_parameters_validator(api_path, in_path_parameters, context)))
# QUERY
in_query_parameters = filter_parameters(all_parameters, in_=QUERY)
validators.add_validator('query', chain_reduce_partial(attrgetter('query_data'), functools.partial(validate_query_parameters, query_parameters=in_query_parameters, context=context)))
# HEADERS
in_header_parameters = filter_parameters(all_parameters, in_=HEADER)
validators.add_validator('headers', chain_reduce_partial(attrgetter('headers'), generate_header_validator(in_header_parameters, context)))
# FORM_DATA
# in_form_data_parameters = filter_parameters(all_parameters, in_=FORM_DATA)
# validators.add_validator(
# 'form_data',
# chain_reduce_partial(
# attrgetter('data'),
# generate_form_data_validator(in_form_data_parameters, context),
# )
# )
# REQUEST_BODY
in_request_body_parameters = filter_parameters(all_parameters, in_=BODY)
validators.add_validator('request_body', chain_reduce_partial(attrgetter('data'), generate_request_body_validator(in_request_body_parameters, context)))
return generate_object_validator(field_validators=validators) |
def simxReadCollision(clientID, collisionObjectHandle, operationMode):
    '''
    Read the state of a registered collision object.
    Please have a look at the function description/documentation in the
    V-REP user manual.
    :param clientID: client ID returned by simxStart.
    :param collisionObjectHandle: handle of the collision object to query.
    :param operationMode: remote API operation-mode constant.
    :return: tuple of (remote API return code, collision state as bool).
    '''
    # The C layer writes the state into this byte via the ct.byref pointer.
    collisionState = ct.c_ubyte()
    ret = c_ReadCollision(clientID, collisionObjectHandle,
                          ct.byref(collisionState), operationMode)
    # `!= 0` already yields a bool; the redundant bool() wrapper is gone.
    return ret, collisionState.value != 0
constant[
Please have a look at the function description/documentation in the V-REP user manual
]
variable[collisionState] assign[=] call[name[ct].c_ubyte, parameter[]]
return[tuple[[<ast.Call object at 0x7da1b138cf70>, <ast.Call object at 0x7da1b1307310>]]] | keyword[def] identifier[simxReadCollision] ( identifier[clientID] , identifier[collisionObjectHandle] , identifier[operationMode] ):
literal[string]
identifier[collisionState] = identifier[ct] . identifier[c_ubyte] ()
keyword[return] identifier[c_ReadCollision] ( identifier[clientID] , identifier[collisionObjectHandle] , identifier[ct] . identifier[byref] ( identifier[collisionState] ), identifier[operationMode] ), identifier[bool] ( identifier[collisionState] . identifier[value] != literal[int] ) | def simxReadCollision(clientID, collisionObjectHandle, operationMode):
"""
Please have a look at the function description/documentation in the V-REP user manual
"""
collisionState = ct.c_ubyte()
return (c_ReadCollision(clientID, collisionObjectHandle, ct.byref(collisionState), operationMode), bool(collisionState.value != 0)) |
def add(self, fn, name=None):
    """Register a callable with the dispatcher.
    fn -- a callable object
    name -- optional alias; defaults to the callable's __name__
    """
    # A falsy name (None or empty string) falls back to the function's
    # own __name__, matching the original behavior.
    key = name or fn.__name__
    self.functions[key] = fn
constant[Add a function that the dispatcher will know about.
fn -- a callable object
name -- optional alias for the function
]
if <ast.UnaryOp object at 0x7da1b09122c0> begin[:]
variable[name] assign[=] name[fn].__name__
call[name[self].functions][name[name]] assign[=] name[fn] | keyword[def] identifier[add] ( identifier[self] , identifier[fn] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[name] :
identifier[name] = identifier[fn] . identifier[__name__]
identifier[self] . identifier[functions] [ identifier[name] ]= identifier[fn] | def add(self, fn, name=None):
"""Add a function that the dispatcher will know about.
fn -- a callable object
name -- optional alias for the function
"""
if not name:
name = fn.__name__ # depends on [control=['if'], data=[]]
self.functions[name] = fn |
def returnValueList(self, key_list, last=False):
    '''Return a list of key values for the first entry in the current list.
    If 'last=True', then the last entry is referenced instead.
    Returns None if the list is empty. If a key is missing, then
    that entry in the result is None.
    Example of use:
    >>> test = [
    ...    {"name": "Jim",   "age": 18, "income": 93000, "order": 2},
    ...    {"name": "Larry", "age": 18,                  "order": 3},
    ...    {"name": "Joe",   "age": 20, "income": 15000, "order": 1},
    ...    {"name": "Bill",  "age": 19, "income": 29000, "order": 4},
    ... ]
    >>> print PLOD(test).returnValueList(["name", "income"])
    ['Jim', 93000]
    >>> print PLOD(test).sort("name").returnValueList(["name", "income"], last=True)
    ['Larry', None]
    :param last:
        If True, the last entry is used rather than the first.
    :return:
        A list of values, or None if the list is empty.
    '''
    entry = self.returnOneEntry(last=last)
    if not entry:
        return None
    entry_dict = internal.convert_to_dict(entry)
    # dict.get already defaults to None for missing keys.
    return [entry_dict.get(key) for key in key_list]
constant[Return a list of key values for the first entry in the current list.
If 'last=True', then the last entry is referenced."
Returns None is the list is empty. If a key is missing, then
that entry in the list is None.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnValueList(["name", "income"])
['Jim', 93000]
>>> print PLOD(test).sort("name").returnValueList(["name", "income"], last=True)
['Larry', None]
:param last:
If True, the last entry is used rather than the first.
:return:
A value, or None if the list is empty.
]
variable[result] assign[=] list[[]]
variable[row] assign[=] call[name[self].returnOneEntry, parameter[]]
if <ast.UnaryOp object at 0x7da20c795030> begin[:]
return[constant[None]]
variable[dict_row] assign[=] call[name[internal].convert_to_dict, parameter[name[row]]]
for taget[name[field]] in starred[name[key_list]] begin[:]
call[name[result].append, parameter[call[name[dict_row].get, parameter[name[field], constant[None]]]]]
return[name[result]] | keyword[def] identifier[returnValueList] ( identifier[self] , identifier[key_list] , identifier[last] = keyword[False] ):
literal[string]
identifier[result] =[]
identifier[row] = identifier[self] . identifier[returnOneEntry] ( identifier[last] = identifier[last] )
keyword[if] keyword[not] identifier[row] :
keyword[return] keyword[None]
identifier[dict_row] = identifier[internal] . identifier[convert_to_dict] ( identifier[row] )
keyword[for] identifier[field] keyword[in] identifier[key_list] :
identifier[result] . identifier[append] ( identifier[dict_row] . identifier[get] ( identifier[field] , keyword[None] ))
keyword[return] identifier[result] | def returnValueList(self, key_list, last=False):
"""Return a list of key values for the first entry in the current list.
If 'last=True', then the last entry is referenced."
Returns None is the list is empty. If a key is missing, then
that entry in the list is None.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnValueList(["name", "income"])
['Jim', 93000]
>>> print PLOD(test).sort("name").returnValueList(["name", "income"], last=True)
['Larry', None]
:param last:
If True, the last entry is used rather than the first.
:return:
A value, or None if the list is empty.
"""
result = []
row = self.returnOneEntry(last=last)
if not row:
return None # depends on [control=['if'], data=[]]
dict_row = internal.convert_to_dict(row)
for field in key_list:
result.append(dict_row.get(field, None)) # depends on [control=['for'], data=['field']]
return result |
def modify_qc(self, qc_id=None, **kwargs):
    '''
    modify_qc(self, qc_id=None, **kwargs)
    Modify an existing Quality Criteria; when no identifier is given,
    fall back to creating a new one.
    :Parameters:
    * *qc_id* (`string`) -- The Quality criteria identifier
    '''
    if not qc_id:
        # No identifier supplied -- delegate to creation instead.
        return self.create_qc(**kwargs)
    # Start from the id, then let caller-supplied fields layer on top
    # (kwargs may intentionally override 'id', as before).
    payload = {'id': qc_id}
    payload.update(kwargs)
    return self._call_rest_api('post', '/qc', data=payload, error='Failed to modify criteria')
constant[
modify_qc(self, qc_id=None, **kwargs)
Modify a Quality Criteria
:Parameters:
* *qc_id* (`string`) -- The Quality criteria identifier
]
if name[qc_id] begin[:]
variable[request_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b28fab00>], [<ast.Name object at 0x7da1b28fb670>]]
call[name[request_data].update, parameter[]]
return[call[name[self]._call_rest_api, parameter[constant[post], constant[/qc]]]] | keyword[def] identifier[modify_qc] ( identifier[self] , identifier[qc_id] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[qc_id] :
identifier[request_data] ={ literal[string] : identifier[qc_id] }
identifier[request_data] . identifier[update] (** identifier[kwargs] )
keyword[return] identifier[self] . identifier[_call_rest_api] ( literal[string] , literal[string] , identifier[data] = identifier[request_data] , identifier[error] = literal[string] )
keyword[else] :
keyword[return] identifier[self] . identifier[create_qc] (** identifier[kwargs] ) | def modify_qc(self, qc_id=None, **kwargs):
"""
modify_qc(self, qc_id=None, **kwargs)
Modify a Quality Criteria
:Parameters:
* *qc_id* (`string`) -- The Quality criteria identifier
"""
if qc_id:
request_data = {'id': qc_id}
request_data.update(**kwargs)
return self._call_rest_api('post', '/qc', data=request_data, error='Failed to modify criteria') # depends on [control=['if'], data=[]]
else:
return self.create_qc(**kwargs) |
def record_diff(old, new):
    """Return a JSON-compatible structure capable of turning the `new`
    record back into the `old` record. The parameters must be structures
    compatible with json.dumps *or* strings compatible with json.loads.
    Note that by design, `old == record_patch(new, record_diff(old, new))`"""
    normalized_old, normalized_new = _norm_json_params(old, new)
    # Diff is computed from new -> old so that applying it to `new`
    # reconstructs `old`.
    return json_delta.diff(normalized_new, normalized_old, verbose=False)
constant[Return a JSON-compatible structure capable turn the `new` record back
into the `old` record. The parameters must be structures compatible with
json.dumps *or* strings compatible with json.loads. Note that by design,
`old == record_patch(new, record_diff(old, new))`]
<ast.Tuple object at 0x7da18dc9aef0> assign[=] call[name[_norm_json_params], parameter[name[old], name[new]]]
return[call[name[json_delta].diff, parameter[name[new], name[old]]]] | keyword[def] identifier[record_diff] ( identifier[old] , identifier[new] ):
literal[string]
identifier[old] , identifier[new] = identifier[_norm_json_params] ( identifier[old] , identifier[new] )
keyword[return] identifier[json_delta] . identifier[diff] ( identifier[new] , identifier[old] , identifier[verbose] = keyword[False] ) | def record_diff(old, new):
"""Return a JSON-compatible structure capable turn the `new` record back
into the `old` record. The parameters must be structures compatible with
json.dumps *or* strings compatible with json.loads. Note that by design,
`old == record_patch(new, record_diff(old, new))`"""
(old, new) = _norm_json_params(old, new)
return json_delta.diff(new, old, verbose=False) |
def chunk(self, maxSize):
    """Splits the `Collection` into _maxSize_ size or smaller `Collections`
    # Parameters
    _maxSize_ : `int`
    > The maximum number of elements in a retuned `Collection`
    # Returns
    `list [Collection]`
    > A list of `Collections` that if all merged (`|` operator) would create the original
    """
    pieces = []
    # Start above the threshold so the very first element opens a chunk.
    fill = maxSize + 1
    for element in self:
        if fill < maxSize:
            pieces[-1].add(element)
        else:
            fill = 0
            pieces.append(type(self)(
                {element},
                name='Chunk-{}-of-{}'.format(len(pieces), self.name),
                quietStart=True,
            ))
        fill += 1
    return pieces
constant[Splits the `Collection` into _maxSize_ size or smaller `Collections`
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a retuned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original
]
variable[chunks] assign[=] list[[]]
variable[currentSize] assign[=] binary_operation[name[maxSize] + constant[1]]
for taget[name[i]] in starred[name[self]] begin[:]
if compare[name[currentSize] greater_or_equal[>=] name[maxSize]] begin[:]
variable[currentSize] assign[=] constant[0]
call[name[chunks].append, parameter[call[call[name[type], parameter[name[self]]], parameter[<ast.Set object at 0x7da1b0f68d00>]]]]
<ast.AugAssign object at 0x7da1b0f6ad40>
return[name[chunks]] | keyword[def] identifier[chunk] ( identifier[self] , identifier[maxSize] ):
literal[string]
identifier[chunks] =[]
identifier[currentSize] = identifier[maxSize] + literal[int]
keyword[for] identifier[i] keyword[in] identifier[self] :
keyword[if] identifier[currentSize] >= identifier[maxSize] :
identifier[currentSize] = literal[int]
identifier[chunks] . identifier[append] ( identifier[type] ( identifier[self] )({ identifier[i] }, identifier[name] = literal[string] . identifier[format] ( identifier[len] ( identifier[chunks] ), identifier[self] . identifier[name] ), identifier[quietStart] = keyword[True] ))
keyword[else] :
identifier[chunks] [- literal[int] ]. identifier[add] ( identifier[i] )
identifier[currentSize] += literal[int]
keyword[return] identifier[chunks] | def chunk(self, maxSize):
"""Splits the `Collection` into _maxSize_ size or smaller `Collections`
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a retuned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original
"""
chunks = []
currentSize = maxSize + 1
for i in self:
if currentSize >= maxSize:
currentSize = 0
chunks.append(type(self)({i}, name='Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart=True)) # depends on [control=['if'], data=['currentSize']]
else:
chunks[-1].add(i)
currentSize += 1 # depends on [control=['for'], data=['i']]
return chunks |
def fem(ab, off, angle, zsrc, zrec, lsrc, lrec, depth, freq, etaH, etaV, zetaH,
        zetaV, xdirect, isfullspace, ht, htarg, use_ne_eval, msrc, mrec,
        loop_freq, loop_off, conv=True):
    r"""Return the electromagnetic frequency-domain response.
    This function is called from one of the above modelling routines. No
    input-check is carried out here. See the main description of :mod:`model`
    for information regarding input and output parameters.
    This function can be directly used if you are sure the provided input is in
    the correct format. This is useful for inversion routines and similar, as
    it can speed-up the calculation by omitting input-checks.
    Returns the tuple ``(fEM, kcount, conv)``: the complex response of shape
    ``(freq.size, off.size)``, the number of wavenumber-domain kernel calls,
    and a flag that stays truthy only if every Hankel transform converged.
    """
    # Preallocate array
    fEM = np.zeros((freq.size, off.size), dtype=complex)
    # Initialize kernel count
    # (how many times the wavenumber-domain kernel was calld)
    kcount = 0
    # If <ab> = 36 (or 63), fEM-field is zero
    if ab in [36, ]:
        return fEM, kcount, conv
    # Get full-space-solution if xdirect=True and model is a full-space or
    # if src and rec are in the same layer.
    if xdirect and (isfullspace or lsrc == lrec):
        fEM += kernel.fullspace(off, angle, zsrc, zrec, etaH[:, lrec],
                                etaV[:, lrec], zetaH[:, lrec], zetaV[:, lrec],
                                ab, msrc, mrec)
    # If `xdirect = None` we set it here to True, so it is NOT calculated in
    # the wavenumber domain. (Only reflected fields are returned.)
    if xdirect is None:
        xdir = True
    else:
        xdir = xdirect
    # Get angle dependent factors
    factAng = kernel.angle_factor(angle, ab, msrc, mrec)
    # Compute required lambdas for given hankel-filter-base
    # This should be in utils, but this is a backwards-incompatible change.
    # Move this to utils for version 2.0.
    if ht == 'fht':
        # htarg[0] = filter; htarg[1] = pts_per_dec
        lambd, int_pts = transform.get_spline_values(htarg[0], off, htarg[1])
        if not loop_off:
            # All offsets are handled at once, so the pre-computed lambdas
            # can be baked into htarg right away; when looping over offsets
            # they are injected per-offset inside the loop below.
            htarg = (htarg[0], htarg[1], lambd, int_pts)
    # If not full-space with xdirect calculate fEM-field
    # NOTE(review): `isfullspace*xdir` is a boolean product -- the
    # wavenumber-domain computation is skipped only when BOTH are truthy.
    if not isfullspace*xdir:
        calc = getattr(transform, ht)
        if loop_freq:
            # Loop over frequencies, computing all offsets per iteration.
            for i in range(freq.size):
                out = calc(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab,
                           etaH[None, i, :], etaV[None, i, :],
                           zetaH[None, i, :], zetaV[None, i, :], xdir,
                           htarg, use_ne_eval, msrc, mrec)
                fEM[None, i, :] += out[0]
                kcount += out[1]
                conv *= out[2]
        elif loop_off:
            # Loop over offsets, computing all frequencies per iteration.
            for i in range(off.size):
                # See comments above where it says "ht == 'fht'".
                # Get pre-calculated lambd, int_pts for this offset
                if ht == 'fht':
                    htarg = (htarg[0], htarg[1], lambd[None, i, :], int_pts[i])
                out = calc(zsrc, zrec, lsrc, lrec, off[None, i],
                           factAng[None, i], depth, ab, etaH, etaV, zetaH,
                           zetaV, xdir, htarg, use_ne_eval, msrc, mrec)
                fEM[:, None, i] += out[0]
                kcount += out[1]
                conv *= out[2]
        else:
            # Fully vectorized: all frequencies and offsets in one call.
            out = calc(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH,
                       etaV, zetaH, zetaV, xdir, htarg, use_ne_eval, msrc,
                       mrec)
            fEM += out[0]
            kcount += out[1]
            conv *= out[2]
    return fEM, kcount, conv
constant[Return the electromagnetic frequency-domain response.
This function is called from one of the above modelling routines. No
input-check is carried out here. See the main description of :mod:`model`
for information regarding input and output parameters.
This function can be directly used if you are sure the provided input is in
the correct format. This is useful for inversion routines and similar, as
it can speed-up the calculation by omitting input-checks.
]
variable[fEM] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Attribute object at 0x7da20c993040>, <ast.Attribute object at 0x7da20c992d10>]]]]
variable[kcount] assign[=] constant[0]
if compare[name[ab] in list[[<ast.Constant object at 0x7da20c990c40>]]] begin[:]
return[tuple[[<ast.Name object at 0x7da20c992a40>, <ast.Name object at 0x7da20c990b50>, <ast.Name object at 0x7da20c992ce0>]]]
if <ast.BoolOp object at 0x7da20c993c10> begin[:]
<ast.AugAssign object at 0x7da20c990cd0>
if compare[name[xdirect] is constant[None]] begin[:]
variable[xdir] assign[=] constant[True]
variable[factAng] assign[=] call[name[kernel].angle_factor, parameter[name[angle], name[ab], name[msrc], name[mrec]]]
if compare[name[ht] equal[==] constant[fht]] begin[:]
<ast.Tuple object at 0x7da20c993ee0> assign[=] call[name[transform].get_spline_values, parameter[call[name[htarg]][constant[0]], name[off], call[name[htarg]][constant[1]]]]
if <ast.UnaryOp object at 0x7da20c9926b0> begin[:]
variable[htarg] assign[=] tuple[[<ast.Subscript object at 0x7da20c992d70>, <ast.Subscript object at 0x7da20c992860>, <ast.Name object at 0x7da20c993280>, <ast.Name object at 0x7da20c993580>]]
if <ast.UnaryOp object at 0x7da20c993eb0> begin[:]
variable[calc] assign[=] call[name[getattr], parameter[name[transform], name[ht]]]
if name[loop_freq] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[freq].size]]] begin[:]
variable[out] assign[=] call[name[calc], parameter[name[zsrc], name[zrec], name[lsrc], name[lrec], name[off], name[factAng], name[depth], name[ab], call[name[etaH]][tuple[[<ast.Constant object at 0x7da20c992080>, <ast.Name object at 0x7da20c990190>, <ast.Slice object at 0x7da20c990160>]]], call[name[etaV]][tuple[[<ast.Constant object at 0x7da20c992500>, <ast.Name object at 0x7da20c990880>, <ast.Slice object at 0x7da20c991f00>]]], call[name[zetaH]][tuple[[<ast.Constant object at 0x7da20c991450>, <ast.Name object at 0x7da20c9905e0>, <ast.Slice object at 0x7da20c991390>]]], call[name[zetaV]][tuple[[<ast.Constant object at 0x7da207f9bd90>, <ast.Name object at 0x7da207f9b910>, <ast.Slice object at 0x7da207f98f10>]]], name[xdir], name[htarg], name[use_ne_eval], name[msrc], name[mrec]]]
<ast.AugAssign object at 0x7da207f9b8e0>
<ast.AugAssign object at 0x7da207f997e0>
<ast.AugAssign object at 0x7da207f98070>
return[tuple[[<ast.Name object at 0x7da18f811240>, <ast.Name object at 0x7da18f810dc0>, <ast.Name object at 0x7da18f810ac0>]]] | keyword[def] identifier[fem] ( identifier[ab] , identifier[off] , identifier[angle] , identifier[zsrc] , identifier[zrec] , identifier[lsrc] , identifier[lrec] , identifier[depth] , identifier[freq] , identifier[etaH] , identifier[etaV] , identifier[zetaH] ,
identifier[zetaV] , identifier[xdirect] , identifier[isfullspace] , identifier[ht] , identifier[htarg] , identifier[use_ne_eval] , identifier[msrc] , identifier[mrec] ,
identifier[loop_freq] , identifier[loop_off] , identifier[conv] = keyword[True] ):
literal[string]
identifier[fEM] = identifier[np] . identifier[zeros] (( identifier[freq] . identifier[size] , identifier[off] . identifier[size] ), identifier[dtype] = identifier[complex] )
identifier[kcount] = literal[int]
keyword[if] identifier[ab] keyword[in] [ literal[int] ,]:
keyword[return] identifier[fEM] , identifier[kcount] , identifier[conv]
keyword[if] identifier[xdirect] keyword[and] ( identifier[isfullspace] keyword[or] identifier[lsrc] == identifier[lrec] ):
identifier[fEM] += identifier[kernel] . identifier[fullspace] ( identifier[off] , identifier[angle] , identifier[zsrc] , identifier[zrec] , identifier[etaH] [:, identifier[lrec] ],
identifier[etaV] [:, identifier[lrec] ], identifier[zetaH] [:, identifier[lrec] ], identifier[zetaV] [:, identifier[lrec] ],
identifier[ab] , identifier[msrc] , identifier[mrec] )
keyword[if] identifier[xdirect] keyword[is] keyword[None] :
identifier[xdir] = keyword[True]
keyword[else] :
identifier[xdir] = identifier[xdirect]
identifier[factAng] = identifier[kernel] . identifier[angle_factor] ( identifier[angle] , identifier[ab] , identifier[msrc] , identifier[mrec] )
keyword[if] identifier[ht] == literal[string] :
identifier[lambd] , identifier[int_pts] = identifier[transform] . identifier[get_spline_values] ( identifier[htarg] [ literal[int] ], identifier[off] , identifier[htarg] [ literal[int] ])
keyword[if] keyword[not] identifier[loop_off] :
identifier[htarg] =( identifier[htarg] [ literal[int] ], identifier[htarg] [ literal[int] ], identifier[lambd] , identifier[int_pts] )
keyword[if] keyword[not] identifier[isfullspace] * identifier[xdir] :
identifier[calc] = identifier[getattr] ( identifier[transform] , identifier[ht] )
keyword[if] identifier[loop_freq] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[freq] . identifier[size] ):
identifier[out] = identifier[calc] ( identifier[zsrc] , identifier[zrec] , identifier[lsrc] , identifier[lrec] , identifier[off] , identifier[factAng] , identifier[depth] , identifier[ab] ,
identifier[etaH] [ keyword[None] , identifier[i] ,:], identifier[etaV] [ keyword[None] , identifier[i] ,:],
identifier[zetaH] [ keyword[None] , identifier[i] ,:], identifier[zetaV] [ keyword[None] , identifier[i] ,:], identifier[xdir] ,
identifier[htarg] , identifier[use_ne_eval] , identifier[msrc] , identifier[mrec] )
identifier[fEM] [ keyword[None] , identifier[i] ,:]+= identifier[out] [ literal[int] ]
identifier[kcount] += identifier[out] [ literal[int] ]
identifier[conv] *= identifier[out] [ literal[int] ]
keyword[elif] identifier[loop_off] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[off] . identifier[size] ):
keyword[if] identifier[ht] == literal[string] :
identifier[htarg] =( identifier[htarg] [ literal[int] ], identifier[htarg] [ literal[int] ], identifier[lambd] [ keyword[None] , identifier[i] ,:], identifier[int_pts] [ identifier[i] ])
identifier[out] = identifier[calc] ( identifier[zsrc] , identifier[zrec] , identifier[lsrc] , identifier[lrec] , identifier[off] [ keyword[None] , identifier[i] ],
identifier[factAng] [ keyword[None] , identifier[i] ], identifier[depth] , identifier[ab] , identifier[etaH] , identifier[etaV] , identifier[zetaH] ,
identifier[zetaV] , identifier[xdir] , identifier[htarg] , identifier[use_ne_eval] , identifier[msrc] , identifier[mrec] )
identifier[fEM] [:, keyword[None] , identifier[i] ]+= identifier[out] [ literal[int] ]
identifier[kcount] += identifier[out] [ literal[int] ]
identifier[conv] *= identifier[out] [ literal[int] ]
keyword[else] :
identifier[out] = identifier[calc] ( identifier[zsrc] , identifier[zrec] , identifier[lsrc] , identifier[lrec] , identifier[off] , identifier[factAng] , identifier[depth] , identifier[ab] , identifier[etaH] ,
identifier[etaV] , identifier[zetaH] , identifier[zetaV] , identifier[xdir] , identifier[htarg] , identifier[use_ne_eval] , identifier[msrc] ,
identifier[mrec] )
identifier[fEM] += identifier[out] [ literal[int] ]
identifier[kcount] += identifier[out] [ literal[int] ]
identifier[conv] *= identifier[out] [ literal[int] ]
keyword[return] identifier[fEM] , identifier[kcount] , identifier[conv] | def fem(ab, off, angle, zsrc, zrec, lsrc, lrec, depth, freq, etaH, etaV, zetaH, zetaV, xdirect, isfullspace, ht, htarg, use_ne_eval, msrc, mrec, loop_freq, loop_off, conv=True):
"""Return the electromagnetic frequency-domain response.
This function is called from one of the above modelling routines. No
input-check is carried out here. See the main description of :mod:`model`
for information regarding input and output parameters.
This function can be directly used if you are sure the provided input is in
the correct format. This is useful for inversion routines and similar, as
it can speed-up the calculation by omitting input-checks.
"""
# Preallocate array
fEM = np.zeros((freq.size, off.size), dtype=complex)
# Initialize kernel count
# (how many times the wavenumber-domain kernel was calld)
kcount = 0
# If <ab> = 36 (or 63), fEM-field is zero
if ab in [36]:
return (fEM, kcount, conv) # depends on [control=['if'], data=[]]
# Get full-space-solution if xdirect=True and model is a full-space or
# if src and rec are in the same layer.
if xdirect and (isfullspace or lsrc == lrec):
fEM += kernel.fullspace(off, angle, zsrc, zrec, etaH[:, lrec], etaV[:, lrec], zetaH[:, lrec], zetaV[:, lrec], ab, msrc, mrec) # depends on [control=['if'], data=[]]
# If `xdirect = None` we set it here to True, so it is NOT calculated in
# the wavenumber domain. (Only reflected fields are returned.)
if xdirect is None:
xdir = True # depends on [control=['if'], data=[]]
else:
xdir = xdirect
# Get angle dependent factors
factAng = kernel.angle_factor(angle, ab, msrc, mrec)
# Compute required lambdas for given hankel-filter-base
# This should be in utils, but this is a backwards-incompatible change.
# Move this to utils for version 2.0.
if ht == 'fht':
# htarg[0] = filter; htarg[1] = pts_per_dec
(lambd, int_pts) = transform.get_spline_values(htarg[0], off, htarg[1])
if not loop_off:
htarg = (htarg[0], htarg[1], lambd, int_pts) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# If not full-space with xdirect calculate fEM-field
if not isfullspace * xdir:
calc = getattr(transform, ht)
if loop_freq:
for i in range(freq.size):
out = calc(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH[None, i, :], etaV[None, i, :], zetaH[None, i, :], zetaV[None, i, :], xdir, htarg, use_ne_eval, msrc, mrec)
fEM[None, i, :] += out[0]
kcount += out[1]
conv *= out[2] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
elif loop_off:
for i in range(off.size):
# See comments above where it says "ht == 'fht'".
# Get pre-calculated lambd, int_pts for this offset
if ht == 'fht':
htarg = (htarg[0], htarg[1], lambd[None, i, :], int_pts[i]) # depends on [control=['if'], data=[]]
out = calc(zsrc, zrec, lsrc, lrec, off[None, i], factAng[None, i], depth, ab, etaH, etaV, zetaH, zetaV, xdir, htarg, use_ne_eval, msrc, mrec)
fEM[:, None, i] += out[0]
kcount += out[1]
conv *= out[2] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
out = calc(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH, zetaV, xdir, htarg, use_ne_eval, msrc, mrec)
fEM += out[0]
kcount += out[1]
conv *= out[2] # depends on [control=['if'], data=[]]
return (fEM, kcount, conv) |
def _load(formula):
    '''
    Generates a list of salt://<formula>/defaults.(json|yaml) files
    and fetches them from the Salt master.
    Returns first defaults file as python dict.
    '''
    # Build the candidate salt:// URLs, one per supported format.
    _mk_client()
    paths = [salt.utils.url.create(formula + '/defaults.' + ext)
             for ext in ('yaml', 'json')]
    # Fetch all candidates from the master in a single call.
    defaults_files = __context__['cp.fileclient'].cache_files(paths)
    # Map file extension -> deserializer for the cached files.
    loaders = {
        'yaml': salt.utils.yaml.safe_load,
        'json': salt.utils.json.load,
    }
    for cached in defaults_files:
        # cache_files returns an empty string for misses; skip those.
        if not cached:
            continue
        loader = loaders.get(cached.rsplit('.', 1)[-1])
        if loader is None:
            log.debug("Failed to determine loader for %r", cached)
            continue
        if os.path.exists(cached):
            log.debug("Reading defaults from %r", cached)
            with salt.utils.files.fopen(cached) as fhr:
                defaults = loader(fhr)
                log.debug("Read defaults %r", defaults)
            # First successfully parsed file wins; normalize falsy
            # parse results (e.g. empty YAML) to an empty dict.
            return defaults or {}
constant[
Generates a list of salt://<formula>/defaults.(json|yaml) files
and fetches them from the Salt master.
Returns first defaults file as python dict.
]
call[name[_mk_client], parameter[]]
variable[paths] assign[=] list[[]]
for taget[name[ext]] in starred[tuple[[<ast.Constant object at 0x7da1b26ae110>, <ast.Constant object at 0x7da1b26ac9d0>]]] begin[:]
variable[source_url] assign[=] call[name[salt].utils.url.create, parameter[binary_operation[binary_operation[name[formula] + constant[/defaults.]] + name[ext]]]]
call[name[paths].append, parameter[name[source_url]]]
variable[defaults_files] assign[=] call[call[name[__context__]][constant[cp.fileclient]].cache_files, parameter[name[paths]]]
for taget[name[file_]] in starred[name[defaults_files]] begin[:]
if <ast.UnaryOp object at 0x7da1b21d4820> begin[:]
continue
variable[suffix] assign[=] call[call[name[file_].rsplit, parameter[constant[.], constant[1]]]][<ast.UnaryOp object at 0x7da1b21d6ef0>]
if compare[name[suffix] equal[==] constant[yaml]] begin[:]
variable[loader] assign[=] name[salt].utils.yaml.safe_load
if call[name[os].path.exists, parameter[name[file_]]] begin[:]
call[name[log].debug, parameter[constant[Reading defaults from %r], name[file_]]]
with call[name[salt].utils.files.fopen, parameter[name[file_]]] begin[:]
variable[defaults] assign[=] call[name[loader], parameter[name[fhr]]]
call[name[log].debug, parameter[constant[Read defaults %r], name[defaults]]]
return[<ast.BoolOp object at 0x7da1b21d5960>] | keyword[def] identifier[_load] ( identifier[formula] ):
literal[string]
identifier[_mk_client] ()
identifier[paths] =[]
keyword[for] identifier[ext] keyword[in] ( literal[string] , literal[string] ):
identifier[source_url] = identifier[salt] . identifier[utils] . identifier[url] . identifier[create] ( identifier[formula] + literal[string] + identifier[ext] )
identifier[paths] . identifier[append] ( identifier[source_url] )
identifier[defaults_files] = identifier[__context__] [ literal[string] ]. identifier[cache_files] ( identifier[paths] )
keyword[for] identifier[file_] keyword[in] identifier[defaults_files] :
keyword[if] keyword[not] identifier[file_] :
keyword[continue]
identifier[suffix] = identifier[file_] . identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ]
keyword[if] identifier[suffix] == literal[string] :
identifier[loader] = identifier[salt] . identifier[utils] . identifier[yaml] . identifier[safe_load]
keyword[elif] identifier[suffix] == literal[string] :
identifier[loader] = identifier[salt] . identifier[utils] . identifier[json] . identifier[load]
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[file_] )
keyword[continue]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[file_] ):
identifier[log] . identifier[debug] ( literal[string] , identifier[file_] )
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[file_] ) keyword[as] identifier[fhr] :
identifier[defaults] = identifier[loader] ( identifier[fhr] )
identifier[log] . identifier[debug] ( literal[string] , identifier[defaults] )
keyword[return] identifier[defaults] keyword[or] {} | def _load(formula):
"""
Generates a list of salt://<formula>/defaults.(json|yaml) files
and fetches them from the Salt master.
Returns first defaults file as python dict.
"""
# Compute possibilities
_mk_client()
paths = []
for ext in ('yaml', 'json'):
source_url = salt.utils.url.create(formula + '/defaults.' + ext)
paths.append(source_url) # depends on [control=['for'], data=['ext']]
# Fetch files from master
defaults_files = __context__['cp.fileclient'].cache_files(paths)
for file_ in defaults_files:
if not file_:
# Skip empty string returned by cp.fileclient.cache_files.
continue # depends on [control=['if'], data=[]]
suffix = file_.rsplit('.', 1)[-1]
if suffix == 'yaml':
loader = salt.utils.yaml.safe_load # depends on [control=['if'], data=[]]
elif suffix == 'json':
loader = salt.utils.json.load # depends on [control=['if'], data=[]]
else:
log.debug('Failed to determine loader for %r', file_)
continue
if os.path.exists(file_):
log.debug('Reading defaults from %r', file_)
with salt.utils.files.fopen(file_) as fhr:
defaults = loader(fhr)
log.debug('Read defaults %r', defaults) # depends on [control=['with'], data=['fhr']]
return defaults or {} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file_']] |
def keyPressEvent(self, event):
    """
    Overloads the keyPressEvent method to support backtab operations.
    :param event | <QKeyPressEvent>
    """
    # Backtab (Shift+Tab) un-indents the selection; every other key
    # falls through to the default Scintilla handling.
    if event.key() != Qt.Key_Backtab:
        super(XScintillaEdit, self).keyPressEvent(event)
    else:
        self.unindentSelection()
super(XScintillaEdit, self).keyPressEvent(event) | def function[keyPressEvent, parameter[self, event]]:
constant[
Overloads the keyPressEvent method to support backtab operations.
:param event | <QKeyPressEvent>
]
if compare[call[name[event].key, parameter[]] equal[==] name[Qt].Key_Backtab] begin[:]
call[name[self].unindentSelection, parameter[]] | keyword[def] identifier[keyPressEvent] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] ( identifier[event] . identifier[key] ()== identifier[Qt] . identifier[Key_Backtab] ):
identifier[self] . identifier[unindentSelection] ()
keyword[else] :
identifier[super] ( identifier[XScintillaEdit] , identifier[self] ). identifier[keyPressEvent] ( identifier[event] ) | def keyPressEvent(self, event):
"""
Overloads the keyPressEvent method to support backtab operations.
:param event | <QKeyPressEvent>
"""
if event.key() == Qt.Key_Backtab:
self.unindentSelection() # depends on [control=['if'], data=[]]
else:
super(XScintillaEdit, self).keyPressEvent(event) |
def lock(self, timeout=10):
    """
    Advisory lock.
    Use to ensure that only one LocalSyncClient is working on the Target at the same time.
    """
    lock_path = self.lock_file
    logger.debug("Locking %s", lock_path)
    if not os.path.exists(lock_path):
        # First use: create the parent directory and an empty lock file
        # so the advisory lock has something on disk to point at.
        self.ensure_path(lock_path)
        with open(lock_path, "w"):
            os.utime(lock_path)
    # Block until the lock is ours or the timeout elapses.
    self._lock.acquire(timeout=timeout)
constant[
Advisory lock.
Use to ensure that only one LocalSyncClient is working on the Target at the same time.
]
call[name[logger].debug, parameter[constant[Locking %s], name[self].lock_file]]
if <ast.UnaryOp object at 0x7da18eb57310> begin[:]
call[name[self].ensure_path, parameter[name[self].lock_file]]
with call[name[open], parameter[name[self].lock_file, constant[w]]] begin[:]
call[name[os].utime, parameter[name[self].lock_file]]
call[name[self]._lock.acquire, parameter[]] | keyword[def] identifier[lock] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[lock_file] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[lock_file] ):
identifier[self] . identifier[ensure_path] ( identifier[self] . identifier[lock_file] )
keyword[with] identifier[open] ( identifier[self] . identifier[lock_file] , literal[string] ):
identifier[os] . identifier[utime] ( identifier[self] . identifier[lock_file] )
identifier[self] . identifier[_lock] . identifier[acquire] ( identifier[timeout] = identifier[timeout] ) | def lock(self, timeout=10):
"""
Advisory lock.
Use to ensure that only one LocalSyncClient is working on the Target at the same time.
"""
logger.debug('Locking %s', self.lock_file)
if not os.path.exists(self.lock_file):
self.ensure_path(self.lock_file)
with open(self.lock_file, 'w'):
os.utime(self.lock_file) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
self._lock.acquire(timeout=timeout) |
def execute(self, source, interval, input_plate_value):
    """
    Execute the tool over the given time interval and collect the
    meta-data values produced for the output plate.

    :param source: The source stream
    :param interval: The time interval
    :param input_plate_value: The value of the plate where data comes from (can be None)
    :type source: Stream
    :type interval: TimeInterval
    :type input_plate_value: tuple[tuple[str, str]] | None
    :return: set of meta-data values produced by the execution
    """
    if not isinstance(interval, TimeInterval):
        raise TypeError('Expected TimeInterval, got {}'.format(type(interval)))
    # logging.info(self.message(interval))
    output_plate_values = set()
    for item in self._execute(source=source, interval=interval):
        # Join the output meta data with the parent plate meta data
        # meta_data = input_plate_value + (item.meta_data,)
        # sink.writer(item.stream_instance)
        output_plate_values.add(item.meta_data, )
    if not output_plate_values:
        # An empty result is legal but worth surfacing for debugging.
        logging.debug("{} did not produce any data for time interval {} on stream {}".format(
            self.name, interval, source))
    # Record this execution (even empty ones) so the run can be audited.
    self.write_to_history(
        interval=interval,
        tool=self.name,
        document_count=len(output_plate_values)
    )
    return output_plate_values
constant[
Execute the tool over the given time interval.
:param source: The source stream
:param interval: The time interval
:param input_plate_value: The value of the plate where data comes from (can be None)
:type source: Stream
:type interval: TimeInterval
:type input_plate_value: tuple[tuple[str, str]] | None
:return: None
]
if <ast.UnaryOp object at 0x7da1b26af370> begin[:]
<ast.Raise object at 0x7da1b26ac0a0>
variable[output_plate_values] assign[=] call[name[set], parameter[]]
for taget[name[item]] in starred[call[name[self]._execute, parameter[]]] begin[:]
call[name[output_plate_values].add, parameter[name[item].meta_data]]
if <ast.UnaryOp object at 0x7da20c6e4dc0> begin[:]
call[name[logging].debug, parameter[call[constant[{} did not produce any data for time interval {} on stream {}].format, parameter[name[self].name, name[interval], name[source]]]]]
call[name[self].write_to_history, parameter[]]
return[name[output_plate_values]] | keyword[def] identifier[execute] ( identifier[self] , identifier[source] , identifier[interval] , identifier[input_plate_value] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[interval] , identifier[TimeInterval] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[interval] )))
identifier[output_plate_values] = identifier[set] ()
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[_execute] ( identifier[source] = identifier[source] , identifier[interval] = identifier[interval] ):
identifier[output_plate_values] . identifier[add] ( identifier[item] . identifier[meta_data] ,)
keyword[if] keyword[not] identifier[output_plate_values] :
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] (
identifier[self] . identifier[name] , identifier[interval] , identifier[source] ))
identifier[self] . identifier[write_to_history] (
identifier[interval] = identifier[interval] ,
identifier[tool] = identifier[self] . identifier[name] ,
identifier[document_count] = identifier[len] ( identifier[output_plate_values] )
)
keyword[return] identifier[output_plate_values] | def execute(self, source, interval, input_plate_value):
"""
Execute the tool over the given time interval.
:param source: The source stream
:param interval: The time interval
:param input_plate_value: The value of the plate where data comes from (can be None)
:type source: Stream
:type interval: TimeInterval
:type input_plate_value: tuple[tuple[str, str]] | None
:return: None
"""
if not isinstance(interval, TimeInterval):
raise TypeError('Expected TimeInterval, got {}'.format(type(interval))) # depends on [control=['if'], data=[]]
# logging.info(self.message(interval))
output_plate_values = set()
for item in self._execute(source=source, interval=interval):
# Join the output meta data with the parent plate meta data
# meta_data = input_plate_value + (item.meta_data,)
# sink.writer(item.stream_instance)
output_plate_values.add(item.meta_data) # depends on [control=['for'], data=['item']]
if not output_plate_values:
logging.debug('{} did not produce any data for time interval {} on stream {}'.format(self.name, interval, source)) # depends on [control=['if'], data=[]]
self.write_to_history(interval=interval, tool=self.name, document_count=len(output_plate_values))
return output_plate_values |
def _get_data_from_bigquery(self, queries):
"""Get data from bigquery table or query."""
all_df = []
for query in queries:
all_df.append(query.execute().result().to_dataframe())
df = pd.concat(all_df, ignore_index=True)
return df | def function[_get_data_from_bigquery, parameter[self, queries]]:
constant[Get data from bigquery table or query.]
variable[all_df] assign[=] list[[]]
for taget[name[query]] in starred[name[queries]] begin[:]
call[name[all_df].append, parameter[call[call[call[name[query].execute, parameter[]].result, parameter[]].to_dataframe, parameter[]]]]
variable[df] assign[=] call[name[pd].concat, parameter[name[all_df]]]
return[name[df]] | keyword[def] identifier[_get_data_from_bigquery] ( identifier[self] , identifier[queries] ):
literal[string]
identifier[all_df] =[]
keyword[for] identifier[query] keyword[in] identifier[queries] :
identifier[all_df] . identifier[append] ( identifier[query] . identifier[execute] (). identifier[result] (). identifier[to_dataframe] ())
identifier[df] = identifier[pd] . identifier[concat] ( identifier[all_df] , identifier[ignore_index] = keyword[True] )
keyword[return] identifier[df] | def _get_data_from_bigquery(self, queries):
"""Get data from bigquery table or query."""
all_df = []
for query in queries:
all_df.append(query.execute().result().to_dataframe()) # depends on [control=['for'], data=['query']]
df = pd.concat(all_df, ignore_index=True)
return df |
def closenessScores(self, expValues, actValues, fractional=True):
    """
    See the function description in base.py
    """
    # Map a raw value into log10 space; non-positive inputs fall back
    # to the smallest representable scaled value.
    def _toLogSpace(value):
        return math.log10(value) if value > 0 else self.minScaledValue

    err = abs(_toLogSpace(expValues[0]) - _toLogSpace(actValues[0]))
    if fractional:
        # Normalize the error by the scaled range and clip it to [0, 1];
        # a closeness of 1.0 means a perfect match.
        span = self.maxScaledValue - self.minScaledValue
        closeness = 1.0 - min(1.0, err / span)
    else:
        closeness = err
    return numpy.array([closeness])
constant[
See the function description in base.py
]
if compare[call[name[expValues]][constant[0]] greater[>] constant[0]] begin[:]
variable[expValue] assign[=] call[name[math].log10, parameter[call[name[expValues]][constant[0]]]]
if compare[call[name[actValues]][constant[0]] greater[>] constant[0]] begin[:]
variable[actValue] assign[=] call[name[math].log10, parameter[call[name[actValues]][constant[0]]]]
if name[fractional] begin[:]
variable[err] assign[=] call[name[abs], parameter[binary_operation[name[expValue] - name[actValue]]]]
variable[pctErr] assign[=] binary_operation[name[err] / binary_operation[name[self].maxScaledValue - name[self].minScaledValue]]
variable[pctErr] assign[=] call[name[min], parameter[constant[1.0], name[pctErr]]]
variable[closeness] assign[=] binary_operation[constant[1.0] - name[pctErr]]
return[call[name[numpy].array, parameter[list[[<ast.Name object at 0x7da20e9b3910>]]]]] | keyword[def] identifier[closenessScores] ( identifier[self] , identifier[expValues] , identifier[actValues] , identifier[fractional] = keyword[True] ):
literal[string]
keyword[if] identifier[expValues] [ literal[int] ]> literal[int] :
identifier[expValue] = identifier[math] . identifier[log10] ( identifier[expValues] [ literal[int] ])
keyword[else] :
identifier[expValue] = identifier[self] . identifier[minScaledValue]
keyword[if] identifier[actValues] [ literal[int] ]> literal[int] :
identifier[actValue] = identifier[math] . identifier[log10] ( identifier[actValues] [ literal[int] ])
keyword[else] :
identifier[actValue] = identifier[self] . identifier[minScaledValue]
keyword[if] identifier[fractional] :
identifier[err] = identifier[abs] ( identifier[expValue] - identifier[actValue] )
identifier[pctErr] = identifier[err] /( identifier[self] . identifier[maxScaledValue] - identifier[self] . identifier[minScaledValue] )
identifier[pctErr] = identifier[min] ( literal[int] , identifier[pctErr] )
identifier[closeness] = literal[int] - identifier[pctErr]
keyword[else] :
identifier[err] = identifier[abs] ( identifier[expValue] - identifier[actValue] )
identifier[closeness] = identifier[err]
keyword[return] identifier[numpy] . identifier[array] ([ identifier[closeness] ]) | def closenessScores(self, expValues, actValues, fractional=True):
"""
See the function description in base.py
"""
# Compute the percent error in log space
if expValues[0] > 0:
expValue = math.log10(expValues[0]) # depends on [control=['if'], data=[]]
else:
expValue = self.minScaledValue
if actValues[0] > 0:
actValue = math.log10(actValues[0]) # depends on [control=['if'], data=[]]
else:
actValue = self.minScaledValue
if fractional:
err = abs(expValue - actValue)
pctErr = err / (self.maxScaledValue - self.minScaledValue)
pctErr = min(1.0, pctErr)
closeness = 1.0 - pctErr # depends on [control=['if'], data=[]]
else:
err = abs(expValue - actValue)
closeness = err
#print "log::", "expValue:", expValues[0], "actValue:", actValues[0], \
# "closeness", closeness
#import pdb; pdb.set_trace()
return numpy.array([closeness]) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.